X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?p=palacios.git;a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fsvm.c;h=7ba76ebe2746c6a7ffad858a5db5d863b82fe495;hp=9434e496c4fa8e8b3027d8f7ea8fcfb4418777a6;hb=c06413341bf1dca02f22c0502fa5c2d1c2c11eab;hpb=5081e94397ad27675827df5a3e1fb64e80105374

diff --git a/palacios/src/palacios/svm.c b/palacios/src/palacios/svm.c
index 9434e49..7ba76eb 100644
--- a/palacios/src/palacios/svm.c
+++ b/palacios/src/palacios/svm.c
@@ -36,8 +36,6 @@
 #include
 
 
-#include
-
 #include
 #include
 
@@ -45,10 +43,16 @@
 #include
 
 
+
+// This is a global pointer to the host's VMCB
+static addr_t host_vmcbs[CONFIG_MAX_CPUS] = {0};
+
+
+
 extern void v3_stgi();
 extern void v3_clgi();
 //extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
-extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs);
+extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_vmcb);
 
 
 static vmcb_t * Allocate_VMCB() {
@@ -86,6 +90,7 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
     ctrl_area->svm_instrs.CLGI = 1;
     ctrl_area->svm_instrs.SKINIT = 1;
     ctrl_area->svm_instrs.RDTSCP = 1;
+    ctrl_area->svm_instrs.CPUID = 1;
     ctrl_area->svm_instrs.ICEBP = 1;
     ctrl_area->svm_instrs.WBINVD = 1;
     ctrl_area->svm_instrs.MONITOR = 1;
@@ -122,7 +127,8 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
 
     vm_info->vm_regs.rdx = 0x00000f00;
 
-    guest_state->cr0 = 0x60000010;
+
+    guest_state->cr0 = 0x60010010; // Set the WP flag so the memory hooks work in real-mode
 
 
     guest_state->cs.selector = 0xf000;
@@ -171,13 +177,11 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
 
     ctrl_area->instrs.IOIO_PROT = 1;
 
-
     v3_init_svm_msr_map(vm_info);
     ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(vm_info->msr_map.arch_data);
     ctrl_area->instrs.MSR_PROT = 1;
 
-
-    PrintDebug("Exiting on interrupts\n");
     ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
     ctrl_area->instrs.INTR = 1;
 
@@ -262,9 +266,6 @@ static int init_svm_guest(struct guest_info * info, struct v3_vm_config * config
 
     return 0;
 }
 
-
-
-// can we start a kernel thread here...
 static int start_svm_guest(struct guest_info *info) {
     // vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
     // vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
@@ -276,39 +277,14 @@ static int start_svm_guest(struct guest_info *info) {
 
     //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
 
     info->run_state = VM_RUNNING;
-
+    rdtscll(info->yield_start_cycle);
+
+
     while (1) {
         ullong_t tmp_tsc;
-
-#ifdef __V3_64BIT__
-
-#define MSR_LSTAR       0xc0000082
-#define MSR_CSTAR       0xc0000083
-#define MSR_SF_MASK     0xc0000084
-#define MSR_GS_BASE     0xc0000101
-#define MSR_KERNGS_BASE 0xc0000102
-        struct v3_msr host_cstar;
-        struct v3_msr host_lstar;
-        struct v3_msr host_syscall_mask;
-        struct v3_msr host_gs_base;
-        struct v3_msr host_kerngs_base;
-
-#else
-
-#define MSR_SYSENTER_CS  0x00000174
-#define MSR_SYSENTER_ESP 0x00000175
-#define MSR_SYSENTER_EIP 0x00000176
-
-        struct v3_msr host_sysenter_cs;
-        struct v3_msr host_sysenter_esp;
-        struct v3_msr host_sysenter_eip;
-
-#endif
-
-#define MSR_STAR 0xc0000081
-        struct v3_msr host_star;
-
+        // Conditionally yield the CPU if the timeslice has expired
+        v3_yield_cond(info);
 
         /*
           PrintDebug("SVM Entry to CS=%p rip=%p...\n",
@@ -316,62 +292,36 @@ static int start_svm_guest(struct guest_info *info) {
           (void *)(addr_t)info->rip);
         */
 
+        // disable global interrupts for vm state transition
+        v3_clgi();
+
-#ifdef __V3_64BIT__
-        v3_get_msr(MSR_SF_MASK, &(host_syscall_mask.hi), &(host_syscall_mask.lo));
-        v3_get_msr(MSR_LSTAR, &(host_lstar.hi), &(host_lstar.lo));
-        v3_get_msr(MSR_CSTAR, &(host_cstar.hi), &(host_cstar.lo));
-        v3_get_msr(MSR_GS_BASE, &(host_gs_base.hi), &(host_gs_base.lo));
-        v3_get_msr(MSR_KERNGS_BASE, &(host_kerngs_base.hi), &(host_kerngs_base.lo));
-#else
-        v3_get_msr(MSR_SYSENTER_CS, &(host_sysenter_cs.hi), &(host_sysenter_cs.lo));
-        v3_get_msr(MSR_SYSENTER_ESP, &(host_sysenter_esp.hi), &(host_sysenter_esp.lo));
-        v3_get_msr(MSR_SYSENTER_EIP, &(host_sysenter_eip.hi), &(host_sysenter_eip.lo));
-#endif
-        v3_get_msr(MSR_STAR, &(host_star.hi), &(host_star.lo));
 
         rdtscll(info->time_state.cached_host_tsc);
 
         // guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;
 
-        v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs));
+        v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[info->cpu_id]);
 
         rdtscll(tmp_tsc);
 
-
-#ifdef __V3_64BIT__
-        v3_set_msr(MSR_SF_MASK, host_syscall_mask.hi, host_syscall_mask.lo);
-        v3_set_msr(MSR_LSTAR, host_lstar.hi, host_lstar.lo);
-        v3_set_msr(MSR_CSTAR, host_cstar.hi, host_cstar.lo);
-        v3_set_msr(MSR_GS_BASE, host_gs_base.hi, host_gs_base.lo);
-        v3_set_msr(MSR_KERNGS_BASE, host_kerngs_base.hi, host_kerngs_base.lo);
-#else
-        v3_set_msr(MSR_SYSENTER_CS, host_sysenter_cs.hi, host_sysenter_cs.lo);
-        v3_set_msr(MSR_SYSENTER_ESP, host_sysenter_esp.hi, host_sysenter_esp.lo);
-        v3_set_msr(MSR_SYSENTER_EIP, host_sysenter_eip.hi, host_sysenter_eip.lo);
-#endif
-        v3_set_msr(MSR_STAR, host_star.hi, host_star.lo);
 
         //PrintDebug("SVM Returned\n");
 
+        // reenable global interrupts after vm exit
+        v3_stgi();
+
+
+        // Conditionally yield the CPU if the timeslice has expired
+        v3_yield_cond(info);
 
         v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
         num_exits++;
-
-        //PrintDebug("Turning on global interrupts\n");
-        v3_stgi();
-        v3_clgi();
 
         if ((num_exits % 5000) == 0) {
             PrintDebug("SVM Exit number %d\n", num_exits);
-
-            if (info->enable_profiler) {
-                v3_print_profile(info);
-            }
         }
-
-
         if (v3_handle_svm_exit(info) != 0) {
             vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
             addr_t host_addr;
@@ -391,17 +341,18 @@ static int start_svm_guest(struct guest_info *info) {
             PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
             PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
 
+
             linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
 
+
             if (info->mem_mode == PHYSICAL_MEM) {
                 guest_pa_to_host_va(info, linear_addr, &host_addr);
             } else if (info->mem_mode == VIRTUAL_MEM) {
                 guest_va_to_host_va(info, linear_addr, &host_addr);
             }
-
             PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);
 
             PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
-            PrintTraceMemDump((uchar_t *)host_addr, 15);
+            v3_dump_mem((uint8_t *)host_addr, 15);
 
             break;
         }
@@ -418,13 +369,13 @@ static int start_svm_guest(struct guest_info *info) {
 int v3_is_svm_capable() {
     // Dinda
     uint_t vm_cr_low = 0, vm_cr_high = 0;
-    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
+    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
 
-    v3_cpuid(CPUID_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
+    v3_cpuid(CPUID_EXT_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
 
-    PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);
+    PrintDebug("CPUID_EXT_FEATURE_IDS_ecx=0x%x\n", ecx);
 
-    if ((ecx & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
+    if ((ecx & CPUID_EXT_FEATURE_IDS_ecx_svm_avail) == 0) {
         PrintDebug("SVM Not Available\n");
         return 0;
     } else {
@@ -437,7 +388,7 @@ int v3_is_svm_capable() {
 
         v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
 
-        PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);
+        PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);
 
         if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
             PrintDebug("SVM BIOS Disabled, not unlockable\n");
@@ -450,10 +401,10 @@ int v3_is_svm_capable() {
 
         PrintDebug("SVM is available and enabled.\n");
 
         v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
-        PrintDebug("CPUID_FEATURE_IDS_eax=%p\n", (void *)eax);
-        PrintDebug("CPUID_FEATURE_IDS_ebx=%p\n", (void *)ebx);
-        PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);
-        PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);
+        PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_eax=0x%x\n", eax);
+        PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ebx=0x%x\n", ebx);
+        PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ecx=0x%x\n", ecx);
+        PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);
 
         if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
@@ -468,11 +419,11 @@ int v3_is_svm_capable() {
 }
 
 static int has_svm_nested_paging() {
-    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
+    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
 
     v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
 
-    //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", edx);
+    //PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);
 
     if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
         PrintDebug("SVM Nested Paging not supported\n");
@@ -484,11 +435,9 @@ static int has_svm_nested_paging() {
 }
 
 
-
-void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
+void v3_init_svm_cpu(int cpu_id) {
     reg_ex_t msr;
-    void * host_state;
-    extern v3_cpu_arch_t v3_cpu_type;
+    extern v3_cpu_arch_t v3_cpu_types[];
 
     // Enable SVM on the CPU
     v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
@@ -497,24 +446,27 @@ void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
 
     PrintDebug("SVM Enabled\n");
 
-    // Setup the host state save area
-    host_state = V3_AllocPages(4);
-
+    host_vmcbs[cpu_id] = (addr_t)V3_AllocPages(4);
 
     /* 64-BIT-ISSUE */
     //  msr.e_reg.high = 0;
-    //msr.e_reg.low = (uint_t)host_state;
-    msr.r_reg = (addr_t)host_state;
+    //msr.e_reg.low = (uint_t)host_vmcb;
+    msr.r_reg = host_vmcbs[cpu_id];
 
-    PrintDebug("Host State being saved at %p\n", (void *)(addr_t)host_state);
+    PrintDebug("Host State being saved at %p\n", (void *)host_vmcbs[cpu_id]);
 
     v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);
 
+
     if (has_svm_nested_paging() == 1) {
-        v3_cpu_type = V3_SVM_REV3_CPU;
+        v3_cpu_types[cpu_id] = V3_SVM_REV3_CPU;
     } else {
-        v3_cpu_type = V3_SVM_CPU;
+        v3_cpu_types[cpu_id] = V3_SVM_CPU;
    }
+}
+
+
+void v3_init_svm_hooks(struct v3_ctrl_ops * vmm_ops) {
 
    // Setup the SVM specific vmm operations
    vmm_ops->init_guest = &init_svm_guest;
@@ -575,126 +527,63 @@
 
 
 
-/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
-    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
-    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
-    uint_t i;
-
-
-    guest_state->rsp = vm_info.vm_regs.rsp;
-    guest_state->rip = vm_info.rip;
-
-
-    //ctrl_area->instrs.instrs.CR0 = 1;
-    ctrl_area->cr_reads.cr0 = 1;
-    ctrl_area->cr_writes.cr0 = 1;
-
-    guest_state->efer |= EFER_MSR_svm_enable;
-    guest_state->rflags = 0x00000002; // The reserved bit is always 1
-    ctrl_area->svm_instrs.VMRUN = 1;
-    // guest_state->cr0 = 0x00000001; // PE
-    ctrl_area->guest_ASID = 1;
-
-
-    ctrl_area->exceptions.de = 1;
-    ctrl_area->exceptions.df = 1;
-    ctrl_area->exceptions.pf = 1;
-    ctrl_area->exceptions.ts = 1;
-    ctrl_area->exceptions.ss = 1;
-    ctrl_area->exceptions.ac = 1;
-    ctrl_area->exceptions.mc = 1;
-    ctrl_area->exceptions.gp = 1;
-    ctrl_area->exceptions.ud = 1;
-    ctrl_area->exceptions.np = 1;
-    ctrl_area->exceptions.of = 1;
-    ctrl_area->exceptions.nmi = 1;
-
-    guest_state->cs.selector = 0x0000;
-    guest_state->cs.limit=~0u;
-    guest_state->cs.base = guest_state->cs.selector<<4;
-    guest_state->cs.attrib.raw = 0xf3;
-
-
-    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
-    for ( i = 0; segregs[i] != NULL; i++) {
-        struct vmcb_selector * seg = segregs[i];
+#if 0
+/*
+ * Test VMSAVE/VMLOAD Latency
+ */
+#define vmsave ".byte 0x0F,0x01,0xDB ; "
+#define vmload ".byte 0x0F,0x01,0xDA ; "
+{
+    uint32_t start_lo, start_hi;
+    uint32_t end_lo, end_hi;
+    uint64_t start, end;
 
-        seg->selector = 0x0000;
-        seg->base = seg->selector << 4;
-        seg->attrib.raw = 0xf3;
-        seg->limit = ~0u;
-    }
-
-    if (vm_info.io_map.num_ports > 0) {
-        struct vmm_io_hook * iter;
-        addr_t io_port_bitmap;
+    __asm__ __volatile__ (
+        "rdtsc ; "
+        "movl %%eax, %%esi ; "
+        "movl %%edx, %%edi ; "
+        "movq %%rcx, %%rax ; "
+        vmsave
+        "rdtsc ; "
+        : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
+        : "c"(host_vmcb[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)
+        );
 
-        io_port_bitmap = (addr_t)V3_AllocPages(3);
-        memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
+    start = start_hi;
+    start <<= 32;
+    start += start_lo;
 
-        ctrl_area->IOPM_BASE_PA = io_port_bitmap;
+    end = end_hi;
+    end <<= 32;
+    end += end_lo;
+
+    PrintDebug("VMSave Cycle Latency: %d\n", (uint32_t)(end - start));
+
+    __asm__ __volatile__ (
+        "rdtsc ; "
+        "movl %%eax, %%esi ; "
+        "movl %%edx, %%edi ; "
+        "movq %%rcx, %%rax ; "
+        vmload
+        "rdtsc ; "
+        : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
+        : "c"(host_vmcb[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)
+        );
+
+    start = start_hi;
+    start <<= 32;
+    start += start_lo;
 
-        //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);
+    end = end_hi;
+    end <<= 32;
+    end += end_lo;
 
-        FOREACH_IO_HOOK(vm_info.io_map, iter) {
-            ushort_t port = iter->port;
-            uchar_t * bitmap = (uchar_t *)io_port_bitmap;
-            bitmap += (port / 8);
-            PrintDebug("Setting Bit in block %x\n", bitmap);
-            *bitmap |= 1 << (port % 8);
+    PrintDebug("VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));
 }
+/* End Latency Test */
 
-
-        //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);
-
-        ctrl_area->instrs.IOIO_PROT = 1;
-    }
-
-    ctrl_area->instrs.INTR = 1;
-
-
-
-    if (vm_info.page_mode == SHADOW_PAGING) {
-        PrintDebug("Creating initial shadow page table\n");
-        vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pts_32(&vm_info) & ~0xfff);
-        PrintDebug("Created\n");
-
-        guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;
-
-        ctrl_area->cr_reads.cr3 = 1;
-        ctrl_area->cr_writes.cr3 = 1;
-
-
-        ctrl_area->instrs.INVLPG = 1;
-        ctrl_area->instrs.INVLPGA = 1;
-
-        guest_state->g_pat = 0x7040600070406ULL;
-
-        guest_state->cr0 |= 0x80000000;
-    } else if (vm_info.page_mode == NESTED_PAGING) {
-        // Flush the TLB on entries/exits
-        //ctrl_area->TLB_CONTROL = 1;
-
-        // Enable Nested Paging
-        //ctrl_area->NP_ENABLE = 1;
-
-        //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
-
-        // Set the Nested Page Table pointer
-        // ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
-        // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);
-
-        // ctrl_area->N_CR3 = Get_CR3();
-        // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
-
-        // guest_state->g_pat = 0x7040600070406ULL;
-    }
-
-
-
-}
-*/
+#endif
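
[Annotation -- illustrative sketch, not part of the commit] The hunks above drop the per-iteration v3_get_msr()/v3_set_msr() save and restore of the host syscall/SYSENTER MSRs from start_svm_guest() and instead pass v3_svm_launch() a per-CPU host save area, host_vmcbs[info->cpu_id], whose physical address is also programmed into SVM_VM_HSAVE_PA_MSR in v3_init_svm_cpu(). The C sketch below shows one plausible way a launch path could use that third argument, assuming host state is captured and restored with VMSAVE/VMLOAD (the same 0x0F,0x01,0xDB / 0x0F,0x01,0xDA encodings the disabled latency test above measures). The helper names here (svm_vmsave, svm_vmload, sketch_svm_launch) are hypothetical; the real entry stub is the assembly v3_svm_launch, which this file only declares, and it may differ.

/* Hypothetical sketch of how the new host_vmcb argument can bracket VMRUN.
 * VMSAVE/VMLOAD cover the host state VMRUN does not preserve (FS/GS/TR/LDTR
 * hidden state, KernelGsBase, STAR/LSTAR/CSTAR/SFMASK, SYSENTER MSRs), which
 * is exactly the set the deleted v3_get_msr()/v3_set_msr() calls handled.
 * Runs at CPL0 with SVM enabled; not the actual Palacios svm_lowlevel.S code. */

#include <stdint.h>

static inline void svm_vmsave(uintptr_t vmcb_pa) {
    /* VMSAVE takes the VMCB physical address in rAX */
    __asm__ __volatile__ (".byte 0x0F,0x01,0xDB"
                          : : "a" (vmcb_pa) : "memory");
}

static inline void svm_vmload(uintptr_t vmcb_pa) {
    /* VMLOAD takes the VMCB physical address in rAX */
    __asm__ __volatile__ (".byte 0x0F,0x01,0xDA"
                          : : "a" (vmcb_pa) : "memory");
}

/* Illustrative launch wrapper: stash host state once per entry, restore it
 * once per exit, instead of a dozen explicit MSR reads/writes in C. */
static void sketch_svm_launch(uintptr_t guest_vmcb_pa, uintptr_t host_vmcb_pa) {
    svm_vmsave(host_vmcb_pa);      /* save host segment/MSR state              */
    /* ... load guest GPRs, VMLOAD guest extra state, VMRUN guest_vmcb_pa
     *     (VMRUN encoding 0x0F,0x01,0xD8), then save guest state back ...     */
    (void)guest_vmcb_pa;           /* elided here; handled by the real stub    */
    svm_vmload(host_vmcb_pa);      /* restore host state after the #VMEXIT     */
}

The disabled "#if 0" block at the end of the patch times exactly this VMSAVE/VMLOAD round trip with rdtsc, which is the cost that replaces the explicit MSR save/restore on every guest exit.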