X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fsvm.c;h=5fadae1d07c7b81457c78bfd71b36c9455eef26f;hb=e23a867d9dcecc70fc76adc0f89404dbc50e5b78;hp=ae275c165f32edeaf5f5a83eb24a8c44af43c616;hpb=9928ec9289de6658a2830370b1d1b514833515d0;p=palacios.git

diff --git a/palacios/src/palacios/svm.c b/palacios/src/palacios/svm.c
index ae275c1..5fadae1 100644
--- a/palacios/src/palacios/svm.c
+++ b/palacios/src/palacios/svm.c
@@ -266,9 +266,6 @@ static int init_svm_guest(struct guest_info * info, struct v3_vm_config * config
     return 0;
 }
 
-
-
-// can we start a kernel thread here...
 static int start_svm_guest(struct guest_info *info) {
     // vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
     // vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
@@ -280,10 +277,14 @@ static int start_svm_guest(struct guest_info *info) {
     //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
 
     info->run_state = VM_RUNNING;
-    
+    rdtscll(info->yield_start_cycle);
+
+
     while (1) {
         ullong_t tmp_tsc;
+        // Conditionally yield the CPU if the timeslice has expired
+        v3_yield_cond(info);
 
         /*
           PrintDebug("SVM Entry to CS=%p rip=%p...\n",
@@ -292,6 +293,7 @@ static int start_svm_guest(struct guest_info *info) {
         */
 
         // disable global interrupts for vm state transition
+
         v3_clgi();
@@ -321,6 +323,9 @@ static int start_svm_guest(struct guest_info *info) {
         }
 
+        // Conditionally yield the CPU if the timeslice has expired
+        v3_yield_cond(info);
+
         if (v3_handle_svm_exit(info) != 0) {
             vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
             addr_t host_addr;
@@ -340,13 +345,14 @@ static int start_svm_guest(struct guest_info *info) {
             PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
             PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
 
+            linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
+
             if (info->mem_mode == PHYSICAL_MEM) {
                 guest_pa_to_host_va(info, linear_addr, &host_addr);
             } else if (info->mem_mode == VIRTUAL_MEM) {
                 guest_va_to_host_va(info, linear_addr, &host_addr);
             }
-
             PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);
 
             PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
@@ -369,11 +375,11 @@ int v3_is_svm_capable() {
     uint_t vm_cr_low = 0, vm_cr_high = 0;
     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
 
-    v3_cpuid(CPUID_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
+    v3_cpuid(CPUID_EXT_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
 
-    PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);
+    PrintDebug("CPUID_EXT_FEATURE_IDS_ecx=%p\n", (void *)ecx);
 
-    if ((ecx & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
+    if ((ecx & CPUID_EXT_FEATURE_IDS_ecx_svm_avail) == 0) {
         PrintDebug("SVM Not Available\n");
         return 0;
     } else {
@@ -386,7 +392,7 @@ int v3_is_svm_capable() {
 
         v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
 
-        PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);
+        PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=%p\n", (void *)edx);
 
         if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
             PrintDebug("SVM BIOS Disabled, not unlockable\n");
@@ -399,10 +405,10 @@ int v3_is_svm_capable() {
         PrintDebug("SVM is available and enabled.\n");
 
         v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
-        PrintDebug("CPUID_FEATURE_IDS_eax=%p\n", (void *)eax);
-        PrintDebug("CPUID_FEATURE_IDS_ebx=%p\n", (void *)ebx);
-        PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);
-        PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);
+        PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_eax=%p\n", (void *)eax);
+        PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ebx=%p\n", (void *)ebx);
+        PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ecx=%p\n", (void *)ecx);
+        PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=%p\n", (void *)edx);
 
         if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
@@ -421,7 +427,7 @@ static int has_svm_nested_paging() {
 
     v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
 
-    //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", edx);
+    //PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);
 
     if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
         PrintDebug("SVM Nested Paging not supported\n");
@@ -445,11 +451,9 @@ void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
 
     PrintDebug("SVM Enabled\n");
 
-
     // Setup the host state save area
     host_vmcb = V3_AllocPages(4);
 
-
     /* 64-BIT-ISSUE */
     // msr.e_reg.high = 0;
     //msr.e_reg.low = (uint_t)host_vmcb;
@@ -458,8 +462,6 @@ void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
     PrintDebug("Host State being saved at %p\n", (void *)(addr_t)host_vmcb);
 
     v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);
-
-
     /*
      * Test VMSAVE/VMLOAD Latency
      */
@@ -489,12 +491,8 @@ void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
         end <<= 32;
         end += end_lo;
 
-
         PrintDebug("VMSave Cycle Latency: %d\n", (uint32_t)(end - start));
 
-
-
-
         __asm__ __volatile__ (
             "rdtsc ; "
             "movl %%eax, %%esi ; "
@@ -516,12 +514,7 @@ void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
 
         PrintDebug("VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));
-
-
-
     }
-
-
     /* End Latency Test */
 
     if (has_svm_nested_paging() == 1) {