X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fsvm.c;h=d2444ac915e13224d7a7a866039d7587e56cf813;hb=d34450b1e6fe3c2e1295c268c1722c669ba8d545;hp=5220c835a55be32c4321123df7b6311e13763311;hpb=1316eb77b6d89245583446dab3345f749c415764;p=palacios.git

diff --git a/palacios/src/palacios/svm.c b/palacios/src/palacios/svm.c
index 5220c83..d2444ac 100644
--- a/palacios/src/palacios/svm.c
+++ b/palacios/src/palacios/svm.c
@@ -277,10 +277,14 @@ static int start_svm_guest(struct guest_info *info) {
     //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
 
     info->run_state = VM_RUNNING;
-
+    rdtscll(info->yield_start_cycle);
+
+
     while (1) {
         ullong_t tmp_tsc;
 
+        // Conditionally yield the CPU if the timeslice has expired
+        v3_yield_cond(info);
 
         /*
           PrintDebug("SVM Entry to CS=%p  rip=%p...\n",
@@ -306,6 +310,11 @@ static int start_svm_guest(struct guest_info *info) {
         // reenable global interrupts after vm exit
         v3_stgi();
 
+
+        // Conditionally yield the CPU if the timeslice has expired
+        v3_yield_cond(info);
+
+
         v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
         num_exits++;
 
@@ -317,7 +326,6 @@ static int start_svm_guest(struct guest_info *info) {
             }
         }
 
-
         if (v3_handle_svm_exit(info) != 0) {
             vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
             addr_t host_addr;
@@ -337,13 +345,14 @@ static int start_svm_guest(struct guest_info *info) {
             PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
             PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
 
+
             linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
 
+
             if (info->mem_mode == PHYSICAL_MEM) {
                 guest_pa_to_host_va(info, linear_addr, &host_addr);
             } else if (info->mem_mode == VIRTUAL_MEM) {
                 guest_va_to_host_va(info, linear_addr, &host_addr);
             }
-
             PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);
             PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
@@ -366,11 +375,11 @@ int v3_is_svm_capable() {
     uint_t vm_cr_low = 0, vm_cr_high = 0;
     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
 
-    v3_cpuid(CPUID_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
+    v3_cpuid(CPUID_EXT_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
 
-    PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);
+    PrintDebug("CPUID_EXT_FEATURE_IDS_ecx=%p\n", (void *)ecx);
 
-    if ((ecx & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
+    if ((ecx & CPUID_EXT_FEATURE_IDS_ecx_svm_avail) == 0) {
       PrintDebug("SVM Not Available\n");
       return 0;
     } else {
@@ -418,7 +427,7 @@ static int has_svm_nested_paging() {
 
   v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
 
-  //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", edx);
+  //PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);
 
   if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
     PrintDebug("SVM Nested Paging not supported\n");
@@ -442,11 +451,9 @@ void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
 
     PrintDebug("SVM Enabled\n");
 
-
     // Setup the host state save area
     host_vmcb = V3_AllocPages(4);
 
-
     /* 64-BIT-ISSUE */
     //  msr.e_reg.high = 0;
     //msr.e_reg.low = (uint_t)host_vmcb;
@@ -455,8 +462,6 @@ void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
 
     PrintDebug("Host State being saved at %p\n", (void *)(addr_t)host_vmcb);
     v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);
-
-
     /*
      * Test VMSAVE/VMLOAD Latency
      */
@@ -486,12 +491,8 @@ void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
         end <<= 32;
         end += end_lo;
 
-
         PrintDebug("VMSave Cycle Latency: %d\n", (uint32_t)(end - start));
 
-
-
-
         __asm__ __volatile__ (
                               "rdtsc ; "
                               "movl %%eax, %%esi ; "
@@ -513,12 +514,7 @@ void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
 
         PrintDebug("VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));
 
-
-
-
     }
-
-
     /* End Latency Test */
 
     if (has_svm_nested_paging() == 1) {