X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmx.c;h=24b0089030ba5b76ed54431a0f90d33e00ae6499;hb=5ab8dd9d463c5e1aa61910d6014fe8b770e1e301;hp=29f1ccf8e05780fba4fc60ee86bab8f44bd08110;hpb=5bf6d0c260240e314876a2fca8e3fd56bd6a1029;p=palacios-OLD.git

diff --git a/palacios/src/palacios/vmx.c b/palacios/src/palacios/vmx.c
index 29f1ccf..24b0089 100644
--- a/palacios/src/palacios/vmx.c
+++ b/palacios/src/palacios/vmx.c
@@ -28,14 +28,26 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
 #include 
 
-static addr_t host_vmcs_ptrs[CONFIG_MAX_CPUS] = { [0 ... CONFIG_MAX_CPUS - 1] = 0};
+#include 
+
+#ifndef CONFIG_DEBUG_VMX
+#undef PrintDebug
+#define PrintDebug(fmt, args...)
+#endif
+
+
+/* These fields contain the hardware feature sets supported by the local CPU */
+static struct vmx_hw_info hw_info;
+static addr_t active_vmcs_ptrs[CONFIG_MAX_CPUS] = { [0 ... CONFIG_MAX_CPUS - 1] = 0};
+static addr_t host_vmcs_ptrs[CONFIG_MAX_CPUS] = { [0 ... CONFIG_MAX_CPUS - 1] = 0};
 
 
 extern int v3_vmx_launch(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
 extern int v3_vmx_resume(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
@@ -43,7 +55,7 @@ extern int v3_vmx_resume(struct v3_gprs * vm_regs, struct guest_info * info, str
 
 static int inline check_vmcs_write(vmcs_field_t field, addr_t val) {
     int ret = 0;
-    ret = vmcs_write(field,val);
+    ret = vmcs_write(field, val);
 
     if (ret != VMX_SUCCESS) {
         PrintError("VMWRITE error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
@@ -65,54 +77,10 @@ static int inline check_vmcs_read(vmcs_field_t field, void * val) {
     return ret;
 }
 
-#if 0
-// For the 32 bit reserved bit fields
-// MB1s are in the low 32 bits, MBZs are in the high 32 bits of the MSR
-static uint32_t sanitize_bits1(uint32_t msr_num, uint32_t val) {
-    v3_msr_t mask_msr;
-
-    PrintDebug("sanitize_bits1 (MSR:%x)\n", msr_num);
-
-    v3_get_msr(msr_num, &mask_msr.hi, &mask_msr.lo);
-    PrintDebug("MSR %x = %x : %x \n", msr_num, mask_msr.hi, mask_msr.lo);
-
-    val |= mask_msr.lo;
-    val |= mask_msr.hi;
-
-    return val;
-}
-
-
-
-static addr_t sanitize_bits2(uint32_t msr_num0, uint32_t msr_num1, addr_t val) {
-    v3_msr_t msr0, msr1;
-    addr_t msr0_val, msr1_val;
-
-    PrintDebug("sanitize_bits2 (MSR0=%x, MSR1=%x)\n", msr_num0, msr_num1);
-
-    v3_get_msr(msr_num0, &msr0.hi, &msr0.lo);
-    v3_get_msr(msr_num1, &msr1.hi, &msr1.lo);
-
-    // This generates a mask that is the natural bit width of the CPU
-    msr0_val = msr0.value;
-    msr1_val = msr1.value;
-
-    PrintDebug("MSR %x = %p, %x = %p \n", msr_num0, (void*)msr0_val, msr_num1, (void*)msr1_val);
-
-    val |= msr0_val;
-    val |= msr1_val;
-
-    return val;
-}
-
-
-
-#endif
 
 static addr_t allocate_vmcs() {
-    reg_ex_t msr;
     struct vmcs_data * vmcs_page = NULL;
 
     PrintDebug("Allocating page\n");
@@ -120,10 +88,8 @@ static addr_t allocate_vmcs() {
     vmcs_page = (struct vmcs_data *)V3_VAddr(V3_AllocPages(1));
     memset(vmcs_page, 0, 4096);
 
-    v3_get_msr(VMX_BASIC_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
-
-    vmcs_page->revision = ((struct vmx_basic_msr*)&msr)->revision;
-    PrintDebug("VMX Revision: 0x%x\n",vmcs_page->revision);
+    vmcs_page->revision = hw_info.basic_info.revision;
+    PrintDebug("VMX Revision: 0x%x\n", vmcs_page->revision);
 
     return (addr_t)V3_PAddr((void *)vmcs_page);
 }
@@ -134,8 +100,13 @@ static addr_t allocate_vmcs() {
 
 static int init_vmcs_bios(struct guest_info * info, struct vmx_data * vmx_state) {
     int vmx_ret = 0;
 
+    // disable global interrupts for vm state initialization
+    v3_disable_ints();
+
     PrintDebug("Loading VMCS\n");
     vmx_ret = vmcs_load(vmx_state->vmcs_ptr_phys);
+    active_vmcs_ptrs[V3_Get_CPU()] = vmx_info->vmcs_ptr_phys;
+    vmx_state->state = VMX_UNLAUNCHED;
 
     if (vmx_ret != VMX_SUCCESS) {
         PrintError("VMPTRLD failed\n");
@@ -143,6 +114,18 @@ static int init_vmcs_bios(struct guest_info * info, struct vmx_data * vmx_state)
     }
 
+    /*** Setup default state from HW ***/
+
+    vmx_state->pin_ctrls.value = hw_info.pin_ctrls.def_val;
+    vmx_state->pri_proc_ctrls.value = hw_info.proc_ctrls.def_val;
+    vmx_state->exit_ctrls.value = hw_info.exit_ctrls.def_val;
+    vmx_state->entry_ctrls.value = hw_info.entry_ctrls.def_val;;
+
+    /* Print Control MSRs */
+    PrintDebug("CR0 MSR: %p\n", (void *)(addr_t)hw_info.cr0.value);
+    PrintDebug("CR4 MSR: %p\n", (void *)(addr_t)hw_info.cr4.value);
+
+
     /******* Setup Host State **********/
@@ -198,68 +181,43 @@ static int init_vmcs_bios(struct guest_info * info, struct vmx_data * vmx_state)
 
     /********** Setup and VMX Control Fields from MSR ***********/
 
-    /* Setup IO map */
-    v3_init_vmx_io_map(info);
-    v3_init_vmx_msr_map(info);
-
-    struct v3_msr tmp_msr;
-    v3_get_msr(VMX_PINBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
     /* Add external interrupts, NMI exiting, and virtual NMI */
-    vmx_state->pin_ctrls.value = tmp_msr.lo;
     vmx_state->pin_ctrls.nmi_exit = 1;
     vmx_state->pin_ctrls.ext_int_exit = 1;
 
-    v3_get_msr(VMX_PROCBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
-
-    vmx_state->pri_proc_ctrls.value = tmp_msr.lo;
     vmx_state->pri_proc_ctrls.use_io_bitmap = 1;
     vmx_state->pri_proc_ctrls.hlt_exit = 1;
     vmx_state->pri_proc_ctrls.invlpg_exit = 1;
     vmx_state->pri_proc_ctrls.use_msr_bitmap = 1;
     vmx_state->pri_proc_ctrls.pause_exit = 1;
+    vmx_state->pri_proc_ctrls.tsc_offset = 1;
+#ifdef CONFIG_TIME_VIRTUALIZE_TSC
+    vmx_state->pri_proc_ctrls.rdtsc_exit = 1;
+#endif
 
-    vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_A_ADDR, (addr_t)V3_PAddr(info->io_map.arch_data));
+    /* Setup IO map */
+    vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_A_ADDR, (addr_t)V3_PAddr(info->vm_info->io_map.arch_data));
     vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_B_ADDR, 
-            (addr_t)V3_PAddr(info->io_map.arch_data) + PAGE_SIZE_4KB);
+            (addr_t)V3_PAddr(info->vm_info->io_map.arch_data) + PAGE_SIZE_4KB);
+
+
+    vmx_ret |= check_vmcs_write(VMCS_MSR_BITMAP, (addr_t)V3_PAddr(info->vm_info->msr_map.arch_data));
 
-    vmx_ret |= check_vmcs_write(VMCS_MSR_BITMAP, (addr_t)V3_PAddr(info->msr_map.arch_data));
 
-    v3_get_msr(VMX_EXIT_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
-    vmx_state->exit_ctrls.value = tmp_msr.lo;
     vmx_state->exit_ctrls.host_64_on = 1;
 
     if ((vmx_state->exit_ctrls.save_efer == 1) || (vmx_state->exit_ctrls.ld_efer == 1)) {
         vmx_state->ia32e_avail = 1;
     }
 
-    v3_get_msr(VMX_ENTRY_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
-    vmx_state->entry_ctrls.value = tmp_msr.lo;
-
-    {
-        struct vmx_exception_bitmap excp_bmap;
-        excp_bmap.value = 0;
-
-        excp_bmap.pf = 1;
-
-        vmx_ret |= check_vmcs_write(VMCS_EXCP_BITMAP, excp_bmap.value);
-    }
 
     /******* Setup VMXAssist guest state ***********/
 
     info->rip = 0xd0000;
     info->vm_regs.rsp = 0x80000;
 
-
-    struct rflags * flags = (struct rflags *)&(info->ctrl_regs.rflags);
-    flags->rsvd1 = 1;
-
-    /* Print Control MSRs */
-    v3_get_msr(VMX_CR0_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
-    PrintDebug("CR0 MSR: %p\n", (void *)(addr_t)tmp_msr.value);
-
-    v3_get_msr(VMX_CR4_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
-    PrintDebug("CR4 MSR: %p\n", (void *)(addr_t)tmp_msr.value);
-
+    info->ctrl_regs.rflags->rsvd1 = 1;
 
 #define GUEST_CR0 0x80000031
 #define GUEST_CR4 0x00002000
@@ -291,6 +249,9 @@ static int init_vmcs_bios(struct guest_info * info, struct vmx_data * vmx_state)
         /* Add CR exits */
         vmx_state->pri_proc_ctrls.cr3_ld_exit = 1;
         vmx_state->pri_proc_ctrls.cr3_str_exit = 1;
+
+        /* Add page fault exits */
+        vmx_state->excp_bmap.pf = 1;
     }
 
     // Setup segment registers
@@ -341,7 +302,7 @@ static int init_vmcs_bios(struct guest_info * info, struct vmx_data * vmx_state)
 #define VMXASSIST_GDT 0x10000
         addr_t vmxassist_gdt = 0;
 
-        if (guest_pa_to_host_va(info, VMXASSIST_GDT, &vmxassist_gdt) == -1) {
+        if (v3_gpa_to_hva(info, VMXASSIST_GDT, &vmxassist_gdt) == -1) {
             PrintError("Could not find VMXASSIST GDT destination\n");
             return -1;
         }
@@ -375,24 +336,44 @@ static int init_vmcs_bios(struct guest_info * info, struct vmx_data * vmx_state)
         extern uint8_t v3_vmxassist_end[];
         addr_t vmxassist_dst = 0;
 
-        if (guest_pa_to_host_va(info, VMXASSIST_START, &vmxassist_dst) == -1) {
+        if (v3_gpa_to_hva(info, VMXASSIST_START, &vmxassist_dst) == -1) {
            PrintError("Could not find VMXASSIST destination\n");
            return -1;
        }
 
        memcpy((void *)vmxassist_dst, v3_vmxassist_start, v3_vmxassist_end - v3_vmxassist_start);
+
+
+       vmx_state->assist_state = VMXASSIST_DISABLED;
     }
 
-    /*** Write all the info to the VMCS ***/
+
+
+    /* Sanity check ctrl/reg fields against hw_defaults */
+
+
+
+    /*** Write all the info to the VMCS ***/
+
+    {
 #define DEBUGCTL_MSR 0x1d9
-    v3_get_msr(DEBUGCTL_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_DBG_CTL, tmp_msr.value);
+        struct v3_msr tmp_msr;
+        v3_get_msr(DEBUGCTL_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+        vmx_ret |= check_vmcs_write(VMCS_GUEST_DBG_CTL, tmp_msr.value);
+        info->dbg_regs.dr7 = 0x400;
+    }
 
-    info->dbg_regs.dr7 = 0x400;
 
+#ifdef __V3_64BIT__
     vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, (addr_t)0xffffffffffffffffULL);
-
+#else
+    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, (addr_t)0xffffffffUL);
+    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR_HIGH, (addr_t)0xffffffffUL);
+#endif
+
+
 
     if (v3_update_vmcs_ctrl_fields(info)) {
         PrintError("Could not write control fields!\n");
@@ -405,7 +386,11 @@ static int init_vmcs_bios(struct guest_info * info, struct vmx_data * vmx_state)
     }
 
 
-    vmx_state->state = VMXASSIST_DISABLED;
+
+    // reenable global interrupts for vm state initialization now
+    // that the vm state is initialized. If another VM kicks us off,
+    // it'll update our vmx state so that we know to reload ourself
+    v3_enable_ints();
 
     return 0;
 }
@@ -415,6 +400,7 @@ int v3_init_vmx_vmcs(struct guest_info * info, v3_vm_class_t vm_class) {
     int vmx_ret = 0;
 
     vmx_state = (struct vmx_data *)V3_Malloc(sizeof(struct vmx_data));
+    memset(vmx_state, 0, sizeof(struct vmx_data));
 
     PrintDebug("vmx_data pointer: %p\n", (void *)vmx_state);
 
@@ -424,6 +410,7 @@ int v3_init_vmx_vmcs(struct guest_info * info, v3_vm_class_t vm_class) {
     PrintDebug("VMCS pointer: %p\n", (void *)(vmx_state->vmcs_ptr_phys));
 
     info->vmm_data = vmx_state;
+    vmx_state->state = VMX_UNLAUNCHED;
 
     PrintDebug("Initializing VMCS (addr=%p)\n", info->vmm_data);
 
@@ -448,17 +435,29 @@ int v3_init_vmx_vmcs(struct guest_info * info, v3_vm_class_t vm_class) {
 
     return 0;
 }
 
+
+int v3_deinit_vmx_vmcs(struct guest_info * core) {
+    struct vmx_data * vmx_state = core->vmm_data;
+
+    V3_FreePages((void *)(vmx_state->vmcs_ptr_phys), 1);
+
+    V3_Free(vmx_state);
+
+    return 0;
+}
+
+
 static int update_irq_exit_state(struct guest_info * info) {
     struct vmx_exit_idt_vec_info idt_vec_info;
 
     check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));
 
-    if ((info->intr_state.irq_started == 1) && (idt_vec_info.valid == 0)) {
+    if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 0)) {
 
 #ifdef CONFIG_DEBUG_INTERRUPTS
         PrintDebug("Calling v3_injecting_intr\n");
 #endif
-        info->intr_state.irq_started = 0;
-        v3_injecting_intr(info, info->intr_state.irq_vector, V3_EXTERNAL_IRQ);
+        info->intr_core_state.irq_started = 0;
+        v3_injecting_intr(info, info->intr_core_state.irq_vector, V3_EXTERNAL_IRQ);
     }
 
     return 0;
@@ -466,9 +465,11 @@ static int update_irq_exit_state(struct guest_info * info) {
 
 static int update_irq_entry_state(struct guest_info * info) {
     struct vmx_exit_idt_vec_info idt_vec_info;
+    struct vmcs_interrupt_state intr_core_state;
     struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);
 
     check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));
+    check_vmcs_read(VMCS_GUEST_INT_STATE, &(intr_core_state));
 
     /* Check for pending exceptions to inject */
     if (v3_excp_pending(info)) {
@@ -492,15 +493,16 @@ static int update_irq_entry_state(struct guest_info * info) {
         int_info.valid = 1;
 
 #ifdef CONFIG_DEBUG_INTERRUPTS
-        PrintDebug("Injecting exception %d (EIP=%p)\n", int_info.vector, (void *)info->rip);
+        PrintDebug("Injecting exception %d (EIP=%p)\n", int_info.vector, (void *)(addr_t)info->rip);
 #endif
         check_vmcs_write(VMCS_ENTRY_INT_INFO, int_info.value);
 
         v3_injecting_excp(info, int_info.vector);
 
-    } else if (((struct rflags *)&(info->ctrl_regs.rflags))->intr == 1) {
+    } else if ((((struct rflags *)&(info->ctrl_regs.rflags))->intr == 1) && 
+               (intr_core_state.val == 0)) {
 
-        if ((info->intr_state.irq_started == 1) && (idt_vec_info.valid == 1)) {
+        if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 1)) {
 
 #ifdef CONFIG_DEBUG_INTERRUPTS
             PrintDebug("IRQ pending from previous injection\n");
@@ -523,21 +525,21 @@ static int update_irq_entry_state(struct guest_info * info) {
 
             switch (v3_intr_pending(info)) {
                 case V3_EXTERNAL_IRQ: {
-                    info->intr_state.irq_vector = v3_get_intr(info);
-                    ent_int.vector = info->intr_state.irq_vector;
+                    info->intr_core_state.irq_vector = v3_get_intr(info);
+                    ent_int.vector = info->intr_core_state.irq_vector;
                     ent_int.type = 0;
                     ent_int.error_code = 0;
                     ent_int.valid = 1;
 
 #ifdef CONFIG_DEBUG_INTERRUPTS
                     PrintDebug("Injecting Interrupt %d at exit %u(EIP=%p)\n", 
-                               info->intr_state.irq_vector, 
+                               info->intr_core_state.irq_vector, 
                               (uint32_t)info->num_exits, 
-                               (void *)info->rip);
+                               (void *)(addr_t)info->rip);
 #endif
 
                     check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
 
-                    info->intr_state.irq_started = 1;
+                    info->intr_core_state.irq_started = 1;
 
                     break;
                 }
@@ -587,6 +589,35 @@ static int update_irq_entry_state(struct guest_info * info) {
 
     return 0;
 }
 
+
+static struct vmx_exit_info exit_log[10];
+
+static void print_exit_log(struct guest_info * info) {
+    int cnt = info->num_exits % 10;
+    int i = 0;
+
+
+    V3_Print("\nExit Log (%d total exits):\n", (uint32_t)info->num_exits);
+
+    for (i = 0; i < 10; i++) {
+        struct vmx_exit_info * tmp = &exit_log[cnt];
+
+        V3_Print("%d:\texit_reason = %p\n", i, (void *)(addr_t)tmp->exit_reason);
+        V3_Print("\texit_qual = %p\n", (void *)tmp->exit_qual);
+        V3_Print("\tint_info = %p\n", (void *)(addr_t)tmp->int_info);
+        V3_Print("\tint_err = %p\n", (void *)(addr_t)tmp->int_err);
+        V3_Print("\tinstr_info = %p\n", (void *)(addr_t)tmp->instr_info);
+
+        cnt--;
+
+        if (cnt == -1) {
+            cnt = 9;
+        }
+
+    }
+
+}
+
 /*
  * CAUTION and DANGER!!!
  *
@@ -597,39 +628,64 @@ static int update_irq_entry_state(struct guest_info * info) {
  */
 int v3_vmx_enter(struct guest_info * info) {
     int ret = 0;
-    uint64_t tmp_tsc = 0;
+    uint32_t tsc_offset_low, tsc_offset_high;
     struct vmx_exit_info exit_info;
+    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);
 
     // Conditionally yield the CPU if the timeslice has expired
     v3_yield_cond(info);
 
+    // Perform any additional yielding needed for time adjustment
+    v3_adjust_time(info);
 
-    // v3_print_guest_state(info);
+    // Update timer devices prior to entering VM.
+    v3_update_timers(info);
 
     // disable global interrupts for vm state transition
     v3_disable_ints();
 
+
+    if (active_vmcs_ptrs[V3_Get_CPU()] != vmx_info->vmcs_ptr_phys) {
+        vmcs_load(vmx_info->vmcs_ptr_phys);
+        active_vmcs_ptrs[V3_Get_CPU()] = vmx_info->vmcs_ptr_phys;
+    }
+
+
     v3_vmx_restore_vmcs(info);
 
 
-#ifdef CONFIG_SYMBIOTIC
-    if (info->sym_state.sym_call_active == 0) {
+#ifdef CONFIG_SYMCALL
+    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_entry_state(info);
     }
 #else
     update_irq_entry_state(info);
 #endif
 
+    {
+        addr_t guest_cr3;
+        vmcs_read(VMCS_GUEST_CR3, &guest_cr3);
+        vmcs_write(VMCS_GUEST_CR3, guest_cr3);
+    }
+
+    // Perform last-minute time bookkeeping prior to entering the VM
+    v3_time_enter_vm(info);
 
-    rdtscll(info->time_state.cached_host_tsc);
+    tsc_offset_high = (uint32_t)((v3_tsc_host_offset(&info->time_state) >> 32) & 0xffffffff);
+    tsc_offset_low = (uint32_t)(v3_tsc_host_offset(&info->time_state) & 0xffffffff);
+    check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
+    check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);
 
-    if (info->run_state == VM_STOPPED) {
-        info->run_state = VM_RUNNING;
+
+    if (vmx_info->state == VMX_UNLAUNCHED) {
+        vmx_info->state = VMX_LAUNCHED;
+        info->vm_info->run_state = VM_RUNNING;
        ret = v3_vmx_launch(&(info->vm_regs), info, &(info->ctrl_regs));
     } else {
+        V3_ASSERT(vmx_info->state != VMX_UNLAUNCHED);
        ret = v3_vmx_resume(&(info->vm_regs), info, &(info->ctrl_regs));
     }
-
+
     //  PrintDebug("VMX Exit: ret=%d\n", ret);
 
     if (ret != VMX_SUCCESS) {
@@ -641,15 +697,16 @@ int v3_vmx_enter(struct guest_info * info) {
         return -1;
     }
 
-    rdtscll(tmp_tsc);
+    // Immediate exit from VM time bookkeeping
+    v3_time_exit_vm(info);
 
     info->num_exits++;
 
-    v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
-
     /* Update guest state */
     v3_vmx_save_vmcs(info);
 
+    //    info->cpl = info->segments.cs.selector & 0x3;
+
     info->mem_mode = v3_get_vm_mem_mode(info);
     info->cpu_mode = v3_get_vm_cpu_mode(info);
@@ -664,15 +721,28 @@ int v3_vmx_enter(struct guest_info * info) {
 
     //PrintDebug("VMX Exit taken, id-qual: %u-%lu\n", exit_info.exit_reason, exit_info.exit_qual);
 
+    exit_log[info->num_exits % 10] = exit_info;
 
-#ifdef CONFIG_SYMBIOTIC
-    if (info->sym_state.sym_call_active == 0) {
+
+#ifdef CONFIG_SYMCALL
+    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_exit_state(info);
     }
 #else
     update_irq_exit_state(info);
 #endif
 
+    if (exit_info.exit_reason == VMEXIT_INTR_WINDOW) {
+       // This is a special case whose only job is to inject an interrupt
+       vmcs_read(VMCS_PROC_CTRLS, &(vmx_info->pri_proc_ctrls.value));
+       vmx_info->pri_proc_ctrls.int_wndw_exit = 0;
+       vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
+
+#ifdef CONFIG_DEBUG_INTERRUPTS
+       PrintDebug("Interrupts available again! (RIP=%llx)\n", info->rip);
+#endif
+    }
+
     // reenable global interrupts after vm exit
     v3_enable_ints();
 
@@ -688,29 +758,72 @@ int v3_vmx_enter(struct guest_info * info) {
 }
 
 
-int v3_start_vmx_guest(struct guest_info* info) {
+int v3_start_vmx_guest(struct guest_info * info) {
+
+    PrintDebug("Starting VMX core %u\n", info->cpu_id);
+
+    if (info->cpu_id == 0) {
+       info->core_run_state = CORE_RUNNING;
+       info->vm_info->run_state = VM_RUNNING;
+    } else {
+
+       PrintDebug("VMX core %u: Waiting for core initialization\n", info->cpu_id);
+
+       while (info->core_run_state == CORE_STOPPED) {
+           v3_yield(info);
+           //PrintDebug("VMX core %u: still waiting for INIT\n",info->cpu_id);
+       }
+
+       PrintDebug("VMX core %u initialized\n", info->cpu_id);
+    }
+
+    PrintDebug("VMX core %u: I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n",
+              info->cpu_id, info->segments.cs.selector, (void *)(info->segments.cs.base),
+              info->segments.cs.limit, (void *)(info->rip));
 
-    PrintDebug("Launching VMX guest\n");
 
-    rdtscll(info->time_state.cached_host_tsc);
+    PrintDebug("VMX core %u: Launching VMX VM\n", info->cpu_id);
 
+    v3_start_time(info);
 
     while (1) {
+
+       if (info->vm_info->run_state == VM_STOPPED) {
+           info->core_run_state = CORE_STOPPED;
+           break;
+       }
+
        if (v3_vmx_enter(info) == -1) {
            v3_print_vmcs();
+           print_exit_log(info);
            return -1;
        }
 
+
+       if (info->vm_info->run_state == VM_STOPPED) {
+           info->core_run_state = CORE_STOPPED;
+           break;
+       }
 
+/*
        if ((info->num_exits % 5000) == 0) {
-           V3_Print("SVM Exit number %d\n", (uint32_t)info->num_exits);
+           V3_Print("VMX Exit number %d\n", (uint32_t)info->num_exits);
        }
+*/
 
     }
 
     return 0;
 }
 
+
+
+#define VMX_FEATURE_CONTROL_MSR 0x0000003a
+#define CPUID_VMX_FEATURES 0x00000005  /* LOCK and VMXON */
+#define CPUID_1_ECX_VTXFLAG 0x00000020
+
 int v3_is_vmx_capable() {
     v3_msr_t feature_msr;
     uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
@@ -724,7 +837,7 @@ int v3_is_vmx_capable() {
 
     PrintDebug("MSRREGlow: 0x%.8x\n", feature_msr.lo);
 
-    if ((feature_msr.lo & FEATURE_CONTROL_VALID) != FEATURE_CONTROL_VALID) {
+    if ((feature_msr.lo & CPUID_VMX_FEATURES) != CPUID_VMX_FEATURES) {
        PrintDebug("VMX is locked -- enable in the BIOS\n");
        return 0;
     }
@@ -737,49 +850,23 @@ int v3_is_vmx_capable() {
     return 1;
 }
 
-static int has_vmx_nested_paging() {
-    return 0;
-}
+
+
 
 void v3_init_vmx_cpu(int cpu_id) {
     extern v3_cpu_arch_t v3_cpu_types[];
-    struct v3_msr tmp_msr;
-    uint64_t ret = 0;
 
-    v3_get_msr(VMX_CR4_FIXED0_MSR,&(tmp_msr.hi),&(tmp_msr.lo));
-
-    __asm__ __volatile__ (
-       "movq %%cr4, %%rbx;"
-       "orq $0x00002000, %%rbx;"
-       "movq %%rbx, %0;"
-       : "=m"(ret)
-       :
-       : "%rbx"
-    );
-
-    if ((~ret & tmp_msr.value) == 0) {
-        __asm__ __volatile__ (
-             "movq %0, %%cr4;"
-             :
-             : "q"(ret)
-        );
-    } else {
-       PrintError("Invalid CR4 Settings!\n");
-       return;
+    if (cpu_id == 0) {
+       if (v3_init_vmx_hw(&hw_info) == -1) {
+           PrintError("Could not initialize VMX hardware features on cpu %d\n", cpu_id);
+           return;
+       }
     }
 
-    __asm__ __volatile__ (
-       "movq %%cr0, %%rbx; "
-       "orq $0x00000020,%%rbx; "
-       "movq %%rbx, %%cr0;"
-       :
-       :
-       : "%rbx"
-    );
-    //
-    // Should check and return Error here....
+
+    enable_vmx();
 
 
     // Setup VMXON Region
@@ -787,7 +874,7 @@ void v3_init_vmx_cpu(int cpu_id) {
 
     PrintDebug("VMXON pointer: 0x%p\n", (void *)host_vmcs_ptrs[cpu_id]);
 
-    if (v3_enable_vmx(host_vmcs_ptrs[cpu_id]) == VMX_SUCCESS) {
+    if (vmx_on(host_vmcs_ptrs[cpu_id]) == VMX_SUCCESS) {
        PrintDebug("VMX Enabled\n");
     } else {
        PrintError("VMX initialization failure\n");
@@ -795,11 +882,14 @@ void v3_init_vmx_cpu(int cpu_id) {
     }
 
 
-    if (has_vmx_nested_paging() == 1) {
-        v3_cpu_types[cpu_id] = V3_VMX_EPT_CPU;
-    } else {
-        v3_cpu_types[cpu_id] = V3_VMX_CPU;
-    }
+    v3_cpu_types[cpu_id] = V3_VMX_CPU;
 }
 
+
+void v3_deinit_vmx_cpu(int cpu_id) {
+    extern v3_cpu_arch_t v3_cpu_types[];
+    v3_cpu_types[cpu_id] = V3_INVALID_CPU;
+    V3_FreePages((void *)host_vmcs_ptrs[cpu_id], 1);
+}