/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2011, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2011, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
#include <palacios/vmx.h>
#include <palacios/vmm.h>
#include <palacios/vmx_handler.h>
#include <palacios/vmcs.h>
#include <palacios/vmx_lowlevel.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_config.h>
#include <palacios/vmm_time.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_direct_paging.h>
#include <palacios/vmx_io.h>
#include <palacios/vmx_msr.h>
#include <palacios/vmm_decoder.h>
#include <palacios/vmm_barrier.h>
#include <palacios/vmm_timeout.h>
#include <palacios/vmm_debug.h>
#ifdef V3_CONFIG_CHECKPOINT
#include <palacios/vmm_checkpoint.h>
#endif

#include <palacios/vmx_ept.h>
#include <palacios/vmx_assist.h>
#include <palacios/vmx_hw_info.h>

#ifdef V3_CONFIG_MEM_TRACK
#include <palacios/vmm_mem_track.h>
#endif

#ifndef V3_CONFIG_DEBUG_VMX
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
/* These fields contain the hardware feature sets supported by the local CPU */
static struct vmx_hw_info hw_info;

extern v3_cpu_arch_t v3_mach_type;

static addr_t host_vmcs_ptrs[V3_CONFIG_MAX_CPUS] = { [0 ... V3_CONFIG_MAX_CPUS - 1] = 0 };

extern int v3_vmx_launch(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
extern int v3_vmx_resume(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
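
/* v3_vmx_launch() wraps VMLAUNCH and v3_vmx_resume() wraps VMRESUME (both are
 * assembly stubs). Hardware requires VMLAUNCH for the first entry after a
 * VMCLEAR, and VMRESUME for every later entry on the same VMCS; the
 * vmx_state->state field (VMX_UNLAUNCHED/VMX_LAUNCHED) tracks which to use. */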
static int inline check_vmcs_write(vmcs_field_t field, addr_t val) {
    int ret = 0;

    ret = vmcs_write(field, val);

    if (ret != VMX_SUCCESS) {
        PrintError(VM_NONE, VCORE_NONE, "VMWRITE error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
        return 1;
    }

    return 0;
}
static int inline check_vmcs_read(vmcs_field_t field, void * val) {
    int ret = 0;

    ret = vmcs_read(field, val);

    if (ret != VMX_SUCCESS) {
        PrintError(VM_NONE, VCORE_NONE, "VMREAD error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
    }

    return ret;
}
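
/* A minimal usage sketch (illustrative only, never compiled): callers below
 * OR the return values of many writes together and test once at the end,
 * which keeps the setup code readable while still catching a failed VMWRITE. */
#if 0
{
    int vmx_ret = 0;

    vmx_ret |= check_vmcs_write(VMCS_GUEST_PAT, 0x0007040600070406LL);
    vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE);

    if (vmx_ret != VMX_SUCCESS) {
        PrintError(VM_NONE, VCORE_NONE, "Error configuring VMX\n");
    }
}
#endif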
static addr_t allocate_vmcs() {
    void * temp = NULL;
    struct vmcs_data * vmcs_page = NULL;

    PrintDebug(VM_NONE, VCORE_NONE, "Allocating page\n");

    temp = V3_AllocPages(1); // need not be shadow-safe, not exposed to guest

    if (!temp) {
        PrintError(VM_NONE, VCORE_NONE, "Cannot allocate VMCS\n");
        return -1;
    }

    vmcs_page = (struct vmcs_data *)V3_VAddr(temp);
    memset(vmcs_page, 0, 4096);

    vmcs_page->revision = hw_info.basic_info.revision;
    PrintDebug(VM_NONE, VCORE_NONE, "VMX Revision: 0x%x\n", vmcs_page->revision);

    return (addr_t)V3_PAddr((void *)vmcs_page);
}
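
/* Note: hardware refuses VMPTRLD/VMXON on a region whose first 32 bits do not
 * match the VMCS revision identifier reported by the IA32_VMX_BASIC MSR, which
 * is why the revision is stamped into every page allocated here. The same
 * allocator is reused for the per-CPU VMXON region in v3_init_vmx_cpu(), since
 * the size/revision requirements are identical. */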
static int debug_efer_read(struct guest_info * core, uint_t msr, struct v3_msr * src, void * priv_data) {
    struct v3_msr * efer = (struct v3_msr *)&(core->ctrl_regs.efer);

    V3_Print(core->vm_info, core, "\n\nEFER READ (val = %p)\n", (void *)efer->value);

    v3_print_guest_state(core);

    src->value = efer->value;
    return 0;
}
static int debug_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data) {
    struct v3_msr * efer = (struct v3_msr *)&(core->ctrl_regs.efer);

    V3_Print(core->vm_info, core, "\n\nEFER WRITE (old_val = %p) (new_val = %p)\n", (void *)efer->value, (void *)src.value);

    v3_print_guest_state(core);

    efer->value = src.value;
    return 0;
}
static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state) {
    int vmx_ret = 0;

    /* Get available features */
    struct vmx_pin_ctrls avail_pin_ctrls;
    avail_pin_ctrls.value = v3_vmx_get_ctrl_features(&(hw_info.pin_ctrls));

    // disable global interrupts for vm state initialization
    v3_disable_ints();

    PrintDebug(core->vm_info, core, "Loading VMCS\n");
    vmx_ret = vmcs_load(vmx_state->vmcs_ptr_phys);
    vmx_state->state = VMX_UNLAUNCHED;

    if (vmx_ret != VMX_SUCCESS) {
        PrintError(core->vm_info, core, "VMPTRLD failed\n");
        return -1;
    }
    /*** Setup default state from HW ***/

    vmx_state->pin_ctrls.value = hw_info.pin_ctrls.def_val;
    vmx_state->pri_proc_ctrls.value = hw_info.proc_ctrls.def_val;
    vmx_state->exit_ctrls.value = hw_info.exit_ctrls.def_val;
    vmx_state->entry_ctrls.value = hw_info.entry_ctrls.def_val;
    vmx_state->sec_proc_ctrls.value = hw_info.sec_proc_ctrls.def_val;

    /* Print Control MSRs */
    V3_Print(core->vm_info, core, "CR0 MSR: req_val=%p, req_mask=%p\n", (void *)(addr_t)hw_info.cr0.req_val, (void *)(addr_t)hw_info.cr0.req_mask);
    V3_Print(core->vm_info, core, "CR4 MSR: req_val=%p, req_mask=%p\n", (void *)(addr_t)hw_info.cr4.req_val, (void *)(addr_t)hw_info.cr4.req_mask);
    /******* Setup Host State **********/

    /* Cache GDTR, IDTR, and TR in host struct */


    /********** Setup VMX Control Fields ***********/

    /* Add external interrupts, NMI exiting, and virtual NMI */
    vmx_state->pin_ctrls.nmi_exit = 1;
    vmx_state->pin_ctrls.virt_nmi = 1;
    vmx_state->pin_ctrls.ext_int_exit = 1;
    /* We enable the preemption timer by default so guest time can be measured accurately */
    if (avail_pin_ctrls.active_preempt_timer) {
        V3_Print(core->vm_info, core, "VMX Preemption Timer is available\n");
        vmx_state->pin_ctrls.active_preempt_timer = 1;
        vmx_state->exit_ctrls.save_preempt_timer = 1;
    }
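
    /* Note: per Intel's SDM, the preemption timer counts down at a fixed
     * fraction of the TSC rate (TSC >> N, where N comes from bits 4:0 of the
     * IA32_VMX_MISC MSR), and save_preempt_timer preserves the remaining count
     * in the VMCS on each exit. v3_vmx_enter() below reads the saved count to
     * estimate how long the guest actually ran. */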
    // We want the guest to trap to us when it halts
    vmx_state->pri_proc_ctrls.hlt_exit = 1;

    // The guest's CPUID reports that it does not have these instructions
    vmx_state->pri_proc_ctrls.monitor_exit = 1;
    vmx_state->pri_proc_ctrls.mwait_exit = 1;

    // We don't need to handle a pause, although this is where
    // we could pull out of a spin lock acquire or schedule to find its partner
    vmx_state->pri_proc_ctrls.pause_exit = 0;

    vmx_state->pri_proc_ctrls.tsc_offset = 1;
#ifdef V3_CONFIG_TIME_VIRTUALIZE_TSC
    vmx_state->pri_proc_ctrls.rdtsc_exit = 1;
#endif
    vmx_state->pri_proc_ctrls.use_io_bitmap = 1;
    vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_A_ADDR, (addr_t)V3_PAddr(core->vm_info->io_map.arch_data));
    vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_B_ADDR,
                                (addr_t)V3_PAddr(core->vm_info->io_map.arch_data) + PAGE_SIZE_4KB);

    vmx_state->pri_proc_ctrls.use_msr_bitmap = 1;
    vmx_ret |= check_vmcs_write(VMCS_MSR_BITMAP, (addr_t)V3_PAddr(core->vm_info->msr_map.arch_data));
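
    /* Per Intel's SDM: IO bitmap A covers ports 0x0000-0x7fff and bitmap B
     * covers 0x8000-0xffff, one bit per port (a set bit forces an IO exit).
     * The 4KB MSR bitmap similarly holds read/write bitmaps for the low
     * (0x0-0x1fff) and high (0xc0000000-0xc0001fff) MSR ranges; only MSRs
     * whose bit is set cause an exit. */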
    // Ensure the host runs in 64-bit mode on each VM EXIT
    vmx_state->exit_ctrls.host_64_on = 1;

    // Restore the host's EFER register on each VM EXIT
    vmx_state->exit_ctrls.ld_efer = 1;

    // Save/restore the guest's EFER register to/from the VMCS on VM EXIT/ENTRY
    vmx_state->exit_ctrls.save_efer = 1;
    vmx_state->entry_ctrls.ld_efer = 1;

    // Save/restore the guest's PAT the same way
    vmx_state->exit_ctrls.save_pat = 1;
    vmx_state->exit_ctrls.ld_pat = 1;
    vmx_state->entry_ctrls.ld_pat = 1;
    /* Temporary GPF trap */
    // vmx_state->excp_bmap.gp = 1;

    // Set up the guest's initial PAT field
    vmx_ret |= check_vmcs_write(VMCS_GUEST_PAT, 0x0007040600070406LL);
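
    /* 0x0007040600070406 is the architectural power-on default for IA32_PAT
     * (entries WB, WT, UC-, UC, repeated), so the guest starts with the same
     * memory-type mappings it would see on real hardware after reset. */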
    // Capture CR8 accesses so that we can keep the apic_tpr consistent
    vmx_state->pri_proc_ctrls.cr8_ld_exit = 1;
    vmx_state->pri_proc_ctrls.cr8_str_exit = 1;
    if (core->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug(core->vm_info, core, "Creating initial shadow page table\n");

        if (v3_init_passthrough_pts(core) == -1) {
            PrintError(core->vm_info, core, "Could not initialize passthrough page tables\n");
            return -1;
        }

#define CR0_PE 0x00000001
#define CR0_PG 0x80000000
#define CR0_WP 0x00010000 // To ensure mem hooks work
#define CR0_NE 0x00000020
        vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG | CR0_WP | CR0_NE));

        // Cause a VM EXIT whenever the CR4.VMXE or CR4.PAE bits are written
        vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);

        v3_activate_passthrough_pt(core);

        // vmx_state->pinbased_ctrls |= NMI_EXIT;

        vmx_state->pri_proc_ctrls.cr3_ld_exit = 1;
        vmx_state->pri_proc_ctrls.cr3_str_exit = 1;

        // Note that we intercept CR4.PAE writes
        // and have CR4 read-shadowed to the shadow pager's CR4

        vmx_state->pri_proc_ctrls.invlpg_exit = 1;

        /* Add page fault exits */
        vmx_state->excp_bmap.pf = 1;
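
        /* Under shadow paging every guest #PF must exit, so the shadow pager
         * can tell guest-induced faults apart from shadow-table misses. */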
        // Enable VMXAssist to emulate real mode for the BIOS
        v3_vmxassist_init(core, vmx_state);

        // Hook all accesses to the EFER register
        v3_hook_msr(core->vm_info, EFER_MSR,
                    &v3_handle_efer_read,
                    &v3_handle_efer_write,
                    core);
    } else if ((core->shdw_pg_mode == NESTED_PAGING) &&
               (v3_mach_type == V3_VMX_EPT_CPU)) {

#define CR0_PE 0x00000001
#define CR0_PG 0x80000000
#define CR0_WP 0x00010000 // To ensure mem hooks work
#define CR0_NE 0x00000020
        vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG | CR0_WP | CR0_NE));

        // vmx_state->pinbased_ctrls |= NMI_EXIT;

        // Cause a VM EXIT whenever the CR4.VMXE or CR4.PAE bits are written
        vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);

        /* Disable CR exits */
        vmx_state->pri_proc_ctrls.cr3_ld_exit = 0;
        vmx_state->pri_proc_ctrls.cr3_str_exit = 0;

        vmx_state->pri_proc_ctrls.invlpg_exit = 0;

        /* Add page fault exits */
        // vmx_state->excp_bmap.pf = 1; // This should never happen..., enabled to catch bugs

        // Enable VMXAssist to emulate real mode for the BIOS
        v3_vmxassist_init(core, vmx_state);

        /* Enable EPT */
        vmx_state->pri_proc_ctrls.sec_ctrls = 1; // Enable secondary proc controls
        vmx_state->sec_proc_ctrls.enable_ept = 1; // enable EPT paging

        if (v3_init_nested_paging_core(core, &hw_info) == -1) {
            PrintError(core->vm_info, core, "Error initializing EPT\n");
            return -1;
        }

        // Hook all accesses to the EFER register
        v3_hook_msr(core->vm_info, EFER_MSR, NULL, NULL, NULL);
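
        /* Hooking with NULL handlers claims the MSR while leaving it
         * unemulated; EFER is already saved/loaded through the VMCS
         * entry/exit controls configured above, so no trap is needed. */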
    } else if ((core->shdw_pg_mode == NESTED_PAGING) &&
               (v3_mach_type == V3_VMX_EPT_UG_CPU)) {

        // For now we will assume that unrestricted guest mode is assured w/ EPT

        core->vm_regs.rsp = 0x00;
        core->rip = 0xfff0;
        core->vm_regs.rdx = 0x00000f00;
        core->ctrl_regs.rflags = 0x00000002; // The reserved bit is always 1
        core->ctrl_regs.cr0 = 0x60010030;
        core->ctrl_regs.cr4 = 0x00002010; // Enable VMXE and PSE flags

        core->segments.cs.selector = 0xf000;
        core->segments.cs.limit = 0xffff;
        core->segments.cs.base = 0x0000000f0000LL;

        // (raw attributes = 0xf3)
        core->segments.cs.type = 0xb;
        core->segments.cs.system = 0x1;
        core->segments.cs.dpl = 0x0;
        core->segments.cs.present = 1;
        int i = 0;
        struct v3_segment * segregs [] = {&(core->segments.ss), &(core->segments.ds),
                                          &(core->segments.es), &(core->segments.fs),
                                          &(core->segments.gs), NULL};

        for (i = 0; segregs[i] != NULL; i++) {
            struct v3_segment * seg = segregs[i];

            seg->selector = 0x0000;
            // seg->base = seg->selector << 4;
            seg->base = 0x00000000;
            seg->limit = 0xffff;
            seg->type = 0x3;
            seg->system = 0x1;
            seg->dpl = 0x0;
            seg->present = 1;
            // seg->granularity = 1;
        }
        core->segments.gdtr.limit = 0x0000ffff;
        core->segments.gdtr.base = 0x0000000000000000LL;

        core->segments.idtr.limit = 0x0000ffff;
        core->segments.idtr.base = 0x0000000000000000LL;

        core->segments.ldtr.selector = 0x0000;
        core->segments.ldtr.limit = 0x0000ffff;
        core->segments.ldtr.base = 0x0000000000000000LL;
        core->segments.ldtr.type = 0x2;
        core->segments.ldtr.present = 1;

        core->segments.tr.selector = 0x0000;
        core->segments.tr.limit = 0x0000ffff;
        core->segments.tr.base = 0x0000000000000000LL;
        core->segments.tr.type = 0xb;
        core->segments.tr.present = 1;

        // core->dbg_regs.dr6 = 0x00000000ffff0ff0LL;
        core->dbg_regs.dr7 = 0x0000000000000400LL;
        vmx_state->pri_proc_ctrls.sec_ctrls = 1; // Enable secondary proc controls
        vmx_state->sec_proc_ctrls.enable_ept = 1; // enable EPT paging
        vmx_state->sec_proc_ctrls.unrstrct_guest = 1; // enable unrestricted guest operation
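
        /* Note: unrestricted guest is architecturally valid only when EPT is
         * also enabled. It lets the guest run in real mode or non-paged
         * protected mode directly, so no VMXAssist emulation is needed here. */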
        /* Disable the shadow paging controls */
        vmx_state->pri_proc_ctrls.cr3_ld_exit = 0;
        vmx_state->pri_proc_ctrls.cr3_str_exit = 0;

        vmx_state->pri_proc_ctrls.invlpg_exit = 0;

        // Cause a VM EXIT whenever the CR4.VMXE bit is written
        vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE);
#define CR0_NE 0x00000020
#define CR0_CD 0x40000000
        vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, CR0_NE | CR0_CD);
        ((struct cr0_32 *)&(core->shdw_pg_state.guest_cr0))->ne = 1;
        ((struct cr0_32 *)&(core->shdw_pg_state.guest_cr0))->cd = 0;

        if (v3_init_nested_paging_core(core, &hw_info) == -1) {
            PrintError(core->vm_info, core, "Error initializing EPT\n");
            return -1;
        }

        // Hook all accesses to the EFER register
        // v3_hook_msr(core->vm_info, EFER_MSR, &debug_efer_read, &debug_efer_write, core);
        v3_hook_msr(core->vm_info, EFER_MSR, NULL, NULL, NULL);
    } else {
        PrintError(core->vm_info, core, "Invalid virtual paging mode (pg_mode=%d) (mach_type=%d)\n", core->shdw_pg_mode, v3_mach_type);
        return -1;
    }
    // Set up the SYSCALL/SYSENTER MSRs in the load/store area

    // save STAR, LSTAR, FMASK, KERNEL_GS_BASE MSRs in MSR load/store area
    {
        struct vmcs_msr_save_area * msr_entries = NULL;
        int max_msrs = (hw_info.misc_info.max_msr_cache_size + 1) * 4;
        int msr_ret = 0;

        V3_Print(core->vm_info, core, "Setting up MSR load/store areas (max_msr_count=%d)\n", max_msrs);

        if (max_msrs < 4) {
            PrintError(core->vm_info, core, "Max MSR cache size is too small (%d)\n", max_msrs);
            return -1;
        }

        vmx_state->msr_area_paddr = (addr_t)V3_AllocPages(1); // need not be shadow-safe, not exposed to guest

        if (vmx_state->msr_area_paddr == (addr_t)NULL) {
            PrintError(core->vm_info, core, "could not allocate msr load/store area\n");
            return -1;
        }

        msr_entries = (struct vmcs_msr_save_area *)V3_VAddr((void *)(vmx_state->msr_area_paddr));
        vmx_state->msr_area = msr_entries; // cache in vmx_info
        memset(msr_entries, 0, PAGE_SIZE);

        msr_entries->guest_star.index = IA32_STAR_MSR;
        msr_entries->guest_lstar.index = IA32_LSTAR_MSR;
        msr_entries->guest_fmask.index = IA32_FMASK_MSR;
        msr_entries->guest_kern_gs.index = IA32_KERN_GS_BASE_MSR;

        msr_entries->host_star.index = IA32_STAR_MSR;
        msr_entries->host_lstar.index = IA32_LSTAR_MSR;
        msr_entries->host_fmask.index = IA32_FMASK_MSR;
        msr_entries->host_kern_gs.index = IA32_KERN_GS_BASE_MSR;
        msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_STORE_CNT, 4);
        msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_LOAD_CNT, 4);
        msr_ret |= check_vmcs_write(VMCS_ENTRY_MSR_LOAD_CNT, 4);

        msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_STORE_ADDR, (addr_t)V3_PAddr(msr_entries->guest_msrs));
        msr_ret |= check_vmcs_write(VMCS_ENTRY_MSR_LOAD_ADDR, (addr_t)V3_PAddr(msr_entries->guest_msrs));
        msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_LOAD_ADDR, (addr_t)V3_PAddr(msr_entries->host_msrs));
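
        /* Per Intel's SDM, each entry in these areas is 16 bytes: a 32-bit
         * MSR index, 32 reserved bits, and the 64-bit value. The guest's four
         * MSRs are stored on every exit and reloaded on every entry, while
         * the host's copies (same page, separate array) are loaded on exit. */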
        msr_ret |= v3_hook_msr(core->vm_info, IA32_STAR_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, IA32_LSTAR_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, IA32_FMASK_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, IA32_KERN_GS_BASE_MSR, NULL, NULL, NULL);


        // IMPORTANT: These MSRs appear to be cached by the hardware....
        msr_ret |= v3_hook_msr(core->vm_info, SYSENTER_CS_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, SYSENTER_ESP_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, SYSENTER_EIP_MSR, NULL, NULL, NULL);

        msr_ret |= v3_hook_msr(core->vm_info, FS_BASE_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, GS_BASE_MSR, NULL, NULL, NULL);

        msr_ret |= v3_hook_msr(core->vm_info, IA32_PAT_MSR, NULL, NULL, NULL);

        // Not sure what to do about this... Does not appear to be an explicit hardware cached version...
        msr_ret |= v3_hook_msr(core->vm_info, IA32_CSTAR_MSR, NULL, NULL, NULL);

        if (msr_ret != 0) {
            PrintError(core->vm_info, core, "Error configuring MSR save/restore area\n");
            return -1;
        }
    }
    /* Sanity check ctrl/reg fields against hw_defaults */


    /*** Write all the info to the VMCS ***/

    {
        // IS THIS NECESSARY???
#define DEBUGCTL_MSR 0x1d9
        struct v3_msr tmp_msr;
        v3_get_msr(DEBUGCTL_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
        vmx_ret |= check_vmcs_write(VMCS_GUEST_DBG_CTL, tmp_msr.value);
        core->dbg_regs.dr7 = 0x400;
    }
#ifdef __V3_64BIT__
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, (addr_t)0xffffffffffffffffULL);
#else
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, (addr_t)0xffffffffUL);
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR_HIGH, (addr_t)0xffffffffUL);
#endif
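
    /* Per Intel's SDM, the VMCS link pointer must be all ones when VMCS
     * shadowing is not in use; anything else makes the next VM entry fail
     * with an invalid-control-field error. */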
    if (vmx_ret != VMX_SUCCESS) {
        PrintError(core->vm_info, core, "Error configuring VMX\n");
        return -1;
    }

    if (v3_update_vmcs_ctrl_fields(core)) {
        PrintError(core->vm_info, core, "Could not write control fields!\n");
        return -1;
    }

    if (v3_update_vmcs_host_state(core)) {
        PrintError(core->vm_info, core, "Could not write host state\n");
        return -1;
    }

    // Reenable global interrupts now that the vm state is initialized.
    // If another VM kicks us off this CPU, it'll update our vmx state
    // so that we know to reload ourselves.
    v3_enable_ints();

    return 0;
}
static void __init_vmx_vmcs(void * arg) {
    struct guest_info * core = arg;
    struct vmx_data * vmx_state = NULL;
    int vmx_ret = 0;

    vmx_state = (struct vmx_data *)V3_Malloc(sizeof(struct vmx_data));

    if (!vmx_state) {
        PrintError(core->vm_info, core, "Unable to allocate in initializing vmx vmcs\n");
        return;
    }

    memset(vmx_state, 0, sizeof(struct vmx_data));

    PrintDebug(core->vm_info, core, "vmx_data pointer: %p\n", (void *)vmx_state);

    PrintDebug(core->vm_info, core, "Allocating VMCS\n");
    vmx_state->vmcs_ptr_phys = allocate_vmcs();

    PrintDebug(core->vm_info, core, "VMCS pointer: %p\n", (void *)(vmx_state->vmcs_ptr_phys));

    core->vmm_data = vmx_state;
    vmx_state->state = VMX_UNLAUNCHED;

    PrintDebug(core->vm_info, core, "Initializing VMCS (addr=%p)\n", core->vmm_data);

    // TODO: Fix vmcs fields so they're 32-bit

    PrintDebug(core->vm_info, core, "Clearing VMCS: %p\n", (void *)vmx_state->vmcs_ptr_phys);
    vmx_ret = vmcs_clear(vmx_state->vmcs_ptr_phys);

    if (vmx_ret != VMX_SUCCESS) {
        PrintError(core->vm_info, core, "VMCLEAR failed\n");
        return;
    }

    if (core->vm_info->vm_class == V3_PC_VM) {
        PrintDebug(core->vm_info, core, "Initializing VMCS\n");
        if (init_vmcs_bios(core, vmx_state) == -1) {
            PrintError(core->vm_info, core, "Error initializing VMCS to BIOS state\n");
            return;
        }
    } else {
        PrintError(core->vm_info, core, "Invalid VM Class\n");
        return;
    }

    PrintDebug(core->vm_info, core, "Serializing VMCS: %p\n", (void *)vmx_state->vmcs_ptr_phys);
    vmx_ret = vmcs_clear(vmx_state->vmcs_ptr_phys);

    if (vmx_ret != VMX_SUCCESS) {
        PrintError(core->vm_info, core, "VMCS Clear failed\n");
        return;
    }

    core->core_run_state = CORE_STOPPED;
}
int v3_init_vmx_vmcs(struct guest_info * core, v3_vm_class_t vm_class) {
    extern v3_cpu_arch_t v3_cpu_types[];

    if (v3_cpu_types[V3_Get_CPU()] == V3_INVALID_CPU) {
        int i = 0;

        for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
            if (v3_cpu_types[i] != V3_INVALID_CPU) {
                break;
            }
        }

        if (i == V3_CONFIG_MAX_CPUS) {
            PrintError(core->vm_info, core, "Could not find VALID CPU for VMX guest initialization\n");
            return -1;
        }

        V3_Call_On_CPU(i, __init_vmx_vmcs, core);

    } else {
        __init_vmx_vmcs(core);
    }

    if (core->core_run_state != CORE_STOPPED) {
        PrintError(core->vm_info, core, "Error initializing VMX Core\n");
        return -1;
    }

    return 0;
}
int v3_deinit_vmx_vmcs(struct guest_info * core) {
    struct vmx_data * vmx_state = core->vmm_data;

    V3_FreePages((void *)(vmx_state->vmcs_ptr_phys), 1);
    V3_FreePages(V3_PAddr(vmx_state->msr_area), 1);

    V3_Free(vmx_state);

    return 0;
}
#ifdef V3_CONFIG_CHECKPOINT
/*
 * JRL: This is broken
 */
int v3_vmx_save_core(struct guest_info * core, void * ctx){
    struct vmx_data * vmx_info = (struct vmx_data *)(core->vmm_data);

    // note that the vmcs pointer is an HPA, but we need an HVA
    if (v3_chkpt_save(ctx, "vmcs_data", PAGE_SIZE_4KB,
                      V3_VAddr((void *)(vmx_info->vmcs_ptr_phys)))) {
        PrintError(core->vm_info, core, "Could not save vmcs data for VMX\n");
        return -1;
    }

    return 0;
}
int v3_vmx_load_core(struct guest_info * core, void * ctx){
    struct vmx_data * vmx_info = (struct vmx_data *)(core->vmm_data);
    struct cr0_32 * shadow_cr0;
    addr_t vmcs_page_paddr;  // HPA

    vmcs_page_paddr = (addr_t)V3_AllocPages(1); // need not be shadow-safe, not exposed to guest

    if (!vmcs_page_paddr) {
        PrintError(core->vm_info, core, "Could not allocate space for a vmcs in VMX\n");
        return -1;
    }

    if (v3_chkpt_load(ctx, "vmcs_data", PAGE_SIZE_4KB,
                      V3_VAddr((void *)vmcs_page_paddr)) == -1) {
        PrintError(core->vm_info, core, "Could not load vmcs data for VMX\n");
        V3_FreePages((void *)vmcs_page_paddr, 1);
        return -1;
    }

    vmcs_clear(vmx_info->vmcs_ptr_phys);

    // Probably need to delete the old one...
    V3_FreePages((void *)(vmx_info->vmcs_ptr_phys), 1);

    vmx_info->vmcs_ptr_phys = vmcs_page_paddr;

    vmcs_load(vmcs_page_paddr);

    v3_vmx_save_vmcs(core);

    shadow_cr0 = (struct cr0_32 *)&(core->ctrl_regs.cr0);

    /* Get the CPU mode to set the guest_ia32e entry ctrl */

    if (core->shdw_pg_mode == SHADOW_PAGING) {
        if (v3_get_vm_mem_mode(core) == VIRTUAL_MEM) {
            if (v3_activate_shadow_pt(core) == -1) {
                PrintError(core->vm_info, core, "Failed to activate shadow page tables\n");
                return -1;
            }
        } else {
            if (v3_activate_passthrough_pt(core) == -1) {
                PrintError(core->vm_info, core, "Failed to activate passthrough page tables\n");
                return -1;
            }
        }
    }

    return 0;
}
#endif
void v3_flush_vmx_vm_core(struct guest_info * core) {
    struct vmx_data * vmx_info = (struct vmx_data *)(core->vmm_data);
    vmcs_clear(vmx_info->vmcs_ptr_phys);
    vmx_info->state = VMX_UNLAUNCHED;
}
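
/* VMCLEAR both flushes any VMCS state cached on the current CPU out to the
 * in-memory VMCS region and resets the launch state, so the next entry on
 * this VMCS must use VMLAUNCH again -- hence VMX_UNLAUNCHED. This is what
 * makes it safe to migrate the core to a different physical CPU. */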
static int update_irq_exit_state(struct guest_info * info) {
    struct vmx_exit_idt_vec_info idt_vec_info;

    check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));

    if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 0)) {
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        V3_Print(info->vm_info, info, "Calling v3_injecting_intr\n");
#endif
        info->intr_core_state.irq_started = 0;
        v3_injecting_intr(info, info->intr_core_state.irq_vector, V3_EXTERNAL_IRQ);
    }

    return 0;
}
static int update_irq_entry_state(struct guest_info * info) {
    struct vmx_exit_idt_vec_info idt_vec_info;
    struct vmcs_interrupt_state intr_core_state;
    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);

    check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));
    check_vmcs_read(VMCS_GUEST_INT_STATE, &(intr_core_state));

    /* Check for pending exceptions to inject */
    if (v3_excp_pending(info)) {
        struct vmx_entry_int_info int_info;
        int_info.value = 0;

        // In VMX, almost every exception is hardware
        // (software exceptions are pretty much only for breakpoint or overflow)
        int_info.type = 3;
        int_info.vector = v3_get_excp_number(info);

        if (info->excp_state.excp_error_code_valid) {
            check_vmcs_write(VMCS_ENTRY_EXCP_ERR, info->excp_state.excp_error_code);
            int_info.error_code = 1;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
            V3_Print(info->vm_info, info, "Injecting exception %d with error code %x\n",
                     int_info.vector, info->excp_state.excp_error_code);
#endif
        }

        int_info.valid = 1;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        V3_Print(info->vm_info, info, "Injecting exception %d (EIP=%p)\n", int_info.vector, (void *)(addr_t)info->rip);
#endif

        check_vmcs_write(VMCS_ENTRY_INT_INFO, int_info.value);

        v3_injecting_excp(info, int_info.vector);
    } else if ((((struct rflags *)&(info->ctrl_regs.rflags))->intr == 1) &&
               (intr_core_state.val == 0)) {

        if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 1)) {

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
            V3_Print(info->vm_info, info, "IRQ pending from previous injection\n");
#endif

            // Copy the IDT vectoring info over to reinject the old interrupt
            if (idt_vec_info.error_code == 1) {
                uint32_t err_code = 0;

                check_vmcs_read(VMCS_IDT_VECTOR_ERR, &err_code);
                check_vmcs_write(VMCS_ENTRY_EXCP_ERR, err_code);
            }

            idt_vec_info.undef = 0;
            check_vmcs_write(VMCS_ENTRY_INT_INFO, idt_vec_info.value);
        } else {
            struct vmx_entry_int_info ent_int;
            ent_int.value = 0;

            switch (v3_intr_pending(info)) {
                case V3_EXTERNAL_IRQ: {
                    int irq = v3_get_intr(info);

                    if (irq < 0) {
                        break;
                    }

                    info->intr_core_state.irq_vector = irq;
                    ent_int.vector = info->intr_core_state.irq_vector;
                    ent_int.type = 0;
                    ent_int.error_code = 0;
                    ent_int.valid = 1;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
                    V3_Print(info->vm_info, info, "Injecting Interrupt %d at exit %u (EIP=%p)\n",
                             info->intr_core_state.irq_vector,
                             (uint32_t)info->num_exits,
                             (void *)(addr_t)info->rip);
#endif

                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
                    info->intr_core_state.irq_started = 1;

                    break;
                }
                case V3_NMI:
                    PrintDebug(info->vm_info, info, "Injecting NMI\n");
                    ent_int.type = 2;
                    ent_int.vector = 2;
                    ent_int.valid = 1;
                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
                    break;
                case V3_SOFTWARE_INTR:
                    PrintDebug(info->vm_info, info, "Injecting software interrupt\n");
                    ent_int.type = 4;
                    ent_int.vector = info->intr_core_state.swintr_vector;
                    ent_int.valid = 1;
                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
                    break;
                case V3_VIRTUAL_IRQ:
                    // Not sure what to do here; Intel doesn't have virtual IRQs
                    // (may be the same as external interrupts/IRQs)
                    break;
                case V3_INVALID_INTR:
                default:
                    break;
            }
        }
    } else if ((v3_intr_pending(info)) && (vmx_info->pri_proc_ctrls.int_wndw_exit == 0)) {
        // An interrupt is pending while IF=0: enable interrupt-window exiting
        // so we get an exit as soon as the guest sets IF=1
        uint32_t instr_len;

        check_vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        V3_Print(info->vm_info, info, "Enabling Interrupt-Window exiting: %d\n", instr_len);
#endif

        vmx_info->pri_proc_ctrls.int_wndw_exit = 1;
        check_vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
    }

    return 0;
}
static struct vmx_exit_info exit_log[10];
static uint64_t rip_log[10];
static void print_exit_log(struct guest_info * info) {
    int cnt = info->num_exits % 10;
    int i = 0;

    V3_Print(info->vm_info, info, "\nExit Log (%d total exits):\n", (uint32_t)info->num_exits);

    for (i = 0; i < 10; i++) {
        struct vmx_exit_info * tmp = &exit_log[cnt];

        V3_Print(info->vm_info, info, "%d:\texit_reason = %p\n", i, (void *)(addr_t)tmp->exit_reason);
        V3_Print(info->vm_info, info, "\texit_qual = %p\n", (void *)tmp->exit_qual);
        V3_Print(info->vm_info, info, "\tint_info = %p\n", (void *)(addr_t)tmp->int_info);
        V3_Print(info->vm_info, info, "\tint_err = %p\n", (void *)(addr_t)tmp->int_err);
        V3_Print(info->vm_info, info, "\tinstr_info = %p\n", (void *)(addr_t)tmp->instr_info);
        V3_Print(info->vm_info, info, "\tguest_linear_addr = %p\n", (void *)(addr_t)tmp->guest_linear_addr);
        V3_Print(info->vm_info, info, "\tRIP = %p\n", (void *)rip_log[cnt]);

        cnt--;

        if (cnt == -1) {
            cnt = 9;
        }
    }
}
int
v3_vmx_config_tsc_virtualization(struct guest_info * info) {
    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);

    if (info->time_state.flags & VM_TIME_TRAP_RDTSC) {
        if (!vmx_info->pri_proc_ctrls.rdtsc_exit) {
            vmx_info->pri_proc_ctrls.rdtsc_exit = 1;
            check_vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
        }
    } else {
        sint64_t tsc_offset;
        uint32_t tsc_offset_low, tsc_offset_high;

        if (vmx_info->pri_proc_ctrls.rdtsc_exit) {
            vmx_info->pri_proc_ctrls.rdtsc_exit = 0;
            check_vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
        }

        if (info->time_state.flags & VM_TIME_TSC_PASSTHROUGH) {
            tsc_offset = 0;
        } else {
            tsc_offset = v3_tsc_host_offset(&info->time_state);
        }

        tsc_offset_high = (uint32_t)((tsc_offset >> 32) & 0xffffffff);
        tsc_offset_low = (uint32_t)(tsc_offset & 0xffffffff);

        check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
        check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);
    }

    return 0;
}
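
/* With rdtsc_exit clear and tsc_offset set, hardware computes guest_tsc =
 * host_tsc + VMCS_TSC_OFFSET on every guest RDTSC without an exit; a zero
 * offset (the TSC passthrough case) exposes the host TSC directly. */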
/*
 * CAUTION and DANGER!!!
 *
 * The VMCS CANNOT(!!) be accessed outside of the cli/sti calls inside this function.
 * When executing a symbiotic call, the VMCS WILL be overwritten, so any dependencies
 * on its contents will cause things to break. The contents at the time of the exit WILL
 * change before the exit handler is executed.
 */
int v3_vmx_enter(struct guest_info * info) {
    int ret = 0;
    struct vmx_exit_info exit_info;
    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);
    uint64_t guest_cycles = 0;

    // Conditionally yield the CPU if the timeslice has expired
    v3_schedule(info);

#ifdef V3_CONFIG_MEM_TRACK
    v3_mem_track_entry(info);
#endif

#ifdef V3_CONFIG_HVM
    v3_handle_hvm_entry(info);
#endif

    // Update timer devices late after being in the VM so that as much
    // of the time in the VM is accounted for as possible. Also do it before
    // updating IRQ entry state so that any interrupts the timers raise get
    // handled on the next VM entry.
    v3_advance_time(info, NULL);
    v3_update_timers(info);

    // disable global interrupts for vm state transition
    v3_disable_ints();

    if (vmcs_store() != vmx_info->vmcs_ptr_phys) {
        vmcs_clear(vmx_info->vmcs_ptr_phys);
        vmcs_load(vmx_info->vmcs_ptr_phys);
        vmx_info->state = VMX_UNLAUNCHED;
    }

    v3_vmx_restore_vmcs(info);

#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_entry_state(info);
    }
#else
    update_irq_entry_state(info);
#endif

    {
        addr_t guest_cr3;
        vmcs_read(VMCS_GUEST_CR3, &guest_cr3);
        vmcs_write(VMCS_GUEST_CR3, guest_cr3);
    }

    // Perform last-minute time setup prior to entering the VM
    v3_vmx_config_tsc_virtualization(info);

    if (v3_update_vmcs_host_state(info)) {
        v3_enable_ints();
        PrintError(info->vm_info, info, "Could not write host state\n");
        return -1;
    }

    if (vmx_info->pin_ctrls.active_preempt_timer) {
        /* The preemption timer is active: program it with either the next
         * timeout or the maximum window */
        uint32_t preempt_window = 0xffffffff;

        if (info->timeouts.timeout_active) {
            preempt_window = info->timeouts.next_timeout;
        }

        check_vmcs_write(VMCS_PREEMPT_TIMER, preempt_window);
    }
    V3_FP_ENTRY_RESTORE(info);

    {
        uint64_t entry_tsc = 0;
        uint64_t exit_tsc = 0;

#ifdef V3_CONFIG_PWRSTAT_TELEMETRY
        v3_pwrstat_telemetry_enter(info);
#endif

#ifdef V3_CONFIG_PMU_TELEMETRY
        v3_pmu_telemetry_enter(info);
#endif

        if (vmx_info->state == VMX_UNLAUNCHED) {
            vmx_info->state = VMX_LAUNCHED;
            rdtscll(entry_tsc);
            ret = v3_vmx_launch(&(info->vm_regs), info, &(info->ctrl_regs));
            rdtscll(exit_tsc);
        } else {
            V3_ASSERT(info->vm_info, info, vmx_info->state != VMX_UNLAUNCHED);
            rdtscll(entry_tsc);
            ret = v3_vmx_resume(&(info->vm_regs), info, &(info->ctrl_regs));
            rdtscll(exit_tsc);
        }

        guest_cycles = exit_tsc - entry_tsc;

#ifdef V3_CONFIG_PMU_TELEMETRY
        v3_pmu_telemetry_exit(info);
#endif

#ifdef V3_CONFIG_PWRSTAT_TELEMETRY
        v3_pwrstat_telemetry_exit(info);
#endif
    }
    // PrintDebug(info->vm_info, info, "VMX Exit: ret=%d\n", ret);

    if (ret != VMX_SUCCESS) {
        uint32_t error = 0;
        vmcs_read(VMCS_INSTR_ERR, &error);

        v3_enable_ints();

        PrintError(info->vm_info, info, "VMENTRY Error: %d (launch_ret = %d)\n", error, ret);
        return -1;
    }
    V3_FP_EXIT_SAVE(info);

    /* If we have the preemption timer, use it to get a more accurate guest cycle count */
    if (vmx_info->pin_ctrls.active_preempt_timer) {
        uint32_t cycles_left = 0;
        check_vmcs_read(VMCS_PREEMPT_TIMER, &(cycles_left));

        if (info->timeouts.timeout_active) {
            guest_cycles = info->timeouts.next_timeout - cycles_left;
        } else {
            guest_cycles = 0xffffffff - cycles_left;
        }
    }
    // Immediate exit from VM time bookkeeping
    v3_advance_time(info, &guest_cycles);

    /* Update guest state */
    v3_vmx_save_vmcs(info);

    // info->cpl = info->segments.cs.selector & 0x3;

    info->mem_mode = v3_get_vm_mem_mode(info);
    info->cpu_mode = v3_get_vm_cpu_mode(info);
    check_vmcs_read(VMCS_EXIT_INSTR_LEN, &(exit_info.instr_len));
    check_vmcs_read(VMCS_EXIT_INSTR_INFO, &(exit_info.instr_info));
    check_vmcs_read(VMCS_EXIT_REASON, &(exit_info.exit_reason));
    check_vmcs_read(VMCS_EXIT_QUAL, &(exit_info.exit_qual));
    check_vmcs_read(VMCS_EXIT_INT_INFO, &(exit_info.int_info));
    check_vmcs_read(VMCS_EXIT_INT_ERR, &(exit_info.int_err));
    check_vmcs_read(VMCS_GUEST_LINEAR_ADDR, &(exit_info.guest_linear_addr));

    if (info->shdw_pg_mode == NESTED_PAGING) {
        check_vmcs_read(VMCS_GUEST_PHYS_ADDR, &(exit_info.ept_fault_addr));
    } else {
        exit_info.ept_fault_addr = 0;
    }
    // PrintDebug(info->vm_info, info, "VMX Exit taken, id-qual: %u-%lu\n", exit_info.exit_reason, exit_info.exit_qual);

    exit_log[info->num_exits % 10] = exit_info;
    rip_log[info->num_exits % 10] = get_addr_linear(info, info->rip, &(info->segments.cs));
#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_exit_state(info);
    }
#else
    update_irq_exit_state(info);
#endif
    if (exit_info.exit_reason == VMX_EXIT_INTR_WINDOW) {
        // This exit's only job is to tell us that interrupt injection is
        // possible again, so turn interrupt-window exiting back off
        vmcs_read(VMCS_PROC_CTRLS, &(vmx_info->pri_proc_ctrls.value));
        vmx_info->pri_proc_ctrls.int_wndw_exit = 0;
        vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        V3_Print(info->vm_info, info, "Interrupts available again! (RIP=%llx)\n", info->rip);
#endif
    }
    // Lastly we check for an NMI exit, and reinject if so
    {
        struct vmx_basic_exit_info * basic_info = (struct vmx_basic_exit_info *)&(exit_info.exit_reason);

        if (basic_info->reason == VMX_EXIT_INFO_EXCEPTION_OR_NMI) {
            if ((uint8_t)exit_info.int_info == 2) {
                // vector 2 == NMI: flag it for reinjection into the guest
            }
        }
    }
    // reenable global interrupts after vm exit
    v3_enable_ints();

    // Conditionally yield the CPU if the timeslice has expired
    v3_schedule(info);

    v3_advance_time(info, NULL);
    v3_update_timers(info);

    if (v3_handle_vmx_exit(info, &exit_info) == -1) {
        PrintError(info->vm_info, info, "Error in VMX exit handler (Exit reason=%x)\n", exit_info.exit_reason);
        return -1;
    }

    if (info->timeouts.timeout_active) {
        /* Check to see if any timeouts have expired */
        v3_handle_timeouts(info, guest_cycles);
    }
#ifdef V3_CONFIG_HVM
    v3_handle_hvm_exit(info);
#endif

#ifdef V3_CONFIG_MEM_TRACK
    v3_mem_track_exit(info);
#endif

    return 0;
}
int v3_start_vmx_guest(struct guest_info * info) {

    PrintDebug(info->vm_info, info, "Starting VMX core %u\n", info->vcpu_id);

#ifdef V3_CONFIG_MULTIBOOT
    if (v3_setup_multiboot_core_for_boot(info)) {
        PrintError(info->vm_info, info, "Failed to setup Multiboot core...\n");
        return -1;
    }
#endif

#ifdef V3_CONFIG_HVM
    if (v3_setup_hvm_hrt_core_for_boot(info)) {
        PrintError(info->vm_info, info, "Failed to setup HRT core...\n");
        return -1;
    }
#endif
    if (info->core_run_state == CORE_STOPPED) {
        if (info->vcpu_id == 0) {
            info->core_run_state = CORE_RUNNING;
        } else {
            PrintDebug(info->vm_info, info, "VMX core %u: Waiting for core initialization\n", info->vcpu_id);

            V3_NO_WORK(info);

            while (info->core_run_state == CORE_STOPPED) {

                if (info->vm_info->run_state == VM_STOPPED) {
                    // The VM was stopped before this core was initialized.
                    return 0;
                }

                V3_STILL_NO_WORK(info);
                // PrintDebug(info->vm_info, info, "VMX core %u: still waiting for INIT\n", info->vcpu_id);
            }

            V3_HAVE_WORK_AGAIN(info);

            PrintDebug(info->vm_info, info, "VMX core %u initialized\n", info->vcpu_id);

            // We'll be paranoid about race conditions here
            v3_wait_at_barrier(info);
        }
    }
    PrintDebug(info->vm_info, info, "VMX core %u: I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n",
               info->vcpu_id, info->segments.cs.selector, (void *)(info->segments.cs.base),
               info->segments.cs.limit, (void *)(info->rip));

    PrintDebug(info->vm_info, info, "VMX core %u: Launching VMX VM on logical core %u\n", info->vcpu_id, info->pcpu_id);

    v3_start_time(info);

    while (1) {

        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }
#ifdef V3_CONFIG_HVM
        if (v3_handle_hvm_reset(info) > 0) {
            continue;
        }
#endif

#ifdef V3_CONFIG_MULTIBOOT
        if (v3_handle_multiboot_reset(info) > 0) {
            continue;
        }
#endif

#ifdef V3_CONFIG_PMU_TELEMETRY
        v3_pmu_telemetry_start(info);
#endif

#ifdef V3_CONFIG_PWRSTAT_TELEMETRY
        v3_pwrstat_telemetry_start(info);
#endif
        if (v3_vmx_enter(info) == -1) {
            addr_t host_addr;
            addr_t linear_addr = 0;

            info->vm_info->run_state = VM_ERROR;

            V3_Print(info->vm_info, info, "VMX core %u: VMX ERROR!!\n", info->vcpu_id);
            v3_print_guest_state(info);
            V3_Print(info->vm_info, info, "VMX core %u\n", info->vcpu_id);

            linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));

            if (info->mem_mode == PHYSICAL_MEM) {
                if (v3_gpa_to_hva(info, linear_addr, &host_addr)) {
                    PrintError(info->vm_info, info, "Cannot translate address\n");
                    return -1;
                }
            } else if (info->mem_mode == VIRTUAL_MEM) {
                if (v3_gva_to_hva(info, linear_addr, &host_addr)) {
                    PrintError(info->vm_info, info, "Cannot translate address\n");
                    return -1;
                }
            }

            V3_Print(info->vm_info, info, "VMX core %u: Host Address of rip = 0x%p\n", info->vcpu_id, (void *)host_addr);

            V3_Print(info->vm_info, info, "VMX core %u: Instr (15 bytes) at %p:\n", info->vcpu_id, (void *)host_addr);
            v3_dump_mem((uint8_t *)host_addr, 15);

            v3_print_stack(info);

            print_exit_log(info);

            return -1;
        }
        v3_wait_at_barrier(info);

        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }

        if ((info->num_exits % 5000) == 0) {
            V3_Print(info->vm_info, info, "VMX Exit number %d\n", (uint32_t)info->num_exits);
        }
    }
#ifdef V3_CONFIG_PMU_TELEMETRY
    v3_pmu_telemetry_end(info);
#endif

#ifdef V3_CONFIG_PWRSTAT_TELEMETRY
    v3_pwrstat_telemetry_end(info);
#endif

    return 0;
}
#define VMX_FEATURE_CONTROL_MSR 0x0000003a
#define CPUID_VMX_FEATURES 0x00000005  /* LOCK and VMXON */
#define CPUID_1_ECX_VTXFLAG 0x00000020
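
/* CPUID.1:ECX bit 5 (0x20) advertises VMX support. In IA32_FEATURE_CONTROL
 * (MSR 0x3a), bit 0 is the BIOS lock bit and bit 2 enables VMXON outside of
 * SMX operation; both must be set (mask 0x5) for VMXON to succeed here. */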
int v3_is_vmx_capable() {
    v3_msr_t feature_msr;
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(0x1, &eax, &ebx, &ecx, &edx);

    PrintDebug(VM_NONE, VCORE_NONE, "ECX: 0x%x\n", ecx);

    if (ecx & CPUID_1_ECX_VTXFLAG) {
        v3_get_msr(VMX_FEATURE_CONTROL_MSR, &(feature_msr.hi), &(feature_msr.lo));

        PrintDebug(VM_NONE, VCORE_NONE, "MSRREGlow: 0x%.8x\n", feature_msr.lo);

        if ((feature_msr.lo & CPUID_VMX_FEATURES) != CPUID_VMX_FEATURES) {
            PrintDebug(VM_NONE, VCORE_NONE, "VMX is locked -- enable in the BIOS\n");
            return 0;
        }

    } else {
        PrintDebug(VM_NONE, VCORE_NONE, "VMX not supported on this CPU\n");
        return 0;
    }

    return 1;
}
int v3_reset_vmx_vm_core(struct guest_info * core, addr_t rip) {

    if ((core->shdw_pg_mode == NESTED_PAGING) &&
        (v3_mach_type == V3_VMX_EPT_UG_CPU)) {
        // Unrestricted guest: restart directly at the SIPI vector
        core->rip = 0;
        core->segments.cs.selector = rip << 8;
        core->segments.cs.limit = 0xffff;
        core->segments.cs.base = rip << 12;
    } else {
        core->vm_regs.rdx = core->vcpu_id;
        core->vm_regs.rbx = rip;
    }

    return 0;
}
void v3_init_vmx_cpu(int cpu_id) {
    addr_t vmx_on_region = 0;
    extern v3_cpu_arch_t v3_mach_type;
    extern v3_cpu_arch_t v3_cpu_types[];

    if (v3_mach_type == V3_INVALID_CPU) {
        if (v3_init_vmx_hw(&hw_info) == -1) {
            PrintError(VM_NONE, VCORE_NONE, "Could not initialize VMX hardware features on cpu %d\n", cpu_id);
            return;
        }
    }

    enable_vmx();

    // Setup VMXON Region
    vmx_on_region = allocate_vmcs();

    if (vmx_on(vmx_on_region) == VMX_SUCCESS) {
        V3_Print(VM_NONE, VCORE_NONE, "VMX Enabled\n");
        host_vmcs_ptrs[cpu_id] = vmx_on_region;
    } else {
        V3_Print(VM_NONE, VCORE_NONE, "VMX already enabled\n");
        V3_FreePages((void *)vmx_on_region, 1);
    }

    PrintDebug(VM_NONE, VCORE_NONE, "VMXON pointer: 0x%p\n", (void *)host_vmcs_ptrs[cpu_id]);
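
    /* The VMXON region uses the same size/revision layout as a VMCS, so
     * allocate_vmcs() is reused for it; host_vmcs_ptrs[] remembers the region
     * so v3_deinit_vmx_cpu() can execute VMXOFF and free it later. */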
    {
        struct vmx_sec_proc_ctrls sec_proc_ctrls;
        sec_proc_ctrls.value = v3_vmx_get_ctrl_features(&(hw_info.sec_proc_ctrls));

        if (sec_proc_ctrls.enable_ept == 0) {
            V3_Print(VM_NONE, VCORE_NONE, "VMX EPT (Nested) Paging not supported\n");
            v3_cpu_types[cpu_id] = V3_VMX_CPU;
        } else if (sec_proc_ctrls.unrstrct_guest == 0) {
            V3_Print(VM_NONE, VCORE_NONE, "VMX EPT (Nested) Paging supported\n");
            v3_cpu_types[cpu_id] = V3_VMX_EPT_CPU;
        } else {
            V3_Print(VM_NONE, VCORE_NONE, "VMX EPT (Nested) Paging + Unrestricted guest supported\n");
            v3_cpu_types[cpu_id] = V3_VMX_EPT_UG_CPU;
        }
    }
}
void v3_deinit_vmx_cpu(int cpu_id) {
    extern v3_cpu_arch_t v3_cpu_types[];
    v3_cpu_types[cpu_id] = V3_INVALID_CPU;

    if (host_vmcs_ptrs[cpu_id] != 0) {
        V3_Print(VM_NONE, VCORE_NONE, "Disabling VMX\n");

        if (vmx_off() != VMX_SUCCESS) {
            PrintError(VM_NONE, VCORE_NONE, "Error executing VMXOFF\n");
        }

        V3_FreePages((void *)host_vmcs_ptrs[cpu_id], 1);

        host_vmcs_ptrs[cpu_id] = 0;
    }
}