/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2011, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2011, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
#include <palacios/vmx.h>
#include <palacios/vmm.h>
#include <palacios/vmx_handler.h>
#include <palacios/vmcs.h>
#include <palacios/vmx_lowlevel.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_config.h>
#include <palacios/vmm_time.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_direct_paging.h>
#include <palacios/vmx_io.h>
#include <palacios/vmx_msr.h>
#include <palacios/vmm_decoder.h>
#include <palacios/vmm_barrier.h>
#ifdef V3_CONFIG_CHECKPOINT
#include <palacios/vmm_checkpoint.h>
#endif

#include <palacios/vmx_ept.h>
#include <palacios/vmx_assist.h>
#include <palacios/vmx_hw_info.h>
#ifndef V3_CONFIG_DEBUG_VMX
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
/* These fields contain the hardware feature sets supported by the local CPU */
static struct vmx_hw_info hw_info;

extern v3_cpu_arch_t v3_cpu_types[];

static addr_t host_vmcs_ptrs[V3_CONFIG_MAX_CPUS] = { [0 ... V3_CONFIG_MAX_CPUS - 1] = 0};

extern int v3_vmx_launch(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
extern int v3_vmx_resume(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
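
/* Thin wrappers around the raw VMCS accessors: they log the symbolic field
 * name on failure so callers can simply OR the return codes together. */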
static int inline check_vmcs_write(vmcs_field_t field, addr_t val) {
    int ret = 0;

    ret = vmcs_write(field, val);

    if (ret != VMX_SUCCESS) {
        PrintError("VMWRITE error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
        return 1;
    }

    return 0;
}
static int inline check_vmcs_read(vmcs_field_t field, void * val) {
    int ret = 0;

    ret = vmcs_read(field, val);

    if (ret != VMX_SUCCESS) {
        PrintError("VMREAD error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
    }

    return ret;
}
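
/* Allocate and zero a single page for a VMCS (also used for the VMXON region)
 * and stamp it with the revision ID reported by the hardware. Returns the
 * physical address expected by VMPTRLD/VMXON. */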
static addr_t allocate_vmcs() {
    struct vmcs_data * vmcs_page = NULL;

    PrintDebug("Allocating page\n");

    vmcs_page = (struct vmcs_data *)V3_VAddr(V3_AllocPages(1));
    memset(vmcs_page, 0, 4096);

    vmcs_page->revision = hw_info.basic_info.revision;
    PrintDebug("VMX Revision: 0x%x\n", vmcs_page->revision);

    return (addr_t)V3_PAddr((void *)vmcs_page);
}
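
/* Debug handlers that can be hooked to the EFER MSR in place of the normal
 * handlers; they dump the guest state on every access and, on writes, enable
 * page fault and GPF exiting to help track down mode-switch problems. */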
static int debug_efer_read(struct guest_info * core, uint_t msr, struct v3_msr * src, void * priv_data) {
    struct v3_msr * efer = (struct v3_msr *)&(core->ctrl_regs.efer);

    V3_Print("\n\nEFER READ\n");

    v3_print_guest_state(core);

    src->value = efer->value;

    return 0;
}
static int debug_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data) {
    struct v3_msr * efer = (struct v3_msr *)&(core->ctrl_regs.efer);
    struct vmx_data * vmx_state = core->vmm_data;

    V3_Print("\n\nEFER WRITE\n");

    v3_print_guest_state(core);

    efer->value = src.value;

    V3_Print("Trapping page faults and GPFs\n");
    vmx_state->excp_bmap.pf = 1;
    vmx_state->excp_bmap.gp = 1;

    check_vmcs_write(VMCS_EXCP_BITMAP, vmx_state->excp_bmap.value);

    return 0;
}
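
/* Initialize the per-core VMCS to a BIOS boot state: load the VMCS, seed the
 * control fields with the hardware defaults, configure exiting behavior and
 * the IO/MSR bitmaps, set up paging-mode specific state (shadow, EPT, or
 * EPT + unrestricted guest), and program the MSR load/store areas. */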
static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state) {
    int vmx_ret = 0;

    // disable global interrupts for vm state initialization
    v3_disable_ints();

    PrintDebug("Loading VMCS\n");
    vmx_ret = vmcs_load(vmx_state->vmcs_ptr_phys);
    vmx_state->state = VMX_UNLAUNCHED;

    if (vmx_ret != VMX_SUCCESS) {
        PrintError("VMPTRLD failed\n");
        return -1;
    }
    /*** Setup default state from HW ***/

    vmx_state->pin_ctrls.value = hw_info.pin_ctrls.def_val;
    vmx_state->pri_proc_ctrls.value = hw_info.proc_ctrls.def_val;
    vmx_state->exit_ctrls.value = hw_info.exit_ctrls.def_val;
    vmx_state->entry_ctrls.value = hw_info.entry_ctrls.def_val;
    vmx_state->sec_proc_ctrls.value = hw_info.sec_proc_ctrls.def_val;

    /* Print Control MSRs */
    PrintDebug("CR0 MSR: %p\n", (void *)(addr_t)hw_info.cr0.value);
    PrintDebug("CR4 MSR: %p\n", (void *)(addr_t)hw_info.cr4.value);


    /******* Setup Host State **********/

    /* Cache GDTR, IDTR, and TR in host struct */
    /********** Setup VMX Control Fields ***********/

    /* Add external interrupts, NMI exiting, and virtual NMI */
    vmx_state->pin_ctrls.nmi_exit = 1;
    vmx_state->pin_ctrls.ext_int_exit = 1;

    vmx_state->pri_proc_ctrls.hlt_exit = 1;

    vmx_state->pri_proc_ctrls.pause_exit = 0;
    vmx_state->pri_proc_ctrls.tsc_offset = 1;
#ifdef V3_CONFIG_TIME_VIRTUALIZE_TSC
    vmx_state->pri_proc_ctrls.rdtsc_exit = 1;
#endif
    vmx_state->pri_proc_ctrls.use_io_bitmap = 1;
    vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_A_ADDR, (addr_t)V3_PAddr(core->vm_info->io_map.arch_data));
    vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_B_ADDR,
                                (addr_t)V3_PAddr(core->vm_info->io_map.arch_data) + PAGE_SIZE_4KB);

    vmx_state->pri_proc_ctrls.use_msr_bitmap = 1;
    vmx_ret |= check_vmcs_write(VMCS_MSR_BITMAP, (addr_t)V3_PAddr(core->vm_info->msr_map.arch_data));
    // Ensure host runs in 64-bit mode at each VM EXIT
    vmx_state->exit_ctrls.host_64_on = 1;

    // Restore host's EFER register on each VM EXIT
    vmx_state->exit_ctrls.ld_efer = 1;

    // Save/restore guest's EFER register to/from VMCS on VM EXIT/ENTRY
    vmx_state->exit_ctrls.save_efer = 1;
    vmx_state->entry_ctrls.ld_efer = 1;

    vmx_state->exit_ctrls.save_pat = 1;
    vmx_state->exit_ctrls.ld_pat = 1;
    vmx_state->entry_ctrls.ld_pat = 1;

    /* Temporary GPF trap */
    vmx_state->excp_bmap.gp = 1;

    // Set up the guest's initial PAT field
    vmx_ret |= check_vmcs_write(VMCS_GUEST_PAT, 0x0007040600070406LL);
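
    /* Paging-mode specific configuration: shadow paging, EPT, or
     * EPT with unrestricted guest support. */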
    if (core->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");

        if (v3_init_passthrough_pts(core) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return -1;
        }

#define CR0_PE 0x00000001
#define CR0_PG 0x80000000
#define CR0_WP 0x00010000 // To ensure mem hooks work
        vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG | CR0_WP));

        // Cause VM_EXIT whenever CR4.VMXE or CR4.PAE bits are written
        vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);

        core->ctrl_regs.cr3 = core->direct_map_pt;

        // vmx_state->pinbased_ctrls |= NMI_EXIT;

        vmx_state->pri_proc_ctrls.cr3_ld_exit = 1;
        vmx_state->pri_proc_ctrls.cr3_str_exit = 1;

        vmx_state->pri_proc_ctrls.invlpg_exit = 1;

        /* Add page fault exits */
        vmx_state->excp_bmap.pf = 1;

        v3_vmxassist_init(core, vmx_state);

        // Hook all accesses to EFER register
        v3_hook_msr(core->vm_info, EFER_MSR,
                    &v3_handle_efer_read,
                    &v3_handle_efer_write,
                    core);
    } else if ((core->shdw_pg_mode == NESTED_PAGING) &&
               (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_CPU)) {

#define CR0_PE 0x00000001
#define CR0_PG 0x80000000
#define CR0_WP 0x00010000 // To ensure mem hooks work
        vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG | CR0_WP));

        // vmx_state->pinbased_ctrls |= NMI_EXIT;

        // Cause VM_EXIT whenever CR4.VMXE or CR4.PAE bits are written
        vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);

        /* Disable CR exits */
        vmx_state->pri_proc_ctrls.cr3_ld_exit = 0;
        vmx_state->pri_proc_ctrls.cr3_str_exit = 0;

        vmx_state->pri_proc_ctrls.invlpg_exit = 0;

        /* Add page fault exits */
        // vmx_state->excp_bmap.pf = 1; // This should never happen..., enabled to catch bugs

        v3_vmxassist_init(core, vmx_state);

        vmx_state->pri_proc_ctrls.sec_ctrls = 1; // Enable secondary proc controls
        vmx_state->sec_proc_ctrls.enable_ept = 1; // enable EPT paging

        if (v3_init_ept(core, &hw_info) == -1) {
            PrintError("Error initializing EPT\n");
            return -1;
        }

        // Hook all accesses to EFER register
        v3_hook_msr(core->vm_info, EFER_MSR, NULL, NULL, NULL);
    } else if ((core->shdw_pg_mode == NESTED_PAGING) &&
               (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_UG_CPU)) {

        // For now we will assume that unrestricted guest mode is assured w/ EPT

        core->vm_regs.rsp = 0x00;
        core->vm_regs.rdx = 0x00000f00;
        core->ctrl_regs.rflags = 0x00000002; // The reserved bit is always 1
        core->ctrl_regs.cr0 = 0x00000030;
        core->ctrl_regs.cr4 = 0x00002010; // Enable VMX and PSE flag

        core->segments.cs.selector = 0xf000;
        core->segments.cs.limit = 0xffff;
        core->segments.cs.base = 0x0000000f0000LL;

        // (raw attributes = 0xf3)
        core->segments.cs.type = 0xb;
        core->segments.cs.system = 0x1;
        core->segments.cs.dpl = 0x0;
        core->segments.cs.present = 1;
        int i = 0;
        struct v3_segment * segregs [] = {&(core->segments.ss), &(core->segments.ds),
                                          &(core->segments.es), &(core->segments.fs),
                                          &(core->segments.gs), NULL};

        for ( i = 0; segregs[i] != NULL; i++) {
            struct v3_segment * seg = segregs[i];

            seg->selector = 0x0000;
            // seg->base = seg->selector << 4;
            seg->base = 0x00000000;

            // seg->granularity = 1;
        }
        core->segments.gdtr.limit = 0x0000ffff;
        core->segments.gdtr.base = 0x0000000000000000LL;

        core->segments.idtr.limit = 0x0000ffff;
        core->segments.idtr.base = 0x0000000000000000LL;

        core->segments.ldtr.selector = 0x0000;
        core->segments.ldtr.limit = 0x0000ffff;
        core->segments.ldtr.base = 0x0000000000000000LL;
        core->segments.ldtr.type = 0x2;
        core->segments.ldtr.present = 1;

        core->segments.tr.selector = 0x0000;
        core->segments.tr.limit = 0x0000ffff;
        core->segments.tr.base = 0x0000000000000000LL;
        core->segments.tr.type = 0xb;
        core->segments.tr.present = 1;

        // core->dbg_regs.dr6 = 0x00000000ffff0ff0LL;
        core->dbg_regs.dr7 = 0x0000000000000400LL;
        vmx_state->pri_proc_ctrls.sec_ctrls = 1; // Enable secondary proc controls
        vmx_state->sec_proc_ctrls.enable_ept = 1; // enable EPT paging
        vmx_state->sec_proc_ctrls.unrstrct_guest = 1; // enable unrestricted guest operation

        /* Disable shadow paging stuff */
        vmx_state->pri_proc_ctrls.cr3_ld_exit = 0;
        vmx_state->pri_proc_ctrls.cr3_str_exit = 0;

        vmx_state->pri_proc_ctrls.invlpg_exit = 0;

        // Cause VM_EXIT whenever the CR4.VMXE bit is set
        vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE);

        if (v3_init_ept(core, &hw_info) == -1) {
            PrintError("Error initializing EPT\n");
            return -1;
        }

        // Hook all accesses to EFER register
        //v3_hook_msr(core->vm_info, EFER_MSR, &debug_efer_read, &debug_efer_write, core);
        v3_hook_msr(core->vm_info, EFER_MSR, NULL, NULL, NULL);
    } else {
        PrintError("Invalid Virtual paging mode\n");
        return -1;
    }
    // Setup SYSCALL/SYSENTER MSRs in load/store area

    // save STAR, LSTAR, FMASK, KERNEL_GS_BASE MSRs in MSR load/store area

    struct vmcs_msr_save_area * msr_entries = NULL;
    int max_msrs = (hw_info.misc_info.max_msr_cache_size + 1) * 4;
    int msr_ret = 0;

    V3_Print("Setting up MSR load/store areas (max_msr_count=%d)\n", max_msrs);

    if (max_msrs < 4) {
        PrintError("Max MSR cache size is too small (%d)\n", max_msrs);
        return -1;
    }

    vmx_state->msr_area_paddr = (addr_t)V3_AllocPages(1);

    if (vmx_state->msr_area_paddr == (addr_t)NULL) {
        PrintError("could not allocate msr load/store area\n");
        return -1;
    }

    msr_entries = (struct vmcs_msr_save_area *)V3_VAddr((void *)(vmx_state->msr_area_paddr));
    vmx_state->msr_area = msr_entries; // cache in vmx_info

    memset(msr_entries, 0, PAGE_SIZE);
    msr_entries->guest_star.index = IA32_STAR_MSR;
    msr_entries->guest_lstar.index = IA32_LSTAR_MSR;
    msr_entries->guest_fmask.index = IA32_FMASK_MSR;
    msr_entries->guest_kern_gs.index = IA32_KERN_GS_BASE_MSR;

    msr_entries->host_star.index = IA32_STAR_MSR;
    msr_entries->host_lstar.index = IA32_LSTAR_MSR;
    msr_entries->host_fmask.index = IA32_FMASK_MSR;
    msr_entries->host_kern_gs.index = IA32_KERN_GS_BASE_MSR;

    msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_STORE_CNT, 4);
    msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_LOAD_CNT, 4);
    msr_ret |= check_vmcs_write(VMCS_ENTRY_MSR_LOAD_CNT, 4);

    msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_STORE_ADDR, (addr_t)V3_PAddr(msr_entries->guest_msrs));
    msr_ret |= check_vmcs_write(VMCS_ENTRY_MSR_LOAD_ADDR, (addr_t)V3_PAddr(msr_entries->guest_msrs));
    msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_LOAD_ADDR, (addr_t)V3_PAddr(msr_entries->host_msrs));

    msr_ret |= v3_hook_msr(core->vm_info, IA32_STAR_MSR, NULL, NULL, NULL);
    msr_ret |= v3_hook_msr(core->vm_info, IA32_LSTAR_MSR, NULL, NULL, NULL);
    msr_ret |= v3_hook_msr(core->vm_info, IA32_FMASK_MSR, NULL, NULL, NULL);
    msr_ret |= v3_hook_msr(core->vm_info, IA32_KERN_GS_BASE_MSR, NULL, NULL, NULL);
    // IMPORTANT: These MSRs appear to be cached by the hardware....
    msr_ret |= v3_hook_msr(core->vm_info, SYSENTER_CS_MSR, NULL, NULL, NULL);
    msr_ret |= v3_hook_msr(core->vm_info, SYSENTER_ESP_MSR, NULL, NULL, NULL);
    msr_ret |= v3_hook_msr(core->vm_info, SYSENTER_EIP_MSR, NULL, NULL, NULL);

    msr_ret |= v3_hook_msr(core->vm_info, FS_BASE_MSR, NULL, NULL, NULL);
    msr_ret |= v3_hook_msr(core->vm_info, GS_BASE_MSR, NULL, NULL, NULL);

    msr_ret |= v3_hook_msr(core->vm_info, IA32_PAT_MSR, NULL, NULL, NULL);

    // Not sure what to do about this... Does not appear to be an explicit hardware cache version...
    msr_ret |= v3_hook_msr(core->vm_info, IA32_CSTAR_MSR, NULL, NULL, NULL);
    if (msr_ret != 0) {
        PrintError("Error configuring MSR save/restore area\n");
        return -1;
    }
    /* Sanity check ctrl/reg fields against hw_defaults */


    /*** Write all the info to the VMCS ***/

    // IS THIS NECESSARY???
#define DEBUGCTL_MSR 0x1d9
    struct v3_msr tmp_msr;
    v3_get_msr(DEBUGCTL_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_ret |= check_vmcs_write(VMCS_GUEST_DBG_CTL, tmp_msr.value);
    core->dbg_regs.dr7 = 0x400;
#ifdef __V3_64BIT__
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, (addr_t)0xffffffffffffffffULL);
#else
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, (addr_t)0xffffffffUL);
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR_HIGH, (addr_t)0xffffffffUL);
#endif
    if (v3_update_vmcs_ctrl_fields(core)) {
        PrintError("Could not write control fields!\n");
        return -1;
    }

    if (v3_update_vmcs_host_state(core)) {
        PrintError("Could not write host state\n");
        return -1;
    }

    // reenable global interrupts for vm state initialization now
    // that the vm state is initialized. If another VM kicks us off,
    // it'll update our vmx state so that we know to reload ourself
    v3_enable_ints();

    return 0;
}
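
/* Allocate and initialize the per-core VMX state (vmx_data + VMCS page),
 * clear the VMCS on the current CPU, and initialize it to the BIOS boot
 * state for PC-class VMs. */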
int v3_init_vmx_vmcs(struct guest_info * core, v3_vm_class_t vm_class) {
    struct vmx_data * vmx_state = NULL;
    int vmx_ret = 0;

    vmx_state = (struct vmx_data *)V3_Malloc(sizeof(struct vmx_data));
    memset(vmx_state, 0, sizeof(struct vmx_data));

    PrintDebug("vmx_data pointer: %p\n", (void *)vmx_state);

    PrintDebug("Allocating VMCS\n");
    vmx_state->vmcs_ptr_phys = allocate_vmcs();

    PrintDebug("VMCS pointer: %p\n", (void *)(vmx_state->vmcs_ptr_phys));

    core->vmm_data = vmx_state;
    vmx_state->state = VMX_UNLAUNCHED;

    PrintDebug("Initializing VMCS (addr=%p)\n", core->vmm_data);

    // TODO: Fix vmcs fields so they're 32-bit

    PrintDebug("Clearing VMCS: %p\n", (void *)vmx_state->vmcs_ptr_phys);
    vmx_ret = vmcs_clear(vmx_state->vmcs_ptr_phys);

    if (vmx_ret != VMX_SUCCESS) {
        PrintError("VMCLEAR failed\n");
        return -1;
    }
    if (vm_class == V3_PC_VM) {
        PrintDebug("Initializing VMCS\n");
        if (init_vmcs_bios(core, vmx_state) == -1) {
            PrintError("Error initializing VMCS to BIOS state\n");
            return -1;
        }
    } else {
        PrintError("Invalid VM Class\n");
        return -1;
    }

    PrintDebug("Serializing VMCS: %p\n", (void *)vmx_state->vmcs_ptr_phys);
    vmx_ret = vmcs_clear(vmx_state->vmcs_ptr_phys);

    return 0;
}
int v3_deinit_vmx_vmcs(struct guest_info * core) {
    struct vmx_data * vmx_state = core->vmm_data;

    V3_FreePages((void *)(vmx_state->vmcs_ptr_phys), 1);
    V3_FreePages(V3_PAddr(vmx_state->msr_area), 1);

    V3_Free(vmx_state);

    return 0;
}
#ifdef V3_CONFIG_CHECKPOINT
/*
 * JRL: This is broken
 */
int v3_vmx_save_core(struct guest_info * core, void * ctx){
    uint64_t vmcs_ptr = vmcs_store();

    v3_chkpt_save(ctx, "vmcs_data", PAGE_SIZE, (void *)vmcs_ptr);

    return 0;
}
int v3_vmx_load_core(struct guest_info * core, void * ctx){
    struct vmx_data * vmx_info = (struct vmx_data *)(core->vmm_data);
    struct cr0_32 * shadow_cr0;
    char vmcs[PAGE_SIZE_4KB];

    v3_chkpt_load(ctx, "vmcs_data", PAGE_SIZE_4KB, vmcs);

    vmcs_clear(vmx_info->vmcs_ptr_phys);
    vmcs_load((addr_t)vmcs);

    v3_vmx_save_vmcs(core);

    shadow_cr0 = (struct cr0_32 *)&(core->ctrl_regs.cr0);

    /* Get the CPU mode to set the guest_ia32e entry ctrl */

    if (core->shdw_pg_mode == SHADOW_PAGING) {
        if (v3_get_vm_mem_mode(core) == VIRTUAL_MEM) {
            if (v3_activate_shadow_pt(core) == -1) {
                PrintError("Failed to activate shadow page tables\n");
                return -1;
            }
        } else {
            if (v3_activate_passthrough_pt(core) == -1) {
                PrintError("Failed to activate passthrough page tables\n");
                return -1;
            }
        }
    }

    return 0;
}
#endif
void v3_flush_vmx_vm_core(struct guest_info * core) {
    struct vmx_data * vmx_info = (struct vmx_data *)(core->vmm_data);
    vmcs_clear(vmx_info->vmcs_ptr_phys);
    vmx_info->state = VMX_UNLAUNCHED;
}
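
/* If an interrupt injection was in progress but the IDT vectoring info shows
 * it was not delivered, notify the interrupt subsystem so it can be retried. */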
static int update_irq_exit_state(struct guest_info * info) {
    struct vmx_exit_idt_vec_info idt_vec_info;

    check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));

    if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 0)) {
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        V3_Print("Calling v3_injecting_intr\n");
#endif
        info->intr_core_state.irq_started = 0;
        v3_injecting_intr(info, info->intr_core_state.irq_vector, V3_EXTERNAL_IRQ);
    }

    return 0;
}
static int update_irq_entry_state(struct guest_info * info) {
    struct vmx_exit_idt_vec_info idt_vec_info;
    struct vmcs_interrupt_state intr_core_state;
    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);

    check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));
    check_vmcs_read(VMCS_GUEST_INT_STATE, &(intr_core_state));
    /* Check for pending exceptions to inject */
    if (v3_excp_pending(info)) {
        struct vmx_entry_int_info int_info;
        int_info.value = 0;

        // In VMX, almost every exception is hardware
        // Software exceptions are pretty much only for breakpoint or overflow
        int_info.type = 3;
        int_info.vector = v3_get_excp_number(info);

        if (info->excp_state.excp_error_code_valid) {
            check_vmcs_write(VMCS_ENTRY_EXCP_ERR, info->excp_state.excp_error_code);
            int_info.error_code = 1;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
            V3_Print("Injecting exception %d with error code %x\n",
                     int_info.vector, info->excp_state.excp_error_code);
#endif
        }

        int_info.valid = 1;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        V3_Print("Injecting exception %d (EIP=%p)\n", int_info.vector, (void *)(addr_t)info->rip);
#endif
        check_vmcs_write(VMCS_ENTRY_INT_INFO, int_info.value);

        v3_injecting_excp(info, int_info.vector);
    } else if ((((struct rflags *)&(info->ctrl_regs.rflags))->intr == 1) &&
               (intr_core_state.val == 0)) {

        if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 1)) {

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
            V3_Print("IRQ pending from previous injection\n");
#endif

            // Copy the IDT vectoring info over to reinject the old interrupt
            if (idt_vec_info.error_code == 1) {
                uint32_t err_code = 0;

                check_vmcs_read(VMCS_IDT_VECTOR_ERR, &err_code);
                check_vmcs_write(VMCS_ENTRY_EXCP_ERR, err_code);
            }

            idt_vec_info.undef = 0;
            check_vmcs_write(VMCS_ENTRY_INT_INFO, idt_vec_info.value);
        } else {
            struct vmx_entry_int_info ent_int;
            ent_int.value = 0;

            switch (v3_intr_pending(info)) {
                case V3_EXTERNAL_IRQ: {
                    info->intr_core_state.irq_vector = v3_get_intr(info);
                    ent_int.vector = info->intr_core_state.irq_vector;
                    ent_int.type = 0;
                    ent_int.error_code = 0;
                    ent_int.valid = 1;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
                    V3_Print("Injecting Interrupt %d at exit %u(EIP=%p)\n",
                             info->intr_core_state.irq_vector,
                             (uint32_t)info->num_exits,
                             (void *)(addr_t)info->rip);
#endif

                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
                    info->intr_core_state.irq_started = 1;

                    break;
                }
                case V3_NMI:
                    PrintDebug("Injecting NMI\n");

                    ent_int.type = 2;
                    ent_int.vector = 2;
                    ent_int.valid = 1;
                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);

                    break;
                case V3_SOFTWARE_INTR:
                    PrintDebug("Injecting software interrupt\n");
                    ent_int.type = 4;

                    ent_int.valid = 1;
                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);

                    break;
                case V3_VIRTUAL_IRQ:
                    // Not sure what to do here, Intel doesn't have virtual IRQs
                    // May be the same as external interrupts/IRQs

                    break;
                case V3_INVALID_INTR:
                default:
                    break;
            }
        }
    } else if ((v3_intr_pending(info)) && (vmx_info->pri_proc_ctrls.int_wndw_exit == 0)) {
        // Enable INTR window exiting so we know when IF=1
        uint32_t instr_len;

        check_vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        V3_Print("Enabling Interrupt-Window exiting: %d\n", instr_len);
#endif

        vmx_info->pri_proc_ctrls.int_wndw_exit = 1;
        check_vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
    }

    return 0;
}
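
/* Small ring buffers holding the last 10 exits (and the guest RIPs at which
 * they occurred) for post-mortem debugging when a core dies. */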
static struct vmx_exit_info exit_log[10];
static uint64_t rip_log[10];

static void print_exit_log(struct guest_info * info) {
    int cnt = info->num_exits % 10;
    int i = 0;

    V3_Print("\nExit Log (%d total exits):\n", (uint32_t)info->num_exits);

    for (i = 0; i < 10; i++) {
        struct vmx_exit_info * tmp = &exit_log[cnt];

        V3_Print("%d:\texit_reason = %p\n", i, (void *)(addr_t)tmp->exit_reason);
        V3_Print("\texit_qual = %p\n", (void *)tmp->exit_qual);
        V3_Print("\tint_info = %p\n", (void *)(addr_t)tmp->int_info);
        V3_Print("\tint_err = %p\n", (void *)(addr_t)tmp->int_err);
        V3_Print("\tinstr_info = %p\n", (void *)(addr_t)tmp->instr_info);
        V3_Print("\tguest_linear_addr= %p\n", (void *)(addr_t)tmp->guest_linear_addr);
        V3_Print("\tRIP = %p\n", (void *)rip_log[cnt]);

        cnt--;

        if (cnt == -1) {
            cnt = 9;
        }
    }
}
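
/* Program the VMX preemption timer (when the hardware allows it) so the next
 * guest timeout forces a VM exit; disabled when no timeout is pending. */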
static void
v3_vmx_schedule_timeout(struct guest_info * info)
{
    struct vmx_data * vmx_state = (struct vmx_data *)(info->vmm_data);
    sint64_t cycles = 0;
    uint32_t timeout = 0;

    /* Check if the hardware supports an active timeout */
#define VMX_ACTIVE_PREEMPT_TIMER_PIN 0x40
    if (hw_info.pin_ctrls.req_mask & VMX_ACTIVE_PREEMPT_TIMER_PIN) {
        /* The hardware doesn't support us modifying this pin control */
        return;
    }

    /* Check if we have one to schedule and schedule it if we do */
    cycles = (sint64_t)info->time_state.next_timeout - (sint64_t)v3_get_guest_time(&info->time_state);
    if (info->time_state.next_timeout == (ullong_t) -1) {
        vmx_state->pin_ctrls.active_preempt_timer = 0;
    } else if (cycles < 0) {
        /* set the timeout to 0 to force an immediate re-exit since it expired between
         * when we checked a timeout and now. IF SOMEONE CONTINUALLY SETS A SHORT TIMEOUT,
         * THIS CAN LOCK US OUT OF THE GUEST! */
        timeout = 0;
        vmx_state->pin_ctrls.active_preempt_timer = 1;
    } else {
        /* The hardware supports scheduling a timeout, and we have one to
         * schedule */
        timeout = (uint32_t)cycles >> hw_info.misc_info.tsc_multiple;
        vmx_state->pin_ctrls.active_preempt_timer = 1;
    }

    /* Actually program the timer based on the settings above. */
    check_vmcs_write(VMCS_PREEMPT_TIMER, timeout);
    check_vmcs_write(VMCS_PIN_CTRLS, vmx_state->pin_ctrls.value);
}
/*
 * CAUTION and DANGER!!!
 *
 * The VMCS CANNOT(!!) be accessed outside of the cli/sti calls inside this function
 * When executing a symbiotic call, the VMCS WILL be overwritten, so any dependencies
 * on its contents will cause things to break. The contents at the time of the exit WILL
 * change before the exit handler is executed.
 */
int v3_vmx_enter(struct guest_info * info) {
    int ret = 0;
    uint32_t tsc_offset_low, tsc_offset_high;
    struct vmx_exit_info exit_info;
    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);

    // Conditionally yield the CPU if the timeslice has expired
    v3_yield_cond(info);

    // Perform any additional yielding needed for time adjustment
    v3_adjust_time(info);

    // Check for timeout - since this calls generic hooks in devices
    // that may do things like pause the VM, it cannot be done with
    // interrupts disabled.
    v3_check_timeout(info);

    // disable global interrupts for vm state transition
    v3_disable_ints();

    // Update timer devices late after being in the VM so that as much
    // of the time in the VM is accounted for as possible. Also do it before
    // updating IRQ entry state so that any interrupts the timers raise get
    // handled on the next VM entry. Must be done with interrupts disabled.
    v3_update_timers(info);
    if (vmcs_store() != vmx_info->vmcs_ptr_phys) {
        vmcs_clear(vmx_info->vmcs_ptr_phys);
        vmcs_load(vmx_info->vmcs_ptr_phys);
        vmx_info->state = VMX_UNLAUNCHED;
    }

    v3_vmx_restore_vmcs(info);

#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_entry_state(info);
    }
#else
    update_irq_entry_state(info);
#endif

    {
        addr_t guest_cr3 = 0;
        vmcs_read(VMCS_GUEST_CR3, &guest_cr3);
        vmcs_write(VMCS_GUEST_CR3, guest_cr3);
    }
    // Update vmx active preemption timer to exit at the next timeout if
    // the hardware supports it.
    v3_vmx_schedule_timeout(info);

    // Perform last-minute time bookkeeping prior to entering the VM
    v3_time_enter_vm(info);

    tsc_offset_high = (uint32_t)((v3_tsc_host_offset(&info->time_state) >> 32) & 0xffffffff);
    tsc_offset_low = (uint32_t)(v3_tsc_host_offset(&info->time_state) & 0xffffffff);
    check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
    check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);

    if (v3_update_vmcs_host_state(info)) {
        v3_enable_ints();
        PrintError("Could not write host state\n");
        return -1;
    }
    if (vmx_info->state == VMX_UNLAUNCHED) {
        vmx_info->state = VMX_LAUNCHED;
        ret = v3_vmx_launch(&(info->vm_regs), info, &(info->ctrl_regs));
    } else {
        V3_ASSERT(vmx_info->state != VMX_UNLAUNCHED);
        ret = v3_vmx_resume(&(info->vm_regs), info, &(info->ctrl_regs));
    }

    //  PrintDebug("VMX Exit: ret=%d\n", ret);

    if (ret != VMX_SUCCESS) {
        uint32_t error = 0;
        vmcs_read(VMCS_INSTR_ERR, &error);

        v3_enable_ints();

        PrintError("VMENTRY Error: %d (launch_ret = %d)\n", error, ret);
        return -1;
    }
    // Immediate exit from VM time bookkeeping
    v3_time_exit_vm(info);

    /* Update guest state */
    v3_vmx_save_vmcs(info);

    // info->cpl = info->segments.cs.selector & 0x3;

    info->mem_mode = v3_get_vm_mem_mode(info);
    info->cpu_mode = v3_get_vm_cpu_mode(info);

    check_vmcs_read(VMCS_EXIT_INSTR_LEN, &(exit_info.instr_len));
    check_vmcs_read(VMCS_EXIT_INSTR_INFO, &(exit_info.instr_info));
    check_vmcs_read(VMCS_EXIT_REASON, &(exit_info.exit_reason));
    check_vmcs_read(VMCS_EXIT_QUAL, &(exit_info.exit_qual));
    check_vmcs_read(VMCS_EXIT_INT_INFO, &(exit_info.int_info));
    check_vmcs_read(VMCS_EXIT_INT_ERR, &(exit_info.int_err));
    check_vmcs_read(VMCS_GUEST_LINEAR_ADDR, &(exit_info.guest_linear_addr));

    if (info->shdw_pg_mode == NESTED_PAGING) {
        check_vmcs_read(VMCS_GUEST_PHYS_ADDR, &(exit_info.ept_fault_addr));
    }
    //PrintDebug("VMX Exit taken, id-qual: %u-%lu\n", exit_info.exit_reason, exit_info.exit_qual);

    exit_log[info->num_exits % 10] = exit_info;
    rip_log[info->num_exits % 10] = get_addr_linear(info, info->rip, &(info->segments.cs));

#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_exit_state(info);
    }
#else
    update_irq_exit_state(info);
#endif
    if (exit_info.exit_reason == VMEXIT_INTR_WINDOW) {
        // This is a special case whose only job is to inject an interrupt
        vmcs_read(VMCS_PROC_CTRLS, &(vmx_info->pri_proc_ctrls.value));
        vmx_info->pri_proc_ctrls.int_wndw_exit = 0;
        vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        V3_Print("Interrupts available again! (RIP=%llx)\n", info->rip);
#endif
    }

    // reenable global interrupts after vm exit
    v3_enable_ints();

    // Conditionally yield the CPU if the timeslice has expired
    v3_yield_cond(info);
    if (v3_handle_vmx_exit(info, &exit_info) == -1) {
        PrintError("Error in VMX exit handler (Exit reason=%x)\n", exit_info.exit_reason);
        return -1;
    }

    return 0;
}
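
/* Per-core run loop: wait for the core to be started, then repeatedly enter
 * the guest via v3_vmx_enter(), dumping state and the exit log if an entry
 * ever fails. */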
int v3_start_vmx_guest(struct guest_info * info) {

    PrintDebug("Starting VMX core %u\n", info->vcpu_id);

    if (info->vcpu_id == 0) {
        info->core_run_state = CORE_RUNNING;
    } else {

        PrintDebug("VMX core %u: Waiting for core initialization\n", info->vcpu_id);

        while (info->core_run_state == CORE_STOPPED) {

            if (info->vm_info->run_state == VM_STOPPED) {
                // The VM was stopped before this core was initialized.
                return 0;
            }

            v3_yield(info);
            //PrintDebug("VMX core %u: still waiting for INIT\n",info->vcpu_id);
        }

        PrintDebug("VMX core %u initialized\n", info->vcpu_id);

        // We'll be paranoid about race conditions here
        v3_wait_at_barrier(info);
    }
    PrintDebug("VMX core %u: I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n",
               info->vcpu_id, info->segments.cs.selector, (void *)(info->segments.cs.base),
               info->segments.cs.limit, (void *)(info->rip));

    PrintDebug("VMX core %u: Launching VMX VM on logical core %u\n", info->vcpu_id, info->pcpu_id);

    v3_start_time(info);

    while (1) {

        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }
        if (v3_vmx_enter(info) == -1) {

            addr_t host_addr = 0;
            addr_t linear_addr = 0;

            info->vm_info->run_state = VM_ERROR;

            V3_Print("VMX core %u: VMX ERROR!!\n", info->vcpu_id);

            v3_print_guest_state(info);

            V3_Print("VMX core %u\n", info->vcpu_id);

            linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));

            if (info->mem_mode == PHYSICAL_MEM) {
                v3_gpa_to_hva(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                v3_gva_to_hva(info, linear_addr, &host_addr);
            }

            V3_Print("VMX core %u: Host Address of rip = 0x%p\n", info->vcpu_id, (void *)host_addr);

            V3_Print("VMX core %u: Instr (15 bytes) at %p:\n", info->vcpu_id, (void *)host_addr);
            v3_dump_mem((uint8_t *)host_addr, 15);

            v3_print_stack(info);

            print_exit_log(info);

            return -1;
        }
        v3_wait_at_barrier(info);

        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }

        if ((info->num_exits % 5000) == 0) {
            V3_Print("VMX Exit number %d\n", (uint32_t)info->num_exits);
        }
    }

    return 0;
}
#define VMX_FEATURE_CONTROL_MSR 0x0000003a
#define CPUID_VMX_FEATURES 0x00000005  /* LOCK and VMXON */
#define CPUID_1_ECX_VTXFLAG 0x00000020
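
/* Check CPUID for VMX support and make sure the feature control MSR has not
 * locked VMX off in the BIOS. */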
int v3_is_vmx_capable() {
    v3_msr_t feature_msr;
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(0x1, &eax, &ebx, &ecx, &edx);

    PrintDebug("ECX: 0x%x\n", ecx);

    if (ecx & CPUID_1_ECX_VTXFLAG) {
        v3_get_msr(VMX_FEATURE_CONTROL_MSR, &(feature_msr.hi), &(feature_msr.lo));

        PrintDebug("MSRREGlow: 0x%.8x\n", feature_msr.lo);

        if ((feature_msr.lo & CPUID_VMX_FEATURES) != CPUID_VMX_FEATURES) {
            PrintDebug("VMX is locked -- enable in the BIOS\n");
            return 0;
        }
    } else {
        PrintDebug("VMX not supported on this cpu\n");
        return 0;
    }

    return 1;
}
int v3_reset_vmx_vm_core(struct guest_info * core, addr_t rip) {

    if ((core->shdw_pg_mode == NESTED_PAGING) &&
        (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_UG_CPU)) {

        core->segments.cs.selector = rip << 8;
        core->segments.cs.limit = 0xffff;
        core->segments.cs.base = rip << 12;
    } else {
        core->vm_regs.rdx = core->vcpu_id;
        core->vm_regs.rbx = rip;
    }

    return 0;
}
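
/* Per-CPU bring-up: detect the VMX feature set once, allocate a VMXON region,
 * execute VMXON, and classify the CPU by its EPT/unrestricted-guest support. */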
void v3_init_vmx_cpu(int cpu_id) {
    addr_t vmx_on_region = 0;
    extern v3_cpu_arch_t v3_mach_type;

    if (v3_mach_type == V3_INVALID_CPU) {
        if (v3_init_vmx_hw(&hw_info) == -1) {
            PrintError("Could not initialize VMX hardware features on cpu %d\n", cpu_id);
            return;
        }
    }

    // Setup VMXON Region
    vmx_on_region = allocate_vmcs();

    if (vmx_on(vmx_on_region) == VMX_SUCCESS) {
        V3_Print("VMX Enabled\n");
        host_vmcs_ptrs[cpu_id] = vmx_on_region;
    } else {
        V3_Print("VMX already enabled\n");
        V3_FreePages((void *)vmx_on_region, 1);
    }

    PrintDebug("VMXON pointer: 0x%p\n", (void *)host_vmcs_ptrs[cpu_id]);
    {
        struct vmx_sec_proc_ctrls sec_proc_ctrls;
        sec_proc_ctrls.value = v3_vmx_get_ctrl_features(&(hw_info.sec_proc_ctrls));

        if (sec_proc_ctrls.enable_ept == 0) {
            V3_Print("VMX EPT (Nested) Paging not supported\n");
            v3_cpu_types[cpu_id] = V3_VMX_CPU;
        } else if (sec_proc_ctrls.unrstrct_guest == 0) {
            V3_Print("VMX EPT (Nested) Paging supported\n");
            v3_cpu_types[cpu_id] = V3_VMX_EPT_CPU;
        } else {
            V3_Print("VMX EPT (Nested) Paging + Unrestricted guest supported\n");
            v3_cpu_types[cpu_id] = V3_VMX_EPT_UG_CPU;
        }
    }
}
void v3_deinit_vmx_cpu(int cpu_id) {
    extern v3_cpu_arch_t v3_cpu_types[];
    v3_cpu_types[cpu_id] = V3_INVALID_CPU;

    if (host_vmcs_ptrs[cpu_id] != 0) {
        V3_Print("Disabling VMX\n");

        if (vmx_off() != VMX_SUCCESS) {
            PrintError("Error executing VMXOFF\n");
        }

        V3_FreePages((void *)host_vmcs_ptrs[cpu_id], 1);

        host_vmcs_ptrs[cpu_id] = 0;
    }
}