/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2011, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2011, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
#include <palacios/vmx.h>
#include <palacios/vmm.h>
#include <palacios/vmx_handler.h>
#include <palacios/vmcs.h>
#include <palacios/vmx_lowlevel.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_config.h>
#include <palacios/vmm_time.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_direct_paging.h>
#include <palacios/vmx_io.h>
#include <palacios/vmx_msr.h>

#include <palacios/vmx_ept.h>
#include <palacios/vmx_assist.h>
#include <palacios/vmx_hw_info.h>
#ifndef V3_CONFIG_DEBUG_VMX
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
/* These fields contain the hardware feature sets supported by the local CPU */
static struct vmx_hw_info hw_info;

extern v3_cpu_arch_t v3_cpu_types[];

static addr_t host_vmcs_ptrs[V3_CONFIG_MAX_CPUS] = { [0 ... V3_CONFIG_MAX_CPUS - 1] = 0};

extern int v3_vmx_launch(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
extern int v3_vmx_resume(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
static int inline check_vmcs_write(vmcs_field_t field, addr_t val) {
    int ret = 0;

    ret = vmcs_write(field, val);

    if (ret != VMX_SUCCESS) {
        PrintError("VMWRITE error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
        return 1;
    }

    return 0;
}
static int inline check_vmcs_read(vmcs_field_t field, void * val) {
    int ret = 0;

    ret = vmcs_read(field, val);

    if (ret != VMX_SUCCESS) {
        PrintError("VMREAD error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
    }

    return ret;
}
static addr_t allocate_vmcs() {
    struct vmcs_data * vmcs_page = NULL;

    PrintDebug("Allocating page\n");

    vmcs_page = (struct vmcs_data *)V3_VAddr(V3_AllocPages(1));
    memset(vmcs_page, 0, 4096);

    vmcs_page->revision = hw_info.basic_info.revision;
    PrintDebug("VMX Revision: 0x%x\n", vmcs_page->revision);

    return (addr_t)V3_PAddr((void *)vmcs_page);
}
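
/*
 * Note: the VMCS region handed to the hardware must begin with the VMCS
 * revision identifier reported by the IA32_VMX_BASIC MSR (cached above in
 * hw_info.basic_info.revision); a mismatch causes VMPTRLD/VMXON to fail.
 * The same allocator is reused below for the per-CPU VMXON region, which
 * has the same revision-id requirement.
 */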
static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state) {
    int vmx_ret = 0;

    // disable global interrupts for vm state initialization
    v3_disable_ints();

    PrintDebug("Loading VMCS\n");
    vmx_ret = vmcs_load(vmx_state->vmcs_ptr_phys);
    vmx_state->state = VMX_UNLAUNCHED;

    if (vmx_ret != VMX_SUCCESS) {
        PrintError("VMPTRLD failed\n");
        return -1;
    }

    /*** Setup default state from HW ***/
    vmx_state->pin_ctrls.value = hw_info.pin_ctrls.def_val;
    vmx_state->pri_proc_ctrls.value = hw_info.proc_ctrls.def_val;
    vmx_state->exit_ctrls.value = hw_info.exit_ctrls.def_val;
    vmx_state->entry_ctrls.value = hw_info.entry_ctrls.def_val;
    vmx_state->sec_proc_ctrls.value = hw_info.sec_proc_ctrls.def_val;

    /* Print Control MSRs */
    PrintDebug("CR0 MSR: %p\n", (void *)(addr_t)hw_info.cr0.value);
    PrintDebug("CR4 MSR: %p\n", (void *)(addr_t)hw_info.cr4.value);
    /******* Setup Host State **********/

    /* Cache GDTR, IDTR, and TR in host struct */
    {
        addr_t gdtr_base;
        struct {
            uint16_t selector;
            addr_t   base;
        } __attribute__((packed)) tmp_seg;

        __asm__ __volatile__("sgdt (%0);" : : "q"(&tmp_seg) : "memory");
        gdtr_base = tmp_seg.base;
        vmx_state->host_state.gdtr.base = gdtr_base;

        __asm__ __volatile__("sidt (%0);" : : "q"(&tmp_seg) : "memory");
        vmx_state->host_state.idtr.base = tmp_seg.base;

        __asm__ __volatile__("str (%0);" : : "q"(&tmp_seg) : "memory");
        vmx_state->host_state.tr.selector = tmp_seg.selector;

        /* The GDTR *index* is bits 3-15 of the selector. */
        struct tss_descriptor * desc = NULL;
        desc = (struct tss_descriptor *)(gdtr_base + (8 * (tmp_seg.selector >> 3)));

        tmp_seg.base = ((desc->base1) |
                        (desc->base2 << 16) |
                        (desc->base3 << 24) |
#ifdef __V3_64BIT__
                        ((uint64_t)desc->base4 << 32)
#else
                        0
#endif
                        );

        vmx_state->host_state.tr.base = tmp_seg.base;
    }
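
    /*
     * Note: STR only yields the TR selector; the TSS base is hidden state,
     * so it has to be recovered from the TSS descriptor in the host GDT
     * (located above via gdtr_base). The VMCS host-state area requires the
     * full linear base, not just the selector.
     */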
    /********** Setup VMX Control Fields ***********/

    /* Add external interrupts, NMI exiting, and virtual NMI */
    vmx_state->pin_ctrls.nmi_exit = 1;
    vmx_state->pin_ctrls.ext_int_exit = 1;

    vmx_state->pri_proc_ctrls.hlt_exit = 1;

    vmx_state->pri_proc_ctrls.pause_exit = 0;
    vmx_state->pri_proc_ctrls.tsc_offset = 1;
#ifdef V3_CONFIG_TIME_VIRTUALIZE_TSC
    vmx_state->pri_proc_ctrls.rdtsc_exit = 1;
#endif

    /* Setup IO map */
    vmx_state->pri_proc_ctrls.use_io_bitmap = 1;
    vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_A_ADDR, (addr_t)V3_PAddr(core->vm_info->io_map.arch_data));
    vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_B_ADDR,
                                (addr_t)V3_PAddr(core->vm_info->io_map.arch_data) + PAGE_SIZE_4KB);

    vmx_state->pri_proc_ctrls.use_msr_bitmap = 1;
    vmx_ret |= check_vmcs_write(VMCS_MSR_BITMAP, (addr_t)V3_PAddr(core->vm_info->msr_map.arch_data));
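
    /*
     * The two I/O bitmaps are each one 4KB page: bitmap A covers ports
     * 0x0000-0x7fff and bitmap B covers 0x8000-0xffff. A set bit forces a
     * VM exit for that port, which is how the VMM's registered I/O hooks
     * get control; the MSR bitmap plays the same role for RDMSR/WRMSR.
     */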
    vmx_state->exit_ctrls.host_64_on = 1;

    /* Not sure how exactly to handle this... */
    v3_hook_msr(core->vm_info, EFER_MSR,
                &v3_handle_efer_read,
                &v3_handle_efer_write,
                core);

    vmx_state->entry_ctrls.ld_efer = 1;
    vmx_state->exit_ctrls.ld_efer = 1;
    vmx_state->exit_ctrls.save_efer = 1;

    vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE);
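
    /*
     * Bits set in the CR0/CR4 guest/host masks are "owned" by the VMM:
     * guest reads of those bits return the read shadow, and guest writes
     * to them cause a VM exit. Masking CR4.VMXE here keeps the guest from
     * seeing or toggling the host's VMX-enable bit.
     */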
    if (core->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");

        if (v3_init_passthrough_pts(core) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return -1;
        }

#define CR0_PE 0x00000001
#define CR0_PG 0x80000000
#define CR0_WP 0x00010000 // To ensure mem hooks work
        vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG | CR0_WP));

        core->ctrl_regs.cr3 = core->direct_map_pt;

        // vmx_state->pinbased_ctrls |= NMI_EXIT;

        /* Add CR exits */
        vmx_state->pri_proc_ctrls.cr3_ld_exit = 1;
        vmx_state->pri_proc_ctrls.cr3_str_exit = 1;

        vmx_state->pri_proc_ctrls.invlpg_exit = 1;

        /* Add page fault exits */
        vmx_state->excp_bmap.pf = 1;

        // Setup VMX Assist
        v3_vmxassist_init(core, vmx_state);
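
        /*
         * Without unrestricted guest support the guest must run in
         * protected, paged mode, so real-mode BIOS code cannot execute
         * directly; vmxassist provides the environment used to emulate
         * those early real-mode phases.
         */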
    } else if ((core->shdw_pg_mode == NESTED_PAGING) &&
               (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_CPU)) {

#define CR0_PE 0x00000001
#define CR0_PG 0x80000000
#define CR0_WP 0x00010000 // To ensure mem hooks work
        vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG | CR0_WP));

        // vmx_state->pinbased_ctrls |= NMI_EXIT;

        /* Disable CR exits */
        vmx_state->pri_proc_ctrls.cr3_ld_exit = 0;
        vmx_state->pri_proc_ctrls.cr3_str_exit = 0;

        vmx_state->pri_proc_ctrls.invlpg_exit = 0;

        /* Add page fault exits */
        // vmx_state->excp_bmap.pf = 1; // This should never happen..., enabled to catch bugs

        // Setup VMX Assist
        v3_vmxassist_init(core, vmx_state);

        /* Enable EPT */
        vmx_state->pri_proc_ctrls.sec_ctrls = 1; // Enable secondary proc controls
        vmx_state->sec_proc_ctrls.enable_ept = 1; // enable EPT paging

        if (v3_init_ept(core, &hw_info) == -1) {
            PrintError("Error initializing EPT\n");
            return -1;
        }
    } else if ((core->shdw_pg_mode == NESTED_PAGING) &&
               (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_UG_CPU)) {
        int i = 0;

        // For now we will assume that unrestricted guest mode is assured w/ EPT

        core->vm_regs.rsp = 0x00;
        core->rip = 0xfff0;    // real-mode reset vector (CS base 0xf0000 below)
        core->vm_regs.rdx = 0x00000f00;
        core->ctrl_regs.rflags = 0x00000002; // The reserved bit is always 1
        core->ctrl_regs.cr0 = 0x60010010; // Set the WP flag so the memory hooks work in real-mode

        core->segments.cs.selector = 0xf000;
        core->segments.cs.limit = 0xffff;
        core->segments.cs.base = 0x0000000f0000LL;

        // (raw attributes = 0xf3)
        core->segments.cs.type = 0xb;
        core->segments.cs.system = 0x1;
        core->segments.cs.dpl = 0x0;
        core->segments.cs.present = 1;

        struct v3_segment * segregs [] = {&(core->segments.ss), &(core->segments.ds),
                                          &(core->segments.es), &(core->segments.fs),
                                          &(core->segments.gs), NULL};

        for ( i = 0; segregs[i] != NULL; i++) {
            struct v3_segment * seg = segregs[i];

            seg->selector = 0x0000;
            //    seg->base = seg->selector << 4;
            seg->base = 0x00000000;
            seg->limit = 0xffff;

            seg->type = 0x3;
            seg->system = 0x1;
            seg->dpl = 0x0;
            seg->present = 1;
            //    seg->granularity = 1;
        }
        core->segments.gdtr.limit = 0x0000ffff;
        core->segments.gdtr.base = 0x0000000000000000LL;

        core->segments.idtr.limit = 0x0000ffff;
        core->segments.idtr.base = 0x0000000000000000LL;

        core->segments.ldtr.selector = 0x0000;
        core->segments.ldtr.limit = 0x0000ffff;
        core->segments.ldtr.base = 0x0000000000000000LL;
        core->segments.ldtr.type = 2;
        core->segments.ldtr.present = 1;

        core->segments.tr.selector = 0x0000;
        core->segments.tr.limit = 0x0000ffff;
        core->segments.tr.base = 0x0000000000000000LL;
        core->segments.tr.type = 0xb;
        core->segments.tr.present = 1;

        //    core->dbg_regs.dr6 = 0x00000000ffff0ff0LL;
        core->dbg_regs.dr7 = 0x0000000000000400LL;

        /* Enable EPT */
        vmx_state->pri_proc_ctrls.sec_ctrls = 1; // Enable secondary proc controls
        vmx_state->sec_proc_ctrls.enable_ept = 1; // enable EPT paging
        vmx_state->sec_proc_ctrls.unrstrct_guest = 1; // enable unrestricted guest operation

        /* Disable shadow paging stuff */
        vmx_state->pri_proc_ctrls.cr3_ld_exit = 0;
        vmx_state->pri_proc_ctrls.cr3_str_exit = 0;

        vmx_state->pri_proc_ctrls.invlpg_exit = 0;

        if (v3_init_ept(core, &hw_info) == -1) {
            PrintError("Error initializing EPT\n");
            return -1;
        }

    } else {
        PrintError("Invalid Virtual paging mode\n");
        return -1;
    }
    // Setup SYSCALL/SYSENTER MSRs in load/store area

    // save STAR, LSTAR, FMASK, KERNEL_GS_BASE MSRs in MSR load/store area
#define IA32_STAR 0xc0000081
#define IA32_LSTAR 0xc0000082
#define IA32_FMASK 0xc0000084
#define IA32_KERN_GS_BASE 0xc0000102

#define IA32_CSTAR 0xc0000083 // Compatibility mode STAR (ignored for now... hopefully it's not that important...)

    int msr_ret = 0;

    struct vmcs_msr_entry * exit_store_msrs = NULL;
    struct vmcs_msr_entry * exit_load_msrs = NULL;
    struct vmcs_msr_entry * entry_load_msrs = NULL;
    int max_msrs = (hw_info.misc_info.max_msr_cache_size + 1) * 4;

    V3_Print("Setting up MSR load/store areas (max_msr_count=%d)\n", max_msrs);

    if (max_msrs < 4) {
        PrintError("Max MSR cache size is too small (%d)\n", max_msrs);
        return -1;
    }

    vmx_state->msr_area = V3_VAddr(V3_AllocPages(1));

    if (vmx_state->msr_area == NULL) {
        PrintError("could not allocate msr load/store area\n");
        return -1;
    }

    msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_STORE_CNT, 4);
    msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_LOAD_CNT, 4);
    msr_ret |= check_vmcs_write(VMCS_ENTRY_MSR_LOAD_CNT, 4);

    // The MSR area is a single page carved into three 4-entry tables:
    // exit-store, exit-load, and entry-load
    exit_store_msrs = (struct vmcs_msr_entry *)(vmx_state->msr_area);
    exit_load_msrs = (struct vmcs_msr_entry *)(vmx_state->msr_area + (sizeof(struct vmcs_msr_entry) * 4));
    entry_load_msrs = (struct vmcs_msr_entry *)(vmx_state->msr_area + (sizeof(struct vmcs_msr_entry) * 8));

    exit_store_msrs[0].index = IA32_STAR;
    exit_store_msrs[1].index = IA32_LSTAR;
    exit_store_msrs[2].index = IA32_FMASK;
    exit_store_msrs[3].index = IA32_KERN_GS_BASE;

    // Propagate the MSR indices into the exit-load and entry-load tables
    // (the destination is the first memcpy argument)
    memcpy(exit_load_msrs, exit_store_msrs, sizeof(struct vmcs_msr_entry) * 4);
    memcpy(entry_load_msrs, exit_store_msrs, sizeof(struct vmcs_msr_entry) * 4);

    // Record the host's current values so they are reloaded on VM exit
    v3_get_msr(IA32_STAR, &(exit_load_msrs[0].hi), &(exit_load_msrs[0].lo));
    v3_get_msr(IA32_LSTAR, &(exit_load_msrs[1].hi), &(exit_load_msrs[1].lo));
    v3_get_msr(IA32_FMASK, &(exit_load_msrs[2].hi), &(exit_load_msrs[2].lo));
    v3_get_msr(IA32_KERN_GS_BASE, &(exit_load_msrs[3].hi), &(exit_load_msrs[3].lo));

    msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_STORE_ADDR, (addr_t)V3_PAddr(exit_store_msrs));
    msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_LOAD_ADDR, (addr_t)V3_PAddr(exit_load_msrs));
    msr_ret |= check_vmcs_write(VMCS_ENTRY_MSR_LOAD_ADDR, (addr_t)V3_PAddr(entry_load_msrs));
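
    /*
     * Layout reminder for the areas set up above: the exit-store table
     * receives the guest's values of these MSRs on VM exit, the exit-load
     * table supplies the host values restored on exit, and the entry-load
     * table supplies the guest values loaded on entry. Each entry uses the
     * architectural 16-byte format (32-bit MSR index, reserved word, 64-bit
     * data), so three 4-entry tables fit comfortably in one page.
     */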
    /* Sanity check ctrl/reg fields against hw_defaults */


    /*** Write all the info to the VMCS ***/

    // IS THIS NECESSARY???
#define DEBUGCTL_MSR 0x1d9
    struct v3_msr tmp_msr;
    v3_get_msr(DEBUGCTL_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_ret |= check_vmcs_write(VMCS_GUEST_DBG_CTL, tmp_msr.value);
    core->dbg_regs.dr7 = 0x400;

#ifdef __V3_64BIT__
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, (addr_t)0xffffffffffffffffULL);
#else
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, (addr_t)0xffffffffUL);
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR_HIGH, (addr_t)0xffffffffUL);
#endif
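
    /*
     * The VMCS link pointer must be set to all 1s when VMCS shadowing is
     * not in use; leaving it at 0 is one of the classic causes of a failed
     * VM entry with an "invalid control field" status.
     */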
    if (v3_update_vmcs_ctrl_fields(core)) {
        PrintError("Could not write control fields!\n");
        return -1;
    }

    if (v3_update_vmcs_host_state(core)) {
        PrintError("Could not write host state\n");
        return -1;
    }

    // reenable global interrupts for vm state initialization now
    // that the vm state is initialized. If another VM kicks us off,
    // it'll update our vmx state so that we know to reload ourself
    v3_enable_ints();

    return 0;
}
int v3_init_vmx_vmcs(struct guest_info * core, v3_vm_class_t vm_class) {
    struct vmx_data * vmx_state = NULL;
    int vmx_ret = 0;

    vmx_state = (struct vmx_data *)V3_Malloc(sizeof(struct vmx_data));
    memset(vmx_state, 0, sizeof(struct vmx_data));

    PrintDebug("vmx_data pointer: %p\n", (void *)vmx_state);

    PrintDebug("Allocating VMCS\n");
    vmx_state->vmcs_ptr_phys = allocate_vmcs();

    PrintDebug("VMCS pointer: %p\n", (void *)(vmx_state->vmcs_ptr_phys));

    core->vmm_data = vmx_state;
    vmx_state->state = VMX_UNLAUNCHED;

    PrintDebug("Initializing VMCS (addr=%p)\n", core->vmm_data);

    // TODO: Fix vmcs fields so they're 32-bit

    PrintDebug("Clearing VMCS: %p\n", (void *)vmx_state->vmcs_ptr_phys);
    vmx_ret = vmcs_clear(vmx_state->vmcs_ptr_phys);

    if (vmx_ret != VMX_SUCCESS) {
        PrintError("VMCLEAR failed\n");
        return -1;
    }

    if (vm_class == V3_PC_VM) {
        PrintDebug("Initializing VMCS\n");
        if (init_vmcs_bios(core, vmx_state) == -1) {
            PrintError("Error initializing VMCS to BIOS state\n");
            return -1;
        }
    } else {
        PrintError("Invalid VM Class\n");
        return -1;
    }

    PrintDebug("Serializing VMCS: %p\n", (void *)vmx_state->vmcs_ptr_phys);
    vmx_ret = vmcs_clear(vmx_state->vmcs_ptr_phys);

    return 0;
}
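
/*
 * The trailing VMCLEAR above ("Serializing VMCS") flushes any processor-
 * cached VMCS state back to the memory region and leaves the VMCS in the
 * "clear" launch state, so the first entry on this VMCS must use VMLAUNCH
 * (see the VMX_UNLAUNCHED/VMX_LAUNCHED tracking in v3_vmx_enter()).
 */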
int v3_deinit_vmx_vmcs(struct guest_info * core) {
    struct vmx_data * vmx_state = core->vmm_data;

    V3_FreePages((void *)(vmx_state->vmcs_ptr_phys), 1);
    // msr_area holds a virtual address (see V3_VAddr above), so convert back before freeing
    V3_FreePages(V3_PAddr(vmx_state->msr_area), 1);

    V3_Free(vmx_state);

    return 0;
}
static int update_irq_exit_state(struct guest_info * info) {
    struct vmx_exit_idt_vec_info idt_vec_info;

    check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));

    if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 0)) {
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        V3_Print("Calling v3_injecting_intr\n");
#endif
        info->intr_core_state.irq_started = 0;
        v3_injecting_intr(info, info->intr_core_state.irq_vector, V3_EXTERNAL_IRQ);
    }

    return 0;
}
static int update_irq_entry_state(struct guest_info * info) {
    struct vmx_exit_idt_vec_info idt_vec_info;
    struct vmcs_interrupt_state intr_core_state;
    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);

    check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));
    check_vmcs_read(VMCS_GUEST_INT_STATE, &(intr_core_state));

    /* Check for pending exceptions to inject */
    if (v3_excp_pending(info)) {
        struct vmx_entry_int_info int_info;
        int_info.value = 0;

        // In VMX, almost every exception is hardware
        // Software exceptions are pretty much only for breakpoint or overflow
        int_info.type = 3;    // hardware exception
        int_info.vector = v3_get_excp_number(info);

        if (info->excp_state.excp_error_code_valid) {
            check_vmcs_write(VMCS_ENTRY_EXCP_ERR, info->excp_state.excp_error_code);
            int_info.error_code = 1;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
            V3_Print("Injecting exception %d with error code %x\n",
                     int_info.vector, info->excp_state.excp_error_code);
#endif
        }

        int_info.valid = 1;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        V3_Print("Injecting exception %d (EIP=%p)\n", int_info.vector, (void *)(addr_t)info->rip);
#endif

        check_vmcs_write(VMCS_ENTRY_INT_INFO, int_info.value);

        v3_injecting_excp(info, int_info.vector);
    } else if ((((struct rflags *)&(info->ctrl_regs.rflags))->intr == 1) &&
               (intr_core_state.val == 0)) {

        if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 1)) {

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
            V3_Print("IRQ pending from previous injection\n");
#endif

            // Copy the IDT vectoring info over to reinject the old interrupt
            if (idt_vec_info.error_code == 1) {
                uint32_t err_code = 0;

                check_vmcs_read(VMCS_IDT_VECTOR_ERR, &err_code);
                check_vmcs_write(VMCS_ENTRY_EXCP_ERR, err_code);
            }

            idt_vec_info.undef = 0;
            check_vmcs_write(VMCS_ENTRY_INT_INFO, idt_vec_info.value);
        } else {
            struct vmx_entry_int_info ent_int;
            ent_int.value = 0;

            switch (v3_intr_pending(info)) {
                case V3_EXTERNAL_IRQ: {
                    info->intr_core_state.irq_vector = v3_get_intr(info);
                    ent_int.vector = info->intr_core_state.irq_vector;
                    ent_int.type = 0;        // external interrupt
                    ent_int.error_code = 0;
                    ent_int.valid = 1;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
                    V3_Print("Injecting Interrupt %d at exit %u (EIP=%p)\n",
                             info->intr_core_state.irq_vector,
                             (uint32_t)info->num_exits,
                             (void *)(addr_t)info->rip);
#endif

                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
                    info->intr_core_state.irq_started = 1;
                    break;
                }
                case V3_NMI:
                    PrintDebug("Injecting NMI\n");
                    ent_int.type = 2;        // NMI
                    ent_int.vector = 2;
                    ent_int.valid = 1;
                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
                    break;
                case V3_SOFTWARE_INTR:
                    PrintDebug("Injecting software interrupt\n");
                    ent_int.type = 4;        // software interrupt
                    ent_int.valid = 1;
                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
                    break;
                case V3_VIRTUAL_IRQ:
                    // Not sure what to do here, Intel doesn't have virtual IRQs
                    // May be the same as external interrupts/IRQs
                    break;
                case V3_INVALID_INTR:
                default:
                    break;
            }
        }
    } else if ((v3_intr_pending(info)) && (vmx_info->pri_proc_ctrls.int_wndw_exit == 0)) {
        // Enable INTR window exiting so we know when IF=1
        uint32_t instr_len = 0;

        check_vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        V3_Print("Enabling Interrupt-Window exiting: %d\n", instr_len);
#endif

        vmx_info->pri_proc_ctrls.int_wndw_exit = 1;
        check_vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
    }

    return 0;
}
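
/*
 * The interrupt-window exit requested above fires as soon as the guest can
 * accept an external interrupt (RFLAGS.IF = 1 with no STI/MOV-SS blocking),
 * at which point v3_vmx_enter() clears the window control and the pending
 * interrupt is injected on the next entry through this function.
 */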
static struct vmx_exit_info exit_log[10];

static void print_exit_log(struct guest_info * info) {
    int cnt = info->num_exits % 10;
    int i = 0;

    V3_Print("\nExit Log (%d total exits):\n", (uint32_t)info->num_exits);

    for (i = 0; i < 10; i++) {
        struct vmx_exit_info * tmp = &exit_log[cnt];

        V3_Print("%d:\texit_reason = %p\n", i, (void *)(addr_t)tmp->exit_reason);
        V3_Print("\texit_qual = %p\n", (void *)tmp->exit_qual);
        V3_Print("\tint_info = %p\n", (void *)(addr_t)tmp->int_info);
        V3_Print("\tint_err = %p\n", (void *)(addr_t)tmp->int_err);
        V3_Print("\tinstr_info = %p\n", (void *)(addr_t)tmp->instr_info);

        // walk the circular log backwards from the most recent entry
        cnt--;

        if (cnt == -1) {
            cnt = 9;
        }
    }
}
/*
 * CAUTION and DANGER!!!
 *
 * The VMCS CANNOT(!!) be accessed outside of the cli/sti calls inside this function
 * When executing a symbiotic call, the VMCS WILL be overwritten, so any dependencies
 * on its contents will cause things to break. The contents at the time of the exit WILL
 * change before the exit handler is executed.
 */
int v3_vmx_enter(struct guest_info * info) {
    int ret = 0;
    uint32_t tsc_offset_low, tsc_offset_high;
    struct vmx_exit_info exit_info;
    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);

    // Conditionally yield the CPU if the timeslice has expired
    v3_yield_cond(info);

    // Perform any additional yielding needed for time adjustment
    v3_adjust_time(info);

    // disable global interrupts for vm state transition
    v3_disable_ints();

    // Update timer devices late after being in the VM so that as much
    // of the time in the VM is accounted for as possible. Also do it before
    // updating IRQ entry state so that any interrupts the timers raise get
    // handled on the next VM entry. Must be done with interrupts disabled.
    v3_update_timers(info);

    if (vmcs_store() != vmx_info->vmcs_ptr_phys) {
        vmcs_load(vmx_info->vmcs_ptr_phys);
    }

    v3_vmx_restore_vmcs(info);

#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_entry_state(info);
    }
#else
    update_irq_entry_state(info);
#endif
    addr_t guest_cr3 = 0;
    vmcs_read(VMCS_GUEST_CR3, &guest_cr3);
    vmcs_write(VMCS_GUEST_CR3, guest_cr3);

    // Perform last-minute time bookkeeping prior to entering the VM
    v3_time_enter_vm(info);

    tsc_offset_high = (uint32_t)((v3_tsc_host_offset(&info->time_state) >> 32) & 0xffffffff);
    tsc_offset_low = (uint32_t)(v3_tsc_host_offset(&info->time_state) & 0xffffffff);
    check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
    check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);
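
    /*
     * The TSC offset is a 64-bit VMCS field; it is written here as two
     * 32-bit halves (VMCS_TSC_OFFSET and VMCS_TSC_OFFSET_HIGH), which also
     * works for 32-bit hosts that cannot VMWRITE a full 64-bit value.
     */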
    if (v3_update_vmcs_host_state(info)) {
        v3_enable_ints();
        PrintError("Could not write host state\n");
        return -1;
    }
    if (vmx_info->state == VMX_UNLAUNCHED) {
        vmx_info->state = VMX_LAUNCHED;
        info->vm_info->run_state = VM_RUNNING;
        ret = v3_vmx_launch(&(info->vm_regs), info, &(info->ctrl_regs));
    } else {
        V3_ASSERT(vmx_info->state != VMX_UNLAUNCHED);
        ret = v3_vmx_resume(&(info->vm_regs), info, &(info->ctrl_regs));
    }

    //  PrintDebug("VMX Exit: ret=%d\n", ret);

    if (ret != VMX_SUCCESS) {
        uint32_t error = 0;
        vmcs_read(VMCS_INSTR_ERR, &error);

        v3_enable_ints();

        PrintError("VMENTRY Error: %d\n", error);
        return -1;
    }

    // Immediate exit from VM time bookkeeping
    v3_time_exit_vm(info);

    info->num_exits++;
    /* Update guest state */
    v3_vmx_save_vmcs(info);

    //    info->cpl = info->segments.cs.selector & 0x3;

    info->mem_mode = v3_get_vm_mem_mode(info);
    info->cpu_mode = v3_get_vm_cpu_mode(info);


    check_vmcs_read(VMCS_EXIT_INSTR_LEN, &(exit_info.instr_len));
    check_vmcs_read(VMCS_EXIT_INSTR_INFO, &(exit_info.instr_info));
    check_vmcs_read(VMCS_EXIT_REASON, &(exit_info.exit_reason));
    check_vmcs_read(VMCS_EXIT_QUAL, &(exit_info.exit_qual));
    check_vmcs_read(VMCS_EXIT_INT_INFO, &(exit_info.int_info));
    check_vmcs_read(VMCS_EXIT_INT_ERR, &(exit_info.int_err));
    check_vmcs_read(VMCS_GUEST_LINEAR_ADDR, &(exit_info.guest_linear_addr));

    if (info->shdw_pg_mode == NESTED_PAGING) {
        check_vmcs_read(VMCS_GUEST_PHYS_ADDR, &(exit_info.ept_fault_addr));
    }

    //PrintDebug("VMX Exit taken, id-qual: %u-%lu\n", exit_info.exit_reason, exit_info.exit_qual);

    exit_log[info->num_exits % 10] = exit_info;
#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_exit_state(info);
    }
#else
    update_irq_exit_state(info);
#endif

    if (exit_info.exit_reason == VMEXIT_INTR_WINDOW) {
        // This is a special case whose only job is to inject an interrupt
        vmcs_read(VMCS_PROC_CTRLS, &(vmx_info->pri_proc_ctrls.value));
        vmx_info->pri_proc_ctrls.int_wndw_exit = 0;
        vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        V3_Print("Interrupts available again! (RIP=%llx)\n", info->rip);
#endif
    }

    // reenable global interrupts after vm exit
    v3_enable_ints();

    // Conditionally yield the CPU if the timeslice has expired
    v3_yield_cond(info);

    if (v3_handle_vmx_exit(info, &exit_info) == -1) {
        PrintError("Error in VMX exit handler\n");
        return -1;
    }

    return 0;
}
int v3_start_vmx_guest(struct guest_info * info) {

    PrintDebug("Starting VMX core %u\n", info->vcpu_id);

    if (info->vcpu_id == 0) {
        info->core_run_state = CORE_RUNNING;
        info->vm_info->run_state = VM_RUNNING;
    } else {
        PrintDebug("VMX core %u: Waiting for core initialization\n", info->vcpu_id);

        while (info->core_run_state == CORE_STOPPED) {
            v3_yield(info);
            //PrintDebug("VMX core %u: still waiting for INIT\n",info->vcpu_id);
        }

        PrintDebug("VMX core %u initialized\n", info->vcpu_id);
    }

    PrintDebug("VMX core %u: I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n",
               info->vcpu_id, info->segments.cs.selector, (void *)(info->segments.cs.base),
               info->segments.cs.limit, (void *)(info->rip));

    PrintDebug("VMX core %u: Launching VMX VM on logical core %u\n", info->vcpu_id, info->pcpu_id);

    while (1) {
        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }

        if (v3_vmx_enter(info) == -1) {
            v3_print_vmcs();
            print_exit_log(info);
            return -1;
        }

        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }

        if ((info->num_exits % 5000) == 0) {
            V3_Print("VMX Exit number %d\n", (uint32_t)info->num_exits);
        }
    }

    return 0;
}
#define VMX_FEATURE_CONTROL_MSR 0x0000003a
#define CPUID_VMX_FEATURES 0x00000005 /* LOCK and VMXON */
#define CPUID_1_ECX_VTXFLAG 0x00000020

int v3_is_vmx_capable() {
    v3_msr_t feature_msr;
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(0x1, &eax, &ebx, &ecx, &edx);

    PrintDebug("ECX: 0x%x\n", ecx);

    if (ecx & CPUID_1_ECX_VTXFLAG) {
        v3_get_msr(VMX_FEATURE_CONTROL_MSR, &(feature_msr.hi), &(feature_msr.lo));

        PrintDebug("MSRREGlow: 0x%.8x\n", feature_msr.lo);

        if ((feature_msr.lo & CPUID_VMX_FEATURES) != CPUID_VMX_FEATURES) {
            PrintDebug("VMX is locked -- enable in the BIOS\n");
            return 0;
        }
    } else {
        PrintDebug("VMX not supported on this cpu\n");
        return 0;
    }

    return 1;
}
int v3_reset_vmx_vm_core(struct guest_info * core, addr_t rip) {

    if ((core->shdw_pg_mode == NESTED_PAGING) &&
        (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_UG_CPU)) {
        // Unrestricted guest: start the AP in real mode at the SIPI vector
        core->rip = 0;
        core->segments.cs.selector = rip << 8;
        core->segments.cs.limit = 0xffff;
        core->segments.cs.base = rip << 12;
    } else {
        core->vm_regs.rdx = core->vcpu_id;
        core->vm_regs.rbx = rip;
    }

    return 0;
}
void v3_init_vmx_cpu(int cpu_id) {

    if (v3_init_vmx_hw(&hw_info) == -1) {
        PrintError("Could not initialize VMX hardware features on cpu %d\n", cpu_id);
        return;
    }

    // Setup VMXON Region
    host_vmcs_ptrs[cpu_id] = allocate_vmcs();

    PrintDebug("VMXON pointer: 0x%p\n", (void *)host_vmcs_ptrs[cpu_id]);

    if (vmx_on(host_vmcs_ptrs[cpu_id]) == VMX_SUCCESS) {
        V3_Print("VMX Enabled\n");
    } else {
        PrintError("VMX initialization failure\n");
        return;
    }

    struct vmx_sec_proc_ctrls sec_proc_ctrls;
    sec_proc_ctrls.value = v3_vmx_get_ctrl_features(&(hw_info.sec_proc_ctrls));

    if (sec_proc_ctrls.enable_ept == 0) {
        V3_Print("VMX EPT (Nested) Paging not supported\n");
        v3_cpu_types[cpu_id] = V3_VMX_CPU;
    } else if (sec_proc_ctrls.unrstrct_guest == 0) {
        V3_Print("VMX EPT (Nested) Paging supported\n");
        v3_cpu_types[cpu_id] = V3_VMX_EPT_CPU;
    } else {
        V3_Print("VMX EPT (Nested) Paging + Unrestricted guest supported\n");
        v3_cpu_types[cpu_id] = V3_VMX_EPT_UG_CPU;
    }
}
void v3_deinit_vmx_cpu(int cpu_id) {
    extern v3_cpu_arch_t v3_cpu_types[];
    v3_cpu_types[cpu_id] = V3_INVALID_CPU;
    V3_FreePages((void *)host_vmcs_ptrs[cpu_id], 1);
}