/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Peter Dinda <pdinda@northwestern.edu>
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Peter Dinda <pdinda@northwestern.edu>
 *         Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
#include <palacios/vmx.h>
#include <palacios/vmm.h>
#include <palacios/vmx_handler.h>
#include <palacios/vmcs.h>
#include <palacios/vmx_lowlevel.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_config.h>
#include <palacios/vmm_time.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_direct_paging.h>
#include <palacios/vmx_io.h>
#include <palacios/vmx_msr.h>
#ifndef CONFIG_DEBUG_VMX
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
static addr_t host_vmcs_ptrs[CONFIG_MAX_CPUS] = { [0 ... CONFIG_MAX_CPUS - 1] = 0};
static addr_t active_vmcs_ptrs[CONFIG_MAX_CPUS] = { [0 ... CONFIG_MAX_CPUS - 1] = 0};

extern int v3_vmx_launch(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
extern int v3_vmx_resume(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
static int inline check_vmcs_write(vmcs_field_t field, addr_t val) {
    int ret = vmcs_write(field, val);

    if (ret != VMX_SUCCESS) {
        PrintError("VMWRITE error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
    }
    return ret;
}
static int inline check_vmcs_read(vmcs_field_t field, void * val) {
    int ret = vmcs_read(field, val);

    if (ret != VMX_SUCCESS) {
        PrintError("VMREAD error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
    }
    return ret;
}
// For the 32 bit reserved bit fields
// MB1s are in the low 32 bits, MBZs are in the high 32 bits of the MSR
static uint32_t sanitize_bits1(uint32_t msr_num, uint32_t val) {
    v3_msr_t mask_msr;

    PrintDebug("sanitize_bits1 (MSR:%x)\n", msr_num);
    v3_get_msr(msr_num, &mask_msr.hi, &mask_msr.lo);
    PrintDebug("MSR %x = %x : %x \n", msr_num, mask_msr.hi, mask_msr.lo);

    val |= mask_msr.lo;   /* set the must-be-one bits */
    val &= mask_msr.hi;   /* clear the must-be-zero bits */
    return val;
}
static addr_t sanitize_bits2(uint32_t msr_num0, uint32_t msr_num1, addr_t val) {
    v3_msr_t msr0, msr1;
    addr_t msr0_val, msr1_val;

    PrintDebug("sanitize_bits2 (MSR0=%x, MSR1=%x)\n", msr_num0, msr_num1);

    v3_get_msr(msr_num0, &msr0.hi, &msr0.lo);
    v3_get_msr(msr_num1, &msr1.hi, &msr1.lo);

    // This generates a mask that is the natural bit width of the CPU
    msr0_val = msr0.value;
    msr1_val = msr1.value;

    PrintDebug("MSR %x = %p, %x = %p \n", msr_num0, (void*)msr0_val, msr_num1, (void*)msr1_val);

    val |= msr0_val;   /* force on the bits that must be one */
    val &= msr1_val;   /* force off the bits that must be zero */
    return val;
}
static addr_t allocate_vmcs() {
    reg_ex_t msr;
    struct vmcs_data * vmcs_page = NULL;

    PrintDebug("Allocating page\n");

    vmcs_page = (struct vmcs_data *)V3_VAddr(V3_AllocPages(1));
    memset(vmcs_page, 0, 4096);

    v3_get_msr(VMX_BASIC_MSR, &(msr.e_reg.high), &(msr.e_reg.low));

    vmcs_page->revision = ((struct vmx_basic_msr*)&msr)->revision;
    PrintDebug("VMX Revision: 0x%x\n", vmcs_page->revision);

    return (addr_t)V3_PAddr((void *)vmcs_page);
}
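/*
 * The revision stamp above is required: before a region can be used with
 * VMPTRLD (or, for the VMXON region, with VMXON), software must write the
 * VMCS revision identifier reported in the low bits of IA32_VMX_BASIC into
 * the first 32-bit word of the region.  allocate_vmcs() is therefore also
 * reused below in v3_init_vmx_cpu() to set up each core's VMXON region.
 */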
static int init_vmcs_bios(struct guest_info * info, struct vmx_data * vmx_state) {
    int vmx_ret = 0;
    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);

    // disable global interrupts for vm state initialization

    PrintDebug("Loading VMCS\n");
    vmx_ret = vmcs_load(vmx_state->vmcs_ptr_phys);
    active_vmcs_ptrs[V3_Get_CPU()] = vmx_info->vmcs_ptr_phys;
    vmx_state->state = VMX_UNLAUNCHED;

    if (vmx_ret != VMX_SUCCESS) {
        PrintError("VMPTRLD failed\n");
        return -1;
    }
    /******* Setup Host State **********/

    /* Cache GDTR, IDTR, and TR in host struct */
    addr_t gdtr_base;
    struct {
        uint16_t selector;
        addr_t   base;
    } __attribute__((packed)) tmp_seg;

    __asm__ __volatile__("sgdt (%0);" : : "q" (&tmp_seg) : "memory");
    gdtr_base = tmp_seg.base;
    vmx_state->host_state.gdtr.base = gdtr_base;

    __asm__ __volatile__("sidt (%0);" : : "q" (&tmp_seg) : "memory");
    vmx_state->host_state.idtr.base = tmp_seg.base;

    __asm__ __volatile__("str (%0);" : : "q" (&tmp_seg) : "memory");
    vmx_state->host_state.tr.selector = tmp_seg.selector;

    /* The GDTR *index* is bits 3-15 of the selector. */
    struct tss_descriptor * desc = NULL;
    desc = (struct tss_descriptor *)(gdtr_base + (8 * (tmp_seg.selector >> 3)));

    tmp_seg.base = ((desc->base1) |
                    (desc->base2 << 16) |
                    (desc->base3 << 24) |
                    ((uint64_t)desc->base4 << 32));

    vmx_state->host_state.tr.base = tmp_seg.base;
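    /*
     * Unlike SGDT/SIDT, STR only yields the TR *selector*; the base has to be
     * pulled out of the TSS descriptor in the host GDT, whose base address is
     * scattered across several descriptor fields (bits 15:0, 23:16, and
     * 31:24, plus an upper dword for a 64-bit TSS) -- hence the shifts above.
     * The VMCS host-state area needs this base so the host TR is valid on
     * every VM exit.
     */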
    /********** Setup VMX Control Fields from MSRs ***********/

    struct v3_msr tmp_msr;

    v3_get_msr(VMX_PINBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));

    /* Add external interrupts, NMI exiting, and virtual NMI */
    vmx_state->pin_ctrls.value = tmp_msr.lo;
    vmx_state->pin_ctrls.nmi_exit = 1;
    vmx_state->pin_ctrls.ext_int_exit = 1;

    v3_get_msr(VMX_PROCBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_state->pri_proc_ctrls.value = tmp_msr.lo;
    vmx_state->pri_proc_ctrls.use_io_bitmap = 1;
    vmx_state->pri_proc_ctrls.hlt_exit = 1;
    vmx_state->pri_proc_ctrls.invlpg_exit = 1;
    vmx_state->pri_proc_ctrls.use_msr_bitmap = 1;
    vmx_state->pri_proc_ctrls.pause_exit = 1;
    vmx_state->pri_proc_ctrls.tsc_offset = 1;
#ifdef CONFIG_TIME_VIRTUALIZE_TSC
    vmx_state->pri_proc_ctrls.rdtsc_exit = 1;
#endif
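    /*
     * With use_io_bitmap and use_msr_bitmap set, I/O-port and MSR accesses
     * only force VM exits when the corresponding bit is set in the bitmaps
     * written to the VMCS just below; HLT, INVLPG, and PAUSE always exit.
     * tsc_offset makes the hardware add the value written to VMCS_TSC_OFFSET
     * to guest RDTSC results, while rdtsc_exit (when TSC virtualization is
     * configured) traps RDTSC to the VMM instead.
     */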
    vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_A_ADDR, (addr_t)V3_PAddr(info->vm_info->io_map.arch_data));
    vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_B_ADDR,
                                (addr_t)V3_PAddr(info->vm_info->io_map.arch_data) + PAGE_SIZE_4KB);

    vmx_ret |= check_vmcs_write(VMCS_MSR_BITMAP, (addr_t)V3_PAddr(info->vm_info->msr_map.arch_data));
    v3_get_msr(VMX_EXIT_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_state->exit_ctrls.value = tmp_msr.lo;
    vmx_state->exit_ctrls.host_64_on = 1;

    if ((vmx_state->exit_ctrls.save_efer == 1) || (vmx_state->exit_ctrls.ld_efer == 1)) {
        vmx_state->ia32e_avail = 1;
    }

    v3_get_msr(VMX_ENTRY_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_state->entry_ctrls.value = tmp_msr.lo;
    struct vmx_exception_bitmap excp_bmap;

    vmx_ret |= check_vmcs_write(VMCS_EXCP_BITMAP, excp_bmap.value);
    /******* Setup VMXAssist guest state ***********/

    info->vm_regs.rsp = 0x80000;

    struct rflags * flags = (struct rflags *)&(info->ctrl_regs.rflags);

    /* Print Control MSRs */
    v3_get_msr(VMX_CR0_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    PrintDebug("CR0 MSR: %p\n", (void *)(addr_t)tmp_msr.value);

    v3_get_msr(VMX_CR4_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    PrintDebug("CR4 MSR: %p\n", (void *)(addr_t)tmp_msr.value);

#define GUEST_CR0 0x80000031
#define GUEST_CR4 0x00002000
    info->ctrl_regs.cr0 = GUEST_CR0;
    info->ctrl_regs.cr4 = GUEST_CR4;

    ((struct cr0_32 *)&(info->shdw_pg_state.guest_cr0))->pe = 1;
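    /*
     * GUEST_CR0 (0x80000031) sets PG, NE, ET, and PE, i.e. the guest
     * physically starts in protected mode with paging enabled so that
     * VMXAssist can run; GUEST_CR4 (0x00002000) is just CR4.VMXE, which the
     * CR4 fixed-bit MSRs require to stay set while the CPU is in VMX
     * operation.  The shadowed copy of CR0 only gets PE set above, keeping
     * the paging state the VMM imposes hidden from the guest's view.
     */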
    if (info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");

        if (v3_init_passthrough_pts(info) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return -1;
        }

#define CR0_PE 0x00000001
#define CR0_PG 0x80000000

        vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG) );
        vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE);

        info->ctrl_regs.cr3 = info->direct_map_pt;

        // vmx_state->pinbased_ctrls |= NMI_EXIT;

        vmx_state->pri_proc_ctrls.cr3_ld_exit = 1;
        vmx_state->pri_proc_ctrls.cr3_str_exit = 1;
    }
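    /*
     * Under shadow paging the hardware walks the VMM-maintained passthrough
     * tables (info->direct_map_pt is what actually lands in CR3), so guest
     * CR3 loads/stores must exit for Palacios to translate them, and the CR0
     * PE/PG mask bits above trap any attempt by the guest to change its
     * paging mode.
     */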
    // Setup segment registers
    struct v3_segment * seg_reg = (struct v3_segment *)&(info->segments);
    int i;

    for (i = 0; i < 10; i++) {
        seg_reg[i].selector = 3 << 3;
        seg_reg[i].limit = 0xffff;
        seg_reg[i].base = 0x0;
    }

    info->segments.cs.selector = 2 << 3;

    /* Set only the segment registers */
    for (i = 0; i < 6; i++) {
        seg_reg[i].limit = 0xfffff;
        seg_reg[i].granularity = 1;
        seg_reg[i].system = 1;
        seg_reg[i].present = 1;
    }

    info->segments.cs.type = 0xb;

    info->segments.ldtr.selector = 0x20;
    info->segments.ldtr.type = 2;
    info->segments.ldtr.system = 0;
    info->segments.ldtr.present = 1;
    info->segments.ldtr.granularity = 0;
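    /*
     * VMX on this generation of hardware (no "unrestricted guest" support)
     * requires the guest to enter in protected mode with consistent segment
     * state, so the core does not start at the real-mode BIOS entry point.
     * Instead it starts inside VMXAssist, the protected-mode stub mapped in
     * below, which handles real-mode guest code on the VMM's behalf.  The
     * selectors chosen here index the small GDT copied in next: 0x10 is the
     * 32-bit CS entry, 0x18 the DS entry, 0x20 the LDT entry, and 0x08 the
     * TSS entry.
     */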
    /************* Map in GDT and vmxassist *************/

    uint64_t gdt[] __attribute__ ((aligned(32))) = {
        0x0000000000000000ULL,   /* 0x00: reserved */
        0x0000830000000000ULL,   /* 0x08: 32-bit TSS */
        //0x0000890000000000ULL, /* 0x08: 32-bit TSS */
        0x00CF9b000000FFFFULL,   /* 0x10: CS 32-bit */
        0x00CF93000000FFFFULL,   /* 0x18: DS 32-bit */
        0x000082000000FFFFULL,   /* 0x20: LDTR 32-bit */
    };

#define VMXASSIST_GDT 0x10000
    addr_t vmxassist_gdt = 0;

    if (v3_gpa_to_hva(info, VMXASSIST_GDT, &vmxassist_gdt) == -1) {
        PrintError("Could not find VMXASSIST GDT destination\n");
        return -1;
    }

    memcpy((void *)vmxassist_gdt, gdt, sizeof(uint64_t) * 5);

    info->segments.gdtr.base = VMXASSIST_GDT;
#define VMXASSIST_TSS 0x40000
    uint64_t vmxassist_tss = VMXASSIST_TSS;
    gdt[0x08 / sizeof(gdt[0])] |=
        ((vmxassist_tss & 0xFF000000) << (56 - 24)) |
        ((vmxassist_tss & 0x00FF0000) << (32 - 16)) |
        ((vmxassist_tss & 0x0000FFFF) << (16));
    info->segments.tr.selector = 0x08;
    info->segments.tr.base = vmxassist_tss;

    //info->segments.tr.type = 0x9;
    info->segments.tr.type = 0x3;
    info->segments.tr.system = 0;
    info->segments.tr.present = 1;
    info->segments.tr.granularity = 0;
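    /*
     * The GDT image above holds a TSS descriptor at offset 0x08 with a base
     * of zero; the |= a few lines up scatters VMXASSIST_TSS into that
     * descriptor's split base fields (base bits 15:0 live in descriptor bits
     * 31:16, bits 23:16 in bits 39:32, and bits 31:24 in bits 63:56).  TR is
     * then pointed at the same selector and base so the guest task state
     * passes the VM-entry checks.
     */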
#define VMXASSIST_START 0x000d0000
    extern uint8_t v3_vmxassist_start[];
    extern uint8_t v3_vmxassist_end[];
    addr_t vmxassist_dst = 0;

    if (v3_gpa_to_hva(info, VMXASSIST_START, &vmxassist_dst) == -1) {
        PrintError("Could not find VMXASSIST destination\n");
        return -1;
    }

    memcpy((void *)vmxassist_dst, v3_vmxassist_start, v3_vmxassist_end - v3_vmxassist_start);
    /*** Write all the info to the VMCS ***/

#define DEBUGCTL_MSR 0x1d9
    v3_get_msr(DEBUGCTL_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_ret |= check_vmcs_write(VMCS_GUEST_DBG_CTL, tmp_msr.value);

    info->dbg_regs.dr7 = 0x400;

#ifdef __V3_64BIT__
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, (addr_t)0xffffffffffffffffULL);
#else
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, (addr_t)0xffffffffUL);
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR_HIGH, (addr_t)0xffffffffUL);
#endif
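    /*
     * The VMCS link pointer must be all ones when VMCS shadowing is not in
     * use, otherwise VM entry fails its guest-state checks.  The field is 64
     * bits wide, so a 32-bit build has to write it as two halves via the
     * _HIGH encoding.
     */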
    if (v3_update_vmcs_ctrl_fields(info)) {
        PrintError("Could not write control fields!\n");
        return -1;
    }

    if (v3_update_vmcs_host_state(info)) {
        PrintError("Could not write host state\n");
        return -1;
    }

    vmx_state->assist_state = VMXASSIST_DISABLED;

    // reenable global interrupts for vm state initialization now
    // that the vm state is initialized. If another VM kicks us off,
    // it'll update our vmx state so that we know to reload ourself
    return 0;
}
int v3_init_vmx_vmcs(struct guest_info * info, v3_vm_class_t vm_class) {
    struct vmx_data * vmx_state = NULL;
    int vmx_ret = 0;

    vmx_state = (struct vmx_data *)V3_Malloc(sizeof(struct vmx_data));

    PrintDebug("vmx_data pointer: %p\n", (void *)vmx_state);

    PrintDebug("Allocating VMCS\n");
    vmx_state->vmcs_ptr_phys = allocate_vmcs();

    PrintDebug("VMCS pointer: %p\n", (void *)(vmx_state->vmcs_ptr_phys));

    info->vmm_data = vmx_state;
    vmx_state->state = VMX_UNLAUNCHED;

    PrintDebug("Initializing VMCS (addr=%p)\n", info->vmm_data);

    // TODO: Fix vmcs fields so they're 32-bit

    PrintDebug("Clearing VMCS: %p\n", (void *)vmx_state->vmcs_ptr_phys);
    vmx_ret = vmcs_clear(vmx_state->vmcs_ptr_phys);

    if (vmx_ret != VMX_SUCCESS) {
        PrintError("VMCLEAR failed\n");
        return -1;
    }

    if (vm_class == V3_PC_VM) {
        PrintDebug("Initializing VMCS\n");
        init_vmcs_bios(info, vmx_state);
    } else {
        PrintError("Invalid VM Class\n");
        return -1;
    }

    return 0;
}
int v3_deinit_vmx_vmcs(struct guest_info * core) {
    struct vmx_data * vmx_state = core->vmm_data;

    V3_FreePages((void *)(vmx_state->vmcs_ptr_phys), 1);

    return 0;
}
static int update_irq_exit_state(struct guest_info * info) {
    struct vmx_exit_idt_vec_info idt_vec_info;

    check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));

    if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 0)) {
#ifdef CONFIG_DEBUG_INTERRUPTS
        PrintDebug("Calling v3_injecting_intr\n");
#endif
        info->intr_core_state.irq_started = 0;
        v3_injecting_intr(info, info->intr_core_state.irq_vector, V3_EXTERNAL_IRQ);
    }

    return 0;
}
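/*
 * The IDT-vectoring information field is only marked valid when the exit
 * happened part-way through delivering an event.  So if an external IRQ was
 * injected on the last entry and the field is now invalid, the interrupt was
 * delivered and the pending state can be retired via v3_injecting_intr();
 * the opposite case (the exit interrupted the delivery) is re-injected by
 * update_irq_entry_state() below.
 */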
static int update_irq_entry_state(struct guest_info * info) {
    struct vmx_exit_idt_vec_info idt_vec_info;
    struct vmcs_interrupt_state intr_core_state;
    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);

    check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));
    check_vmcs_read(VMCS_GUEST_INT_STATE, &(intr_core_state));

    /* Check for pending exceptions to inject */
    if (v3_excp_pending(info)) {
        struct vmx_entry_int_info int_info;
        int_info.value = 0;

        // In VMX, almost every exception is hardware
        // Software exceptions are pretty much only for breakpoint or overflow
        int_info.vector = v3_get_excp_number(info);
        if (info->excp_state.excp_error_code_valid) {
            check_vmcs_write(VMCS_ENTRY_EXCP_ERR, info->excp_state.excp_error_code);
            int_info.error_code = 1;

#ifdef CONFIG_DEBUG_INTERRUPTS
            PrintDebug("Injecting exception %d with error code %x\n",
                       int_info.vector, info->excp_state.excp_error_code);
#endif
        }

#ifdef CONFIG_DEBUG_INTERRUPTS
        PrintDebug("Injecting exception %d (EIP=%p)\n", int_info.vector, (void *)(addr_t)info->rip);
#endif
        check_vmcs_write(VMCS_ENTRY_INT_INFO, int_info.value);

        v3_injecting_excp(info, int_info.vector);
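        /*
         * VMCS_ENTRY_INT_INFO follows the VM-entry interruption-information
         * layout: vector in bits 7:0, event type in bits 10:8 (external
         * interrupt, NMI, hardware exception, software exception, ...), a
         * "deliver error code" flag in bit 11, and a valid bit in bit 31.
         * Exceptions injected here are delivered as hardware exceptions per
         * the comment above, with the error code (when one applies) supplied
         * separately through VMCS_ENTRY_EXCP_ERR.
         */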
    } else if ((((struct rflags *)&(info->ctrl_regs.rflags))->intr == 1) &&
               (intr_core_state.val == 0)) {

        if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 1)) {

#ifdef CONFIG_DEBUG_INTERRUPTS
            PrintDebug("IRQ pending from previous injection\n");
#endif

            // Copy the IDT vectoring info over to reinject the old interrupt
            if (idt_vec_info.error_code == 1) {
                uint32_t err_code = 0;

                check_vmcs_read(VMCS_IDT_VECTOR_ERR, &err_code);
                check_vmcs_write(VMCS_ENTRY_EXCP_ERR, err_code);
            }

            idt_vec_info.undef = 0;
            check_vmcs_write(VMCS_ENTRY_INT_INFO, idt_vec_info.value);

        } else {
            struct vmx_entry_int_info ent_int;

            switch (v3_intr_pending(info)) {
                case V3_EXTERNAL_IRQ: {
                    info->intr_core_state.irq_vector = v3_get_intr(info);
                    ent_int.vector = info->intr_core_state.irq_vector;
                    ent_int.error_code = 0;

#ifdef CONFIG_DEBUG_INTERRUPTS
                    PrintDebug("Injecting Interrupt %d at exit %u(EIP=%p)\n",
                               info->intr_core_state.irq_vector,
                               (uint32_t)info->num_exits,
                               (void *)(addr_t)info->rip);
#endif

                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
                    info->intr_core_state.irq_started = 1;
                    break;
                }
                case V3_NMI:
                    PrintDebug("Injecting NMI\n");
                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
                    break;
                case V3_SOFTWARE_INTR:
                    PrintDebug("Injecting software interrupt\n");
                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
                    break;
                case V3_VIRTUAL_IRQ:
                    // Not sure what to do here, Intel doesn't have virtual IRQs
                    // May be the same as external interrupts/IRQs
                    break;
                case V3_INVALID_INTR:
                default:
                    break;
            }
        }
    } else if ((v3_intr_pending(info)) && (vmx_info->pri_proc_ctrls.int_wndw_exit == 0)) {
        // Enable INTR window exiting so we know when IF=1
        uint32_t instr_len = 0;

        check_vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);

#ifdef CONFIG_DEBUG_INTERRUPTS
        PrintDebug("Enabling Interrupt-Window exiting: %d\n", instr_len);
#endif

        vmx_info->pri_proc_ctrls.int_wndw_exit = 1;
        check_vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
    }

    return 0;
}
static struct vmx_exit_info exit_log[10];

static void print_exit_log(struct guest_info * info) {
    int cnt = info->num_exits % 10;
    int i = 0;

    V3_Print("\nExit Log (%d total exits):\n", (uint32_t)info->num_exits);

    for (i = 0; i < 10; i++) {
        struct vmx_exit_info * tmp = &exit_log[cnt];

        V3_Print("%d:\texit_reason = %p\n", i, (void *)(addr_t)tmp->exit_reason);
        V3_Print("\texit_qual = %p\n", (void *)tmp->exit_qual);
        V3_Print("\tint_info = %p\n", (void *)(addr_t)tmp->int_info);
        V3_Print("\tint_err = %p\n", (void *)(addr_t)tmp->int_err);
        V3_Print("\tinstr_info = %p\n", (void *)(addr_t)tmp->instr_info);

        cnt = (cnt + 1) % 10;   /* walk the ring from oldest to newest entry */
    }
}
/*
 * CAUTION and DANGER!!!
 *
 * The VMCS CANNOT(!!) be accessed outside of the cli/sti calls inside this function
 * When executing a symbiotic call, the VMCS WILL be overwritten, so any dependencies
 * on its contents will cause things to break. The contents at the time of the exit WILL
 * change before the exit handler is executed.
 */
int v3_vmx_enter(struct guest_info * info) {
    int ret = 0;
    uint32_t tsc_offset_low, tsc_offset_high;
    struct vmx_exit_info exit_info;
    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);

    // Conditionally yield the CPU if the timeslice has expired

    // Perform any additional yielding needed for time adjustment
    v3_adjust_time(info);

    // Update timer devices prior to entering VM.
    v3_update_timers(info);

    // disable global interrupts for vm state transition

    v3_vmx_restore_vmcs(info);
#ifdef CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_entry_state(info);
    }
#else
    update_irq_entry_state(info);
#endif

    {
        addr_t guest_cr3;
        vmcs_read(VMCS_GUEST_CR3, &guest_cr3);
        vmcs_write(VMCS_GUEST_CR3, guest_cr3);
    }
    // Perform last-minute time bookkeeping prior to entering the VM
    v3_time_enter_vm(info);

    tsc_offset_high = (uint32_t)((v3_tsc_host_offset(&info->time_state) >> 32) & 0xffffffff);
    tsc_offset_low = (uint32_t)(v3_tsc_host_offset(&info->time_state) & 0xffffffff);
    check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
    check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);
    if (active_vmcs_ptrs[V3_Get_CPU()] != vmx_info->vmcs_ptr_phys) {
        vmcs_load(vmx_info->vmcs_ptr_phys);
        active_vmcs_ptrs[V3_Get_CPU()] = vmx_info->vmcs_ptr_phys;
    }

    if (vmx_info->state == VMX_UNLAUNCHED) {
        vmx_info->state = VMX_LAUNCHED;
        info->vm_info->run_state = VM_RUNNING;
        ret = v3_vmx_launch(&(info->vm_regs), info, &(info->ctrl_regs));
    } else {
        V3_ASSERT(vmx_info->state != VMX_UNLAUNCHED);
        ret = v3_vmx_resume(&(info->vm_regs), info, &(info->ctrl_regs));
    }
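    /*
     * VMLAUNCH is only legal for a VMCS that has not been launched since its
     * last VMCLEAR, and VMRESUME only for one that has; vmx_info->state
     * tracks which side of that line this VMCS is on (it starts out
     * VMX_UNLAUNCHED when the VMCS is VMCLEARed in v3_init_vmx_vmcs()).  The
     * vmcs_load() above also makes this core's current VMCS ours again in
     * case another guest's VMCS was active here in the meantime.
     */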
    //  PrintDebug("VMX Exit: ret=%d\n", ret);

    if (ret != VMX_SUCCESS) {
        uint32_t error = 0;

        vmcs_read(VMCS_INSTR_ERR, &error);
        PrintError("VMENTRY Error: %d\n", error);
        return -1;
    }

    // Immediate exit from VM time bookkeeping
    v3_time_exit_vm(info);
    /* Update guest state */
    v3_vmx_save_vmcs(info);

    // info->cpl = info->segments.cs.selector & 0x3;

    info->mem_mode = v3_get_vm_mem_mode(info);
    info->cpu_mode = v3_get_vm_cpu_mode(info);

    check_vmcs_read(VMCS_EXIT_INSTR_LEN, &(exit_info.instr_len));
    check_vmcs_read(VMCS_EXIT_INSTR_INFO, &(exit_info.instr_info));
    check_vmcs_read(VMCS_EXIT_REASON, &(exit_info.exit_reason));
    check_vmcs_read(VMCS_EXIT_QUAL, &(exit_info.exit_qual));
    check_vmcs_read(VMCS_EXIT_INT_INFO, &(exit_info.int_info));
    check_vmcs_read(VMCS_EXIT_INT_ERR, &(exit_info.int_err));
    check_vmcs_read(VMCS_GUEST_LINEAR_ADDR, &(exit_info.guest_linear_addr));

    //PrintDebug("VMX Exit taken, id-qual: %u-%lu\n", exit_info.exit_reason, exit_info.exit_qual);

    exit_log[info->num_exits % 10] = exit_info;
#ifdef CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_exit_state(info);
    }
#else
    update_irq_exit_state(info);
#endif
    // Handle any exits needed still in the atomic section
    if (v3_handle_vmx_exit(info, &exit_info) == -1) {
        PrintError("Error in atomic VMX exit handler\n");
        return -1;
    }

    // reenable global interrupts after vm exit

    // Conditionally yield the CPU if the timeslice has expired

    if (v3_handle_vmx_exit(info, &exit_info) == -1) {
        PrintError("Error in VMX exit handler\n");
        return -1;
    }

    return 0;
}
int v3_start_vmx_guest(struct guest_info * info) {

    PrintDebug("Starting VMX core %u\n", info->cpu_id);

    if (info->cpu_id == 0) {
        info->core_run_state = CORE_RUNNING;
        info->vm_info->run_state = VM_RUNNING;
    } else {
        PrintDebug("VMX core %u: Waiting for core initialization\n", info->cpu_id);

        while (info->core_run_state == CORE_STOPPED) {
            //PrintDebug("VMX core %u: still waiting for INIT\n",info->cpu_id);
        }

        PrintDebug("VMX core %u initialized\n", info->cpu_id);
    }

    PrintDebug("VMX core %u: I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n",
               info->cpu_id, info->segments.cs.selector, (void *)(info->segments.cs.base),
               info->segments.cs.limit, (void *)(info->rip));

    PrintDebug("VMX core %u: Launching VMX VM\n", info->cpu_id);
    while (1) {
        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }

        if (v3_vmx_enter(info) == -1) {
            print_exit_log(info);
            return -1;
        }

        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }

        if ((info->num_exits % 5000) == 0) {
            V3_Print("VMX Exit number %d\n", (uint32_t)info->num_exits);
        }
    }

    return 0;
}
int v3_is_vmx_capable() {
    v3_msr_t feature_msr;
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(0x1, &eax, &ebx, &ecx, &edx);

    PrintDebug("ECX: 0x%x\n", ecx);

    if (ecx & CPUID_1_ECX_VTXFLAG) {
        v3_get_msr(VMX_FEATURE_CONTROL_MSR, &(feature_msr.hi), &(feature_msr.lo));

        PrintDebug("MSRREGlow: 0x%.8x\n", feature_msr.lo);

        if ((feature_msr.lo & FEATURE_CONTROL_VALID) != FEATURE_CONTROL_VALID) {
            PrintDebug("VMX is locked -- enable in the BIOS\n");
            return 0;
        }
    } else {
        PrintDebug("VMX not supported on this cpu\n");
        return 0;
    }

    return 1;
}
static int has_vmx_nested_paging() {
    // EPT support is not implemented in this code, so never report it
    return 0;
}
void v3_init_vmx_cpu(int cpu_id) {
    extern v3_cpu_arch_t v3_cpu_types[];
    struct v3_msr tmp_msr;
    uint64_t ret = 0;

    v3_get_msr(VMX_CR4_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));

#ifdef __V3_64BIT__
    __asm__ __volatile__ (
        "movq %%cr4, %%rbx;"
        "orq $0x00002000, %%rbx;"
        "movq %%rbx, %0;"
        : "=m"(ret) : : "%rbx");

    if ((~ret & tmp_msr.value) == 0) {
        __asm__ __volatile__ ("movq %0, %%cr4;" : : "q"(ret));
    } else {
        PrintError("Invalid CR4 Settings!\n");
        return;
    }

    __asm__ __volatile__ (
        "movq %%cr0, %%rbx; "
        "orq $0x00000020,%%rbx; "
        "movq %%rbx, %%cr0;" : : : "%rbx");
#else
    __asm__ __volatile__ (
        "movl %%cr4, %%ecx;"
        "orl $0x00002000, %%ecx;"
        "movl %%ecx, %0;"
        : "=m"(ret) : : "%ecx");

    if ((~ret & tmp_msr.value) == 0) {
        __asm__ __volatile__ ("movl %0, %%cr4;" : : "q"(ret));
    } else {
        PrintError("Invalid CR4 Settings!\n");
        return;
    }

    __asm__ __volatile__ (
        "movl %%cr0, %%ecx; "
        "orl $0x00000020,%%ecx; "
        "movl %%ecx, %%cr0;" : : : "%ecx");
#endif

    // Should check and return Error here....
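    /*
     * What the assembly above is doing: VMXON requires CR4.VMXE (bit 13, the
     * 0x00002000 OR'ed in) to be set, CR0.NE (the 0x00000020) to be set, and
     * the resulting CR0/CR4 values to respect the VMX fixed-bit MSRs -- the
     * comparison against VMX_CR4_FIXED0_MSR checks exactly that before the
     * new CR4 value is installed.  As the comment notes, the failure path is
     * not reported to the caller here.
     */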
    // Setup VMXON Region
    host_vmcs_ptrs[cpu_id] = allocate_vmcs();

    PrintDebug("VMXON pointer: 0x%p\n", (void *)host_vmcs_ptrs[cpu_id]);

    if (v3_enable_vmx(host_vmcs_ptrs[cpu_id]) == VMX_SUCCESS) {
        PrintDebug("VMX Enabled\n");
    } else {
        PrintError("VMX initialization failure\n");
        return;
    }

    if (has_vmx_nested_paging() == 1) {
        v3_cpu_types[cpu_id] = V3_VMX_EPT_CPU;
    } else {
        v3_cpu_types[cpu_id] = V3_VMX_CPU;
    }
}
void v3_deinit_vmx_cpu(int cpu_id) {
    extern v3_cpu_arch_t v3_cpu_types[];
    v3_cpu_types[cpu_id] = V3_INVALID_CPU;
    V3_FreePages((void *)host_vmcs_ptrs[cpu_id], 1);
}