/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Peter Dinda <pdinda@northwestern.edu>
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Peter Dinda <pdinda@northwestern.edu>
 *         Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
#include <palacios/vmx.h>
#include <palacios/vmm.h>
#include <palacios/vmx_handler.h>
#include <palacios/vmcs.h>
#include <palacios/vmx_lowlevel.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_config.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_direct_paging.h>
#include <palacios/vmx_io.h>
#include <palacios/vmx_msr.h>
#ifndef CONFIG_DEBUG_VMX
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif

/* Per-CPU physical addresses of the VMXON regions */
static addr_t host_vmcs_ptrs[CONFIG_MAX_CPUS] = { [0 ... CONFIG_MAX_CPUS - 1] = 0 };

extern int v3_vmx_launch(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
extern int v3_vmx_resume(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);

static inline int check_vmcs_write(vmcs_field_t field, addr_t val) {
    int ret = vmcs_write(field, val);

    if (ret != VMX_SUCCESS) {
        PrintError("VMWRITE error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
    }

    return ret;
}

static inline int check_vmcs_read(vmcs_field_t field, void * val) {
    int ret = vmcs_read(field, val);

    if (ret != VMX_SUCCESS) {
        PrintError("VMREAD error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
    }

    return ret;
}
// For the 32-bit reserved bit fields:
// MB1s are in the low 32 bits, MBZs are in the high 32 bits of the MSR
static uint32_t sanitize_bits1(uint32_t msr_num, uint32_t val) {
    v3_msr_t mask_msr;

    PrintDebug("sanitize_bits1 (MSR:%x)\n", msr_num);

    v3_get_msr(msr_num, &mask_msr.hi, &mask_msr.lo);

    PrintDebug("MSR %x = %x : %x \n", msr_num, mask_msr.hi, mask_msr.lo);

    val |= mask_msr.lo;   // force the must-be-one bits on
    val &= mask_msr.hi;   // clear the must-be-zero bits

    return val;
}
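/*
 * Worked example (hypothetical MSR contents): if an allowed-settings MSR
 * reads lo = 0x00000016 (must-be-one bits) and hi = 0x0000fe56 (bits allowed
 * to be one), then sanitize_bits1(msr, 0x00000100) computes:
 *     0x00000100 | 0x00000016 = 0x00000116   (set the MB1 bits)
 *     0x00000116 & 0x0000fe56 = 0x00000016   (drop disallowed bits)
 * so the requested bit 8 is silently dropped because the CPU forbids it.
 */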
static addr_t sanitize_bits2(uint32_t msr_num0, uint32_t msr_num1, addr_t val) {
    v3_msr_t msr0, msr1;
    addr_t msr0_val, msr1_val;

    PrintDebug("sanitize_bits2 (MSR0=%x, MSR1=%x)\n", msr_num0, msr_num1);

    v3_get_msr(msr_num0, &msr0.hi, &msr0.lo);
    v3_get_msr(msr_num1, &msr1.hi, &msr1.lo);

    // This generates a mask that is the natural bit width of the CPU
    msr0_val = msr0.value;
    msr1_val = msr1.value;

    PrintDebug("MSR %x = %p, %x = %p \n", msr_num0, (void *)msr0_val, msr_num1, (void *)msr1_val);

    val |= msr0_val;   // e.g. a FIXED0 MSR: must-be-one bits
    val &= msr1_val;   // e.g. a FIXED1 MSR: bits allowed to be one

    return val;
}
static addr_t allocate_vmcs() {
    reg_ex_t msr;
    struct vmcs_data * vmcs_page = NULL;

    PrintDebug("Allocating page\n");

    vmcs_page = (struct vmcs_data *)V3_VAddr(V3_AllocPages(1));
    memset(vmcs_page, 0, 4096);

    v3_get_msr(VMX_BASIC_MSR, &(msr.e_reg.high), &(msr.e_reg.low));

    vmcs_page->revision = ((struct vmx_basic_msr *)&msr)->revision;
    PrintDebug("VMX Revision: 0x%x\n", vmcs_page->revision);

    return (addr_t)V3_PAddr((void *)vmcs_page);
}
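/*
 * Per the Intel SDM, software must write the processor's VMCS revision
 * identifier (reported in the low bits of IA32_VMX_BASIC) into the first
 * dword of a VMCS region before handing it to VMPTRLD or VMXON; a mismatched
 * revision ID makes those instructions fail. The same allocator is reused
 * below for the per-CPU VMXON region, which has the identical requirement.
 */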
static int init_vmcs_bios(struct guest_info * info, struct vmx_data * vmx_state) {
    int vmx_ret = 0;

    PrintDebug("Loading VMCS\n");
    vmx_ret = vmcs_load(vmx_state->vmcs_ptr_phys);

    if (vmx_ret != VMX_SUCCESS) {
        PrintError("VMPTRLD failed\n");
        return -1;
    }

    /******* Setup Host State **********/

    /* Cache GDTR, IDTR, and TR in host struct */
    addr_t gdtr_base;
    struct {
        uint16_t selector;
        addr_t base;
    } __attribute__((packed)) tmp_seg;
    /* sgdt/sidt store a {limit, base} pair; str stores the TR selector */
    __asm__ __volatile__("sgdt (%0);" : : "q"(&tmp_seg) : "memory");
    gdtr_base = tmp_seg.base;
    vmx_state->host_state.gdtr.base = gdtr_base;

    __asm__ __volatile__("sidt (%0);" : : "q"(&tmp_seg) : "memory");
    vmx_state->host_state.idtr.base = tmp_seg.base;

    __asm__ __volatile__("str (%0);" : : "q"(&tmp_seg) : "memory");
    vmx_state->host_state.tr.selector = tmp_seg.selector;
    /* The GDTR *index* is bits 3-15 of the selector. */
    struct tss_descriptor * desc = NULL;
    desc = (struct tss_descriptor *)(gdtr_base + (8 * (tmp_seg.selector >> 3)));

    tmp_seg.base = ((desc->base1) |
                    (desc->base2 << 16) |
                    (desc->base3 << 24) |
#ifdef __V3_64BIT__
                    ((uint64_t)desc->base4 << 32)
#else
                    0
#endif
                    );

    vmx_state->host_state.tr.base = tmp_seg.base;
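    /*
     * An x86 system-segment descriptor scatters the base address across the
     * entry: base[15:0] in bytes 2-3 (base1), base[23:16] in byte 4 (base2),
     * base[31:24] in byte 7 (base3), and, for the 16-byte descriptors used
     * in 64-bit mode, base[63:32] in bytes 8-11 (base4) -- hence the shifts
     * above when reassembling the host TSS base for the VMCS host-state area.
     */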
    /********** Setup VMX Control Fields from the allowed-settings MSRs ***********/

    struct v3_msr tmp_msr;

    v3_get_msr(VMX_PINBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));

    /* Add external-interrupt exiting and NMI exiting */
    vmx_state->pin_ctrls.value = tmp_msr.lo;
    vmx_state->pin_ctrls.nmi_exit = 1;
    vmx_state->pin_ctrls.ext_int_exit = 1;

    v3_get_msr(VMX_PROCBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));

    vmx_state->pri_proc_ctrls.value = tmp_msr.lo;
    vmx_state->pri_proc_ctrls.use_io_bitmap = 1;
    vmx_state->pri_proc_ctrls.hlt_exit = 1;
    vmx_state->pri_proc_ctrls.invlpg_exit = 1;
    vmx_state->pri_proc_ctrls.use_msr_bitmap = 1;
    vmx_state->pri_proc_ctrls.pause_exit = 1;
    vmx_state->pri_proc_ctrls.tsc_offset = 1;
#ifdef CONFIG_TIME_VIRTUALIZE_TSC
    vmx_state->pri_proc_ctrls.rdtsc_exit = 1;
#endif
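    /*
     * Design note: with the tsc_offset control set (and rdtsc_exit clear), a
     * guest RDTSC returns host_tsc + TSC_OFFSET, which is cheap but can only
     * shift time. CONFIG_TIME_VIRTUALIZE_TSC instead forces an exit on every
     * RDTSC so the handler can synthesize a fully virtual timebase at the
     * cost of an exit per read.
     */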
    /* Point the CPU at the guest's I/O and MSR bitmaps */
    vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_A_ADDR, (addr_t)V3_PAddr(info->vm_info->io_map.arch_data));
    vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_B_ADDR,
                                (addr_t)V3_PAddr(info->vm_info->io_map.arch_data) + PAGE_SIZE_4KB);

    vmx_ret |= check_vmcs_write(VMCS_MSR_BITMAP, (addr_t)V3_PAddr(info->vm_info->msr_map.arch_data));
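    /*
     * Per the Intel SDM, I/O bitmap A covers ports 0x0000-0x7fff and bitmap B
     * covers ports 0x8000-0xffff, which is why bitmap B sits exactly one 4KB
     * page past bitmap A in the contiguous allocation used here.
     */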
    v3_get_msr(VMX_EXIT_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_state->exit_ctrls.value = tmp_msr.lo;
    vmx_state->exit_ctrls.host_64_on = 1;

    if ((vmx_state->exit_ctrls.save_efer == 1) || (vmx_state->exit_ctrls.ld_efer == 1)) {
        vmx_state->ia32e_avail = 1;
    }

    v3_get_msr(VMX_ENTRY_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_state->entry_ctrls.value = tmp_msr.lo;

    /* Setup the exception bitmap */
    struct vmx_exception_bitmap excp_bmap;
    excp_bmap.value = 0;   /* assumption: start with no exception exits enabled */

    vmx_ret |= check_vmcs_write(VMCS_EXCP_BITMAP, excp_bmap.value);
    /******* Setup VMXAssist guest state ***********/

    info->rip = 0xd0000;
    info->vm_regs.rsp = 0x80000;

    struct rflags * flags = (struct rflags *)&(info->ctrl_regs.rflags);
    flags->rsvd1 = 1;   /* bit 1 of RFLAGS is reserved and must be set */
    /* Print Control MSRs */
    v3_get_msr(VMX_CR0_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    PrintDebug("CR0 MSR: %p\n", (void *)(addr_t)tmp_msr.value);

    v3_get_msr(VMX_CR4_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    PrintDebug("CR4 MSR: %p\n", (void *)(addr_t)tmp_msr.value);

    /* Initial guest control registers: CR0 = PG | NE | ET | PE, CR4 = VMXE */
#define GUEST_CR0 0x80000031
#define GUEST_CR4 0x00002000
    info->ctrl_regs.cr0 = GUEST_CR0;
    info->ctrl_regs.cr4 = GUEST_CR4;
    ((struct cr0_32 *)&(info->shdw_pg_state.guest_cr0))->pe = 1;

    if (info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");

        if (v3_init_passthrough_pts(info) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return -1;
        }

#define CR0_PE 0x00000001
#define CR0_PG 0x80000000
        /* Trap guest writes that flip PE or PG, and any change to CR4.VMXE */
        vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG));
        vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE);

        info->ctrl_regs.cr3 = info->direct_map_pt;

        // vmx_state->pinbased_ctrls |= NMI_EXIT;

        /* Cause a VM exit whenever the CR3 register is read or written */
        vmx_state->pri_proc_ctrls.cr3_ld_exit = 1;
        vmx_state->pri_proc_ctrls.cr3_str_exit = 1;
    }
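    /*
     * Under shadow paging the hardware walks VMM-built shadow tables rather
     * than the guest's own. CR3 load/store exiting keeps that split coherent:
     * guest reads of CR3 must see the virtual guest_cr3 while the physical
     * CR3 points at the passthrough/shadow tables installed above.
     */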
    // Setup segment registers
    struct v3_segment * seg_reg = (struct v3_segment *)&(info->segments);

    int i;

    for (i = 0; i < 10; i++) {
        seg_reg[i].selector = 3 << 3;
        seg_reg[i].limit = 0xffff;
        seg_reg[i].base = 0x0;
    }

    info->segments.cs.selector = 2 << 3;

    /* Set attributes for only the 6 real segment registers (CS through GS) */
    for (i = 0; i < 6; i++) {
        seg_reg[i].limit = 0xfffff;
        seg_reg[i].granularity = 1;
        seg_reg[i].type = 3;      /* read/write data, accessed */
        seg_reg[i].system = 1;    /* code/data, not a system segment */
        seg_reg[i].dpl = 0;
        seg_reg[i].present = 1;
    }

    info->segments.cs.type = 0xb; /* execute/read code, accessed */
    info->segments.ldtr.selector = 0x20;
    info->segments.ldtr.type = 2;
    info->segments.ldtr.system = 0;
    info->segments.ldtr.present = 1;
    info->segments.ldtr.granularity = 0;
    /************* Map in GDT and vmxassist *************/

    uint64_t gdt[] __attribute__ ((aligned(32))) = {
        0x0000000000000000ULL,   /* 0x00: reserved */
        0x0000830000000000ULL,   /* 0x08: TSS (type 0x3) */
        //0x0000890000000000ULL, /* 0x08: alternative: available 32-bit TSS (type 0x9) */
        0x00CF9b000000FFFFULL,   /* 0x10: CS 32-bit */
        0x00CF93000000FFFFULL,   /* 0x18: DS 32-bit */
        0x000082000000FFFFULL,   /* 0x20: LDTR 32-bit */
    };
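    /*
     * Decoding one entry above as an example: 0x00CF9b000000FFFF packs
     * flags/limit 0x00CF (G=1, D=1, limit[19:16]=0xF), access byte 0x9b
     * (present, DPL 0, code, execute/read, accessed), base 0x00000000, and
     * limit[15:0] = 0xFFFF -- i.e. a flat 4GB 32-bit code segment, matching
     * the flat data segment at offset 0x18.
     */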
#define VMXASSIST_GDT 0x10000
    addr_t vmxassist_gdt = 0;

    if (v3_gpa_to_hva(info, VMXASSIST_GDT, &vmxassist_gdt) == -1) {
        PrintError("Could not find VMXASSIST GDT destination\n");
        return -1;
    }

    memcpy((void *)vmxassist_gdt, gdt, sizeof(uint64_t) * 5);

    info->segments.gdtr.base = VMXASSIST_GDT;
#define VMXASSIST_TSS 0x40000
    /* Patch the TSS descriptor's scattered base fields to point at the TSS */
    uint64_t vmxassist_tss = VMXASSIST_TSS;
    gdt[0x08 / sizeof(gdt[0])] |=
        ((vmxassist_tss & 0xFF000000) << (56 - 24)) |
        ((vmxassist_tss & 0x00FF0000) << (32 - 16)) |
        ((vmxassist_tss & 0x0000FFFF) << (16)) |
        (0x67ULL);   /* assumption: minimal 32-bit TSS limit in limit[15:0] */
    info->segments.tr.selector = 0x08;
    info->segments.tr.base = vmxassist_tss;

    //info->segments.tr.type = 0x9;
    info->segments.tr.type = 0x3;
    info->segments.tr.system = 0;
    info->segments.tr.present = 1;
    info->segments.tr.granularity = 0;
#define VMXASSIST_START 0x000d0000
    extern uint8_t v3_vmxassist_start[];
    extern uint8_t v3_vmxassist_end[];
    addr_t vmxassist_dst = 0;

    if (v3_gpa_to_hva(info, VMXASSIST_START, &vmxassist_dst) == -1) {
        PrintError("Could not find VMXASSIST destination\n");
        return -1;
    }

    memcpy((void *)vmxassist_dst, v3_vmxassist_start, v3_vmxassist_end - v3_vmxassist_start);
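    /*
     * vmxassist is a small helper blob (inherited from Xen) that runs inside
     * the guest at VMXASSIST_START to emulate real mode, since VMX hardware
     * of this generation cannot execute real-mode guest code directly; note
     * the guest RIP set earlier (0xd0000) starts execution in this blob
     * rather than at the BIOS reset vector.
     */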
    /*** Write all the info to the VMCS ***/

#define DEBUGCTL_MSR 0x1d9
    v3_get_msr(DEBUGCTL_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_ret |= check_vmcs_write(VMCS_GUEST_DBG_CTL, tmp_msr.value);

    info->dbg_regs.dr7 = 0x400;

#ifdef __V3_64BIT__
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, (addr_t)0xffffffffffffffffULL);
#else
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, (addr_t)0xffffffffUL);
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR_HIGH, (addr_t)0xffffffffUL);
#endif
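    /*
     * The SDM requires the VMCS link pointer to be all ones unless VMCS
     * shadowing is in use; anything else makes the subsequent VMLAUNCH fail
     * with an invalid-control-field error. DR7 = 0x400 is likewise the
     * architectural reset value (bit 10 is reserved and reads as 1).
     */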
    if (v3_update_vmcs_ctrl_fields(info)) {
        PrintError("Could not write control fields!\n");
        return -1;
    }

    if (v3_update_vmcs_host_state(info)) {
        PrintError("Could not write host state\n");
        return -1;
    }

    vmx_state->state = VMXASSIST_DISABLED;

    return 0;
}
int v3_init_vmx_vmcs(struct guest_info * info, v3_vm_class_t vm_class) {
    struct vmx_data * vmx_state = NULL;
    int vmx_ret = 0;

    vmx_state = (struct vmx_data *)V3_Malloc(sizeof(struct vmx_data));

    PrintDebug("vmx_data pointer: %p\n", (void *)vmx_state);

    PrintDebug("Allocating VMCS\n");
    vmx_state->vmcs_ptr_phys = allocate_vmcs();

    PrintDebug("VMCS pointer: %p\n", (void *)(vmx_state->vmcs_ptr_phys));

    info->vmm_data = vmx_state;

    PrintDebug("Initializing VMCS (addr=%p)\n", info->vmm_data);

    // TODO: Fix vmcs fields so they're 32-bit

    PrintDebug("Clearing VMCS: %p\n", (void *)vmx_state->vmcs_ptr_phys);
    vmx_ret = vmcs_clear(vmx_state->vmcs_ptr_phys);

    if (vmx_ret != VMX_SUCCESS) {
        PrintError("VMCLEAR failed\n");
        return -1;
    }

    if (vm_class == V3_PC_VM) {
        PrintDebug("Initializing VMCS\n");
        init_vmcs_bios(info, vmx_state);
    } else {
        PrintError("Invalid VM Class\n");
        return -1;
    }

    return 0;
}
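/*
 * Note on VMCLEAR ordering: the VMCLEAR in v3_init_vmx_vmcs() initializes
 * the VMCS launch state to "clear", which is what makes the first entry use
 * VMLAUNCH (and all later entries VMRESUME) in v3_vmx_enter() below.
 */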
static int update_irq_exit_state(struct guest_info * info) {
    struct vmx_exit_idt_vec_info idt_vec_info;

    check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));

    /* The injection completed if no vectoring info is reported for this exit */
    if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 0)) {
#ifdef CONFIG_DEBUG_INTERRUPTS
        PrintDebug("Calling v3_injecting_intr\n");
#endif
        info->intr_core_state.irq_started = 0;
        v3_injecting_intr(info, info->intr_core_state.irq_vector, V3_EXTERNAL_IRQ);
    }

    return 0;
}
static int update_irq_entry_state(struct guest_info * info) {
    struct vmx_exit_idt_vec_info idt_vec_info;
    struct vmcs_interrupt_state intr_core_state;
    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);

    check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));
    check_vmcs_read(VMCS_GUEST_INT_STATE, &(intr_core_state));
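    /*
     * VM-entry interruption-information layout (Intel SDM): bits 7:0 hold
     * the vector, bits 10:8 the type (0 = external interrupt, 2 = NMI,
     * 3 = hardware exception, 4 = software interrupt), bit 11 requests
     * error-code delivery from VMCS_ENTRY_EXCP_ERR, and bit 31 marks the
     * field valid -- which is why every injection path below ends by
     * setting valid and writing VMCS_ENTRY_INT_INFO.
     */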
    /* Check for pending exceptions to inject */
    if (v3_excp_pending(info)) {
        struct vmx_entry_int_info int_info;
        int_info.value = 0;

        // In VMX, almost every exception is hardware
        // Software exceptions are pretty much only for breakpoint or overflow
        int_info.type = 3;   /* hardware exception */
        int_info.vector = v3_get_excp_number(info);

        if (info->excp_state.excp_error_code_valid) {
            check_vmcs_write(VMCS_ENTRY_EXCP_ERR, info->excp_state.excp_error_code);
            int_info.error_code = 1;

#ifdef CONFIG_DEBUG_INTERRUPTS
            PrintDebug("Injecting exception %d with error code %x\n",
                       int_info.vector, info->excp_state.excp_error_code);
#endif
        }

        int_info.valid = 1;
#ifdef CONFIG_DEBUG_INTERRUPTS
        PrintDebug("Injecting exception %d (EIP=%p)\n", int_info.vector, (void *)(addr_t)info->rip);
#endif
        check_vmcs_write(VMCS_ENTRY_INT_INFO, int_info.value);

        v3_injecting_excp(info, int_info.vector);
    } else if ((((struct rflags *)&(info->ctrl_regs.rflags))->intr == 1) &&
               (intr_core_state.val == 0)) {

        if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 1)) {

#ifdef CONFIG_DEBUG_INTERRUPTS
            PrintDebug("IRQ pending from previous injection\n");
#endif

            // Copy the IDT vectoring info over to reinject the old interrupt
            if (idt_vec_info.error_code == 1) {
                uint32_t err_code = 0;

                check_vmcs_read(VMCS_IDT_VECTOR_ERR, &err_code);
                check_vmcs_write(VMCS_ENTRY_EXCP_ERR, err_code);
            }

            idt_vec_info.undef = 0;
            check_vmcs_write(VMCS_ENTRY_INT_INFO, idt_vec_info.value);
        } else {
            struct vmx_entry_int_info ent_int;
            ent_int.value = 0;

            switch (v3_intr_pending(info)) {
                case V3_EXTERNAL_IRQ: {
                    info->intr_core_state.irq_vector = v3_get_intr(info);
                    ent_int.vector = info->intr_core_state.irq_vector;
                    ent_int.type = 0;        /* external interrupt */
                    ent_int.error_code = 0;
                    ent_int.valid = 1;

#ifdef CONFIG_DEBUG_INTERRUPTS
                    PrintDebug("Injecting Interrupt %d at exit %u (EIP=%p)\n",
                               info->intr_core_state.irq_vector,
                               (uint32_t)info->num_exits,
                               (void *)(addr_t)info->rip);
#endif

                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
                    info->intr_core_state.irq_started = 1;

                    break;
                }
                case V3_NMI:
                    PrintDebug("Injecting NMI\n");

                    ent_int.type = 2;        /* NMI: vector is always 2 */
                    ent_int.vector = 2;
                    ent_int.valid = 1;
                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);

                    break;
                case V3_SOFTWARE_INTR:
                    PrintDebug("Injecting software interrupt\n");

                    ent_int.type = 4;        /* software interrupt */
                    ent_int.valid = 1;
                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);

                    break;
                case V3_VIRTUAL_IRQ:
                    // Not sure what to do here, Intel doesn't have virtual IRQs
                    // May be the same as external interrupts/IRQs

                    break;
                case V3_INVALID_INTR:
                default:
                    break;
            }
        }
    } else if ((v3_intr_pending(info)) && (vmx_info->pri_proc_ctrls.int_wndw_exit == 0)) {
        // Enable INTR window exiting so we know when IF=1
        uint32_t instr_len;

        check_vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);

#ifdef CONFIG_DEBUG_INTERRUPTS
        PrintDebug("Enabling Interrupt-Window exiting: %d\n", instr_len);
#endif

        vmx_info->pri_proc_ctrls.int_wndw_exit = 1;
        check_vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
    }

    return 0;
}
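/*
 * Interrupt-window exiting (armed at the end of update_irq_entry_state)
 * makes the CPU exit as soon as the guest can actually accept an interrupt
 * (RFLAGS.IF = 1 and no STI/MOV-SS blocking). It is enabled only when an
 * IRQ is pending but cannot be injected yet, and acts as the retry signal.
 */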
static struct vmx_exit_info exit_log[10];

/* Dump the last 10 exits, most recent first */
static void print_exit_log(struct guest_info * info) {
    int cnt = info->num_exits % 10;
    int i = 0;

    V3_Print("\nExit Log (%d total exits):\n", (uint32_t)info->num_exits);

    for (i = 0; i < 10; i++) {
        struct vmx_exit_info * tmp = &exit_log[cnt];

        V3_Print("%d:\texit_reason = %p\n", i, (void *)(addr_t)tmp->exit_reason);
        V3_Print("\texit_qual = %p\n", (void *)tmp->exit_qual);
        V3_Print("\tint_info = %p\n", (void *)(addr_t)tmp->int_info);
        V3_Print("\tint_err = %p\n", (void *)(addr_t)tmp->int_err);
        V3_Print("\tinstr_info = %p\n", (void *)(addr_t)tmp->instr_info);

        /* Walk backwards through the circular buffer */
        cnt--;

        if (cnt < 0) {
            cnt = 9;
        }
    }
}
/*
 * CAUTION and DANGER!!!
 *
 * The VMCS CANNOT(!!) be accessed outside of the cli/sti calls inside this function.
 * When executing a symbiotic call, the VMCS WILL be overwritten, so any dependencies
 * on its contents will cause things to break. The contents at the time of the exit WILL
 * change before the exit handler is executed.
 */
int v3_vmx_enter(struct guest_info * info) {
    int ret = 0;
    uint32_t tsc_offset_low, tsc_offset_high;
    struct vmx_exit_info exit_info;

    // Conditionally yield the CPU if the timeslice has expired
    v3_yield_cond(info);

    /* If this guest is frequency-lagged behind host time, wait
     * for the appropriate host time before resuming the guest. */
    v3_adjust_time(info);

    // v3_print_guest_state(info);

    // disable global interrupts for vm state transition
    v3_disable_ints();

    v3_vmx_restore_vmcs(info);

#ifdef CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_entry_state(info);
    }
#else
    update_irq_entry_state(info);
#endif

    {
        /* Refresh the guest CR3 value loaded into the VMCS */
        addr_t guest_cr3;
        vmcs_read(VMCS_GUEST_CR3, &guest_cr3);
        vmcs_write(VMCS_GUEST_CR3, guest_cr3);
    }

    v3_update_timers(info);

    /* TSC_OFFSET is a 64-bit VMCS field, written here as two 32-bit halves */
    tsc_offset_high = (uint32_t)((v3_tsc_host_offset(&info->time_state) >> 32) & 0xffffffff);
    tsc_offset_low = (uint32_t)(v3_tsc_host_offset(&info->time_state) & 0xffffffff);
    check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
    check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);
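    /*
     * Worked example: to make the guest's TSC appear to start at zero when
     * entered at host TSC = T0, v3_tsc_host_offset() would return -T0; the
     * CPU then computes guest_tsc = host_tsc + offset on every RDTSC that
     * does not exit, so guest time can run behind (or ahead of) the host.
     */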
    if (info->vm_info->run_state == VM_STOPPED) {
        info->vm_info->run_state = VM_RUNNING;
        ret = v3_vmx_launch(&(info->vm_regs), info, &(info->ctrl_regs));
    } else {
        ret = v3_vmx_resume(&(info->vm_regs), info, &(info->ctrl_regs));
    }

    // PrintDebug("VMX Exit: ret=%d\n", ret);

    if (ret != VMX_SUCCESS) {
        uint32_t error = 0;

        vmcs_read(VMCS_INSTR_ERR, &error);
        PrintError("VMENTRY Error: %d\n", error);

        return -1;
    }

    info->num_exits++;

    /* Update guest state */
    v3_vmx_save_vmcs(info);

    // info->cpl = info->segments.cs.selector & 0x3;

    info->mem_mode = v3_get_vm_mem_mode(info);
    info->cpu_mode = v3_get_vm_cpu_mode(info);
    /* Snapshot everything the exit handler needs before interrupts are re-enabled */
    check_vmcs_read(VMCS_EXIT_INSTR_LEN, &(exit_info.instr_len));
    check_vmcs_read(VMCS_EXIT_INSTR_INFO, &(exit_info.instr_info));
    check_vmcs_read(VMCS_EXIT_REASON, &(exit_info.exit_reason));
    check_vmcs_read(VMCS_EXIT_QUAL, &(exit_info.exit_qual));
    check_vmcs_read(VMCS_EXIT_INT_INFO, &(exit_info.int_info));
    check_vmcs_read(VMCS_EXIT_INT_ERR, &(exit_info.int_err));
    check_vmcs_read(VMCS_GUEST_LINEAR_ADDR, &(exit_info.guest_linear_addr));

    //PrintDebug("VMX Exit taken, id-qual: %u-%lu\n", exit_info.exit_reason, exit_info.exit_qual);

    exit_log[info->num_exits % 10] = exit_info;
#ifdef CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_exit_state(info);
    }
#else
    update_irq_exit_state(info);
#endif

    // reenable global interrupts after vm exit
    v3_enable_ints();

    // Conditionally yield the CPU if the timeslice has expired
    v3_yield_cond(info);

    if (v3_handle_vmx_exit(info, &exit_info) == -1) {
        PrintError("Error in VMX exit handler\n");
        return -1;
    }

    return 0;
}
int v3_start_vmx_guest(struct guest_info * info) {

    PrintDebug("Starting VMX core %u\n", info->cpu_id);

    if (info->cpu_id == 0) {
        info->core_run_state = CORE_RUNNING;
        info->vm_info->run_state = VM_RUNNING;
    } else {
        PrintDebug("VMX core %u: Waiting for core initialization\n", info->cpu_id);

        while (info->core_run_state == CORE_STOPPED) {
            v3_yield(info);
            //PrintDebug("VMX core %u: still waiting for INIT\n", info->cpu_id);
        }

        PrintDebug("VMX core %u initialized\n", info->cpu_id);
    }

    PrintDebug("VMX core %u: I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n",
               info->cpu_id, info->segments.cs.selector, (void *)(info->segments.cs.base),
               info->segments.cs.limit, (void *)(info->rip));

    PrintDebug("VMX core %u: Launching VMX VM\n", info->cpu_id);

    while (1) {

        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }

        if (v3_vmx_enter(info) == -1) {
            print_exit_log(info);
            return -1;
        }

        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }

        if ((info->num_exits % 5000) == 0) {
            V3_Print("VMX Exit number %d\n", (uint32_t)info->num_exits);
        }
    }

    return 0;
}
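/*
 * Startup protocol: core 0 marks the VM RUNNING, releasing the secondary
 * cores spinning in the CORE_STOPPED loop above. Each core then drives its
 * own enter/exit loop; run_state is rechecked both before and after every
 * entry so a VM stopped mid-exit is never re-entered.
 */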
int v3_is_vmx_capable() {
    v3_msr_t feature_msr;
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(0x1, &eax, &ebx, &ecx, &edx);

    PrintDebug("ECX: 0x%x\n", ecx);

    if (ecx & CPUID_1_ECX_VTXFLAG) {
        v3_get_msr(VMX_FEATURE_CONTROL_MSR, &(feature_msr.hi), &(feature_msr.lo));

        PrintDebug("MSRREGlow: 0x%.8x\n", feature_msr.lo);

        if ((feature_msr.lo & FEATURE_CONTROL_VALID) != FEATURE_CONTROL_VALID) {
            PrintDebug("VMX is locked -- enable in the BIOS\n");
            return 0;
        }
    } else {
        PrintDebug("VMX not supported on this cpu\n");
        return 0;
    }

    return 1;
}

static int has_vmx_nested_paging() {
    /* EPT is not supported in this version */
    return 0;
}
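/*
 * IA32_FEATURE_CONTROL background: firmware sets the lock bit (bit 0,
 * FEATURE_CONTROL_VALID here) after choosing whether VMXON is permitted;
 * once locked, the enable bits cannot change until reset. The check above
 * treats an MSR the BIOS never configured as "VMX unavailable".
 */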
void v3_init_vmx_cpu(int cpu_id) {
    extern v3_cpu_arch_t v3_cpu_types[];
    struct v3_msr tmp_msr;
    uint64_t ret = 0;

    v3_get_msr(VMX_CR4_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));

#ifdef __V3_64BIT__
    /* Compute CR4 | VMXE (bit 13) without writing CR4 yet */
    __asm__ __volatile__ (
                          "movq %%cr4, %%rbx;"
                          "orq  $0x00002000, %%rbx;"
                          "movq %%rbx, %0;"
                          : "=m"(ret)
                          :
                          : "%rbx"
                          );

    /* Every must-be-one bit from CR4_FIXED0 has to be set in the new value */
    if ((~ret & tmp_msr.value) == 0) {
        __asm__ __volatile__ ("movq %0, %%cr4;" : : "q"(ret));
    } else {
        PrintError("Invalid CR4 Settings!\n");
        return;
    }

    /* Set CR0.NE (bit 5), which VMX operation requires */
    __asm__ __volatile__ (
                          "movq %%cr0, %%rbx; "
                          "orq  $0x00000020, %%rbx; "
                          "movq %%rbx, %%cr0;"
                          :
                          :
                          : "%rbx"
                          );
#elif __V3_32BIT__
    __asm__ __volatile__ (
                          "movl %%cr4, %%ecx;"
                          "orl  $0x00002000, %%ecx;"
                          "movl %%ecx, %0;"
                          : "=m"(ret)
                          :
                          : "%ecx"
                          );

    if ((~ret & tmp_msr.value) == 0) {
        __asm__ __volatile__ ("movl %0, %%cr4;" : : "q"(ret));
    } else {
        PrintError("Invalid CR4 Settings!\n");
        return;
    }

    __asm__ __volatile__ (
                          "movl %%cr0, %%ecx; "
                          "orl  $0x00000020, %%ecx; "
                          "movl %%ecx, %%cr0;"
                          :
                          :
                          : "%ecx"
                          );
#endif
    //
    // Should check and return Error here....

    // Setup VMXON Region
    host_vmcs_ptrs[cpu_id] = allocate_vmcs();

    PrintDebug("VMXON pointer: 0x%p\n", (void *)host_vmcs_ptrs[cpu_id]);

    if (v3_enable_vmx(host_vmcs_ptrs[cpu_id]) == VMX_SUCCESS) {
        PrintDebug("VMX Enabled\n");
    } else {
        PrintError("VMX initialization failure\n");
        return;
    }

    if (has_vmx_nested_paging() == 1) {
        v3_cpu_types[cpu_id] = V3_VMX_EPT_CPU;
    } else {
        v3_cpu_types[cpu_id] = V3_VMX_CPU;
    }
}
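/*
 * VMXON requirements recap: CR4.VMXE must already be set (the 0x00002000 OR
 * above), IA32_FEATURE_CONTROL must permit VMXON, and the VMXON region --
 * allocated with the same allocate_vmcs() helper -- must carry the CPU's
 * VMCS revision ID. Only after v3_enable_vmx() succeeds is this CPU
 * recorded as a VMX-capable core in v3_cpu_types[].
 */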