/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
#include <palacios/vm_guest.h>
#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm.h>
#include <palacios/vmm_decoder.h>
#include <palacios/vmcb.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/vmm_sprintf.h>
#include <palacios/vmm_xed.h>
#include <palacios/vmm_direct_paging.h>

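/*
 * Mode/translation helpers: the guest's CPU mode is derived from CR0.PE,
 * CR4.PAE, EFER.LME, and the CS long-mode bit.  With shadow paging the
 * guest-visible CR0/EFER live in the shadow paging state; with nested
 * paging they are read from the hardware control registers / VMCB.
 */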
v3_cpu_mode_t v3_get_vm_cpu_mode(struct guest_info * info) {
    struct cr0_32 * cr0;
    struct efer_64 * efer;
    struct cr4_32 * cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
    struct v3_segment * cs = &(info->segments.cs);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t *)(info->vmm_data));

    if (info->shdw_pg_mode == SHADOW_PAGING) {
        cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
        efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
    } else if (info->shdw_pg_mode == NESTED_PAGING) {
        cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
        efer = (struct efer_64 *)&(guest_state->efer);
    } else {
        PrintError("Invalid Paging Mode...\n");
        V3_ASSERT(0);
        return REAL;
    }

    if (cr0->pe == 0) {
        return REAL;
    } else if ((cr4->pae == 0) && (efer->lme == 0)) {
        return PROTECTED;
    } else if (efer->lme == 0) {
        return PROTECTED_PAE;
    } else if ((efer->lme == 1) && (cs->long_mode == 1)) {
        return LONG;
    } else {
        // What about LONG_16_COMPAT???
        return LONG_32_COMPAT;
    }
}

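/*
 * Effective address width follows the CPU mode determined above:
 * 2 bytes in real mode, 4 bytes in protected, PAE, and compatibility
 * modes, and 8 bytes in 64-bit long mode.
 */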
// Get address width in bytes
uint_t v3_get_addr_width(struct guest_info * info) {
    struct cr0_32 * cr0;
    struct cr4_32 * cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
    struct efer_64 * efer;
    struct v3_segment * cs = &(info->segments.cs);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t *)(info->vmm_data));

    if (info->shdw_pg_mode == SHADOW_PAGING) {
        cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
        efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
    } else if (info->shdw_pg_mode == NESTED_PAGING) {
        cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
        efer = (struct efer_64 *)&(guest_state->efer);
    } else {
        PrintError("Invalid Paging Mode...\n");
        V3_ASSERT(0);
        return 0;
    }

    if (cr0->pe == 0) {
        return 2;
    } else if ((cr4->pae == 0) && (efer->lme == 0)) {
        return 4;
    } else if (efer->lme == 0) {
        return 4;
    } else if ((efer->lme == 1) && (cs->long_mode == 1)) {
        return 8;
    } else {
        // What about LONG_16_COMPAT???
        return 4;
    }
}

static const uchar_t REAL_STR[] = "Real";
static const uchar_t PROTECTED_STR[] = "Protected";
static const uchar_t PROTECTED_PAE_STR[] = "Protected+PAE";
static const uchar_t LONG_STR[] = "Long";
static const uchar_t LONG_32_COMPAT_STR[] = "32bit Compat";
static const uchar_t LONG_16_COMPAT_STR[] = "16bit Compat";

const uchar_t * v3_cpu_mode_to_str(v3_cpu_mode_t mode) {
    switch (mode) {
        case REAL:           return REAL_STR;
        case PROTECTED:      return PROTECTED_STR;
        case PROTECTED_PAE:  return PROTECTED_PAE_STR;
        case LONG:           return LONG_STR;
        case LONG_32_COMPAT: return LONG_32_COMPAT_STR;
        case LONG_16_COMPAT: return LONG_16_COMPAT_STR;
        default:             return NULL;
    }
}

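/*
 * Memory mode: while CR0.PG is clear the guest is still addressing physical
 * memory directly; once paging is enabled it is using virtual addresses.
 */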
v3_mem_mode_t v3_get_vm_mem_mode(struct guest_info * info) {
    struct cr0_32 * cr0;

    if (info->shdw_pg_mode == SHADOW_PAGING) {
        cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
    } else if (info->shdw_pg_mode == NESTED_PAGING) {
        cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
    } else {
        PrintError("Invalid Paging Mode...\n");
        V3_ASSERT(0);
        return PHYSICAL_MEM;
    }

    return (cr0->pg == 0) ? PHYSICAL_MEM : VIRTUAL_MEM;
}

static const uchar_t PHYS_MEM_STR[] = "Physical Memory";
static const uchar_t VIRT_MEM_STR[] = "Virtual Memory";

const uchar_t * v3_mem_mode_to_str(v3_mem_mode_t mode) {
    switch (mode) {
        case PHYSICAL_MEM: return PHYS_MEM_STR;
        case VIRTUAL_MEM:  return VIRT_MEM_STR;
        default:           return NULL;
    }
}

void v3_print_segments(struct v3_segments * segs) {
    int i = 0;
    struct v3_segment * seg_ptr;
    char * seg_names[] = {"CS", "DS", "ES", "FS", "GS", "SS", "LDTR", "GDTR", "IDTR", "TR", NULL};

    seg_ptr = (struct v3_segment *)segs;

    V3_Print("Segments\n");

    for (i = 0; seg_names[i] != NULL; i++) {
        V3_Print("\t%s: Sel=%x, base=%p, limit=%x (long_mode=%d, db=%d)\n", seg_names[i], seg_ptr[i].selector,
                 (void *)(addr_t)seg_ptr[i].base, seg_ptr[i].limit,
                 seg_ptr[i].long_mode, seg_ptr[i].db);
    }
}

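/*
 * Translate a segment selector into a v3_segment by reading the descriptor
 * out of the guest GDT.  Only GDT-based (non-LDT) selectors are handled.
 */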
// We don't handle those fancy 64 bit system segments...
int v3_translate_segment(struct guest_info * info, uint16_t selector, struct v3_segment * seg) {
    struct v3_segment * gdt = &(info->segments.gdtr);
    addr_t gdt_addr = 0;
    uint16_t seg_offset = (selector & ~0x7);
    addr_t seg_addr = 0;
    struct gen_segment * gen_seg = NULL;
    struct seg_selector sel;

    memset(seg, 0, sizeof(struct v3_segment));

    sel.value = selector;

    if (sel.ti == 1) {
        PrintError("LDT translations not supported\n");
        return -1;
    }

    if (v3_gva_to_hva(info, gdt->base, &gdt_addr) == -1) {
        PrintError("Unable to translate GDT address\n");
        return -1;
    }

    seg_addr = gdt_addr + seg_offset;
    gen_seg = (struct gen_segment *)seg_addr;

    seg->selector = selector;

    seg->limit = gen_seg->limit_hi;
    seg->limit <<= 16;
    seg->limit += gen_seg->limit_lo;

    seg->base = gen_seg->base_hi;
    seg->base <<= 24;
    seg->base += gen_seg->base_lo;

    // with 4KB granularity the limit is in pages, not bytes
    if (gen_seg->granularity == 1) {
        seg->limit <<= 12;
        seg->limit |= 0xfff;
    }

    seg->type = gen_seg->type;
    seg->system = gen_seg->system;
    seg->dpl = gen_seg->dpl;
    seg->present = gen_seg->present;
    seg->avail = gen_seg->avail;
    seg->long_mode = gen_seg->long_mode;
    seg->db = gen_seg->db;
    seg->granularity = gen_seg->granularity;

    return 0;
}

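/*
 * Dump the guest control registers (CR0/CR2/CR3/CR4/CR8 and RFLAGS), plus
 * EFER taken from the VMCB save area.
 */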
void v3_print_ctrl_regs(struct guest_info * info) {
    struct v3_ctrl_regs * regs = &(info->ctrl_regs);
    int i = 0;
    v3_reg_t * reg_ptr;
    char * reg_names[] = {"CR0", "CR2", "CR3", "CR4", "CR8", "FLAGS", NULL};
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t *)(info->vmm_data));

    reg_ptr = (v3_reg_t *)regs;

    V3_Print("32 bit Ctrl Regs:\n");

    for (i = 0; reg_names[i] != NULL; i++) {
        V3_Print("\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));
    }

    V3_Print("\tEFER=0x%p\n", (void *)(addr_t)(guest_state->efer));
}

static int safe_gva_to_hva(struct guest_info * info, addr_t linear_addr, addr_t * host_addr) {
    /* select the proper translation based on guest mode */
    if (info->mem_mode == PHYSICAL_MEM) {
        if (v3_gpa_to_hva(info, linear_addr, host_addr) == -1) return -1;
    } else if (info->mem_mode == VIRTUAL_MEM) {
        if (v3_gva_to_hva(info, linear_addr, host_addr) == -1) return -1;
    }
    return 0;
}

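/*
 * Debugging aid: disassemble a window of guest code around RIP.  Not called
 * by default (see the commented-out call in v3_print_guest_state()).
 */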
static int v3_print_disassembly(struct guest_info * info) {
    int passed_rip = 0;
    addr_t rip, rip_linear, rip_host;

    /* we don't know where the instructions preceding RIP start, so we just take
     * a guess and hope the instruction stream synced up with our disassembly
     * some time before RIP; if it has not we correct RIP at that point */

    /* start disassembly 64 bytes before current RIP, continue 32 bytes after */
    rip = (addr_t) info->rip - 64;
    while ((int) (rip - info->rip) < 32) {
        V3_Print("disassembly step\n");

        /* always print RIP, even if the instructions before were bad */
        if (!passed_rip && rip >= info->rip) {
            if (rip != info->rip) {
                V3_Print("***** bad disassembly up to this point *****\n");
                rip = info->rip;
            }
            passed_rip = 1;
        }

        /* look up host virtual address for this instruction */
        rip_linear = get_addr_linear(info, rip, &(info->segments.cs));
        if (safe_gva_to_hva(info, rip_linear, &rip_host) < 0) {
            rip++;
            continue;
        }

        /* print disassembled instruction (updates rip) */
        if (v3_disasm(info, (void *) rip_host, &rip, rip == info->rip) < 0) {
            rip++;
            continue;
        }
    }

    return 0;
}

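/*
 * Dump the full state of a single guest core: RIP, exit count, segments,
 * control registers, GPRs, the VM memory map, and the guest stack.
 */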
void v3_print_guest_state(struct guest_info * info) {
    addr_t linear_addr = 0;

    V3_Print("RIP: %p\n", (void *)(addr_t)(info->rip));
    linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
    V3_Print("RIP Linear: %p\n", (void *)linear_addr);

    V3_Print("NumExits: %u\n", (uint32_t)info->num_exits);

    v3_print_segments(&(info->segments));
    v3_print_ctrl_regs(info);

    if (info->shdw_pg_mode == SHADOW_PAGING) {
        V3_Print("Shadow Paging Guest Registers:\n");
        V3_Print("\tGuest CR0=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr0));
        V3_Print("\tGuest CR3=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr3));
        V3_Print("\tGuest EFER=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_efer.value));
    }

    v3_print_GPRs(info);

    v3_print_mem_map(info->vm_info);

    v3_print_stack(info);

    //  v3_print_disassembly(info);
}

void v3_print_guest_state_all(struct v3_vm_info * vm) {
    int i = 0;

    V3_Print("VM Core states for %s\n", vm->name);

    for (i = 0; i < 80; i++) { V3_Print("-"); }

    for (i = 0; i < vm->num_cores; i++) {
        v3_print_guest_state(&vm->cores[i]);
    }

    for (i = 0; i < 80; i++) { V3_Print("-"); }
    V3_Print("\n");
}

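/*
 * Real-mode stack dump: the physical address of the stack top is computed
 * the 8086 way, (SS << 4) + SP, and SP deliberately wraps within 64KB.
 */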
static void print_real_mode_stack(struct guest_info *info)
{
    uint16_t ss, sp;
    addr_t   addr, host_addr;
    int      i;

    ss = info->segments.ss.selector & 0xffff;
    sp = info->vm_regs.rsp & 0xffff;
    addr = (((uint32_t)ss) << 4) + sp;

    V3_Print("Real Mode Stack starting at 0x%x:0x%x (0x%p):\n", ss, sp, (void *)addr);

    if (info->mem_mode != PHYSICAL_MEM) {
        PrintError("Cannot print real mode stack when virtual memory active\n");
        return;
    }

    for (i = 0; i <= 24; i++, sp += 2) {
        // note that it's correct for this to wrap around
        addr = (((uint32_t)ss) << 4) + sp;
        if (v3_gpa_to_hva(info, addr, &host_addr)) {
            PrintError("Could not translate physical stack address 0x%p\n", (void *)addr);
            return;
        }
        V3_Print("\t0x%.4x\n", *((uint16_t *)host_addr));
    }
}

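/*
 * Stack dump dispatcher: real mode is handled separately above; in
 * protected/long mode the stack is read through SS:RSP, printing 8-byte
 * entries in long mode and 4-byte entries otherwise.
 */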
void v3_print_stack(struct guest_info * info) {
    addr_t linear_addr = 0;
    addr_t host_addr = 0;
    int i = 0;
    v3_cpu_mode_t cpu_mode = v3_get_vm_cpu_mode(info);

    if (cpu_mode == REAL) {
        print_real_mode_stack(info);
        return;
    }

    // protected mode, 32 or 64 bit
    linear_addr = get_addr_linear(info, info->vm_regs.rsp, &(info->segments.ss));

    V3_Print("Stack at %p:\n", (void *)linear_addr);

    if (info->mem_mode == PHYSICAL_MEM) {
        if (v3_gpa_to_hva(info, linear_addr, &host_addr) == -1) {
            PrintError("Could not translate Stack address\n");
            return;
        }
    } else if (info->mem_mode == VIRTUAL_MEM) {
        if (v3_gva_to_hva(info, linear_addr, &host_addr) == -1) {
            PrintError("Could not translate Virtual Stack address\n");
            return;
        }
    }

    V3_Print("Host Address of rsp = 0x%p\n", (void *)host_addr);

    // print the top 25 entries of the stack, starting at the current rsp
    for (i = 0; i <= 24; i++) {
        if (cpu_mode == LONG) {
            V3_Print("\t%p\n", (void *)*(addr_t *)(host_addr + (i * 8)));
        } else {
            // 32 bit stacks...
            V3_Print("\t%.8x\n", *(uint32_t *)(host_addr + (i * 4)));
        }
    }
}

/* The 32- and 64-bit GPR dumps are selected at compile time by the host
 * word-size build flags (assumed to be __V3_32BIT__ / __V3_64BIT__). */
#ifdef __V3_32BIT__

void v3_print_GPRs(struct guest_info * info) {
    struct v3_gprs * regs = &(info->vm_regs);
    int i = 0;
    v3_reg_t * reg_ptr;
    char * reg_names[] = { "RDI", "RSI", "RBP", "RSP", "RBX", "RDX", "RCX", "RAX", NULL};

    reg_ptr = (v3_reg_t *)regs;

    V3_Print("32 bit GPRs:\n");

    for (i = 0; reg_names[i] != NULL; i++) {
        V3_Print("\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));
    }
}

#elif defined(__V3_64BIT__)

void v3_print_GPRs(struct guest_info * info) {
    struct v3_gprs * regs = &(info->vm_regs);
    int i = 0;
    v3_reg_t * reg_ptr;
    char * reg_names[] = { "RDI", "RSI", "RBP", "RSP", "RBX", "RDX", "RCX", "RAX", \
                           "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15", NULL};

    reg_ptr = (v3_reg_t *)regs;

    V3_Print("64 bit GPRs:\n");

    for (i = 0; reg_names[i] != NULL; i++) {
        V3_Print("\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));
    }
}

#endif

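/*
 * Debugging hypercall: registered below as GUEST_INFO_HCALL.  It dumps the
 * guest-visible state and, depending on the host CPU, the underlying
 * VMCB (SVM) or VMCS (VMX).
 */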
#include <palacios/vmcs.h>
#include <palacios/vmcb.h>

static int info_hcall(struct guest_info * core, uint_t hcall_id, void * priv_data) {
    v3_cpu_arch_t cpu_type = v3_get_cpu_type(V3_Get_CPU());

    V3_Print("************** Guest State ************\n");
    v3_print_guest_state(core);

    if ((cpu_type == V3_SVM_CPU) || (cpu_type == V3_SVM_REV3_CPU)) {
        // dump the hardware VMCB as well
        PrintDebugVMCB((vmcb_t *)(core->vmm_data));
    } else if ((cpu_type == V3_VMX_CPU) || (cpu_type == V3_VMX_EPT_CPU) || (cpu_type == V3_VMX_EPT_UG_CPU)) {
        // dump the hardware VMCS as well (assumed available via vmcs.h)
        v3_print_vmcs();
    } else {
        PrintError("Invalid CPU Type 0x%x\n", cpu_type);
        return -1;
    }

    return 0;
}

#include <palacios/svm.h>
#include <palacios/svm_io.h>
#include <palacios/svm_msr.h>

#include <palacios/vmx.h>
#include <palacios/vmx_io.h>
#include <palacios/vmx_msr.h>

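/*
 * Per-VM initialization: set up the generic subsystems (hypercalls, I/O,
 * MSR, CPUID, interrupts, memory map, shadow paging implementation, device
 * manager), then the SVM- or VMX-specific I/O and MSR maps, and finally
 * register the guest-info debugging hypercall.
 */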
int v3_init_vm(struct v3_vm_info * vm) {
    v3_cpu_arch_t cpu_type = v3_get_cpu_type(V3_Get_CPU());

#ifdef V3_CONFIG_TELEMETRY
    v3_init_telemetry(vm);
#endif

    v3_init_hypercall_map(vm);
    v3_init_io_map(vm);
    v3_init_msr_map(vm);
    v3_init_cpuid_map(vm);
    v3_init_host_events(vm);
    v3_init_intr_routers(vm);
    v3_init_ext_manager(vm);

    // Initialize the memory map
    if (v3_init_mem_map(vm) == -1) {
        PrintError("Could not initialize shadow map\n");
        return -1;
    }

    v3_init_mem_hooks(vm);

    if (v3_init_shdw_impl(vm) == -1) {
        PrintError("VM initialization error in shadow implementation\n");
        return -1;
    }

    v3_init_time_vm(vm);

#ifdef V3_CONFIG_SYMBIOTIC
    v3_init_symbiotic_vm(vm);
#endif

    v3_init_dev_mgr(vm);

    // init SVM/VMX io and msr maps
    switch (cpu_type) {
        case V3_SVM_CPU:
        case V3_SVM_REV3_CPU:
            v3_init_svm_io_map(vm);
            v3_init_svm_msr_map(vm);
            break;
        case V3_VMX_CPU:
        case V3_VMX_EPT_CPU:
        case V3_VMX_EPT_UG_CPU:
            v3_init_vmx_io_map(vm);
            v3_init_vmx_msr_map(vm);
            break;
        default:
            PrintError("Invalid CPU Type 0x%x\n", cpu_type);
            return -1;
    }

    v3_register_hypercall(vm, GUEST_INFO_HCALL, info_hcall, NULL);

    V3_Print("GUEST_INFO_HCALL=%x\n", GUEST_INFO_HCALL);

    return 0;
}

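/*
 * Per-VM teardown: releases the same subsystems in roughly the reverse
 * order of v3_init_vm().
 */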
int v3_free_vm_internal(struct v3_vm_info * vm) {
    v3_cpu_arch_t cpu_type = v3_get_cpu_type(V3_Get_CPU());

    v3_remove_hypercall(vm, GUEST_INFO_HCALL);

#ifdef V3_CONFIG_SYMBIOTIC
    v3_deinit_symbiotic_vm(vm);
#endif

    // deinit SVM/VMX io and msr maps
    switch (cpu_type) {
        case V3_SVM_CPU:
        case V3_SVM_REV3_CPU:
            v3_deinit_svm_io_map(vm);
            v3_deinit_svm_msr_map(vm);
            break;
        case V3_VMX_CPU:
        case V3_VMX_EPT_CPU:
        case V3_VMX_EPT_UG_CPU:
            v3_deinit_vmx_io_map(vm);
            v3_deinit_vmx_msr_map(vm);
            break;
        default:
            PrintError("Invalid CPU Type 0x%x\n", cpu_type);
            return -1;
    }

    v3_deinit_dev_mgr(vm);

    v3_deinit_time_vm(vm);

    v3_deinit_mem_hooks(vm);
    v3_delete_mem_map(vm);
    v3_deinit_shdw_impl(vm);

    v3_deinit_intr_routers(vm);
    v3_deinit_host_events(vm);

    v3_deinit_cpuid_map(vm);
    v3_deinit_msr_map(vm);
    v3_deinit_io_map(vm);
    v3_deinit_hypercall_map(vm);

#ifdef V3_CONFIG_TELEMETRY
    v3_deinit_telemetry(vm);
#endif

    return 0;
}

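/*
 * Per-core initialization: telemetry, shadow paging state, time, interrupt
 * controllers, exception state, and the instruction decoder, followed by
 * creation of the hardware context (VMCB for SVM, VMCS for VMX).
 */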
int v3_init_core(struct guest_info * core) {
    v3_cpu_arch_t cpu_type = v3_get_cpu_type(V3_Get_CPU());
    struct v3_vm_info * vm = core->vm_info;

    /*
     * Initialize the subsystem data structures
     */
#ifdef V3_CONFIG_TELEMETRY
    v3_init_core_telemetry(core);
#endif

    if (core->shdw_pg_mode == SHADOW_PAGING) {
        v3_init_shdw_pg_state(core);
    }

    v3_init_time_core(core);
    v3_init_intr_controllers(core);
    v3_init_exception_state(core);

    v3_init_decoder(core);

#ifdef V3_CONFIG_SYMBIOTIC
    v3_init_symbiotic_core(core);
#endif

    // init the SVM/VMX per-core hardware context
    switch (cpu_type) {
        case V3_SVM_CPU:
        case V3_SVM_REV3_CPU:
            if (v3_init_svm_vmcb(core, vm->vm_class) == -1) {
                PrintError("Error in SVM initialization\n");
                return -1;
            }
            break;
        case V3_VMX_CPU:
        case V3_VMX_EPT_CPU:
        case V3_VMX_EPT_UG_CPU:
            if (v3_init_vmx_vmcs(core, vm->vm_class) == -1) {
                PrintError("Error in VMX initialization\n");
                return -1;
            }
            break;
        default:
            PrintError("Invalid CPU Type 0x%x\n", cpu_type);
            return -1;
    }

    return 0;
}

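/*
 * Per-core teardown: mirrors v3_init_core(), releasing the decoder,
 * interrupt controllers, timing state, paging state, and the hardware
 * VMCB/VMCS context.
 */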
int v3_free_core(struct guest_info * core) {
    v3_cpu_arch_t cpu_type = v3_get_cpu_type(V3_Get_CPU());

#ifdef V3_CONFIG_SYMBIOTIC
    v3_deinit_symbiotic_core(core);
#endif

    v3_deinit_decoder(core);

    v3_deinit_intr_controllers(core);
    v3_deinit_time_core(core);

    if (core->shdw_pg_mode == SHADOW_PAGING) {
        v3_deinit_shdw_pg_state(core);
    }

    v3_free_passthrough_pts(core);

#ifdef V3_CONFIG_TELEMETRY
    v3_deinit_core_telemetry(core);
#endif

    switch (cpu_type) {
        case V3_SVM_CPU:
        case V3_SVM_REV3_CPU:
            if (v3_deinit_svm_vmcb(core) == -1) {
                PrintError("Error in SVM deinitialization\n");
                return -1;
            }
            break;
        case V3_VMX_CPU:
        case V3_VMX_EPT_CPU:
        case V3_VMX_EPT_UG_CPU:
            if (v3_deinit_vmx_vmcs(core) == -1) {
                PrintError("Error in VMX deinitialization\n");
                return -1;
            }
            break;
        default:
            PrintError("Invalid CPU Type 0x%x\n", cpu_type);
            return -1;
    }

    return 0;
}