/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vm_guest.h>
#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm.h>
#include <palacios/vmm_decoder.h>
#include <palacios/vmcb.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/vmm_sprintf.h>
#include <palacios/vmm_xed.h>
#include <palacios/vmm_direct_paging.h>
#include <palacios/vmm_barrier.h>

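/*
 * Determine the guest's current CPU execution mode from its control
 * registers and CS descriptor.  Note where the guest's "true" CR0/EFER
 * live: under shadow paging they are the virtualized copies kept in
 * shdw_pg_state, while under nested paging the hardware control
 * registers hold the guest's values directly.
 */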
v3_cpu_mode_t v3_get_vm_cpu_mode(struct guest_info * info) {
    struct cr0_32 * cr0;
    struct efer_64 * efer;
    struct cr4_32 * cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
    struct v3_segment * cs = &(info->segments.cs);

    if (info->shdw_pg_mode == SHADOW_PAGING) {
        cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
        efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
    } else if (info->shdw_pg_mode == NESTED_PAGING) {
        cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
        efer = (struct efer_64 *)&(info->ctrl_regs.efer);
    } else {
        PrintError("Invalid Paging Mode...\n");
        V3_ASSERT(0);
        return REAL;
    }

    if (cr0->pe == 0) {
        return REAL;
    } else if ((cr4->pae == 0) && (efer->lme == 0)) {
        return PROTECTED;
    } else if (efer->lme == 0) {
        return PROTECTED_PAE;
    } else if ((efer->lme == 1) && (cs->long_mode == 1)) {
        return LONG;
    } else {
        // What about LONG_16_COMPAT???
        return LONG_32_COMPAT;
    }
}

// Get address width in bytes
uint_t v3_get_addr_width(struct guest_info * info) {
    struct cr0_32 * cr0;
    struct cr4_32 * cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
    struct efer_64 * efer;
    struct v3_segment * cs = &(info->segments.cs);

    if (info->shdw_pg_mode == SHADOW_PAGING) {
        cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
        efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
    } else if (info->shdw_pg_mode == NESTED_PAGING) {
        cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
        efer = (struct efer_64 *)&(info->ctrl_regs.efer);
    } else {
        PrintError("Invalid Paging Mode...\n");
        V3_ASSERT(0);
        return 0;
    }

    if (cr0->pe == 0) {
        return 2;
    } else if ((cr4->pae == 0) && (efer->lme == 0)) {
        return 4;
    } else if (efer->lme == 0) {
        return 4;
    } else if ((efer->lme == 1) && (cs->long_mode == 1)) {
        return 8;
    } else {
        // What about LONG_16_COMPAT???
        return 4;
    }
}

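/*
 * For example, a guest running 32-bit protected-mode code with PAE
 * enabled still gets a 4-byte width here: PAE widens the page-table
 * entries, not the effective address size.  Only 64-bit long-mode
 * code sees 8.
 */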
static const uchar_t REAL_STR[] = "Real";
static const uchar_t PROTECTED_STR[] = "Protected";
static const uchar_t PROTECTED_PAE_STR[] = "Protected+PAE";
static const uchar_t LONG_STR[] = "Long";
static const uchar_t LONG_32_COMPAT_STR[] = "32bit Compat";
static const uchar_t LONG_16_COMPAT_STR[] = "16bit Compat";

const uchar_t * v3_cpu_mode_to_str(v3_cpu_mode_t mode) {
    switch (mode) {
        case REAL:
            return REAL_STR;
        case PROTECTED:
            return PROTECTED_STR;
        case PROTECTED_PAE:
            return PROTECTED_PAE_STR;
        case LONG:
            return LONG_STR;
        case LONG_32_COMPAT:
            return LONG_32_COMPAT_STR;
        case LONG_16_COMPAT:
            return LONG_16_COMPAT_STR;
        default:
            return NULL;
    }
}

v3_mem_mode_t v3_get_vm_mem_mode(struct guest_info * info) {
    struct cr0_32 * cr0;

    if (info->shdw_pg_mode == SHADOW_PAGING) {
        cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
    } else if (info->shdw_pg_mode == NESTED_PAGING) {
        cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
    } else {
        PrintError("Invalid Paging Mode...\n");
        V3_ASSERT(0);
        return 0;
    }

    // With guest paging disabled, linear addresses are physical addresses
    if (cr0->pg == 0) {
        return PHYSICAL_MEM;
    } else {
        return VIRTUAL_MEM;
    }
}

static const uchar_t PHYS_MEM_STR[] = "Physical Memory";
static const uchar_t VIRT_MEM_STR[] = "Virtual Memory";

const uchar_t * v3_mem_mode_to_str(v3_mem_mode_t mode) {
    switch (mode) {
        case PHYSICAL_MEM:
            return PHYS_MEM_STR;
        case VIRTUAL_MEM:
            return VIRT_MEM_STR;
        default:
            return NULL;
    }
}

void v3_print_segments(struct v3_segments * segs) {
    int i = 0;
    struct v3_segment * seg_ptr;

    // walk the segment registers as a flat array
    seg_ptr = (struct v3_segment *)segs;

    char * seg_names[] = {"CS", "DS", "ES", "FS", "GS", "SS", "LDTR", "GDTR", "IDTR", "TR", NULL};
    V3_Print("Segments\n");

    for (i = 0; seg_names[i] != NULL; i++) {
        V3_Print("\t%s: Sel=%x, base=%p, limit=%x (long_mode=%d, db=%d)\n", seg_names[i], seg_ptr[i].selector,
                 (void *)(addr_t)seg_ptr[i].base, seg_ptr[i].limit,
                 seg_ptr[i].long_mode, seg_ptr[i].db);
    }
}

// We don't handle those fancy 64 bit system segments...
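//
// A descriptor is looked up by masking off the selector's low three bits
// (the RPL and table-indicator fields), using the result as a byte offset
// into the GDT, and then reassembling the base and limit fields that the
// x86 descriptor format scatters across its 8 bytes for historical reasons.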
int v3_translate_segment(struct guest_info * info, uint16_t selector, struct v3_segment * seg) {
    struct v3_segment * gdt = &(info->segments.gdtr);
    addr_t gdt_addr = 0;
    uint16_t seg_offset = (selector & ~0x7);
    addr_t seg_addr = 0;
    struct gen_segment * gen_seg = NULL;
    struct seg_selector sel;

    memset(seg, 0, sizeof(struct v3_segment));

    sel.value = selector;

    if (sel.ti == 1) {
        PrintError("LDT translations not supported\n");
        return -1;
    }

    if (v3_gva_to_hva(info, gdt->base, &gdt_addr) == -1) {
        PrintError("Unable to translate GDT address\n");
        return -1;
    }

    seg_addr = gdt_addr + seg_offset;
    gen_seg = (struct gen_segment *)seg_addr;

    // translate
    seg->selector = selector;

    seg->limit = gen_seg->limit_hi;
    seg->limit <<= 16;
    seg->limit += gen_seg->limit_lo;

    seg->base = gen_seg->base_hi;
    seg->base <<= 24;
    seg->base += gen_seg->base_lo;

    if (gen_seg->granularity == 1) {
        // limit is in 4KB pages; scale it to bytes
        seg->limit <<= 12;
        seg->limit |= 0xfff;
    }

    seg->type = gen_seg->type;
    seg->system = gen_seg->system;
    seg->dpl = gen_seg->dpl;
    seg->present = gen_seg->present;
    seg->avail = gen_seg->avail;
    seg->long_mode = gen_seg->long_mode;
    seg->db = gen_seg->db;
    seg->granularity = gen_seg->granularity;

    return 0;
}

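/*
 * Hypothetical usage sketch: resolving the target of a far transfer while
 * emulating a guest instruction (tmp_seg and new_cs_sel are illustrative
 * names, not part of this file):
 *
 *     struct v3_segment tmp_seg;
 *     if (v3_translate_segment(core, new_cs_sel, &tmp_seg) == -1) {
 *         // selector was LDT-relative or the GDT was unmappable
 *     }
 */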
void v3_print_ctrl_regs(struct guest_info * info) {
    struct v3_ctrl_regs * regs = &(info->ctrl_regs);
    int i = 0;
    v3_reg_t * reg_ptr;
    char * reg_names[] = {"CR0", "CR2", "CR3", "CR4", "CR8", "FLAGS", "EFER", NULL};

    reg_ptr = (v3_reg_t *)regs;

    V3_Print("Ctrl Regs:\n");

    for (i = 0; reg_names[i] != NULL; i++) {
        V3_Print("\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));
    }
}

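/*
 * Note: the loop above prints registers by walking struct v3_ctrl_regs as
 * a flat array of v3_reg_t, so the order of reg_names must match the
 * member order of the struct exactly.  The GPR printers further below
 * rely on the same trick against struct v3_gprs.
 */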
static int safe_gva_to_hva(struct guest_info * info, addr_t linear_addr, addr_t * host_addr) {
    /* select the proper translation based on guest mode */
    if (info->mem_mode == PHYSICAL_MEM) {
        /* paging is off, so linear addresses are guest-physical */
        if (v3_gpa_to_hva(info, linear_addr, host_addr) == -1) return -1;
    } else if (info->mem_mode == VIRTUAL_MEM) {
        if (v3_gva_to_hva(info, linear_addr, host_addr) == -1) return -1;
    }
    return 0;
}

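/*
 * x86 instructions are variable-length, so there is no reliable way to
 * disassemble backwards from an arbitrary RIP.  Starting well before RIP
 * and decoding forward usually resynchronizes with the real instruction
 * stream before RIP is reached.
 */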
static int v3_print_disassembly(struct guest_info * info) {
    int passed_rip = 0;
    addr_t rip, rip_linear, rip_host;

    /* we don't know where the instructions preceding RIP start, so we just take
     * a guess and hope the instruction stream synced up with our disassembly
     * some time before RIP; if it has not, we correct RIP at that point */

    /* start disassembly 64 bytes before current RIP, continue 32 bytes after */
    rip = (addr_t)info->rip - 64;
    while ((int)(rip - info->rip) < 32) {
        V3_Print("disassembly step\n");

        /* always print RIP, even if the instructions before were bad */
        if (!passed_rip && rip >= info->rip) {
            if (rip != info->rip) {
                V3_Print("***** bad disassembly up to this point *****\n");
                rip = info->rip;
            }
            passed_rip = 1;
        }

        /* look up host virtual address for this instruction */
        rip_linear = get_addr_linear(info, rip, &(info->segments.cs));
        if (safe_gva_to_hva(info, rip_linear, &rip_host) < 0) {
            rip++;
            continue;
        }

        /* print disassembled instruction (updates rip) */
        if (v3_disasm(info, (void *)rip_host, &rip, rip == info->rip) < 0) {
            rip++;
            continue;
        }
    }

    return 0;
}

void v3_print_guest_state(struct guest_info * info) {
    addr_t linear_addr = 0;

    V3_Print("RIP: %p\n", (void *)(addr_t)(info->rip));
    linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
    V3_Print("RIP Linear: %p\n", (void *)linear_addr);

    V3_Print("NumExits: %u\n", (uint32_t)info->num_exits);

    V3_Print("IRQ STATE: started=%d, pending=%d\n",
             info->intr_core_state.irq_started,
             info->intr_core_state.irq_pending);
    V3_Print("EXCP STATE: err_code_valid=%d, err_code=%x\n",
             info->excp_state.excp_error_code_valid,
             info->excp_state.excp_error_code);

    v3_print_segments(&(info->segments));
    v3_print_ctrl_regs(info);

    if (info->shdw_pg_mode == SHADOW_PAGING) {
        V3_Print("Shadow Paging Guest Registers:\n");
        V3_Print("\tGuest CR0=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr0));
        V3_Print("\tGuest CR3=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr3));
        V3_Print("\tGuest EFER=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_efer.value));
    }

    v3_print_GPRs(info);

    v3_print_mem_map(info->vm_info);

    v3_print_stack(info);

    //  v3_print_disassembly(info);
}

void v3_print_guest_state_all(struct v3_vm_info * vm) {
    int i = 0;

    V3_Print("VM Core states for %s\n", vm->name);

    for (i = 0; i < 80; i++) {
        V3_Print("-");
    }

    for (i = 0; i < vm->num_cores; i++) {
        v3_print_guest_state(&vm->cores[i]);
    }

    for (i = 0; i < 80; i++) {
        V3_Print("-");
    }

    V3_Print("\n");
}

static void print_real_mode_stack(struct guest_info * info)
{
    uint16_t ss;
    uint16_t sp;
    addr_t addr;
    addr_t host_addr;
    int i;

    ss = info->segments.ss.selector & 0xffff;
    sp = info->vm_regs.rsp & 0xffff;
    addr = (((uint32_t)ss) << 4) + sp;

    V3_Print("Real Mode Stack starting at 0x%x:0x%x (0x%p):\n", ss, sp, (void *)addr);

    if (info->mem_mode != PHYSICAL_MEM) {
        PrintError("Cannot print real mode stack when virtual memory active\n");
        return;
    }

    for (i = 0; i <= 24; i++, sp += 2) {
        // note that it's correct for this to wrap around
        addr = (((uint32_t)ss) << 4) + sp;
        if (v3_gpa_to_hva(info, addr, &host_addr)) {
            PrintError("Could not translate physical stack address 0x%p\n", (void *)addr);
            return;
        }
        V3_Print("\t0x%.4x\n", *((uint16_t *)host_addr));
    }
}

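/*
 * Real-mode addressing worked example: with SS=0x9000 and SP=0xFFFE, the
 * first stack slot printed above lives at physical address
 * (0x9000 << 4) + 0xFFFE = 0x9FFFE, and SP deliberately wraps within the
 * 64KB segment as the loop walks upward.
 */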
void v3_print_stack(struct guest_info * info) {
    addr_t linear_addr = 0;
    addr_t host_addr = 0;
    int i = 0;
    v3_cpu_mode_t cpu_mode = v3_get_vm_cpu_mode(info);

    if (cpu_mode == REAL) {
        print_real_mode_stack(info);
        return;
    }

    // protected mode, 32 or 64 bit

    linear_addr = get_addr_linear(info, info->vm_regs.rsp, &(info->segments.ss));

    V3_Print("Stack at %p:\n", (void *)linear_addr);

    if (info->mem_mode == PHYSICAL_MEM) {
        if (v3_gpa_to_hva(info, linear_addr, &host_addr) == -1) {
            PrintError("Could not translate Stack address\n");
            return;
        }
    } else if (info->mem_mode == VIRTUAL_MEM) {
        if (v3_gva_to_hva(info, linear_addr, &host_addr) == -1) {
            PrintError("Could not translate Virtual Stack address\n");
            return;
        }
    }

    V3_Print("Host Address of rsp = 0x%p\n", (void *)host_addr);

    // dump the 25 elements starting at the current stack pointer
    for (i = 0; i <= 24; i++) {
        if (cpu_mode == LONG) {
            V3_Print("\t%p\n", (void *)*(addr_t *)(host_addr + (i * 8)));
        } else {
            // 32 bit stacks...
            V3_Print("\t%.8x\n", *(uint32_t *)(host_addr + (i * 4)));
        }
    }
}

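/*
 * Two build-time variants of v3_print_GPRs follow: a 32-bit host only has
 * the low eight GPRs to show, while a 64-bit host also prints R8-R15.
 */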
#ifdef __V3_32BIT__

void v3_print_GPRs(struct guest_info * info) {
    struct v3_gprs * regs = &(info->vm_regs);
    int i = 0;
    v3_reg_t * reg_ptr;
    char * reg_names[] = { "RDI", "RSI", "RBP", "RSP", "RBX", "RDX", "RCX", "RAX", NULL};

    reg_ptr = (v3_reg_t *)regs;

    V3_Print("32 bit GPRs:\n");

    for (i = 0; reg_names[i] != NULL; i++) {
        V3_Print("\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));
    }
}

#elif __V3_64BIT__

void v3_print_GPRs(struct guest_info * info) {
    struct v3_gprs * regs = &(info->vm_regs);
    int i = 0;
    v3_reg_t * reg_ptr;
    char * reg_names[] = { "RDI", "RSI", "RBP", "RSP", "RBX", "RDX", "RCX", "RAX",
                           "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15", NULL};

    reg_ptr = (v3_reg_t *)regs;

    V3_Print("64 bit GPRs:\n");

    for (i = 0; reg_names[i] != NULL; i++) {
        V3_Print("\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));
    }
}

#endif

#include <palacios/vmcs.h>
#include <palacios/vmcb.h>

static int info_hcall(struct guest_info * core, uint_t hcall_id, void * priv_data) {
    extern v3_cpu_arch_t v3_mach_type;
    int cpu_valid = 0;

    V3_Print("************** Guest State ************\n");
    v3_print_guest_state(core);

#ifdef V3_CONFIG_SVM
    if ((v3_mach_type == V3_SVM_CPU) || (v3_mach_type == V3_SVM_REV3_CPU)) {
        cpu_valid = 1;
        PrintDebugVMCB((vmcb_t *)(core->vmm_data));
    }
#endif
#ifdef V3_CONFIG_VMX
    if ((v3_mach_type == V3_VMX_CPU) || (v3_mach_type == V3_VMX_EPT_CPU) || (v3_mach_type == V3_VMX_EPT_UG_CPU)) {
        cpu_valid = 1;
        v3_print_vmcs();
    }
#endif
    if (cpu_valid == 0) {
        PrintError("Invalid CPU Type 0x%x\n", v3_mach_type);
        return -1;
    }

    return 0;
}

#ifdef V3_CONFIG_SVM
#include <palacios/svm.h>
#include <palacios/svm_io.h>
#include <palacios/svm_msr.h>
#endif

#ifdef V3_CONFIG_VMX
#include <palacios/vmx.h>
#include <palacios/vmx_io.h>
#include <palacios/vmx_msr.h>
#endif

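/*
 * VM-wide initialization: everything here is per-VM state shared by all
 * cores (hypercall/IO/MSR maps, the memory map, interrupt routing);
 * per-core state is set up separately in v3_init_core() below.
 */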
int v3_init_vm(struct v3_vm_info * vm) {
    extern v3_cpu_arch_t v3_mach_type;

#ifdef V3_CONFIG_TELEMETRY
    v3_init_telemetry(vm);
#endif

    v3_init_hypercall_map(vm);
    v3_init_io_map(vm);
    v3_init_msr_map(vm);
    v3_init_cpuid_map(vm);
    v3_init_host_events(vm);
    v3_init_intr_routers(vm);
    v3_init_ext_manager(vm);

    v3_init_barrier(vm);

    // Initialize the memory map
    if (v3_init_mem_map(vm) == -1) {
        PrintError("Could not initialize shadow map\n");
        return -1;
    }

    v3_init_mem_hooks(vm);

    if (v3_init_shdw_impl(vm) == -1) {
        PrintError("VM initialization error in shadow implementation\n");
        return -1;
    }

    v3_init_time_vm(vm);

#ifdef V3_CONFIG_SYMBIOTIC
    v3_init_symbiotic_vm(vm);
#endif

    v3_init_dev_mgr(vm);

    // init SVM/VMX
    switch (v3_mach_type) {
#ifdef V3_CONFIG_SVM
        case V3_SVM_CPU:
        case V3_SVM_REV3_CPU:
            v3_init_svm_io_map(vm);
            v3_init_svm_msr_map(vm);
            break;
#endif
#ifdef V3_CONFIG_VMX
        case V3_VMX_CPU:
        case V3_VMX_EPT_CPU:
        case V3_VMX_EPT_UG_CPU:
            v3_init_vmx_io_map(vm);
            v3_init_vmx_msr_map(vm);
            break;
#endif
        default:
            PrintError("Invalid CPU Type 0x%x\n", v3_mach_type);
            return -1;
    }

    v3_register_hypercall(vm, GUEST_INFO_HCALL, info_hcall, NULL);

    V3_Print("GUEST_INFO_HCALL=%x\n", GUEST_INFO_HCALL);

    return 0;
}

int v3_free_vm_internal(struct v3_vm_info * vm) {
    extern v3_cpu_arch_t v3_mach_type;

    v3_remove_hypercall(vm, GUEST_INFO_HCALL);

#ifdef V3_CONFIG_SYMBIOTIC
    v3_deinit_symbiotic_vm(vm);
#endif

    // deinit SVM/VMX
    switch (v3_mach_type) {
#ifdef V3_CONFIG_SVM
        case V3_SVM_CPU:
        case V3_SVM_REV3_CPU:
            v3_deinit_svm_io_map(vm);
            v3_deinit_svm_msr_map(vm);
            break;
#endif
#ifdef V3_CONFIG_VMX
        case V3_VMX_CPU:
        case V3_VMX_EPT_CPU:
        case V3_VMX_EPT_UG_CPU:
            v3_deinit_vmx_io_map(vm);
            v3_deinit_vmx_msr_map(vm);
            break;
#endif
        default:
            PrintError("Invalid CPU Type 0x%x\n", v3_mach_type);
            return -1;
    }

    v3_deinit_dev_mgr(vm);

    v3_deinit_time_vm(vm);

    v3_deinit_mem_hooks(vm);
    v3_delete_mem_map(vm);
    v3_deinit_shdw_impl(vm);

    v3_deinit_intr_routers(vm);
    v3_deinit_host_events(vm);

    v3_deinit_barrier(vm);

    v3_deinit_cpuid_map(vm);
    v3_deinit_msr_map(vm);
    v3_deinit_io_map(vm);
    v3_deinit_hypercall_map(vm);

#ifdef V3_CONFIG_TELEMETRY
    v3_deinit_telemetry(vm);
#endif

    return 0;
}

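/*
 * Per-core initialization: called once per virtual core after v3_init_vm()
 * has set up the shared VM state.  The arch-specific control block for the
 * core, a VMCB (SVM) or VMCS (VMX), is allocated and configured here.
 */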
int v3_init_core(struct guest_info * core) {
    extern v3_cpu_arch_t v3_mach_type;
    struct v3_vm_info * vm = core->vm_info;

    /*
     * Initialize the subsystem data structures
     */
#ifdef V3_CONFIG_TELEMETRY
    v3_init_core_telemetry(core);
#endif

    if (core->shdw_pg_mode == SHADOW_PAGING) {
        v3_init_shdw_pg_state(core);
    }

    v3_init_time_core(core);
    v3_init_intr_controllers(core);
    v3_init_exception_state(core);

    v3_init_decoder(core);

#ifdef V3_CONFIG_SYMBIOTIC
    v3_init_symbiotic_core(core);
#endif

    // init SVM/VMX
    switch (v3_mach_type) {
#ifdef V3_CONFIG_SVM
        case V3_SVM_CPU:
        case V3_SVM_REV3_CPU:
            if (v3_init_svm_vmcb(core, vm->vm_class) == -1) {
                PrintError("Error in SVM initialization\n");
                return -1;
            }
            break;
#endif
#ifdef V3_CONFIG_VMX
        case V3_VMX_CPU:
        case V3_VMX_EPT_CPU:
        case V3_VMX_EPT_UG_CPU:
            if (v3_init_vmx_vmcs(core, vm->vm_class) == -1) {
                PrintError("Error in VMX initialization\n");
                return -1;
            }
            break;
#endif
        default:
            PrintError("Invalid CPU Type 0x%x\n", v3_mach_type);
            return -1;
    }

    return 0;
}

int v3_free_core(struct guest_info * core) {
    extern v3_cpu_arch_t v3_mach_type;

#ifdef V3_CONFIG_SYMBIOTIC
    v3_deinit_symbiotic_core(core);
#endif

    v3_deinit_decoder(core);

    v3_deinit_intr_controllers(core);
    v3_deinit_time_core(core);

    if (core->shdw_pg_mode == SHADOW_PAGING) {
        v3_deinit_shdw_pg_state(core);
    }

    v3_free_passthrough_pts(core);

#ifdef V3_CONFIG_TELEMETRY
    v3_deinit_core_telemetry(core);
#endif

    switch (v3_mach_type) {
#ifdef V3_CONFIG_SVM
        case V3_SVM_CPU:
        case V3_SVM_REV3_CPU:
            if (v3_deinit_svm_vmcb(core) == -1) {
                PrintError("Error in SVM deinitialization\n");
                return -1;
            }
            break;
#endif
#ifdef V3_CONFIG_VMX
        case V3_VMX_CPU:
        case V3_VMX_EPT_CPU:
        case V3_VMX_EPT_UG_CPU:
            if (v3_deinit_vmx_vmcs(core) == -1) {
                PrintError("Error in VMX deinitialization\n");
                return -1;
            }
            break;
#endif
        default:
            PrintError("Invalid CPU Type 0x%x\n", v3_mach_type);
            return -1;
    }

    return 0;
}