2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
23 #include <palacios/vm_guest.h>
24 #include <palacios/vmm_ctrl_regs.h>
25 #include <palacios/vmm.h>
26 #include <palacios/vmm_decoder.h>
27 #include <palacios/vmcb.h>
28 #include <palacios/vm_guest_mem.h>
29 #include <palacios/vmm_lowlevel.h>
30 #include <palacios/vmm_sprintf.h>
31 #include <palacios/vmm_muxer.h>
// Determine the guest's current CPU execution mode (REAL / PROTECTED /
// PROTECTED_PAE / LONG / LONG_32_COMPAT) from its CR0, CR4, EFER and CS state.
// NOTE(review): this listing is elided -- the early return paths (REAL,
// PROTECTED, ...) and several closing braces fall on original lines not
// visible here; do not assume the visible branches are exhaustive.
34 v3_cpu_mode_t v3_get_vm_cpu_mode(struct guest_info * info) {
36 struct efer_64 * efer;
37 struct cr4_32 * cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
38 struct v3_segment * cs = &(info->segments.cs);
// SVM-specific: under nested paging the guest's EFER is read out of the
// VMCB save-state area rather than the virtualized shadow state.
39 vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
// With shadow paging the guest's view of CR0/EFER is kept separately in
// shdw_pg_state; with nested paging the hardware-visible copies are used.
41 if (info->shdw_pg_mode == SHADOW_PAGING) {
42 cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
43 efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
44 } else if (info->shdw_pg_mode == NESTED_PAGING) {
45 cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
46 efer = (struct efer_64 *)&(guest_state->efer);
// Unreachable paging-mode value: report it (fallback return is elided here).
48 PrintError("Invalid Paging Mode...\n");
// Mode decision chain: PAE off + long mode off, then long mode off (PAE on),
// then full 64-bit long mode when EFER.LME and CS.L are both set.
55 } else if ((cr4->pae == 0) && (efer->lme == 0)) {
57 } else if (efer->lme == 0) {
59 } else if ((efer->lme == 1) && (cs->long_mode == 1)) {
// Long mode with CS.L clear => compatibility sub-mode.
62 // What about LONG_16_COMPAT???
63 return LONG_32_COMPAT;
67 // Get address width in bytes
// Mirrors the CR0/EFER sourcing logic of v3_get_vm_cpu_mode above: shadow
// paging reads the virtualized guest copies, nested paging reads the real
// control regs and the (SVM) VMCB save area.
// NOTE(review): elided listing -- every `return <width>` line is on an
// original line not visible here; the visible `else if` chain only shows the
// mode classification, not the widths returned.
68 uint_t v3_get_addr_width(struct guest_info * info) {
70 struct cr4_32 * cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
71 struct efer_64 * efer;
72 struct v3_segment * cs = &(info->segments.cs);
73 vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
75 if (info->shdw_pg_mode == SHADOW_PAGING) {
76 cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
77 efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
78 } else if (info->shdw_pg_mode == NESTED_PAGING) {
79 cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
80 efer = (struct efer_64 *)&(guest_state->efer);
82 PrintError("Invalid Paging Mode...\n");
// Same mode chain as v3_get_vm_cpu_mode; the per-mode widths are elided.
89 } else if ((cr4->pae == 0) && (efer->lme == 0)) {
91 } else if (efer->lme == 0) {
93 } else if ((efer->lme == 1) && (cs->long_mode == 1)) {
96 // What about LONG_16_COMPAT???
// Human-readable names for each v3_cpu_mode_t value; consumed by
// v3_cpu_mode_to_str() below.
102 static const uchar_t REAL_STR[] = "Real";
103 static const uchar_t PROTECTED_STR[] = "Protected";
104 static const uchar_t PROTECTED_PAE_STR[] = "Protected+PAE";
105 static const uchar_t LONG_STR[] = "Long";
106 static const uchar_t LONG_32_COMPAT_STR[] = "32bit Compat";
107 static const uchar_t LONG_16_COMPAT_STR[] = "16bit Compat";
// Map a v3_cpu_mode_t to its display string.
// NOTE(review): elided listing -- the switch statement, its case labels, the
// REAL/LONG cases and the default/NULL fallback are on lines not visible here;
// only a subset of the return statements appears below.
109 const uchar_t * v3_cpu_mode_to_str(v3_cpu_mode_t mode) {
114 return PROTECTED_STR;
116 return PROTECTED_PAE_STR;
120 return LONG_32_COMPAT_STR;
122 return LONG_16_COMPAT_STR;
// Determine whether the guest currently addresses PHYSICAL or VIRTUAL memory.
// Selects the guest's CR0 the same way as v3_get_vm_cpu_mode (shadow vs
// nested paging). NOTE(review): elided listing -- the cr0 declaration and the
// actual return logic (presumably keyed on CR0.PG -- confirm against the full
// source) are on lines not visible here.
128 v3_mem_mode_t v3_get_vm_mem_mode(struct guest_info * info) {
131 if (info->shdw_pg_mode == SHADOW_PAGING) {
132 cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
133 } else if (info->shdw_pg_mode == NESTED_PAGING) {
134 cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
136 PrintError("Invalid Paging Mode...\n");
// Display names for the two v3_mem_mode_t values; used by v3_mem_mode_to_str().
148 static const uchar_t PHYS_MEM_STR[] = "Physical Memory";
149 static const uchar_t VIRT_MEM_STR[] = "Virtual Memory";
// Map a v3_mem_mode_t to its display string.
// NOTE(review): the entire body of this function is elided from this listing.
151 const uchar_t * v3_mem_mode_to_str(v3_mem_mode_t mode) {
// Debug dump of all guest segment registers.
// Treats struct v3_segments as a flat array of struct v3_segment, walked in
// parallel with seg_names[] -- so the name order below must match the member
// order of struct v3_segments exactly.
// NOTE(review): elided listing -- the declaration of `i` and the closing
// braces are on lines not visible here.
163 void v3_print_segments(struct v3_segments * segs) {
165 struct v3_segment * seg_ptr;
167 seg_ptr=(struct v3_segment *)segs;
169 char *seg_names[] = {"CS", "DS" , "ES", "FS", "GS", "SS" , "LDTR", "GDTR", "IDTR", "TR", NULL};
170 V3_Print("Segments\n");
172 for (i = 0; seg_names[i] != NULL; i++) {
174 V3_Print("\t%s: Sel=%x, base=%p, limit=%x (long_mode=%d, db=%d)\n", seg_names[i], seg_ptr[i].selector,
175 (void *)(addr_t)seg_ptr[i].base, seg_ptr[i].limit,
176 seg_ptr[i].long_mode, seg_ptr[i].db);
182 // We don't handle those fancy 64 bit system segments...
// Load the descriptor named by `selector` from the guest GDT and unpack it
// into *seg. LDT-based selectors are rejected. Returns 0 on success and -1
// on failure (error returns are on elided lines -- confirm in full source).
// NOTE(review): elided listing -- the gdt_addr/seg_addr declarations, the TI
// (LDT) check around the PrintError below, and the high/low shift lines used
// when assembling `limit` and `base` are all on lines not visible here.
184 int v3_translate_segment(struct guest_info * info, uint16_t selector, struct v3_segment * seg) {
185 struct v3_segment * gdt = &(info->segments.gdtr);
// Mask off RPL and TI bits to get the byte offset into the GDT.
187 uint16_t seg_offset = (selector & ~0x7);
189 struct gen_segment * gen_seg = NULL;
190 struct seg_selector sel;
192 memset(seg, 0, sizeof(struct v3_segment));
194 sel.value = selector;
197 PrintError("LDT translations not supported\n");
// The GDT base is a guest virtual address; translate it to a host VA first.
201 if (v3_gva_to_hva(info, gdt->base, &gdt_addr) == -1) {
202 PrintError("Unable to translate GDT address\n");
206 seg_addr = gdt_addr + seg_offset;
207 gen_seg = (struct gen_segment *)seg_addr;
210 seg->selector = selector;
// Reassemble the split limit and base fields from the descriptor; the
// intervening shift of the high parts is elided from this listing.
212 seg->limit = gen_seg->limit_hi;
214 seg->limit += gen_seg->limit_lo;
216 seg->base = gen_seg->base_hi;
218 seg->base += gen_seg->base_lo;
// Granularity bit set => limit is in 4K pages (scaling line elided).
220 if (gen_seg->granularity == 1) {
// Copy the remaining descriptor attribute bits straight through.
225 seg->type = gen_seg->type;
226 seg->system = gen_seg->system;
227 seg->dpl = gen_seg->dpl;
228 seg->present = gen_seg->present;
229 seg->avail = gen_seg->avail;
230 seg->long_mode = gen_seg->long_mode;
231 seg->db = gen_seg->db;
232 seg->granularity = gen_seg->granularity;
// Debug dump of the guest control registers plus (SVM) EFER.
// Treats struct v3_ctrl_regs as a flat v3_reg_t array walked in parallel with
// reg_names[], so the name order must match the struct's member order.
// NOTE(review): elided listing -- declarations of `reg_ptr` and `i` and the
// closing braces are on lines not visible here. Reading EFER from the VMCB
// save area makes this function SVM-specific as written.
240 void v3_print_ctrl_regs(struct guest_info * info) {
241 struct v3_ctrl_regs * regs = &(info->ctrl_regs);
244 char * reg_names[] = {"CR0", "CR2", "CR3", "CR4", "CR8", "FLAGS", NULL};
245 vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(info->vmm_data);
247 reg_ptr = (v3_reg_t *)regs;
249 V3_Print("32 bit Ctrl Regs:\n");
251 for (i = 0; reg_names[i] != NULL; i++) {
252 V3_Print("\t%s=0x%p\n", reg_names[i], (void *)(addr_t)reg_ptr[i]);
255 V3_Print("\tEFER=0x%p\n", (void*)(addr_t)(guest_state->efer));
// Top-level debug dump of a guest core: RIP (raw and CS-linearized), exit
// count, segments, control registers, shadow-paging registers when shadow
// paging is active, the VM memory map, and the guest stack.
// NOTE(review): elided listing -- closing braces and possibly additional
// print statements are on lines not visible here.
260 void v3_print_guest_state(struct guest_info * info) {
261 addr_t linear_addr = 0;
263 V3_Print("RIP: %p\n", (void *)(addr_t)(info->rip));
// Linearize RIP through the CS segment base for the current mode.
264 linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
265 V3_Print("RIP Linear: %p\n", (void *)linear_addr);
267 V3_Print("NumExits: %u\n", (uint32_t)info->num_exits);
269 v3_print_segments(&(info->segments));
270 v3_print_ctrl_regs(info);
272 if (info->shdw_pg_mode == SHADOW_PAGING) {
273 V3_Print("Shadow Paging Guest Registers:\n");
274 V3_Print("\tGuest CR0=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr0));
275 V3_Print("\tGuest CR3=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr3));
276 V3_Print("\tGuest EFER=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_efer.value));
281 v3_print_mem_map(info->vm_info);
283 v3_print_stack(info);
// Debug dump of the top of the guest stack: linearize RSP through SS,
// translate to a host VA (GPA or GVA path depending on the guest's current
// memory mode), then print 25 stack slots sized by CPU mode.
// NOTE(review): elided listing -- the declaration of `i`, the error returns
// after the PrintErrors, and the closing braces are on lines not visible.
287 void v3_print_stack(struct guest_info * info) {
288 addr_t linear_addr = 0;
289 addr_t host_addr = 0;
291 v3_cpu_mode_t cpu_mode = v3_get_vm_cpu_mode(info);
294 linear_addr = get_addr_linear(info, info->vm_regs.rsp, &(info->segments.ss));
296 V3_Print("Stack at %p:\n", (void *)linear_addr);
// Paging off => the linear address is a guest physical address; paging on
// => it is a guest virtual address. Pick the matching translation.
298 if (info->mem_mode == PHYSICAL_MEM) {
299 if (v3_gpa_to_hva(info, linear_addr, &host_addr) == -1) {
300 PrintError("Could not translate Stack address\n");
303 } else if (info->mem_mode == VIRTUAL_MEM) {
304 if (v3_gva_to_hva(info, linear_addr, &host_addr) == -1) {
305 PrintError("Could not translate Virtual Stack address\n");
310 V3_Print("Host Address of rsp = 0x%p\n", (void *)host_addr);
// NOTE(review): the comment below says "start i at one" but the visible loop
// starts at 0 -- stale comment or elided change; confirm against full source.
312 // We start i at one because the current stack pointer points to an unused stack element
313 for (i = 0; i <= 24; i++) {
// 8-byte slots in long mode, 4-byte slots otherwise; 16-bit real-mode
// stacks are not handled.
314 if (cpu_mode == LONG) {
315 V3_Print("\t%p\n", (void *)*(addr_t *)(host_addr + (i * 8)));
316 } else if (cpu_mode == REAL) {
317 V3_Print("Don't currently handle 16 bit stacks... \n");
320 V3_Print("\t%.8x\n", *(uint32_t *)(host_addr + (i * 4)));
// Debug dump of the guest GPRs -- 8-register (32-bit build) variant.
// Treats struct v3_gprs as a flat v3_reg_t array walked in parallel with
// reg_names[]; name order must match the struct's member order.
// NOTE(review): a second v3_print_GPRs definition follows below, so these two
// are presumably guarded by arch #ifdefs on elided lines -- confirm. The
// declarations of `reg_ptr`/`i` and closing braces are also elided.
328 void v3_print_GPRs(struct guest_info * info) {
329 struct v3_gprs * regs = &(info->vm_regs);
332 char * reg_names[] = { "RDI", "RSI", "RBP", "RSP", "RBX", "RDX", "RCX", "RAX", NULL};
334 reg_ptr= (v3_reg_t *)regs;
336 V3_Print("32 bit GPRs:\n");
338 for (i = 0; reg_names[i] != NULL; i++) {
339 V3_Print("\t%s=0x%p\n", reg_names[i], (void *)(addr_t)reg_ptr[i]);
// Debug dump of the guest GPRs -- 16-register (64-bit build) variant.
// NOTE(review): duplicate of the definition above plus R8-R15; the arch
// #ifdef that selects between them is on elided lines -- confirm. The
// declarations of `reg_ptr`/`i` and closing braces are also elided.
345 void v3_print_GPRs(struct guest_info * info) {
346 struct v3_gprs * regs = &(info->vm_regs);
349 char * reg_names[] = { "RDI", "RSI", "RBP", "RSP", "RBX", "RDX", "RCX", "RAX", \
350 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15", NULL};
352 reg_ptr = (v3_reg_t *)regs;
354 V3_Print("64 bit GPRs:\n");
356 for (i = 0; reg_names[i] != NULL; i++) {
357 V3_Print("\t%s=0x%p\n", reg_names[i], (void *)(addr_t)reg_ptr[i]);
364 #include <palacios/vmcs.h>
365 #include <palacios/vmcb.h>
// Hypercall handler (registered as GUEST_INFO_HCALL in v3_init_vm): dumps
// the full guest state, then the arch-specific control block (VMCB for SVM;
// the VMX branch body is elided from this listing).
// NOTE(review): elided listing -- the VMX dump call, the return statement,
// and closing braces are on lines not visible here.
366 static int info_hcall(struct guest_info * core, uint_t hcall_id, void * priv_data) {
367 v3_cpu_arch_t cpu_type = v3_get_cpu_type(V3_Get_CPU());
370 v3_print_guest_state(core);
374 if ((cpu_type == V3_SVM_CPU) || (cpu_type == V3_SVM_REV3_CPU)) {
376 PrintDebugVMCB((vmcb_t *)(core->vmm_data));
380 if ((cpu_type == V3_VMX_CPU) || (cpu_type == V3_VMX_EPT_CPU)) {
386 PrintError("Invalid CPU Type 0x%x\n", cpu_type);
396 #include <palacios/svm.h>
397 #include <palacios/svm_io.h>
398 #include <palacios/svm_msr.h>
402 #include <palacios/vmx.h>
403 #include <palacios/vmx_io.h>
404 #include <palacios/vmx_msr.h>
// VM-wide (not per-core) initialization: foreground-VM bookkeeping, optional
// telemetry, hypercall/cpuid maps, host events, interrupt routers, memory
// map/hooks, shadow-paging implementation, optional symbiotic support, and
// the arch-specific (SVM or VMX) I/O and MSR maps. Registers the
// GUEST_INFO_HCALL debug hypercall last.
// NOTE(review): elided listing -- error returns after the PrintErrors,
// several #ifdef/#endif markers, and the final return are on lines not
// visible here; return-value conventions cannot be confirmed from this view.
408 int v3_init_vm(struct v3_vm_info * vm) {
409 v3_cpu_arch_t cpu_type = v3_get_cpu_type(V3_Get_CPU());
// First VM initialized becomes the foreground (console-owning) VM.
412 if (v3_get_foreground_vm() == NULL) {
413 v3_set_foreground_vm(vm);
416 #ifdef CONFIG_TELEMETRY
417 v3_init_telemetry(vm);
420 v3_init_hypercall_map(vm);
423 v3_init_cpuid_map(vm);
424 v3_init_host_events(vm);
425 v3_init_intr_routers(vm);
427 // Initialize the memory map
428 if (v3_init_mem_map(vm) == -1) {
429 PrintError("Could not initialize shadow map\n");
433 v3_init_mem_hooks(vm);
435 if (v3_init_shdw_impl(vm) == -1) {
436 PrintError("VM initialization error in shadow implementaion\n");
442 #ifdef CONFIG_SYMBIOTIC
443 v3_init_symbiotic_vm(vm);
// Arch-specific setup: SVM and VMX each get their own I/O and MSR maps.
451 if ((cpu_type == V3_SVM_CPU) || (cpu_type == V3_SVM_REV3_CPU)) {
452 v3_init_svm_io_map(vm);
453 v3_init_svm_msr_map(vm);
458 if ((cpu_type == V3_VMX_CPU) || (cpu_type == V3_VMX_EPT_CPU)) {
459 v3_init_vmx_io_map(vm);
460 v3_init_vmx_msr_map(vm);
465 PrintError("Invalid CPU Type 0x%x\n", cpu_type);
// Expose the state-dump hypercall implemented by info_hcall() above.
469 v3_register_hypercall(vm, GUEST_INFO_HCALL, info_hcall, NULL);
471 V3_Print("GUEST_INFO_HCALL=%x\n", GUEST_INFO_HCALL);
// Per-core initialization: optional core telemetry, shadow-paging state (when
// shadow paging is configured for this core), interrupt controllers,
// exception state, the instruction decoder, optional symbiotic core state,
// and finally the arch-specific control block (VMCB or VMCS).
// NOTE(review): this definition continues past the end of the visible
// listing (the switch is mid-case at the last line); the surrounding
// `switch (cpu_type)`, its other case labels, error returns, and the final
// return are elided -- nothing below should be taken as complete.
476 int v3_init_core(struct guest_info * core) {
477 v3_cpu_arch_t cpu_type = v3_get_cpu_type(V3_Get_CPU());
478 struct v3_vm_info * vm = core->vm_info;
481 * Initialize the subsystem data strutures
483 #ifdef CONFIG_TELEMETRY
484 v3_init_core_telemetry(core);
487 if (core->shdw_pg_mode == SHADOW_PAGING) {
488 v3_init_shdw_pg_state(core);
492 v3_init_intr_controllers(core);
493 v3_init_exception_state(core);
495 v3_init_decoder(core);
498 #ifdef CONFIG_SYMBIOTIC
499 v3_init_symbiotic_core(core);
// Arch-specific control-block creation (cases for other CPU types elided).
508 case V3_SVM_REV3_CPU:
509 if (v3_init_svm_vmcb(core, vm->vm_class) == -1) {
510 PrintError("Error in SVM initialization\n");
518 if (v3_init_vmx_vmcs(core, vm->vm_class) == -1) {
519 PrintError("Error in VMX initialization\n");
525 PrintError("Invalid CPU Type 0x%x\n", cpu_type);