/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
21 #include <palacios/vmm_debug.h>
22 #include <palacios/vmm.h>
23 #include <palacios/vmm_host_events.h>
24 #include <palacios/vm_guest.h>
25 #include <palacios/vmm_decoder.h>
26 #include <palacios/vm_guest_mem.h>
27 #include <palacios/vmm_config.h>
29 #define PRINT_TELEMETRY 1
30 #define PRINT_CORE_STATE 2
31 #define PRINT_ARCH_STATE 3
33 #define PRINT_BACKTRACE 5
36 #define PRINT_ALL 100 // Absolutely everything
37 #define PRINT_STATE 101 // telemetry, core state, arch state
42 static int core_handler(struct guest_info * core, uint32_t cmd) {
46 #ifdef V3_CONFIG_TELEMETRY
48 v3_print_core_telemetry(core);
52 case PRINT_CORE_STATE:
53 v3_raise_barrier(core->vm_info, NULL);
55 v3_print_guest_state(core);
57 v3_lower_barrier(core->vm_info);
59 case PRINT_ARCH_STATE:
60 v3_raise_barrier(core->vm_info, NULL);
62 v3_print_arch_state(core);
64 v3_lower_barrier(core->vm_info);
67 v3_raise_barrier(core->vm_info, NULL);
71 v3_lower_barrier(core->vm_info);
74 v3_raise_barrier(core->vm_info, NULL);
76 v3_print_backtrace(core);
78 v3_lower_barrier(core->vm_info);
82 v3_raise_barrier(core->vm_info, NULL);
84 #ifdef V3_CONFIG_TELEMETRY
85 v3_print_core_telemetry(core);
87 v3_print_guest_state(core);
88 v3_print_arch_state(core);
90 v3_lower_barrier(core->vm_info);
99 static int evt_handler(struct v3_vm_info * vm, struct v3_debug_event * evt, void * priv_data) {
101 V3_Print("Debug Event Handler for core %d\n", evt->core_id);
103 if (evt->core_id == -1) {
105 for (i = 0; i < vm->num_cores; i++) {
106 core_handler(&(vm->cores[i]), evt->cmd);
109 return core_handler(&vm->cores[evt->core_id], evt->cmd);
117 int v3_init_vm_debugging(struct v3_vm_info * vm) {
118 v3_hook_host_event(vm, HOST_DEBUG_EVT,
119 V3_HOST_EVENT_HANDLER(evt_handler),
130 void v3_print_segments(struct v3_segments * segs) {
132 struct v3_segment * seg_ptr;
134 seg_ptr=(struct v3_segment *)segs;
136 char *seg_names[] = {"CS", "DS" , "ES", "FS", "GS", "SS" , "LDTR", "GDTR", "IDTR", "TR", NULL};
137 V3_Print("Segments\n");
139 for (i = 0; seg_names[i] != NULL; i++) {
141 V3_Print("\t%s: Sel=%x, base=%p, limit=%x (long_mode=%d, db=%d)\n", seg_names[i], seg_ptr[i].selector,
142 (void *)(addr_t)seg_ptr[i].base, seg_ptr[i].limit,
143 seg_ptr[i].long_mode, seg_ptr[i].db);
150 void v3_print_ctrl_regs(struct guest_info * core) {
151 struct v3_ctrl_regs * regs = &(core->ctrl_regs);
154 char * reg_names[] = {"CR0", "CR2", "CR3", "CR4", "CR8", "FLAGS", "EFER", NULL};
157 reg_ptr = (v3_reg_t *)regs;
159 V3_Print("Ctrl Regs:\n");
161 for (i = 0; reg_names[i] != NULL; i++) {
162 V3_Print("\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));
169 static int safe_gva_to_hva(struct guest_info * core, addr_t linear_addr, addr_t * host_addr) {
170 /* select the proper translation based on guest mode */
171 if (core->mem_mode == PHYSICAL_MEM) {
172 if (v3_gpa_to_hva(core, linear_addr, host_addr) == -1) return -1;
173 } else if (core->mem_mode == VIRTUAL_MEM) {
174 if (v3_gva_to_hva(core, linear_addr, host_addr) == -1) return -1;
179 static int v3_print_disassembly(struct guest_info * core) {
181 addr_t rip, rip_linear, rip_host;
183 /* we don't know where the instructions preceding RIP start, so we just take
184 * a guess and hope the instruction stream synced up with our disassembly
185 * some time before RIP; if it has not we correct RIP at that point
188 /* start disassembly 64 bytes before current RIP, continue 32 bytes after */
189 rip = (addr_t) core->rip - 64;
190 while ((int) (rip - core->rip) < 32) {
191 V3_Print("disassembly step\n");
193 /* always print RIP, even if the instructions before were bad */
194 if (!passed_rip && rip >= core->rip) {
195 if (rip != core->rip) {
196 V3_Print("***** bad disassembly up to this point *****\n");
202 /* look up host virtual address for this instruction */
203 rip_linear = get_addr_linear(core, rip, &(core->segments.cs));
204 if (safe_gva_to_hva(core, rip_linear, &rip_host) < 0) {
209 /* print disassembled instrcution (updates rip) */
210 if (v3_disasm(core, (void *) rip_host, &rip, rip == core->rip) < 0) {
222 void v3_print_guest_state(struct guest_info * core) {
223 addr_t linear_addr = 0;
225 V3_Print("RIP: %p\n", (void *)(addr_t)(core->rip));
226 linear_addr = get_addr_linear(core, core->rip, &(core->segments.cs));
227 V3_Print("RIP Linear: %p\n", (void *)linear_addr);
229 V3_Print("NumExits: %u\n", (uint32_t)core->num_exits);
231 V3_Print("IRQ STATE: started=%d, pending=%d\n",
232 core->intr_core_state.irq_started,
233 core->intr_core_state.irq_pending);
234 V3_Print("EXCP STATE: err_code_valid=%d, err_code=%x\n",
235 core->excp_state.excp_error_code_valid,
236 core->excp_state.excp_error_code);
239 v3_print_segments(&(core->segments));
240 v3_print_ctrl_regs(core);
242 if (core->shdw_pg_mode == SHADOW_PAGING) {
243 V3_Print("Shadow Paging Guest Registers:\n");
244 V3_Print("\tGuest CR0=%p\n", (void *)(addr_t)(core->shdw_pg_state.guest_cr0));
245 V3_Print("\tGuest CR3=%p\n", (void *)(addr_t)(core->shdw_pg_state.guest_cr3));
246 V3_Print("\tGuest EFER=%p\n", (void *)(addr_t)(core->shdw_pg_state.guest_efer.value));
251 v3_print_mem_map(core->vm_info);
253 v3_print_stack(core);
255 // v3_print_disassembly(core);
259 void v3_print_arch_state(struct guest_info * core) {
265 void v3_print_guest_state_all(struct v3_vm_info * vm) {
268 V3_Print("VM Core states for %s\n", vm->name);
270 for (i = 0; i < 80; i++) {
274 for (i = 0; i < vm->num_cores; i++) {
275 v3_print_guest_state(&vm->cores[i]);
278 for (i = 0; i < 80; i++) {
287 void v3_print_stack(struct guest_info * core) {
288 addr_t linear_addr = 0;
289 addr_t host_addr = 0;
291 v3_cpu_mode_t cpu_mode = v3_get_vm_cpu_mode(core);
293 linear_addr = get_addr_linear(core, core->vm_regs.rsp, &(core->segments.ss));
295 V3_Print("Stack at %p:\n", (void *)linear_addr);
297 if (core->mem_mode == PHYSICAL_MEM) {
298 if (v3_gpa_to_hva(core, linear_addr, &host_addr) == -1) {
299 PrintError("Could not translate Stack address\n");
302 } else if (core->mem_mode == VIRTUAL_MEM) {
303 if (v3_gva_to_hva(core, linear_addr, &host_addr) == -1) {
304 PrintError("Could not translate Virtual Stack address\n");
309 V3_Print("Host Address of rsp = 0x%p\n", (void *)host_addr);
311 // We start i at one because the current stack pointer points to an unused stack element
312 for (i = 0; i <= 24; i++) {
314 if (cpu_mode == REAL) {
315 V3_Print("\t0x%.4x\n", *((uint16_t *)host_addr + (i * 2)));
316 } else if (cpu_mode == LONG) {
317 V3_Print("\t%p\n", (void *)*(addr_t *)(host_addr + (i * 8)));
320 V3_Print("\t0x%.8x\n", *(uint32_t *)(host_addr + (i * 4)));
327 void v3_print_backtrace(struct guest_info * core) {
330 v3_cpu_mode_t cpu_mode = v3_get_vm_cpu_mode(core);
331 struct v3_cfg_file * system_map = v3_cfg_get_file(core->vm_info, "System.map");
333 V3_Print("Performing Backtrace for Core %d\n", core->vcpu_id);
334 V3_Print("\tRSP=%p, RBP=%p\n", (void *)core->vm_regs.rsp, (void *)core->vm_regs.rbp);
336 gla_rbp = get_addr_linear(core, core->vm_regs.rbp, &(core->segments.ss));
339 for (i = 0; i < 30; i++) {
342 char * sym_name = NULL;
345 if (core->mem_mode == PHYSICAL_MEM) {
346 if (v3_gpa_to_hva(core, gla_rbp, &hva_rbp) == -1) {
347 PrintError("Could not translate Stack address\n");
350 } else if (core->mem_mode == VIRTUAL_MEM) {
351 if (v3_gva_to_hva(core, gla_rbp, &hva_rbp) == -1) {
352 PrintError("Could not translate Virtual Stack address\n");
358 hva_rip = hva_rbp + v3_get_addr_width(core);
360 if (cpu_mode == REAL) {
361 rip_val = (addr_t)*(uint16_t *)hva_rip;
362 } else if (cpu_mode == LONG) {
363 rip_val = (addr_t)*(uint64_t *)hva_rip;
365 rip_val = (addr_t)*(uint32_t *)hva_rip;
369 char * tmp_ptr = system_map->data;
370 char * sym_ptr = NULL;
371 uint64_t file_offset = 0;
372 uint64_t sym_offset = 0;
374 while (file_offset < system_map->size) {
375 sym_offset = strtox(tmp_ptr, &tmp_ptr);
377 tmp_ptr += 3; // pass over symbol type
379 if (sym_offset > rip_val) {
380 char * end_ptr = strchr(sym_ptr, '\n');
383 *end_ptr = 0; // null terminate symbol...
392 char * end_ptr2 = strchr(tmp_ptr, '\n');
395 tmp_ptr += strlen(tmp_ptr) + 1;
397 tmp_ptr = end_ptr2 + 1;
407 if (cpu_mode == REAL) {
408 V3_Print("Next RBP=0x%.4x, RIP=0x%.4x (%s)\n",
409 *(uint16_t *)hva_rbp,*(uint16_t *)hva_rip,
412 gla_rbp = *(uint16_t *)hva_rbp;
413 } else if (cpu_mode == LONG) {
414 V3_Print("Next RBP=%p, RIP=%p (%s)\n",
415 (void *)*(uint64_t *)hva_rbp, (void *)*(uint64_t *)hva_rip,
417 gla_rbp = *(uint64_t *)hva_rbp;
419 V3_Print("Next RBP=0x%.8x, RIP=0x%.8x (%s)\n",
420 *(uint32_t *)hva_rbp, *(uint32_t *)hva_rip,
422 gla_rbp = *(uint32_t *)hva_rbp;
431 void v3_print_GPRs(struct guest_info * core) {
432 struct v3_gprs * regs = &(core->vm_regs);
435 char * reg_names[] = { "RDI", "RSI", "RBP", "RSP", "RBX", "RDX", "RCX", "RAX", NULL};
437 reg_ptr = (v3_reg_t *)regs;
439 V3_Print("32 bit GPRs:\n");
441 for (i = 0; reg_names[i] != NULL; i++) {
442 V3_Print("\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));
448 void v3_print_GPRs(struct guest_info * core) {
449 struct v3_gprs * regs = &(core->vm_regs);
452 char * reg_names[] = { "RDI", "RSI", "RBP", "RSP", "RBX", "RDX", "RCX", "RAX", \
453 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15", NULL};
455 reg_ptr = (v3_reg_t *)regs;
457 V3_Print("64 bit GPRs:\n");
459 for (i = 0; reg_names[i] != NULL; i++) {
460 V3_Print("\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));