2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
21 #include <palacios/vmm_debug.h>
22 #include <palacios/vmm.h>
23 #include <palacios/vmm_host_events.h>
24 #include <palacios/vm_guest.h>
25 #include <palacios/vmm_decoder.h>
26 #include <palacios/vm_guest_mem.h>
27 #include <palacios/vmm_config.h>
/* Debug command codes delivered to core_handler() via HOST_DEBUG_EVT events.
 * (Commands for other values — e.g. a stack dump — are presumably defined in
 * the lines missing from this listing; TODO confirm against the full file.) */
#define PRINT_TELEMETRY  1   // dump telemetry counters for a core
#define PRINT_CORE_STATE 2   // dump generic core state (RIP, segments, regs, ...)
#define PRINT_ARCH_STATE 3   // dump architecture-specific (SVM/VMX) state
#define PRINT_BACKTRACE  5   // walk the guest stack frames
#define PRINT_ALL 100 // Absolutely everything
#define PRINT_STATE 101 // telemetry, core state, arch state
/*
 * Execute one debug command (a PRINT_* code) against a single core.
 *
 * Each state dump is bracketed by v3_raise_barrier()/v3_lower_barrier() so
 * every other core of the VM is quiesced while this core's state is printed.
 *
 * NOTE(review): this listing is fragmentary — the switch (cmd) skeleton
 * (several case labels, break statements, #endif lines, the default case,
 * the return and closing braces) is not visible here. The case labels added
 * in comments below are inferred from the calls and must be confirmed
 * against the full source.
 */
static int core_handler(struct guest_info * core, uint32_t cmd) {

    /* PRINT_TELEMETRY: telemetry counters (only when compiled in) */
#ifdef V3_CONFIG_TELEMETRY
    v3_print_core_telemetry(core);

    case PRINT_CORE_STATE:
    v3_raise_barrier(core->vm_info, NULL);

    v3_print_guest_state(core);

    v3_lower_barrier(core->vm_info);

    case PRINT_ARCH_STATE:
    v3_raise_barrier(core->vm_info, NULL);

    v3_print_arch_state(core);

    v3_lower_barrier(core->vm_info);

    /* presumably a stack-dump command — the call between the barriers is
     * missing from this listing; TODO confirm */
    v3_raise_barrier(core->vm_info, NULL);

    v3_lower_barrier(core->vm_info);

    /* PRINT_BACKTRACE: guest stack walk under barrier */
    v3_raise_barrier(core->vm_info, NULL);

    v3_print_backtrace(core);

    v3_lower_barrier(core->vm_info);

    /* PRINT_STATE: telemetry + core state + arch state in one barrier */
    v3_raise_barrier(core->vm_info, NULL);

#ifdef V3_CONFIG_TELEMETRY
    v3_print_core_telemetry(core);

    v3_print_guest_state(core);
    v3_print_arch_state(core);

    v3_lower_barrier(core->vm_info);

    /* PRINT_ALL: absolutely everything, in one barrier */
    v3_raise_barrier(core->vm_info, NULL);

#ifdef V3_CONFIG_TELEMETRY
    v3_print_core_telemetry(core);

    v3_print_guest_state(core);
    v3_print_arch_state(core);
    v3_print_stack(core);
    v3_print_backtrace(core);

    v3_lower_barrier(core->vm_info);
/*
 * Host-event handler registered for HOST_DEBUG_EVT.
 * Routes the command to one core, or to every core when evt->core_id == -1
 * (broadcast; per-core return values are ignored in that case).
 *
 * NOTE(review): the declaration of the loop variable i, the braces closing
 * the broadcast path (and its return), and the function's closing brace are
 * not visible in this listing. Also, evt->core_id is not bounds-checked
 * against vm->num_cores before indexing vm->cores — verify in full source.
 */
static int evt_handler(struct v3_vm_info * vm, struct v3_debug_event * evt, void * priv_data) {

    V3_Print(vm, VCORE_NONE,"Debug Event Handler for core %d cmd=%x\n", evt->core_id, evt->cmd);

    if (evt->core_id == -1) {
        /* broadcast: run the command on every core */
        for (i = 0; i < vm->num_cores; i++) {
            core_handler(&(vm->cores[i]), evt->cmd);

    /* single-core case: dispatch directly and propagate the result */
    return core_handler(&vm->cores[evt->core_id], evt->cmd);
/*
 * Register evt_handler() so host-injected HOST_DEBUG_EVT events are
 * delivered to this VM's debug dispatcher.
 * NOTE(review): the tail of the v3_hook_host_event() call (private-data
 * argument, closing paren) and the function's return/closing brace are not
 * visible in this listing.
 */
int v3_init_vm_debugging(struct v3_vm_info * vm) {
    v3_hook_host_event(vm, HOST_DEBUG_EVT,
                       V3_HOST_EVENT_HANDLER(evt_handler),
/*
 * Print every segment register of a guest, one line per segment.
 *
 * Treats struct v3_segments as a flat array of struct v3_segment laid out in
 * exactly the order of seg_names[] — assumes the struct declares CS..TR in
 * that order; TODO confirm against vm_guest.h.
 *
 * NOTE(review): this listing is fragmentary — the declaration of i, several
 * printf arguments (selector, limit, type, system, dpl, present, avail, db)
 * referenced by the format string, and the closing braces are missing.
 */
void v3_print_segments(struct v3_segments * segs) {

    struct v3_segment * seg_ptr;

    /* flat-array view over the segments struct, indexed in seg_names order */
    seg_ptr=(struct v3_segment *)segs;

    char *seg_names[] = {"CS", "DS" , "ES", "FS", "GS", "SS" , "LDTR", "GDTR", "IDTR", "TR", NULL};
    V3_Print(VM_NONE, VCORE_NONE, "Segments\n");

    for (i = 0; seg_names[i] != NULL; i++) {

        V3_Print(VM_NONE, VCORE_NONE, "\t%s: selector=0x%x, base=%p, limit=0x%x type=0x%x system=0x%x dpl=0x%x present=0x%x avail=0x%x long_mode=0x%x db=0x%x granularity=0x%x unusable=0x%x\n",
                 (void *)(addr_t)seg_ptr[i].base,
                 seg_ptr[i].long_mode,
                 seg_ptr[i].granularity,
                 seg_ptr[i].unusable);
174 void v3_print_ctrl_regs(struct guest_info * core) {
175 struct v3_ctrl_regs * regs = &(core->ctrl_regs);
178 char * reg_names[] = {"CR0", "CR2", "CR3", "CR4", "CR8", "FLAGS", "EFER", NULL};
181 reg_ptr = (v3_reg_t *)regs;
183 V3_Print(core->vm_info, core,"Ctrl Regs:\n");
185 for (i = 0; reg_names[i] != NULL; i++) {
186 V3_Print(core->vm_info, core, "\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));
193 static int safe_gva_to_hva(struct guest_info * core, addr_t linear_addr, addr_t * host_addr) {
194 /* select the proper translation based on guest mode */
195 if (core->mem_mode == PHYSICAL_MEM) {
196 if (v3_gpa_to_hva(core, linear_addr, host_addr) == -1) return -1;
197 } else if (core->mem_mode == VIRTUAL_MEM) {
198 if (v3_gva_to_hva(core, linear_addr, host_addr) == -1) return -1;
203 static int v3_print_disassembly(struct guest_info * core) {
205 addr_t rip, rip_linear, rip_host;
207 /* we don't know where the instructions preceding RIP start, so we just take
208 * a guess and hope the instruction stream synced up with our disassembly
209 * some time before RIP; if it has not we correct RIP at that point
212 /* start disassembly 64 bytes before current RIP, continue 32 bytes after */
213 rip = (addr_t) core->rip - 64;
214 while ((int) (rip - core->rip) < 32) {
215 V3_Print(info->vm_info, info, "disassembly step\n");
217 /* always print RIP, even if the instructions before were bad */
218 if (!passed_rip && rip >= core->rip) {
219 if (rip != core->rip) {
220 V3_Print(info->vm_info, info, "***** bad disassembly up to this point *****\n");
226 /* look up host virtual address for this instruction */
227 rip_linear = get_addr_linear(core, rip, &(core->segments.cs));
228 if (safe_gva_to_hva(core, rip_linear, &rip_host) < 0) {
233 /* print disassembled instrcution (updates rip) */
234 if (v3_disasm(core, (void *) rip_host, &rip, rip == core->rip) < 0) {
/*
 * Print a comprehensive dump of one core's state: RIP (raw and linear), exit
 * count, interrupt/exception state, segments, control registers, shadow
 * paging registers when applicable, descriptor tables + TSS, the VM memory
 * map, and the guest stack.
 *
 * NOTE(review): some interior lines (closing braces and possibly extra
 * prints) are missing from this listing.
 */
void v3_print_guest_state(struct guest_info * core) {
    addr_t linear_addr = 0;

    V3_Print(core->vm_info, core, "RIP: %p\n", (void *)(addr_t)(core->rip));
    /* RIP translated through the CS base: the linear address actually fetched */
    linear_addr = get_addr_linear(core, core->rip, &(core->segments.cs));
    V3_Print(core->vm_info, core, "RIP Linear: %p\n", (void *)linear_addr);

    V3_Print(core->vm_info, core, "NumExits: %u\n", (uint32_t)core->num_exits);

    V3_Print(core->vm_info, core, "IRQ STATE: started=%d, pending=%d\n",
             core->intr_core_state.irq_started,
             core->intr_core_state.irq_pending);
    V3_Print(core->vm_info, core, "EXCP STATE: err_code_valid=%d, err_code=%x\n",
             core->excp_state.excp_error_code_valid,
             core->excp_state.excp_error_code);

    v3_print_segments(&(core->segments));
    v3_print_ctrl_regs(core);

    if (core->shdw_pg_mode == SHADOW_PAGING) {
        /* under shadow paging the guest's own CR0/CR3/EFER live here */
        V3_Print(core->vm_info, core, "Shadow Paging Guest Registers:\n");
        V3_Print(core->vm_info, core, "\tGuest CR0=%p\n", (void *)(addr_t)(core->shdw_pg_state.guest_cr0));
        V3_Print(core->vm_info, core, "\tGuest CR3=%p\n", (void *)(addr_t)(core->shdw_pg_state.guest_cr3));
        V3_Print(core->vm_info, core, "\tGuest EFER=%p\n", (void *)(addr_t)(core->shdw_pg_state.guest_efer.value));

    /* descriptor tables and TSS at the bases the guest currently has loaded */
    v3_print_idt(core,core->segments.idtr.base);
    v3_print_gdt(core,core->segments.gdtr.base);
    v3_print_ldt(core,core->segments.ldtr.base);
    v3_print_tss(core,core->segments.tr.base);

    v3_print_mem_map(core->vm_info);

    v3_print_stack(core);

    //  v3_print_disassembly(core);
/* Print architecture-specific (SVM/VMX) state for one core.
 * NOTE(review): only the opening line of this function survives in this
 * listing — the body is in the full source. */
void v3_print_arch_state(struct guest_info * core) {
294 void v3_print_guest_state_all(struct v3_vm_info * vm) {
297 V3_Print(vm, VCORE_NONE,"VM Core states for %s\n", vm->name);
299 for (i = 0; i < 80; i++) {
300 V3_Print(vm, VCORE_NONE, "-");
303 for (i = 0; i < vm->num_cores; i++) {
304 v3_print_guest_state(&vm->cores[i]);
307 for (i = 0; i < 80; i++) {
308 V3_Print(vm, VCORE_NONE, "-");
311 V3_Print(vm, VCORE_NONE, "\n");
/*
 * Dump the top 25 entries of the guest stack (RSP through SS), sized by the
 * guest CPU mode: 16-bit words in REAL mode, 64-bit in LONG mode, 32-bit
 * otherwise.
 *
 * NOTE(review): the declaration of i, early returns after the translation
 * errors, and closing braces are not visible in this listing.
 */
void v3_print_stack(struct guest_info * core) {
    addr_t linear_addr = 0;
    addr_t host_addr = 0;

    v3_cpu_mode_t cpu_mode = v3_get_vm_cpu_mode(core);

    /* RSP through the SS base gives the linear address of the stack top */
    linear_addr = get_addr_linear(core, core->vm_regs.rsp, &(core->segments.ss));

    V3_Print(core->vm_info, core, "Stack at %p:\n", (void *)linear_addr);

    if (core->mem_mode == PHYSICAL_MEM) {
        if (v3_gpa_to_hva(core, linear_addr, &host_addr) == -1) {
            PrintError(core->vm_info, core, "Could not translate Stack address\n");
    } else if (core->mem_mode == VIRTUAL_MEM) {
        if (v3_gva_to_hva(core, linear_addr, &host_addr) == -1) {
            PrintError(core->vm_info, core, "Could not translate Virtual Stack address\n");

    V3_Print(core->vm_info, core, "Host Address of rsp = 0x%p\n", (void *)host_addr);

    /* dump 25 stack slots starting at the current stack pointer */
    for (i = 0; i <= 24; i++) {

        if (cpu_mode == REAL) {
            /* NOTE(review): pointer arithmetic on a uint16_t* means this
             * advances 4 bytes per iteration, unlike the byte-offset
             * arithmetic in the branches below; presumably it should be
             * *(uint16_t *)(host_addr + (i * 2)) — confirm and fix. */
            V3_Print(core->vm_info, core, "\t0x%.4x\n", *((uint16_t *)host_addr + (i * 2)));
        } else if (cpu_mode == LONG) {
            V3_Print(core->vm_info, core, "\t%p\n", (void *)*(addr_t *)(host_addr + (i * 8)));

            /* protected/32-bit mode */
            V3_Print(core->vm_info, core, "\t0x%.8x\n", *(uint32_t *)(host_addr + (i * 4)));
/*
 * Walk up to 30 guest stack frames by chasing the saved-RBP chain, printing
 * each frame's saved RBP and return RIP. When a "System.map" file was
 * supplied in the VM configuration, each return address is resolved to the
 * nearest preceding kernel symbol.
 *
 * NOTE(review): this listing is fragmentary — declarations (i, gla_rbp,
 * hva_rbp, hva_rip, rip_val), early returns after translation errors, the
 * `if (system_map)` guard that presumably protects the symbol lookup, the
 * braces closing the lookup loop, and the sym_name arguments of the final
 * V3_Print calls are not visible here. As shown, system_map->data would be
 * dereferenced without a NULL check — verify the guard in the full source.
 */
void v3_print_backtrace(struct guest_info * core) {

    v3_cpu_mode_t cpu_mode = v3_get_vm_cpu_mode(core);
    struct v3_cfg_file * system_map = v3_cfg_get_file(core->vm_info, "System.map");

    V3_Print(core->vm_info, core, "Performing Backtrace for Core %d\n", core->vcpu_id);
    V3_Print(core->vm_info, core, "\tRSP=%p, RBP=%p\n", (void *)core->vm_regs.rsp, (void *)core->vm_regs.rbp);

    /* start from the current frame pointer, through the SS base */
    gla_rbp = get_addr_linear(core, core->vm_regs.rbp, &(core->segments.ss));

    /* bounded walk: at most 30 frames */
    for (i = 0; i < 30; i++) {

        char * sym_name = NULL;

        /* translate this frame's RBP to a host virtual address */
        if (core->mem_mode == PHYSICAL_MEM) {
            if (v3_gpa_to_hva(core, gla_rbp, &hva_rbp) == -1) {
                PrintError(core->vm_info, core, "Could not translate Stack address\n");
        } else if (core->mem_mode == VIRTUAL_MEM) {
            if (v3_gva_to_hva(core, gla_rbp, &hva_rbp) == -1) {
                PrintError(core->vm_info, core, "Could not translate Virtual Stack address\n");

        /* the saved return RIP sits one pointer-width above the saved RBP */
        hva_rip = hva_rbp + v3_get_addr_width(core);

        if (cpu_mode == REAL) {
            rip_val = (addr_t)*(uint16_t *)hva_rip;
        } else if (cpu_mode == LONG) {
            rip_val = (addr_t)*(uint64_t *)hva_rip;

            rip_val = (addr_t)*(uint32_t *)hva_rip;

        /* scan System.map ("<hex offset> <type> <name>\n" per line) for the
         * last symbol whose offset does not exceed rip_val */
        char * tmp_ptr = system_map->data;
        char * sym_ptr = NULL;
        uint64_t file_offset = 0;
        uint64_t sym_offset = 0;

        while (file_offset < system_map->size) {
            sym_offset = strtox(tmp_ptr, &tmp_ptr);

            tmp_ptr += 3; // pass over symbol type

            if (sym_offset > rip_val) {
                /* passed the target: the previous symbol is the match */
                char * end_ptr = strchr(sym_ptr, '\n');

                *end_ptr = 0; // null terminate symbol...

            char * end_ptr2 = strchr(tmp_ptr, '\n');

                tmp_ptr += strlen(tmp_ptr) + 1;

                tmp_ptr = end_ptr2 + 1;

        /* print this frame and follow the saved RBP to the next one */
        if (cpu_mode == REAL) {
            V3_Print(core->vm_info, core, "Next RBP=0x%.4x, RIP=0x%.4x (%s)\n",
                     *(uint16_t *)hva_rbp,*(uint16_t *)hva_rip,

            gla_rbp = *(uint16_t *)hva_rbp;
        } else if (cpu_mode == LONG) {
            V3_Print(core->vm_info, core, "Next RBP=%p, RIP=%p (%s)\n",
                     (void *)*(uint64_t *)hva_rbp, (void *)*(uint64_t *)hva_rip,

            gla_rbp = *(uint64_t *)hva_rbp;

            V3_Print(core->vm_info, core, "Next RBP=0x%.8x, RIP=0x%.8x (%s)\n",
                     *(uint32_t *)hva_rbp, *(uint32_t *)hva_rip,

            gla_rbp = *(uint32_t *)hva_rbp;
464 void v3_print_GPRs(struct guest_info * core) {
465 struct v3_gprs * regs = &(core->vm_regs);
468 char * reg_names[] = { "RDI", "RSI", "RBP", "RSP", "RBX", "RDX", "RCX", "RAX", NULL};
470 reg_ptr = (v3_reg_t *)regs;
472 V3_Print(info->vm_info, info, "32 bit GPRs:\n");
474 for (i = 0; reg_names[i] != NULL; i++) {
475 V3_Print(info->vm_info, info, "\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));
/*
 * Dump the guest IDT (legacy/32-bit build variant): one line per gate with
 * selector, offset, decoded type, DPL, S and P bits.
 *
 * NOTE(review): this listing is fragmentary — declarations (base_hva, i),
 * the early return after the long-mode check, the trailing arguments and
 * closing parens of the translation calls, several closing braces, and the
 * entry++ loop increment are not visible here.
 */
void v3_print_idt(struct guest_info * core, addr_t idtr_base) {

    /* only implemented for long-mode guests; bail out otherwise */
    if (v3_get_vm_cpu_mode(core)!=LONG) {
        V3_Print(core->vm_info, core, "= IDT ========\n");
        V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");

    /* translate the IDT base to a host virtual address */
    if (core->mem_mode == PHYSICAL_MEM) {
        if (v3_gpa_to_hva(core,
                          get_addr_linear(core, idtr_base, &(core->segments.cs)),
            PrintError(core->vm_info, core, "Cannot translate address\n");

        PrintError(core->vm_info, core, "Kind of weird that we got here.... physical mem?\n");
    } else if (core->mem_mode == VIRTUAL_MEM) {
        if (v3_gva_to_hva(core,
                          get_addr_linear(core, idtr_base, &(core->segments.cs)),
            PrintError(core->vm_info, core, "Cannot translate address\n");

    /* a non-flat CS would make the raw base value misleading */
    if (idtr_base != get_addr_linear(core, idtr_base, &(core->segments.cs))) {
        PrintError(core->vm_info, core, "idtr base address != linear translation, might be something funky with cs\n");

        /* NOTE(review): missing comma between `core` and the format string
         * below (compare the 64-bit variant at the end of this file) — this
         * will not compile as-is. */
        PrintError(core->vm_info, core "idtr address does not translate! skipping.\n");

    /* decode table for the 4-bit legacy gate type field */
    char *types[16] = {" ILGL","aTSS16"," LDT","bTSS16","call16"," task","intr16","trap16",
                       " ILGL","aTSS32"," ILGL","bTSS32","call32"," ILGL","intr32","trap32"};

    struct int_trap_gate_lgcy * entry;
    entry = (struct int_trap_gate_lgcy *)base_hva;
    V3_Print(core->vm_info, core, "= IDT ========\n");
    V3_Print(core->vm_info, core, " # | hex | selector | si:ti:rpl | offset | type | dpl | s | p\n");
    for (i = 0; i < NUM_IDT_ENTRIES; i++) {
        /* copy the selector so the bitfield view has aligned storage */
        uint32_t tmp = entry->selector;
        struct segment_selector * seg = (struct segment_selector *)(&tmp);
        V3_Print(core->vm_info, core, "%3d | %3x | %04x | %03x:%x:%x | %04x%04x | %s | %x | %x | %x | %x\n", i, i,
                 seg->index, seg->ti, seg->rpl,
                 entry->offset_hi, entry->offset_lo,
                 types[entry->type], entry->dpl, entry->s, entry->p);
/*
 * Dump the guest GDT (legacy/32-bit build variant): one line per descriptor
 * with limit, base, code/data flag, DPL and P bit.
 *
 * NOTE(review): this listing is fragmentary — declarations (base_hva, i),
 * the early return after the long-mode check, the trailing arguments of the
 * translation calls, several closing braces, and the entry++ loop increment
 * are not visible here.
 */
void v3_print_gdt(struct guest_info * core, addr_t gdtr_base) {

    /* only implemented for long-mode guests; bail out otherwise */
    if (v3_get_vm_cpu_mode(core)!=LONG) {
        V3_Print(core->vm_info, core, "= GDT ========\n");
        V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");

    /* translate the GDT base to a host virtual address */
    if (core->mem_mode == PHYSICAL_MEM) {
        if (v3_gpa_to_hva(core,
                          get_addr_linear(core, gdtr_base, &(core->segments.cs)),
            PrintError(core->vm_info, core, "Cannot translate address\n");

        PrintError(core->vm_info, core, "Kind of weird that we got here.... physical mem?\n");
    } else if (core->mem_mode == VIRTUAL_MEM) {
        if (v3_gva_to_hva(core,
                          get_addr_linear(core, gdtr_base, &(core->segments.cs)),
            PrintError(core->vm_info, core, "Cannot translate address\n");

    /* a non-flat CS would make the raw base value misleading */
    if (gdtr_base != get_addr_linear(core, gdtr_base, &(core->segments.cs))) {
        PrintError(core->vm_info, core, "gdtr base address != linear translation, might be something funky with cs\n");

        /* NOTE(review): missing comma between `core` and the format string
         * below (compare the 64-bit variant) — will not compile as-is. */
        PrintError(core->vm_info, core "gdtr address does not translate! skipping.\n");

    char* cd[2] = {"data","code"};
    // TODO: handle possibility of gate/segment descriptor

    struct code_desc_lgcy * entry;
    /* NOTE(review): cast uses the long-mode descriptor type but `entry` is
     * the legacy type — presumably should be (struct code_desc_lgcy *). */
    entry = (struct code_desc_long *)base_hva;
    V3_Print(core->vm_info, core, "= GDT ========\n");
    V3_Print(core->vm_info, core, " # | hex | limit | base | c/d | dpl | p\n");
    for (i = 0; i < NUM_GDT_ENTRIES; i++) {
        V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %02x%02x%04x | %s | %x | %x\n", i, i,
                 entry->limit_hi, entry->limit_lo,
                 entry->base_hi, entry->base_mid, entry->base_lo,
                 cd[entry->one1], entry->dpl, entry->p);
/*
 * Decode and print a #GP selector error code (legacy/32-bit build variant).
 * exit_info1 carries the raw error code pushed by the fault; the bitfield
 * overlay extracts the selector index and the TI/IDT/EXT flag bits.
 * NOTE(review): the function's closing brace is not visible in this listing.
 */
void v3_print_gp_error(struct guest_info * core, addr_t exit_info1) {
    struct selector_error_code * error = (struct selector_error_code *)(&exit_info1);

    V3_Print(core->vm_info, core, " selector index: %x, TI: %x, IDT: %x, EXT: %x (error=%llx)\n",
             error->index, error->ti, error->idt, error->ext,
             (unsigned long long)exit_info1);
/*
 * Print the general purpose registers for one core (64-bit build variant:
 * RDI..RAX plus R8..R15).
 *
 * Relies on struct v3_gprs being a flat sequence of v3_reg_t values in
 * exactly the order of reg_names[] — TODO confirm against vm_guest.h.
 *
 * NOTE(review): declarations of i and reg_ptr and the closing braces are not
 * visible in this listing.
 */
void v3_print_GPRs(struct guest_info * core) {
    struct v3_gprs * regs = &(core->vm_regs);

    char * reg_names[] = { "RDI", "RSI", "RBP", "RSP", "RBX", "RDX", "RCX", "RAX", \
                           "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15", NULL};

    reg_ptr = (v3_reg_t *)regs;

    V3_Print(core->vm_info, core, "64 bit GPRs:\n");

    for (i = 0; reg_names[i] != NULL; i++) {
        V3_Print(core->vm_info, core, "\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));
/*
 * Dump the guest IDT (long-mode/64-bit build variant): one line per 16-byte
 * gate with selector, 64-bit offset, decoded type, DPL, S and P bits.
 *
 * NOTE(review): this listing is fragmentary — declarations (base_hva, i),
 * the early return after the long-mode check, the trailing arguments of the
 * translation calls, the final printf arguments (IST and P bits), closing
 * braces, and the entry++ loop increment are not visible here.
 */
void v3_print_idt(struct guest_info * core, addr_t idtr_base) {

    /* only implemented for long-mode guests; bail out otherwise */
    if (v3_get_vm_cpu_mode(core)!=LONG) {
        V3_Print(core->vm_info, core, "= IDT ========\n");
        V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");

    /* translate the IDT base to a host virtual address */
    if (core->mem_mode == PHYSICAL_MEM) {
        if (v3_gpa_to_hva(core,
                          get_addr_linear(core, idtr_base, &(core->segments.cs)),
            PrintError(core->vm_info, core, "Cannot translate address\n");

    } else if (core->mem_mode == VIRTUAL_MEM) {
        if (v3_gva_to_hva(core,
                          get_addr_linear(core, idtr_base, &(core->segments.cs)),
            PrintError(core->vm_info, core, "Cannot translate address\n");

    /* a non-flat CS would make the raw base value misleading */
    if (idtr_base != get_addr_linear(core, idtr_base, &(core->segments.cs))) {
        PrintError(core->vm_info, core, "idtr base address != linear translation, might be something funky with cs\n");

        PrintError(core->vm_info, core, "idtr address does not translate! skipping.\n");

    /* decode table for the 4-bit long-mode gate type field */
    char *types[16] = {"ILGL","ILGL"," LDT","ILGL","ILGL","ILGL","ILGL","ILGL","ILGL",
                       "aTSS","ILGL","bTSS","call","ILGL","intr","trap"};

    struct int_trap_gate_long * entry;
    entry = (struct int_trap_gate_long *)base_hva;
    V3_Print(core->vm_info, core, "= IDT ========\n");
    V3_Print(core->vm_info, core, " # | hex | selector | si:ti:rpl | offset | type | dpl | s | r | p\n");
    for (i = 0; i < NUM_IDT_ENTRIES; i++) {
        /* copy the selector so the bitfield view has aligned storage */
        uint32_t tmp = entry->selector;
        struct segment_selector * seg = (struct segment_selector *)(&tmp);
        V3_Print(core->vm_info, core, "%3d | %3x | %04x | %03x:%x:%x | %08x%04x%04x | %s | %x | %x | %x | %x\n", i, i,
                 seg->index, seg->ti, seg->rpl,
                 entry->offset_hi, entry->offset_mid, entry->offset_lo,
                 types[entry->type], entry->dpl, entry->s,
/*
 * Dump the guest GDT (long-mode/64-bit build variant): one line per
 * descriptor with limit, base, code/data flag, DPL and P bit.
 *
 * NOTE(review): this listing is fragmentary — declarations (base_hva, i),
 * the early return after the long-mode check, the trailing arguments of the
 * translation calls, several closing braces, and the entry++ loop increment
 * are not visible here.
 */
void v3_print_gdt(struct guest_info * core, addr_t gdtr_base) {

    /* only implemented for long-mode guests; bail out otherwise */
    if (v3_get_vm_cpu_mode(core)!=LONG) {
        V3_Print(core->vm_info, core, "= GDT ========\n");
        V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");

    /* translate the GDT base to a host virtual address */
    if (core->mem_mode == PHYSICAL_MEM) {
        if (v3_gpa_to_hva(core,
                          get_addr_linear(core, gdtr_base, &(core->segments.cs)),
            PrintError(core->vm_info, core, "Cannot translate address\n");

    } else if (core->mem_mode == VIRTUAL_MEM) {
        if (v3_gva_to_hva(core,
                          get_addr_linear(core, gdtr_base, &(core->segments.cs)),
            PrintError(core->vm_info, core, "Cannot translate address\n");

    /* a non-flat CS would make the raw base value misleading */
    if (gdtr_base != get_addr_linear(core, gdtr_base, &(core->segments.cs))) {
        PrintError(core->vm_info, core, "gdtr base address != linear translation, might be something funky with cs\n");

        PrintError(core->vm_info, core, "gdtr address does not translate! skipping.\n");

    char* cd[2] = {"data","code"};
    // TODO: handle possibility of gate/segment descriptor

    struct code_desc_long * entry;
    entry = (struct code_desc_long *)base_hva;
    V3_Print(core->vm_info, core, "= GDT ========\n");
    V3_Print(core->vm_info, core, " # | hex | limit | base | c/d | dpl | p\n");
    for (i = 0; i < NUM_GDT_ENTRIES; i++) {
        V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %02x%02x%04x | %s | %x | %x\n", i, i,
                 entry->limit_hi, entry->limit_lo,
                 entry->base_hi, entry->base_mid, entry->base_lo,
                 cd[entry->one1], entry->dpl, entry->p);
/*
 * Dump the guest LDT (long-mode build): one line per descriptor with limit,
 * base, code/data flag, DPL and P bit. Prints a placeholder when no LDT is
 * installed (LDTR base of 0).
 *
 * NOTE(review): this listing is fragmentary — declarations (base_hva, i),
 * early returns, the trailing arguments of the translation calls, closing
 * braces, and the entry++ loop increment are not visible here.
 */
void v3_print_ldt(struct guest_info * core, addr_t ldtr_base) {

    /* only implemented for long-mode guests; bail out otherwise */
    if (v3_get_vm_cpu_mode(core)!=LONG) {
        V3_Print(core->vm_info, core, "= LDT ========\n");
        V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");

    V3_Print(core->vm_info, core, "= LDT ========\n");

    /* an LDTR base of 0 is treated as "no LDT present" */
    if (ldtr_base == 0) {
        V3_Print(core->vm_info, core, " (no LDT is installed)\n");

    /* translate the LDT base to a host virtual address */
    if (core->mem_mode == PHYSICAL_MEM) {
        if (v3_gpa_to_hva(core,
                          get_addr_linear(core, ldtr_base, &(core->segments.cs)),
            PrintError(core->vm_info, core, "Cannot translate address\n");

    } else if (core->mem_mode == VIRTUAL_MEM) {
        if (v3_gva_to_hva(core,
                          get_addr_linear(core, ldtr_base, &(core->segments.cs)),
            PrintError(core->vm_info, core, "Cannot translate address\n");

    /* a non-flat CS would make the raw base value misleading */
    if (ldtr_base != get_addr_linear(core, ldtr_base, &(core->segments.cs))) {
        PrintError(core->vm_info, core, "ldtr base address != linear translation, might be something funky with cs\n");

        PrintError(core->vm_info, core, "ldtr address does not translate! skipping.\n");

    char* cd[2] = {"data","code"};
    // TODO: handle possibility of gate/segment descriptor

    struct code_desc_long * entry;
    entry = (struct code_desc_long *)base_hva;
    V3_Print(core->vm_info, core, " # | hex | limit | base | c/d | dpl | p\n");
    for (i = 0; i < NUM_LDT_ENTRIES; i++) {
        V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %02x%02x%04x | %s | %x | %x\n", i, i,
                 entry->limit_hi, entry->limit_lo,
                 entry->base_hi, entry->base_mid, entry->base_lo,
                 cd[entry->one1], entry->dpl, entry->p);
/*
 * Dump the 64-bit TSS referenced by the TR base: reserved fields, the
 * privilege-level stack pointers (RSP0-2), the interrupt stack table entries
 * (IST1-7), and the I/O permission bitmap base.
 *
 * NOTE(review): this listing is fragmentary — declarations (base_hva,
 * struct tss_long * t), early returns, the tr_base == 0 check condition, the
 * trailing arguments of the translation calls, and closing braces are not
 * visible here.
 */
void v3_print_tss(struct guest_info * core, addr_t tr_base) {

    /* only implemented for long-mode guests; bail out otherwise */
    if (v3_get_vm_cpu_mode(core)!=LONG) {
        V3_Print(core->vm_info, core, "= TSS ========\n");
        V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");

    V3_Print(core->vm_info, core, "= TSS ========\n");

        /* presumably guarded by a tr_base == 0 check in the missing lines */
        V3_Print(core->vm_info, core, " (no TSS is installed)\n");

    /* translate the TSS base to a host virtual address */
    if (core->mem_mode == PHYSICAL_MEM) {
        if (v3_gpa_to_hva(core,
                          get_addr_linear(core, tr_base, &(core->segments.cs)),
            PrintError(core->vm_info, core, "Cannot translate address\n");

    } else if (core->mem_mode == VIRTUAL_MEM) {
        if (v3_gva_to_hva(core,
                          get_addr_linear(core, tr_base, &(core->segments.cs)),
            PrintError(core->vm_info, core, "Cannot translate address\n");

    /* a non-flat CS would make the raw base value misleading */
    if (tr_base != get_addr_linear(core, tr_base, &(core->segments.cs))) {
        PrintError(core->vm_info, core, "tr base address != linear translation, might be something funky with cs\n");

        PrintError(core->vm_info, core, "tr address does not translate! skipping.\n");

    /* overlay the 64-bit TSS layout on the translated base */
    t=(struct tss_long*)base_hva;

    V3_Print(core->vm_info, core," res1 : 0x%llx\n", (uint64_t) t->res1);
    V3_Print(core->vm_info, core," rsp0 : 0x%llx\n", t->rsp0);
    V3_Print(core->vm_info, core," rsp1 : 0x%llx\n", t->rsp1);
    V3_Print(core->vm_info, core," rsp2 : 0x%llx\n", t->rsp2);
    V3_Print(core->vm_info, core," res2 : 0x%llx\n", t->res2);
    V3_Print(core->vm_info, core," ist1 : 0x%llx\n", t->ist1);
    V3_Print(core->vm_info, core," ist2 : 0x%llx\n", t->ist2);
    V3_Print(core->vm_info, core," ist3 : 0x%llx\n", t->ist3);
    V3_Print(core->vm_info, core," ist4 : 0x%llx\n", t->ist4);
    V3_Print(core->vm_info, core," ist5 : 0x%llx\n", t->ist5);
    V3_Print(core->vm_info, core," ist6 : 0x%llx\n", t->ist6);
    V3_Print(core->vm_info, core," ist7 : 0x%llx\n", t->ist7);
    V3_Print(core->vm_info, core," res3 : 0x%llx\n", t->res3);
    V3_Print(core->vm_info, core," res4 : 0x%llx\n", (uint64_t) t->res4);
    V3_Print(core->vm_info, core," iomap_base : 0x%llx\n", (uint64_t) t->iomap_base);
    V3_Print(core->vm_info, core," (following io permission bitmap not currently printed)\n");
/*
 * Decode and print a #GP selector error code (long-mode build variant).
 * exit_info1 carries the raw error code pushed by the fault; the bitfield
 * overlay extracts the selector index and the TI/IDT/EXT flag bits.
 *
 * NOTE(review): the "= IDT ========" banner below looks copy-pasted from
 * v3_print_idt — presumably it should say GP/error; confirm in full source.
 * The early return after the mode check and the closing brace are not
 * visible in this listing.
 */
void v3_print_gp_error(struct guest_info * core, addr_t exit_info1) {
    struct selector_error_code * error = (struct selector_error_code *)(&exit_info1);

    /* only implemented for long-mode guests; bail out otherwise */
    if (v3_get_vm_cpu_mode(core)!=LONG) {
        V3_Print(core->vm_info, core, "= IDT ========\n");
        V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");

    V3_Print(core->vm_info, core, " selector index: %x, TI: %x, IDT: %x, EXT: %x (error=%llx)\n",
             error->index, error->ti, error->idt, error->ext,
             (unsigned long long)exit_info1);