X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm_debug.c;h=85bd8d22300ee734d5f6a593a2d0f496b7ad5f83;hb=3e937e5468169b8183a22a90f6d07ff370abde0c;hp=8638f90ced429ba0e06af72ae00d5844e3f9283e;hpb=58e1e81a0ccea4cc7fdbfb714133ccefd235ae70;p=palacios.git diff --git a/palacios/src/palacios/vmm_debug.c b/palacios/src/palacios/vmm_debug.c index 8638f90..85bd8d2 100644 --- a/palacios/src/palacios/vmm_debug.c +++ b/palacios/src/palacios/vmm_debug.c @@ -152,10 +152,20 @@ void v3_print_segments(struct v3_segments * segs) { for (i = 0; seg_names[i] != NULL; i++) { - V3_Print(VM_NONE, VCORE_NONE, "\t%s: Sel=%x, base=%p, limit=%x (long_mode=%d, db=%d)\n", seg_names[i], seg_ptr[i].selector, - (void *)(addr_t)seg_ptr[i].base, seg_ptr[i].limit, - seg_ptr[i].long_mode, seg_ptr[i].db); - + V3_Print(VM_NONE, VCORE_NONE, "\t%s: selector=0x%x, base=%p, limit=0x%x type=0x%x system=0x%x dpl=0x%x present=0x%x avail=0x%x long_mode=0x%x db=0x%x granularity=0x%x unusable=0x%x\n", + seg_names[i], + seg_ptr[i].selector, + (void *)(addr_t)seg_ptr[i].base, + seg_ptr[i].limit, + seg_ptr[i].type, + seg_ptr[i].system, + seg_ptr[i].dpl, + seg_ptr[i].present, + seg_ptr[i].avail, + seg_ptr[i].long_mode, + seg_ptr[i].db, + seg_ptr[i].granularity, + seg_ptr[i].unusable); } } @@ -262,6 +272,11 @@ void v3_print_guest_state(struct guest_info * core) { } v3_print_GPRs(core); + v3_print_idt(core,core->segments.idtr.base); + v3_print_gdt(core,core->segments.gdtr.base); + v3_print_ldt(core,core->segments.ldtr.base); + v3_print_tss(core,core->segments.tr.base); + v3_print_mem_map(core->vm_info); v3_print_stack(core); @@ -391,17 +406,21 @@ void v3_print_backtrace(struct guest_info * core) { tmp_ptr += 3; // pass over symbol type if (sym_offset > rip_val) { - char * end_ptr = strchr(sym_ptr, '\n'); - - if (end_ptr) { - *end_ptr = 0; // null terminate symbol... 
+ if (sym_ptr) {
+ char * end_ptr = strchr(sym_ptr, '\n');
+
+ if (end_ptr) {
+ *end_ptr = 0; // null terminate symbol...
+ }
+ sym_name = sym_ptr;
+ } else {
+ sym_name = NULL;
}
-
- sym_name = sym_ptr;
break;
}
sym_ptr = tmp_ptr;
+
{
char * end_ptr2 = strchr(tmp_ptr, '\n');
@@ -457,6 +476,112 @@ void v3_print_GPRs(struct guest_info * core) {
}
}
+void v3_print_idt(struct guest_info * core, addr_t idtr_base) {
+ addr_t base_hva;
+
+ if (v3_get_vm_cpu_mode(core)!=LONG) {
+ V3_Print(core->vm_info, core, "= IDT ========\n");
+ V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
+ return;
+ }
+
+
+ if (core->mem_mode == PHYSICAL_MEM) {
+ v3_gpa_to_hva(core,
+ get_addr_linear(core, idtr_base, &(core->segments.cs)),
+ &base_hva);
+ PrintError(core->vm_info, core, "Kind of weird that we got here.... physical mem?\n");
+ } else if (core->mem_mode == VIRTUAL_MEM) {
+ v3_gva_to_hva(core,
+ get_addr_linear(core, idtr_base, &(core->segments.cs)),
+ &base_hva);
+ }
+
+ // SANITY CHECK
+ if (idtr_base != get_addr_linear(core, idtr_base, &(core->segments.cs))) {
+ PrintError(core->vm_info, core, "idtr base address != linear translation, might be something funky with cs\n");
+ }
+
+ if (!base_hva) {
+ PrintError(core->vm_info, core, "idtr address does not translate! 
skipping.\n");
+ return ;
+ }
+
+ int i;
+ char *types[16] = {" ILGL","aTSS16"," LDT","bTSS16","call16"," task","intr16","trap16",
+ " ILGL","aTSS32"," ILGL","bTSS32","call32"," ILGL","intr32","trap32"};
+
+ struct int_trap_gate_lgcy * entry;
+ entry = (struct int_trap_gate_lgcy *)base_hva;
+ V3_Print(core->vm_info, core, "= IDT ========\n");
+ V3_Print(core->vm_info, core, " # | hex | selector | si:ti:rpl | offset | type | dpl | s | p\n");
+ for (i = 0; i < NUM_IDT_ENTRIES; i++) {
+ uint32_t tmp = entry->selector;
+ struct segment_selector * seg = (struct segment_selector *)(&tmp);
+ V3_Print(core->vm_info, core, "%3d | %3x | %04x | %03x:%x:%x | %04x%04x | %s | %x | %x | %x\n", i, i,
+ entry->selector,
+ seg->index, seg->ti, seg->rpl,
+ entry->offset_hi, entry->offset_lo,
+ types[entry->type], entry->dpl, entry->s, entry->p);
+ entry++;
+ }
+}
+
+void v3_print_gdt(struct guest_info * core, addr_t gdtr_base) {
+ addr_t base_hva;
+
+ if (v3_get_vm_cpu_mode(core)!=LONG) {
+ V3_Print(core->vm_info, core, "= GDT ========\n");
+ V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
+ return;
+ }
+
+ if (core->mem_mode == PHYSICAL_MEM) {
+ v3_gpa_to_hva(core,
+ get_addr_linear(core, gdtr_base, &(core->segments.cs)),
+ &base_hva);
+ PrintError(core->vm_info, core, "Kind of weird that we got here.... physical mem?\n");
+ } else if (core->mem_mode == VIRTUAL_MEM) {
+ v3_gva_to_hva(core,
+ get_addr_linear(core, gdtr_base, &(core->segments.cs)),
+ &base_hva);
+ }
+
+ // SANITY CHECK
+ if (gdtr_base != get_addr_linear(core, gdtr_base, &(core->segments.cs))) {
+ PrintError(core->vm_info, core, "gdtr base address != linear translation, might be something funky with cs\n");
+ }
+
+ if (!base_hva) {
+ PrintError(core->vm_info, core, "gdtr address does not translate! 
skipping.\n");
+ return ;
+ }
+
+ int i;
+ char* cd[2] = {"data","code"};
+ // TODO: handle possibility of gate/segment descriptor
+
+ struct code_desc_lgcy * entry;
+ entry = (struct code_desc_lgcy *)base_hva;
+ V3_Print(core->vm_info, core, "= GDT ========\n");
+ V3_Print(core->vm_info, core, " # | hex | limit | base | c/d | dpl | p\n");
+ for (i = 0; i < NUM_GDT_ENTRIES; i++) {
+ V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %02x%02x%04x | %s | %x | %x\n", i, i,
+ entry->limit_hi, entry->limit_lo,
+ entry->base_hi, entry->base_mid, entry->base_lo,
+ cd[entry->one1], entry->dpl, entry->p);
+ entry++;
+ }
+}
+
+void v3_print_gp_error(struct guest_info * core, addr_t exit_info1) {
+ struct selector_error_code * error = (struct selector_error_code *)(&exit_info1);
+
+ V3_Print(core->vm_info, core, " selector index: %x, TI: %x, IDT: %x, EXT: %x (error=%llx)\n",
+ error->index, error->ti, error->idt, error->ext,
+ (unsigned long long)exit_info1);
+}
+
#elif __V3_64BIT__
void v3_print_GPRs(struct guest_info * core) {
@@ -475,4 +600,224 @@ void v3_print_GPRs(struct guest_info * core) {
}
}
+void v3_print_idt(struct guest_info * core, addr_t idtr_base) {
+ addr_t base_hva;
+
+ if (v3_get_vm_cpu_mode(core)!=LONG) {
+ V3_Print(core->vm_info, core, "= IDT ========\n");
+ V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
+ return;
+ }
+
+ if (core->mem_mode == PHYSICAL_MEM) {
+ v3_gpa_to_hva(core,
+ get_addr_linear(core, idtr_base, &(core->segments.cs)),
+ &base_hva);
+ } else if (core->mem_mode == VIRTUAL_MEM) {
+ v3_gva_to_hva(core,
+ get_addr_linear(core, idtr_base, &(core->segments.cs)),
+ &base_hva);
+ }
+
+ // SANITY CHECK
+ if (idtr_base != get_addr_linear(core, idtr_base, &(core->segments.cs))) {
+ PrintError(core->vm_info, core, "idtr base address != linear translation, might be something funky with cs\n");
+ }
+
+ if (!base_hva) {
+ PrintError(core->vm_info, core, "idtr address does not translate! 
skipping.\n"); + return ; + } + + int i; + char *types[16] = {"ILGL","ILGL"," LDT","ILGL","ILGL","ILGL","ILGL","ILGL","ILGL", + "aTSS","ILGL","bTSS","call","ILGL","intr","trap"}; + + struct int_trap_gate_long * entry; + entry = (struct int_trap_gate_long *)base_hva; + V3_Print(core->vm_info, core, "= IDT ========\n"); + V3_Print(core->vm_info, core, " # | hex | selector | si:ti:rpl | offset | type | dpl | s | r | p\n"); + for (i = 0; i < NUM_IDT_ENTRIES; i++) { + uint32_t tmp = entry->selector; + struct segment_selector * seg = (struct segment_selector *)(&tmp); + V3_Print(core->vm_info, core, "%3d | %3x | %04x | %03x:%x:%x | %08x%04x%04x | %s | %x | %x | %x | %x\n", i, i, + entry->selector, + seg->index, seg->ti, seg->rpl, + entry->offset_hi, entry->offset_mid, entry->offset_lo, + types[entry->type], entry->dpl, entry->s, + entry->s, entry->p); + entry++; + } +} + +void v3_print_gdt(struct guest_info * core, addr_t gdtr_base) { + addr_t base_hva; + + if (v3_get_vm_cpu_mode(core)!=LONG) { + V3_Print(core->vm_info, core, "= GDT ========\n"); + V3_Print(core->vm_info, core, "(currently only supported in long mode)\n"); + return; + } + + if (core->mem_mode == PHYSICAL_MEM) { + v3_gpa_to_hva(core, + get_addr_linear(core, gdtr_base, &(core->segments.cs)), + &base_hva); + } else if (core->mem_mode == VIRTUAL_MEM) { + v3_gva_to_hva(core, + get_addr_linear(core, gdtr_base, &(core->segments.cs)), + &base_hva); + } + + // SANITY CHECK + if (gdtr_base != get_addr_linear(core, gdtr_base, &(core->segments.cs))) { + PrintError(core->vm_info, core, "gdtr base address != linear translation, might be something funky with cs\n"); + } + + if (!base_hva) { + PrintError(core->vm_info, core, "gdtr address does not translate! 
skipping.\n"); + return ; + } + + int i; + char* cd[2] = {"data","code"}; + // TODO: handle possibility of gate/segment descriptor + + struct code_desc_long * entry; + entry = (struct code_desc_long *)base_hva; + V3_Print(core->vm_info, core, "= GDT ========\n"); + V3_Print(core->vm_info, core, " # | hex | limit | base | c/d | dpl | p\n"); + for (i = 0; i < NUM_GDT_ENTRIES; i++) { + V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %02x%02x%04x | %s | %x | %x\n", i, i, + entry->limit_hi, entry->limit_lo, + entry->base_hi, entry->base_mid, entry->base_lo, + cd[entry->one1], entry->dpl, entry->p); + entry++; + } +} + +void v3_print_ldt(struct guest_info * core, addr_t ldtr_base) { + addr_t base_hva; + + if (v3_get_vm_cpu_mode(core)!=LONG) { + V3_Print(core->vm_info, core, "= LDT ========\n"); + V3_Print(core->vm_info, core, "(currently only supported in long mode)\n"); + return; + } + + V3_Print(core->vm_info, core, "= LDT ========\n"); + + if (ldtr_base == 0) { + V3_Print(core->vm_info, core, " (no LDT is installed)\n"); + return; + } + + if (core->mem_mode == PHYSICAL_MEM) { + v3_gpa_to_hva(core, + get_addr_linear(core, ldtr_base, &(core->segments.cs)), + &base_hva); + } else if (core->mem_mode == VIRTUAL_MEM) { + v3_gva_to_hva(core, + get_addr_linear(core, ldtr_base, &(core->segments.cs)), + &base_hva); + } + + // SANITY CHECK + if (ldtr_base != get_addr_linear(core, ldtr_base, &(core->segments.cs))) { + PrintError(core->vm_info, core, "ldtr base address != linear translation, might be something funky with cs\n"); + } + + if (!base_hva) { + PrintError(core->vm_info, core, "ldtr address does not translate! 
skipping.\n"); + return ; + } + + int i; + char* cd[2] = {"data","code"}; + // TODO: handle possibility of gate/segment descriptor + + struct code_desc_long * entry; + entry = (struct code_desc_long *)base_hva; + V3_Print(core->vm_info, core, " # | hex | limit | base | c/d | dpl | p\n"); + for (i = 0; i < NUM_LDT_ENTRIES; i++) { + V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %02x%02x%04x | %s | %x | %x\n", i, i, + entry->limit_hi, entry->limit_lo, + entry->base_hi, entry->base_mid, entry->base_lo, + cd[entry->one1], entry->dpl, entry->p); + entry++; + } +} + +void v3_print_tss(struct guest_info * core, addr_t tr_base) { + addr_t base_hva; + struct tss_long *t; + + if (v3_get_vm_cpu_mode(core)!=LONG) { + V3_Print(core->vm_info, core, "= TSS ========\n"); + V3_Print(core->vm_info, core, "(currently only supported in long mode)\n"); + return; + } + + V3_Print(core->vm_info, core, "= TSS ========\n"); + + if (tr_base == 0) { + V3_Print(core->vm_info, core, " (no TSS is installed)\n"); + return; + } + + if (core->mem_mode == PHYSICAL_MEM) { + v3_gpa_to_hva(core, + get_addr_linear(core, tr_base, &(core->segments.cs)), + &base_hva); + } else if (core->mem_mode == VIRTUAL_MEM) { + v3_gva_to_hva(core, + get_addr_linear(core, tr_base, &(core->segments.cs)), + &base_hva); + } + + // SANITY CHECK + if (tr_base != get_addr_linear(core, tr_base, &(core->segments.cs))) { + PrintError(core->vm_info, core, "tr base address != linear translation, might be something funky with cs\n"); + } + + if (!base_hva) { + PrintError(core->vm_info, core, "tr address does not translate! 
skipping.\n"); + return ; + } + + t=(struct tss_long*)base_hva; + + V3_Print(core->vm_info, core," res1 : 0x%llx\n", (uint64_t) t->res1); + V3_Print(core->vm_info, core," rsp0 : 0x%llx\n", t->rsp0); + V3_Print(core->vm_info, core," rsp1 : 0x%llx\n", t->rsp1); + V3_Print(core->vm_info, core," rsp2 : 0x%llx\n", t->rsp2); + V3_Print(core->vm_info, core," res2 : 0x%llx\n", t->res2); + V3_Print(core->vm_info, core," ist1 : 0x%llx\n", t->ist1); + V3_Print(core->vm_info, core," ist2 : 0x%llx\n", t->ist2); + V3_Print(core->vm_info, core," ist3 : 0x%llx\n", t->ist3); + V3_Print(core->vm_info, core," ist4 : 0x%llx\n", t->ist4); + V3_Print(core->vm_info, core," ist5 : 0x%llx\n", t->ist5); + V3_Print(core->vm_info, core," ist6 : 0x%llx\n", t->ist6); + V3_Print(core->vm_info, core," ist7 : 0x%llx\n", t->ist7); + V3_Print(core->vm_info, core," res3 : 0x%llx\n", t->res3); + V3_Print(core->vm_info, core," res4 : 0x%llx\n", (uint64_t) t->res4); + V3_Print(core->vm_info, core," iomap_base : 0x%llx\n", (uint64_t) t->iomap_base); + V3_Print(core->vm_info, core," (following io permission bitmap not currently printed)\n"); + +} + +void v3_print_gp_error(struct guest_info * core, addr_t exit_info1) { + struct selector_error_code * error = (struct selector_error_code *)(&exit_info1); + + if (v3_get_vm_cpu_mode(core)!=LONG) { + V3_Print(core->vm_info, core, "= IDT ========\n"); + V3_Print(core->vm_info, core, "(currently only supported in long mode)\n"); + return; + } + + V3_Print(core->vm_info, core, " selector index: %x, TI: %x, IDT: %x, EXT: %x (error=%llx)\n", + error->index, error->ti, error->idt, error->ext, + (unsigned long long)exit_info1); +} + #endif