v3_lower_barrier(core->vm_info);
break;
+ case PRINT_ALL:
+ v3_raise_barrier(core->vm_info, NULL);
+
+#ifdef V3_CONFIG_TELEMETRY
+ v3_print_core_telemetry(core);
+#endif
+ v3_print_guest_state(core);
+ v3_print_arch_state(core);
+ v3_print_stack(core);
+ v3_print_backtrace(core);
+
+ v3_lower_barrier(core->vm_info);
+ break;
+
}
return 0;
static int evt_handler(struct v3_vm_info * vm, struct v3_debug_event * evt, void * priv_data) {
- V3_Print(vm, VCORE_NONE,"Debug Event Handler for core %d\n", evt->core_id);
+ V3_Print(vm, VCORE_NONE,"Debug Event Handler for core %d cmd=%x\n", evt->core_id, evt->cmd);
if (evt->core_id == -1) {
int i = 0;
for (i = 0; seg_names[i] != NULL; i++) {
- V3_Print(VM_NONE, VCORE_NONE, "\t%s: Sel=%x, base=%p, limit=%x (long_mode=%d, db=%d)\n", seg_names[i], seg_ptr[i].selector,
- (void *)(addr_t)seg_ptr[i].base, seg_ptr[i].limit,
- seg_ptr[i].long_mode, seg_ptr[i].db);
-
+ V3_Print(VM_NONE, VCORE_NONE, "\t%s: selector=0x%x, base=%p, limit=0x%x type=0x%x system=0x%x dpl=0x%x present=0x%x avail=0x%x long_mode=0x%x db=0x%x granularity=0x%x unusable=0x%x\n",
+ seg_names[i],
+ seg_ptr[i].selector,
+ (void *)(addr_t)seg_ptr[i].base,
+ seg_ptr[i].limit,
+ seg_ptr[i].type,
+ seg_ptr[i].system,
+ seg_ptr[i].dpl,
+ seg_ptr[i].present,
+ seg_ptr[i].avail,
+ seg_ptr[i].long_mode,
+ seg_ptr[i].db,
+ seg_ptr[i].granularity,
+ seg_ptr[i].unusable);
}
}
}
v3_print_GPRs(core);
+ v3_print_idt(core,core->segments.idtr.base);
+ v3_print_gdt(core,core->segments.gdtr.base);
+ v3_print_ldt(core,core->segments.ldtr.base);
+ v3_print_tss(core,core->segments.tr.base);
+
v3_print_mem_map(core->vm_info);
v3_print_stack(core);
tmp_ptr += 3; // pass over symbol type
if (sym_offset > rip_val) {
- char * end_ptr = strchr(sym_ptr, '\n');
-
- if (end_ptr) {
- *end_ptr = 0; // null terminate symbol...
+ if (sym_ptr) {
+ char * end_ptr = strchr(sym_ptr, '\n');
+
+ if (end_ptr) {
+ *end_ptr = 0; // null terminate symbol...
+ }
+ sym_name = sym_ptr;
+ } else {
+ sym_name = NULL;
}
-
- sym_name = sym_ptr;
break;
}
sym_ptr = tmp_ptr;
+
{
char * end_ptr2 = strchr(tmp_ptr, '\n');
}
}
+/* Dump the guest IDT pointed to by idtr_base, decoding each entry with the
+ * legacy (16/32-bit) interrupt/trap gate layout.
+ * NOTE(review): the guard below insists on long mode even though legacy gate
+ * structures are decoded here -- confirm against the 64-bit variant. */
+void v3_print_idt(struct guest_info * core, addr_t idtr_base) {
+    addr_t base_hva;
+
+    if (v3_get_vm_cpu_mode(core)!=LONG) {
+        V3_Print(core->vm_info, core, "= IDT ========\n");
+        V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
+        return;
+    }
+
+    /* Translate the guest IDT base to a host virtual address. */
+    if (core->mem_mode == PHYSICAL_MEM) {
+        if (v3_gpa_to_hva(core,
+                          get_addr_linear(core, idtr_base, &(core->segments.cs)),
+                          &base_hva)) {
+            PrintError(core->vm_info, core, "Cannot translate address\n");
+            return;
+        }
+        PrintError(core->vm_info, core, "Kind of weird that we got here.... physical mem?\n");
+    } else if (core->mem_mode == VIRTUAL_MEM) {
+        if (v3_gva_to_hva(core,
+                          get_addr_linear(core, idtr_base, &(core->segments.cs)),
+                          &base_hva)) {
+            PrintError(core->vm_info, core, "Cannot translate address\n");
+            return;
+        }
+    }
+
+    // SANITY CHECK: a flat cs should leave the base unchanged
+    if (idtr_base != get_addr_linear(core, idtr_base, &(core->segments.cs))) {
+        PrintError(core->vm_info, core, "idtr base address != linear translation, might be something funky with cs\n");
+    }
+
+    if (!base_hva) {
+        // BUGFIX: a missing ',' after 'core' made this call a syntax error
+        PrintError(core->vm_info, core, "idtr address does not translate! skipping.\n");
+        return ;
+    }
+
+    int i;
+    char *types[16] = {" ILGL","aTSS16"," LDT","bTSS16","call16"," task","intr16","trap16",
+                       " ILGL","aTSS32"," ILGL","bTSS32","call32"," ILGL","intr32","trap32"};
+
+    struct int_trap_gate_lgcy * entry;
+    entry = (struct int_trap_gate_lgcy *)base_hva;
+    V3_Print(core->vm_info, core, "= IDT ========\n");
+    V3_Print(core->vm_info, core, " # | hex | selector | si:ti:rpl | offset | type | dpl | s | p\n");
+    for (i = 0; i < NUM_IDT_ENTRIES; i++) {
+        uint32_t tmp = entry->selector;
+        struct segment_selector * seg = (struct segment_selector *)(&tmp);
+        // BUGFIX: the format string had one more "%x" than supplied arguments
+        // (undefined behavior); the header lists four fields after offset:
+        // type, dpl, s, p
+        V3_Print(core->vm_info, core, "%3d | %3x | %04x | %03x:%x:%x | %04x%04x | %s | %x | %x | %x\n", i, i,
+                 entry->selector,
+                 seg->index, seg->ti, seg->rpl,
+                 entry->offset_hi, entry->offset_lo,
+                 types[entry->type], entry->dpl, entry->s, entry->p);
+        entry++;
+    }
+}
+
+/* Dump the guest GDT pointed to by gdtr_base, one row per 8-byte slot.
+ * System descriptors (one2 == 0) are decoded as 16-byte long-mode
+ * descriptors and therefore consume two slots. */
+void v3_print_gdt(struct guest_info * core, addr_t gdtr_base) {
+    addr_t base_hva;
+
+    if (v3_get_vm_cpu_mode(core)!=LONG) {
+        V3_Print(core->vm_info, core, "= GDT ========\n");
+        V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
+        return;
+    }
+
+    /* Translate the guest GDT base to a host virtual address. */
+    if (core->mem_mode == PHYSICAL_MEM) {
+        if (v3_gpa_to_hva(core,
+                          get_addr_linear(core, gdtr_base, &(core->segments.cs)),
+                          &base_hva)) {
+            PrintError(core->vm_info, core, "Cannot translate address\n");
+            return;
+        }
+        PrintError(core->vm_info, core, "Kind of weird that we got here.... physical mem?\n");
+    } else if (core->mem_mode == VIRTUAL_MEM) {
+        if (v3_gva_to_hva(core,
+                          get_addr_linear(core, gdtr_base, &(core->segments.cs)),
+                          &base_hva)) {
+            PrintError(core->vm_info, core, "Cannot translate address\n");
+            return;
+        }
+    }
+
+    // SANITY CHECK: a flat cs should leave the base unchanged
+    if (gdtr_base != get_addr_linear(core, gdtr_base, &(core->segments.cs))) {
+        PrintError(core->vm_info, core, "gdtr base address != linear translation, might be something funky with cs\n");
+    }
+
+    if (!base_hva) {
+        // BUGFIX: a missing ',' after 'core' made this call a syntax error
+        PrintError(core->vm_info, core, "gdtr address does not translate! skipping.\n");
+        return ;
+    }
+
+    int i;
+    char* cd[2] = {"data","code"};
+    char * sys_types[16] = {"rsvd",
+                            "rsvd",
+                            "64bit LDT",
+                            "rsvd",
+                            "rsvd",
+                            "rsvd",
+                            "rsvd",
+                            "rsvd",
+                            "rsvd",
+                            "avail 64bit TSS",
+                            "rsvd",
+                            "busy 64bit TSS",
+                            "64bit call gate",
+                            "rsvd",
+                            "64bit int gate",
+                            "64bit trap gate"};
+
+    struct code_desc_long * entry;
+    entry = (struct code_desc_long *)base_hva;
+    V3_Print(core->vm_info, core, "= GDT ========\n");
+    V3_Print(core->vm_info, core, " # | hex | limit | base | c/d | dpl | p\n");
+    for (i = 0; i < (core->segments.gdtr.limit+1)/8; i++) {
+        if (entry->one2 == 0) { // this is a system descriptor
+            struct system_desc_long* sys = (struct system_desc_long*)entry;
+            V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %08x%02x%02x%04x | %20s | %x | %x\n", i, i,
+                     entry->limit_hi, entry->limit_lo,
+                     sys->base_hi, entry->base_hi, entry->base_mid, entry->base_lo,
+                     sys_types[sys->type], entry->dpl, entry->p);
+            entry += 2;
+            // BUGFIX: a 16-byte system descriptor consumes two 8-byte slots;
+            // without this extra increment the loop over-ran the table
+            // (the 64-bit variant of this function already does this)
+            i++;
+        } else {
+            V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %08x%02x%02x%04x | %20s | %x | %x\n", i, i,
+                     entry->limit_hi, entry->limit_lo,
+                     0, entry->base_hi, entry->base_mid, entry->base_lo,
+                     cd[entry->one1], entry->dpl, entry->p);
+            entry++;
+        }
+    }
+}
+
+void v3_print_gp_error(struct guest_info * core, addr_t exit_info1) {
+ struct selector_error_code * error = (struct selector_error_code *)(&exit_info1);
+
+ V3_Print(core->vm_info, core, " selector index: %x, TI: %x, IDT: %x, EXT: %x (error=%llx)\n",
+ error->index, error->ti, error->idt, error->ext,
+ (unsigned long long)exit_info1);
+}
+
#elif __V3_64BIT__
void v3_print_GPRs(struct guest_info * core) {
}
}
+/* Dump the guest IDT pointed to by idtr_base, decoding each entry as a
+ * long-mode (16-byte) interrupt/trap gate descriptor. */
+void v3_print_idt(struct guest_info * core, addr_t idtr_base) {
+    addr_t base_hva;
+
+    if (v3_get_vm_cpu_mode(core)!=LONG) {
+        V3_Print(core->vm_info, core, "= IDT ========\n");
+        V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
+        return;
+    }
+
+    /* Translate the guest IDT base to a host virtual address. */
+    if (core->mem_mode == PHYSICAL_MEM) {
+        if (v3_gpa_to_hva(core,
+                          get_addr_linear(core, idtr_base, &(core->segments.cs)),
+                          &base_hva)) {
+            PrintError(core->vm_info, core, "Cannot translate address\n");
+            return;
+        }
+    } else if (core->mem_mode == VIRTUAL_MEM) {
+        if (v3_gva_to_hva(core,
+                          get_addr_linear(core, idtr_base, &(core->segments.cs)),
+                          &base_hva)) {
+            PrintError(core->vm_info, core, "Cannot translate address\n");
+            return;
+        }
+    }
+
+    // SANITY CHECK: a flat cs should leave the base unchanged
+    if (idtr_base != get_addr_linear(core, idtr_base, &(core->segments.cs))) {
+        PrintError(core->vm_info, core, "idtr base address != linear translation, might be something funky with cs\n");
+    }
+
+    if (!base_hva) {
+        PrintError(core->vm_info, core, "idtr address does not translate! skipping.\n");
+        return ;
+    }
+
+    int i;
+    char *types[16] = {"ILGL","ILGL"," LDT","ILGL","ILGL","ILGL","ILGL","ILGL","ILGL",
+                       "aTSS","ILGL","bTSS","call","ILGL","intr","trap"};
+
+    struct int_trap_gate_long * entry;
+    entry = (struct int_trap_gate_long *)base_hva;
+    V3_Print(core->vm_info, core, "= IDT ========\n");
+    V3_Print(core->vm_info, core, " # | hex | selector | si:ti:rpl | offset | type | dpl | s | r | p\n");
+    for (i = 0; i < NUM_IDT_ENTRIES; i++) {
+        uint32_t tmp = entry->selector;
+        struct segment_selector * seg = (struct segment_selector *)(&tmp);
+        // NOTE(review): entry->s is passed twice below; the header's "r"
+        // column presumably should print a different field (ist/reserved?)
+        // -- confirm against struct int_trap_gate_long before changing.
+        V3_Print(core->vm_info, core, "%3d | %3x | %04x | %03x:%x:%x | %08x%04x%04x | %s | %x | %x | %x | %x\n", i, i,
+                 entry->selector,
+                 seg->index, seg->ti, seg->rpl,
+                 entry->offset_hi, entry->offset_mid, entry->offset_lo,
+                 types[entry->type], entry->dpl, entry->s,
+                 entry->s, entry->p);
+        entry++;
+    }
+}
+
+/* Dump the guest GDT installed at gdtr_base, one row per 8-byte slot.
+ * A descriptor with one2 == 0 (and a nonzero first quadword) is decoded as
+ * a 16-byte long-mode system descriptor and consumes two slots. */
+void v3_print_gdt(struct guest_info * core, addr_t gdtr_base) {
+    addr_t gdt_hva;
+
+    if (v3_get_vm_cpu_mode(core) != LONG) {
+        V3_Print(core->vm_info, core, "= GDT ========\n");
+        V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
+        return;
+    }
+
+    /* Translate the guest GDT base into a host virtual address. */
+    if (core->mem_mode == PHYSICAL_MEM) {
+        if (v3_gpa_to_hva(core,
+                          get_addr_linear(core, gdtr_base, &(core->segments.cs)),
+                          &gdt_hva)) {
+            PrintError(core->vm_info, core, "Cannot translate address\n");
+            return;
+        }
+    } else if (core->mem_mode == VIRTUAL_MEM) {
+        if (v3_gva_to_hva(core,
+                          get_addr_linear(core, gdtr_base, &(core->segments.cs)),
+                          &gdt_hva)) {
+            PrintError(core->vm_info, core, "Cannot translate address\n");
+            return;
+        }
+    }
+
+    /* Sanity check: a flat cs should leave the base unchanged. */
+    if (gdtr_base != get_addr_linear(core, gdtr_base, &(core->segments.cs))) {
+        PrintError(core->vm_info, core, "gdtr base address != linear translation, might be something funky with cs\n");
+    }
+
+    if (!gdt_hva) {
+        PrintError(core->vm_info, core, "gdtr address does not translate! skipping.\n");
+        return ;
+    }
+
+    char* cd[2] = {" data"," code"};
+    // TODO: handle possibility of gate/segment descriptor
+    char *types[16] = {" ILGL"," ILGL"," LDT64"," ILGL"," ILGL"," ILGL"," ILGL"," ILGL",
+                       " ILGL","aTSS64"," ILGL","bTSS64","call64"," ILGL","intr64","trap64"};
+
+    struct code_desc_long * desc = (struct code_desc_long *)gdt_hva;
+    int i;
+
+    V3_Print(core->vm_info, core, "= GDT ========\n");
+    V3_Print(core->vm_info, core, " # | hex | limit | base | c/d | dpl | p\n");
+    for (i = 0; i < (core->segments.gdtr.limit+1)/8; i++) {
+        int is_system = (desc->one2 == 0) && (*(uint64_t*)desc != 0);
+
+        if (!is_system) {
+            /* code/data segment: one 8-byte slot */
+            V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %08x%02x%02x%04x | %s | %x | %x\n", i, i,
+                     desc->limit_hi, desc->limit_lo,
+                     0, desc->base_hi, desc->base_mid, desc->base_lo,
+                     cd[desc->one1], desc->dpl, desc->p);
+            desc++;
+        } else {
+            struct system_desc_long * sys = (struct system_desc_long *)desc;
+            V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %08x%02x%02x%04x | %s | %x | %x\n", i, i,
+                     desc->limit_hi, desc->limit_lo,
+                     sys->base_hi, desc->base_hi, desc->base_mid, desc->base_lo,
+                     types[sys->type], desc->dpl, desc->p);
+            /* 16-byte descriptor: advance past both slots */
+            desc += 2;
+            i++;
+        }
+    }
+}
+
+/* Dump the guest LDT installed at ldtr_base, decoding each 8-byte slot as a
+ * long-mode code/data descriptor.  A zero base means no LDT is in use. */
+void v3_print_ldt(struct guest_info * core, addr_t ldtr_base) {
+    addr_t ldt_hva;
+
+    if (v3_get_vm_cpu_mode(core) != LONG) {
+        V3_Print(core->vm_info, core, "= LDT ========\n");
+        V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
+        return;
+    }
+
+    V3_Print(core->vm_info, core, "= LDT ========\n");
+
+    if (ldtr_base == 0) {
+        V3_Print(core->vm_info, core, " (no LDT is installed)\n");
+        return;
+    }
+
+    /* Translate the guest LDT base into a host virtual address. */
+    if (core->mem_mode == PHYSICAL_MEM) {
+        if (v3_gpa_to_hva(core,
+                          get_addr_linear(core, ldtr_base, &(core->segments.cs)),
+                          &ldt_hva)) {
+            PrintError(core->vm_info, core, "Cannot translate address\n");
+            return;
+        }
+    } else if (core->mem_mode == VIRTUAL_MEM) {
+        if (v3_gva_to_hva(core,
+                          get_addr_linear(core, ldtr_base, &(core->segments.cs)),
+                          &ldt_hva)) {
+            PrintError(core->vm_info, core, "Cannot translate address\n");
+            return;
+        }
+    }
+
+    /* Sanity check: a flat cs should leave the base unchanged. */
+    if (ldtr_base != get_addr_linear(core, ldtr_base, &(core->segments.cs))) {
+        PrintError(core->vm_info, core, "ldtr base address != linear translation, might be something funky with cs\n");
+    }
+
+    if (!ldt_hva) {
+        PrintError(core->vm_info, core, "ldtr address does not translate! skipping.\n");
+        return ;
+    }
+
+    char* cd[2] = {"data","code"};
+    // TODO: handle possibility of gate/segment descriptor
+    struct code_desc_long * desc = (struct code_desc_long *)ldt_hva;
+    int slot;
+
+    V3_Print(core->vm_info, core, " # | hex | limit | base | c/d | dpl | p\n");
+    for (slot = 0; slot < NUM_LDT_ENTRIES; slot++, desc++) {
+        V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %02x%02x%04x | %s | %x | %x\n", slot, slot,
+                 desc->limit_hi, desc->limit_lo,
+                 desc->base_hi, desc->base_mid, desc->base_lo,
+                 cd[desc->one1], desc->dpl, desc->p);
+    }
+}
+
+/* Dump the fields of the guest's long-mode TSS installed at tr_base.
+ * A zero base means no TSS is installed.  The trailing I/O permission
+ * bitmap is not printed. */
+void v3_print_tss(struct guest_info * core, addr_t tr_base) {
+    struct tss_long * tss;
+    addr_t tss_hva;
+
+    if (v3_get_vm_cpu_mode(core) != LONG) {
+        V3_Print(core->vm_info, core, "= TSS ========\n");
+        V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
+        return;
+    }
+
+    V3_Print(core->vm_info, core, "= TSS ========\n");
+
+    if (tr_base == 0) {
+        V3_Print(core->vm_info, core, " (no TSS is installed)\n");
+        return;
+    }
+
+    /* Translate the guest TSS base into a host virtual address. */
+    if (core->mem_mode == PHYSICAL_MEM) {
+        if (v3_gpa_to_hva(core,
+                          get_addr_linear(core, tr_base, &(core->segments.cs)),
+                          &tss_hva)) {
+            PrintError(core->vm_info, core, "Cannot translate address\n");
+            return;
+        }
+    } else if (core->mem_mode == VIRTUAL_MEM) {
+        if (v3_gva_to_hva(core,
+                          get_addr_linear(core, tr_base, &(core->segments.cs)),
+                          &tss_hva)) {
+            PrintError(core->vm_info, core, "Cannot translate address\n");
+            return;
+        }
+    }
+
+    /* Sanity check: a flat cs should leave the base unchanged. */
+    if (tr_base != get_addr_linear(core, tr_base, &(core->segments.cs))) {
+        PrintError(core->vm_info, core, "tr base address != linear translation, might be something funky with cs\n");
+    }
+
+    if (!tss_hva) {
+        PrintError(core->vm_info, core, "tr address does not translate! skipping.\n");
+        return ;
+    }
+
+    tss = (struct tss_long *)tss_hva;
+
+    V3_Print(core->vm_info, core," res1 : 0x%llx\n", (uint64_t) tss->res1);
+    V3_Print(core->vm_info, core," rsp0 : 0x%llx\n", tss->rsp0);
+    V3_Print(core->vm_info, core," rsp1 : 0x%llx\n", tss->rsp1);
+    V3_Print(core->vm_info, core," rsp2 : 0x%llx\n", tss->rsp2);
+    V3_Print(core->vm_info, core," res2 : 0x%llx\n", tss->res2);
+    V3_Print(core->vm_info, core," ist1 : 0x%llx\n", tss->ist1);
+    V3_Print(core->vm_info, core," ist2 : 0x%llx\n", tss->ist2);
+    V3_Print(core->vm_info, core," ist3 : 0x%llx\n", tss->ist3);
+    V3_Print(core->vm_info, core," ist4 : 0x%llx\n", tss->ist4);
+    V3_Print(core->vm_info, core," ist5 : 0x%llx\n", tss->ist5);
+    V3_Print(core->vm_info, core," ist6 : 0x%llx\n", tss->ist6);
+    V3_Print(core->vm_info, core," ist7 : 0x%llx\n", tss->ist7);
+    V3_Print(core->vm_info, core," res3 : 0x%llx\n", tss->res3);
+    V3_Print(core->vm_info, core," res4 : 0x%llx\n", (uint64_t) tss->res4);
+    V3_Print(core->vm_info, core," iomap_base : 0x%llx\n", (uint64_t) tss->iomap_base);
+    V3_Print(core->vm_info, core," (following io permission bitmap not currently printed)\n");
+
+}
+
+void v3_print_gp_error(struct guest_info * core, addr_t exit_info1) {
+ struct selector_error_code * error = (struct selector_error_code *)(&exit_info1);
+
+ if (v3_get_vm_cpu_mode(core)!=LONG) {
+ V3_Print(core->vm_info, core, "= IDT ========\n");
+ V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
+ return;
+ }
+
+ V3_Print(core->vm_info, core, " selector index: %x, TI: %x, IDT: %x, EXT: %x (error=%llx)\n",
+ error->index, error->ti, error->idt, error->ext,
+ (unsigned long long)exit_info1);
+}
+
#endif