V3_Print("32 bit Ctrl Regs:\n");
for (i = 0; reg_names[i] != NULL; i++) {
- V3_Print("\t%s=0x%p\n", reg_names[i], (void *)(addr_t)reg_ptr[i]);
+ V3_Print("\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));
}
V3_Print("\tEFER=0x%p\n", (void*)(addr_t)(guest_state->efer));
}
-
+#if 0
+/* NOTE(review): compiled out. The body below appears to be a merge/extraction
+ * artifact — it fuses gva->hva translation logic with a disassembly loop and
+ * references `rip` / `passed_rip`, which are never declared in this function.
+ * Do not re-enable without reconstructing the original implementation. */
static int safe_gva_to_hva(struct guest_info * info, addr_t linear_addr, addr_t * host_addr) {
/* select the proper translation based on guest mode */
if (info->mem_mode == PHYSICAL_MEM) {
/* start disassembly 64 bytes before current RIP, continue 32 bytes after */
rip = (addr_t) info->rip - 64;
while ((int) (rip - info->rip) < 32) {
+ V3_Print("disassembly step\n");
+
/* always print RIP, even if the instructions before were bad */
if (!passed_rip && rip >= info->rip) {
if (rip != info->rip) {
rip++;
continue;
}
+
}
return 0;
}
#endif
void v3_print_guest_state(struct guest_info * info) {
addr_t linear_addr = 0;
v3_print_stack(info);
- v3_print_disassembly(info);
+ // v3_print_disassembly(info);
+ /* NOTE(review): disassembly dump disabled by this change — presumably
+ * because the disassembler path above was moved under #if 0; confirm
+ * before re-enabling. */
+}
+
+/* Print the full guest state of every core in `vm`, framed by 80-dash
+ * separator rows. Delegates per-core printing to v3_print_guest_state(). */
+void v3_print_guest_state_all(struct v3_vm_info * vm) {
+ int i = 0;
+
+ V3_Print("VM Core states for %s\n", vm->name);
+
+ for (i = 0; i < 80; i++) {
+ V3_Print("-");
+ }
+
+ /* Terminate the separator row; without this newline the first core's
+ * output would be appended to the dashes on the same line (the closing
+ * separator below is already followed by a newline). */
+ V3_Print("\n");
+
+ for (i = 0; i < vm->num_cores; i++) {
+ v3_print_guest_state(&vm->cores[i]);
+ }
+
+ for (i = 0; i < 80; i++) {
+ V3_Print("-");
+ }
+
+ V3_Print("\n");
+}
V3_Print("32 bit GPRs:\n");
for (i = 0; reg_names[i] != NULL; i++) {
- V3_Print("\t%s=0x%p\n", reg_names[i], (void *)(addr_t)reg_ptr[i]);
+ V3_Print("\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));
}
}
V3_Print("64 bit GPRs:\n");
for (i = 0; reg_names[i] != NULL; i++) {
- V3_Print("\t%s=0x%p\n", reg_names[i], (void *)(addr_t)reg_ptr[i]);
+ V3_Print("\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));
}
}
v3_init_cpuid_map(vm);
v3_init_host_events(vm);
v3_init_intr_routers(vm);
+ v3_init_ext_manager(vm);
// Initialize the memory map
if (v3_init_mem_map(vm) == -1) {