Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute:
  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way; see the example below.
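
For example, to track a release branch (the branch name below is only a placeholder; list the real branch names with git branch -r and substitute one of them):

  git branch -r
  git checkout --track -b <release-branch> origin/<release-branch>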


palacios/src/palacios/vmm_debug.c

/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */


#include <palacios/vmm_debug.h>
#include <palacios/vmm.h>
#include <palacios/vmm_host_events.h>
#include <palacios/vm_guest.h>
#include <palacios/vmm_decoder.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_config.h>

#define PRINT_TELEMETRY  1
#define PRINT_CORE_STATE 2
#define PRINT_ARCH_STATE 3
#define PRINT_STACK      4
#define PRINT_BACKTRACE  5


#define PRINT_ALL        100 // Absolutely everything
#define PRINT_STATE      101 // telemetry, core state, arch state


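/* Execute a single debug command on one core.  For commands that dump
 * state, the VM-wide barrier is raised so the other cores are quiesced
 * while the state is printed, then lowered again. */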
static int core_handler(struct guest_info * core, uint32_t cmd) {

    switch (cmd) {
#ifdef V3_CONFIG_TELEMETRY
        case PRINT_TELEMETRY:
            v3_print_core_telemetry(core);
            break;
#endif

        case PRINT_CORE_STATE:
            v3_raise_barrier(core->vm_info, NULL);

            v3_print_guest_state(core);

            v3_lower_barrier(core->vm_info);
            break;
        case PRINT_ARCH_STATE:
            v3_raise_barrier(core->vm_info, NULL);

            v3_print_arch_state(core);

            v3_lower_barrier(core->vm_info);
            break;
        case PRINT_STACK:
            v3_raise_barrier(core->vm_info, NULL);

            v3_print_stack(core);

            v3_lower_barrier(core->vm_info);
            break;
        case PRINT_BACKTRACE:
            v3_raise_barrier(core->vm_info, NULL);

            v3_print_backtrace(core);

            v3_lower_barrier(core->vm_info);
            break;

        case PRINT_STATE:
            v3_raise_barrier(core->vm_info, NULL);

#ifdef V3_CONFIG_TELEMETRY
            v3_print_core_telemetry(core);
#endif
            v3_print_guest_state(core);
            v3_print_arch_state(core);

            v3_lower_barrier(core->vm_info);
            break;

        case PRINT_ALL:
            v3_raise_barrier(core->vm_info, NULL);

#ifdef V3_CONFIG_TELEMETRY
            v3_print_core_telemetry(core);
#endif
            v3_print_guest_state(core);
            v3_print_arch_state(core);
            v3_print_stack(core);
            v3_print_backtrace(core);

            v3_lower_barrier(core->vm_info);
            break;

    }

    return 0;
}


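/* Host debug event handler.  A core_id of -1 broadcasts the command to
 * every core in the VM; otherwise the command runs only on the named core. */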
static int evt_handler(struct v3_vm_info * vm, struct v3_debug_event * evt, void * priv_data) {

    V3_Print(vm, VCORE_NONE, "Debug Event Handler for core %d cmd=%x\n", evt->core_id, evt->cmd);

    if (evt->core_id == -1) {
        int i = 0;
        for (i = 0; i < vm->num_cores; i++) {
            core_handler(&(vm->cores[i]), evt->cmd);
        }
    } else {
        return core_handler(&vm->cores[evt->core_id], evt->cmd);
    }

    return 0;
}


int v3_init_vm_debugging(struct v3_vm_info * vm) {
    v3_hook_host_event(vm, HOST_DEBUG_EVT,
                       V3_HOST_EVENT_HANDLER(evt_handler),
                       NULL);

    return 0;
}


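/* Print every guest segment register.  This walks struct v3_segments as an
 * array of struct v3_segment, so the seg_names table below must stay in the
 * same order as the fields of struct v3_segments. */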
void v3_print_segments(struct v3_segments * segs) {
    int i = 0;
    struct v3_segment * seg_ptr;
    char * seg_names[] = {"CS", "DS", "ES", "FS", "GS", "SS", "LDTR", "GDTR", "IDTR", "TR", NULL};

    seg_ptr = (struct v3_segment *)segs;

    V3_Print(VM_NONE, VCORE_NONE, "Segments\n");

    for (i = 0; seg_names[i] != NULL; i++) {

        V3_Print(VM_NONE, VCORE_NONE, "\t%s: selector=0x%x, base=%p, limit=0x%x type=0x%x system=0x%x dpl=0x%x present=0x%x avail=0x%x long_mode=0x%x db=0x%x granularity=0x%x unusable=0x%x\n",
                 seg_names[i],
                 seg_ptr[i].selector,
                 (void *)(addr_t)seg_ptr[i].base,
                 seg_ptr[i].limit,
                 seg_ptr[i].type,
                 seg_ptr[i].system,
                 seg_ptr[i].dpl,
                 seg_ptr[i].present,
                 seg_ptr[i].avail,
                 seg_ptr[i].long_mode,
                 seg_ptr[i].db,
                 seg_ptr[i].granularity,
                 seg_ptr[i].unusable);
    }
}


void v3_print_ctrl_regs(struct guest_info * core) {
    struct v3_ctrl_regs * regs = &(core->ctrl_regs);
    int i = 0;
    v3_reg_t * reg_ptr;
    char * reg_names[] = {"CR0", "CR2", "CR3", "CR4", "CR8", "FLAGS", "EFER", NULL};

    reg_ptr = (v3_reg_t *)regs;

    V3_Print(core->vm_info, core, "Ctrl Regs:\n");

    for (i = 0; reg_names[i] != NULL; i++) {
        V3_Print(core->vm_info, core, "\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));
    }
}

#if 0
static int safe_gva_to_hva(struct guest_info * core, addr_t linear_addr, addr_t * host_addr) {
    /* select the proper translation based on guest mode */
    if (core->mem_mode == PHYSICAL_MEM) {
        if (v3_gpa_to_hva(core, linear_addr, host_addr) == -1) return -1;
    } else if (core->mem_mode == VIRTUAL_MEM) {
        if (v3_gva_to_hva(core, linear_addr, host_addr) == -1) return -1;
    }
    return 0;
}

static int v3_print_disassembly(struct guest_info * core) {
    int passed_rip = 0;
    addr_t rip, rip_linear, rip_host;

    /* we don't know where the instructions preceding RIP start, so we just take
     * a guess and hope the instruction stream synced up with our disassembly
     * some time before RIP; if it has not we correct RIP at that point
     */

    /* start disassembly 64 bytes before current RIP, continue 32 bytes after */
    rip = (addr_t) core->rip - 64;
    while ((int) (rip - core->rip) < 32) {
        V3_Print(core->vm_info, core, "disassembly step\n");

        /* always print RIP, even if the instructions before were bad */
        if (!passed_rip && rip >= core->rip) {
            if (rip != core->rip) {
                V3_Print(core->vm_info, core, "***** bad disassembly up to this point *****\n");
                rip = core->rip;
            }
            passed_rip = 1;
        }

        /* look up host virtual address for this instruction */
        rip_linear = get_addr_linear(core, rip, &(core->segments.cs));
        if (safe_gva_to_hva(core, rip_linear, &rip_host) < 0) {
            rip++;
            continue;
        }

        /* print disassembled instruction (updates rip) */
        if (v3_disasm(core, (void *) rip_host, &rip, rip == core->rip) < 0) {
            rip++;
            continue;
        }

    }

    return 0;
}

#endif

void v3_print_guest_state(struct guest_info * core) {
    addr_t linear_addr = 0;

    V3_Print(core->vm_info, core, "RIP: %p\n", (void *)(addr_t)(core->rip));
    linear_addr = get_addr_linear(core, core->rip, &(core->segments.cs));
    V3_Print(core->vm_info, core, "RIP Linear: %p\n", (void *)linear_addr);

    V3_Print(core->vm_info, core, "NumExits: %u\n", (uint32_t)core->num_exits);

    V3_Print(core->vm_info, core, "IRQ STATE: started=%d, pending=%d\n",
             core->intr_core_state.irq_started,
             core->intr_core_state.irq_pending);
    V3_Print(core->vm_info, core, "EXCP STATE: err_code_valid=%d, err_code=%x\n",
             core->excp_state.excp_error_code_valid,
             core->excp_state.excp_error_code);

    v3_print_segments(&(core->segments));
    v3_print_ctrl_regs(core);

    if (core->shdw_pg_mode == SHADOW_PAGING) {
        V3_Print(core->vm_info, core, "Shadow Paging Guest Registers:\n");
        V3_Print(core->vm_info, core, "\tGuest CR0=%p\n", (void *)(addr_t)(core->shdw_pg_state.guest_cr0));
        V3_Print(core->vm_info, core, "\tGuest CR3=%p\n", (void *)(addr_t)(core->shdw_pg_state.guest_cr3));
        V3_Print(core->vm_info, core, "\tGuest EFER=%p\n", (void *)(addr_t)(core->shdw_pg_state.guest_efer.value));
        // CR4
    }
    v3_print_GPRs(core);

    v3_print_idt(core, core->segments.idtr.base);
    v3_print_gdt(core, core->segments.gdtr.base);
    v3_print_ldt(core, core->segments.ldtr.base);
    v3_print_tss(core, core->segments.tr.base);

    v3_print_mem_map(core->vm_info);

    v3_print_stack(core);

    //  v3_print_disassembly(core);
}


void v3_print_arch_state(struct guest_info * core) {

}


void v3_print_guest_state_all(struct v3_vm_info * vm) {
    int i = 0;

    V3_Print(vm, VCORE_NONE, "VM Core states for %s\n", vm->name);

    for (i = 0; i < 80; i++) {
        V3_Print(vm, VCORE_NONE, "-");
    }
    V3_Print(vm, VCORE_NONE, "\n");

    for (i = 0; i < vm->num_cores; i++) {
        v3_print_guest_state(&vm->cores[i]);
    }

    for (i = 0; i < 80; i++) {
        V3_Print(vm, VCORE_NONE, "-");
    }
    V3_Print(vm, VCORE_NONE, "\n");
}


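/* Dump the top of the guest stack: translate SS:RSP to a host virtual
 * address and print 25 entries, sized according to the guest CPU mode
 * (16, 32, or 64 bits per entry). */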
void v3_print_stack(struct guest_info * core) {
    addr_t linear_addr = 0;
    addr_t host_addr = 0;
    int i = 0;
    v3_cpu_mode_t cpu_mode = v3_get_vm_cpu_mode(core);

    linear_addr = get_addr_linear(core, core->vm_regs.rsp, &(core->segments.ss));

    V3_Print(core->vm_info, core, "Stack at %p:\n", (void *)linear_addr);

    if (core->mem_mode == PHYSICAL_MEM) {
        if (v3_gpa_to_hva(core, linear_addr, &host_addr) == -1) {
            PrintError(core->vm_info, core, "Could not translate Stack address\n");
            return;
        }
    } else if (core->mem_mode == VIRTUAL_MEM) {
        if (v3_gva_to_hva(core, linear_addr, &host_addr) == -1) {
            PrintError(core->vm_info, core, "Could not translate Virtual Stack address\n");
            return;
        }
    }

    V3_Print(core->vm_info, core, "Host Address of rsp = 0x%p\n", (void *)host_addr);

    // Print the first 25 stack entries; i == 0 is the element RSP currently points to
    for (i = 0; i <= 24; i++) {

        if (cpu_mode == REAL) {
            V3_Print(core->vm_info, core, "\t0x%.4x\n", *(uint16_t *)(host_addr + (i * 2)));
        } else if (cpu_mode == LONG) {
            V3_Print(core->vm_info, core, "\t%p\n", (void *)*(addr_t *)(host_addr + (i * 8)));
        } else {
            // 32 bit stacks...
            V3_Print(core->vm_info, core, "\t0x%.8x\n", *(uint32_t *)(host_addr + (i * 4)));
        }
    }

}


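/* Walk the guest's saved frame pointers (RBP chain) and print up to 30
 * return addresses.  If a "System.map" file was supplied in the VM
 * configuration, each return address is resolved to the nearest symbol. */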
void v3_print_backtrace(struct guest_info * core) {
    addr_t gla_rbp = 0;
    int i = 0;
    v3_cpu_mode_t cpu_mode = v3_get_vm_cpu_mode(core);
    struct v3_cfg_file * system_map = v3_cfg_get_file(core->vm_info, "System.map");

    V3_Print(core->vm_info, core, "Performing Backtrace for Core %d\n", core->vcpu_id);
    V3_Print(core->vm_info, core, "\tRSP=%p, RBP=%p\n", (void *)core->vm_regs.rsp, (void *)core->vm_regs.rbp);

    gla_rbp = get_addr_linear(core, core->vm_regs.rbp, &(core->segments.ss));


    for (i = 0; i < 30; i++) {
        addr_t hva_rbp = 0;
        addr_t hva_rip = 0;
        char * sym_name = NULL;
        addr_t rip_val = 0;

        if (core->mem_mode == PHYSICAL_MEM) {
            if (v3_gpa_to_hva(core, gla_rbp, &hva_rbp) == -1) {
                PrintError(core->vm_info, core, "Could not translate Stack address\n");
                return;
            }
        } else if (core->mem_mode == VIRTUAL_MEM) {
            if (v3_gva_to_hva(core, gla_rbp, &hva_rbp) == -1) {
                PrintError(core->vm_info, core, "Could not translate Virtual Stack address\n");
                return;
            }
        }


        hva_rip = hva_rbp + v3_get_addr_width(core);

        if (cpu_mode == REAL) {
            rip_val = (addr_t)*(uint16_t *)hva_rip;
        } else if (cpu_mode == LONG) {
            rip_val = (addr_t)*(uint64_t *)hva_rip;
        } else {
            rip_val = (addr_t)*(uint32_t *)hva_rip;
        }

        if (system_map) {
            char * tmp_ptr = system_map->data;
            char * sym_ptr = NULL;
            uint64_t file_offset = 0;
            uint64_t sym_offset = 0;

            while (file_offset < system_map->size) {
                sym_offset = strtox(tmp_ptr, &tmp_ptr);

                tmp_ptr += 3; // pass over symbol type

                if (sym_offset > rip_val) {
                    if (sym_ptr) {
                        char * end_ptr = strchr(sym_ptr, '\n');

                        if (end_ptr) {
                            *end_ptr = 0; // null terminate symbol...
                        }
                        sym_name = sym_ptr;
                    } else {
                        sym_name = NULL;
                    }
                    break;
                }

                sym_ptr = tmp_ptr;

                {
                    char * end_ptr2 = strchr(tmp_ptr, '\n');

                    if (!end_ptr2) {
                        tmp_ptr += strlen(tmp_ptr) + 1;
                    } else {
                        tmp_ptr = end_ptr2 + 1;
                    }
                }

                // track how far we have scanned so the loop terminates at end of file
                file_offset = tmp_ptr - (char *)system_map->data;
            }
        }

        if (!sym_name) {
            sym_name = "?";
        }

        if (cpu_mode == REAL) {
            V3_Print(core->vm_info, core, "Next RBP=0x%.4x, RIP=0x%.4x (%s)\n",
                     *(uint16_t *)hva_rbp, *(uint16_t *)hva_rip,
                     sym_name);

            gla_rbp = *(uint16_t *)hva_rbp;
        } else if (cpu_mode == LONG) {
            V3_Print(core->vm_info, core, "Next RBP=%p, RIP=%p (%s)\n",
                     (void *)*(uint64_t *)hva_rbp, (void *)*(uint64_t *)hva_rip,
                     sym_name);
            gla_rbp = *(uint64_t *)hva_rbp;
        } else {
            V3_Print(core->vm_info, core, "Next RBP=0x%.8x, RIP=0x%.8x (%s)\n",
                     *(uint32_t *)hva_rbp, *(uint32_t *)hva_rip,
                     sym_name);
            gla_rbp = *(uint32_t *)hva_rbp;
        }

    }
}


#ifdef __V3_32BIT__

void v3_print_GPRs(struct guest_info * core) {
    struct v3_gprs * regs = &(core->vm_regs);
    int i = 0;
    v3_reg_t * reg_ptr;
    char * reg_names[] = { "RDI", "RSI", "RBP", "RSP", "RBX", "RDX", "RCX", "RAX", NULL};

    reg_ptr = (v3_reg_t *)regs;

    V3_Print(core->vm_info, core, "32 bit GPRs:\n");

    for (i = 0; reg_names[i] != NULL; i++) {
        V3_Print(core->vm_info, core, "\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));
    }
}

void v3_print_idt(struct guest_info * core, addr_t idtr_base) {
    addr_t base_hva;

    if (v3_get_vm_cpu_mode(core) != LONG) {
        V3_Print(core->vm_info, core, "= IDT ========\n");
        V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
        return;
    }


    if (core->mem_mode == PHYSICAL_MEM) {
        if (v3_gpa_to_hva(core,
                          get_addr_linear(core, idtr_base, &(core->segments.cs)),
                          &base_hva)) {
            PrintError(core->vm_info, core, "Cannot translate address\n");
            return;
        }
        PrintError(core->vm_info, core, "Kind of weird that we got here.... physical mem?\n");
    } else if (core->mem_mode == VIRTUAL_MEM) {
        if (v3_gva_to_hva(core,
                          get_addr_linear(core, idtr_base, &(core->segments.cs)),
                          &base_hva)) {
            PrintError(core->vm_info, core, "Cannot translate address\n");
            return;
        }
    }

    // SANITY CHECK
    if (idtr_base != get_addr_linear(core, idtr_base, &(core->segments.cs))) {
        PrintError(core->vm_info, core, "idtr base address != linear translation, might be something funky with cs\n");
    }

    if (!base_hva) {
        PrintError(core->vm_info, core, "idtr address does not translate!  skipping.\n");
        return;
    }

    int i;
    char *types[16] = {"  ILGL","aTSS16","   LDT","bTSS16","call16","  task","intr16","trap16",
        "  ILGL","aTSS32","  ILGL","bTSS32","call32","  ILGL","intr32","trap32"};

    struct int_trap_gate_lgcy * entry;
    entry = (struct int_trap_gate_lgcy *)base_hva;
    V3_Print(core->vm_info, core, "= IDT ========\n");
    V3_Print(core->vm_info, core, "  # | hex | selector | si:ti:rpl |   offset | type | dpl | s | p\n");
    for (i = 0; i < NUM_IDT_ENTRIES; i++) {
        uint32_t tmp = entry->selector;
        struct segment_selector * seg = (struct segment_selector *)(&tmp);
        V3_Print(core->vm_info, core, "%3d | %3x |     %04x |   %03x:%x:%x | %04x%04x | %s |   %x | %x | %x\n", i, i,
                entry->selector,
                seg->index, seg->ti, seg->rpl,
                entry->offset_hi, entry->offset_lo,
                types[entry->type], entry->dpl, entry->s, entry->p);
        entry++;
    }
}

void v3_print_gdt(struct guest_info * core, addr_t gdtr_base) {
    addr_t base_hva;

    if (v3_get_vm_cpu_mode(core) != LONG) {
        V3_Print(core->vm_info, core, "= GDT ========\n");
        V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
        return;
    }

    if (core->mem_mode == PHYSICAL_MEM) {
        if (v3_gpa_to_hva(core,
                          get_addr_linear(core, gdtr_base, &(core->segments.cs)),
                          &base_hva)) {
            PrintError(core->vm_info, core, "Cannot translate address\n");
            return;
        }
        PrintError(core->vm_info, core, "Kind of weird that we got here.... physical mem?\n");
    } else if (core->mem_mode == VIRTUAL_MEM) {
        if (v3_gva_to_hva(core,
                          get_addr_linear(core, gdtr_base, &(core->segments.cs)),
                          &base_hva)) {
            PrintError(core->vm_info, core, "Cannot translate address\n");
            return;
        }
    }

    // SANITY CHECK
    if (gdtr_base != get_addr_linear(core, gdtr_base, &(core->segments.cs))) {
        PrintError(core->vm_info, core, "gdtr base address != linear translation, might be something funky with cs\n");
    }

    if (!base_hva) {
        PrintError(core->vm_info, core, "gdtr address does not translate!  skipping.\n");
        return;
    }

    int i;
    char* cd[2] = {"data","code"};
    // TODO: handle possibility of gate/segment descriptor

    struct code_desc_lgcy * entry;
    entry = (struct code_desc_lgcy *)base_hva;
    V3_Print(core->vm_info, core, "= GDT ========\n");
    V3_Print(core->vm_info, core, "  # | hex | limit |     base |  c/d | dpl | p\n");
    for (i = 0; i < NUM_GDT_ENTRIES; i++) {
        V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %02x%02x%04x | %s |   %x | %x\n", i, i,
                entry->limit_hi, entry->limit_lo,
                entry->base_hi, entry->base_mid, entry->base_lo,
                cd[entry->one1], entry->dpl, entry->p);
        entry++;
    }
}

void v3_print_gp_error(struct guest_info * core, addr_t exit_info1) {
    struct selector_error_code * error = (struct selector_error_code *)(&exit_info1);

    V3_Print(core->vm_info, core, "      selector index: %x, TI: %x, IDT: %x, EXT: %x (error=%llx)\n",
            error->index, error->ti, error->idt, error->ext,
            (unsigned long long)exit_info1);
}

#elif __V3_64BIT__

void v3_print_GPRs(struct guest_info * core) {
    struct v3_gprs * regs = &(core->vm_regs);
    int i = 0;
    v3_reg_t * reg_ptr;
    char * reg_names[] = { "RDI", "RSI", "RBP", "RSP", "RBX", "RDX", "RCX", "RAX",
                           "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15", NULL};

    reg_ptr = (v3_reg_t *)regs;

    V3_Print(core->vm_info, core, "64 bit GPRs:\n");

    for (i = 0; reg_names[i] != NULL; i++) {
        V3_Print(core->vm_info, core, "\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));
    }
}

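/* Dump the guest IDT (long mode only): translate idtr_base to a host
 * virtual address and print each 16-byte interrupt/trap gate descriptor. */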
void v3_print_idt(struct guest_info * core, addr_t idtr_base) {
    addr_t base_hva;

    if (v3_get_vm_cpu_mode(core) != LONG) {
        V3_Print(core->vm_info, core, "= IDT ========\n");
        V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
        return;
    }

    if (core->mem_mode == PHYSICAL_MEM) {
        if (v3_gpa_to_hva(core,
                          get_addr_linear(core, idtr_base, &(core->segments.cs)),
                          &base_hva)) {
            PrintError(core->vm_info, core, "Cannot translate address\n");
            return;
        }
    } else if (core->mem_mode == VIRTUAL_MEM) {
        if (v3_gva_to_hva(core,
                          get_addr_linear(core, idtr_base, &(core->segments.cs)),
                          &base_hva)) {
            PrintError(core->vm_info, core, "Cannot translate address\n");
            return;
        }
    }

    // SANITY CHECK
    if (idtr_base != get_addr_linear(core, idtr_base, &(core->segments.cs))) {
        PrintError(core->vm_info, core, "idtr base address != linear translation, might be something funky with cs\n");
    }

    if (!base_hva) {
        PrintError(core->vm_info, core, "idtr address does not translate!  skipping.\n");
        return;
    }

    int i;
    char *types[16] = {"ILGL","ILGL"," LDT","ILGL","ILGL","ILGL","ILGL","ILGL","ILGL",
        "aTSS","ILGL","bTSS","call","ILGL","intr","trap"};

    struct int_trap_gate_long * entry;
    entry = (struct int_trap_gate_long *)base_hva;
    V3_Print(core->vm_info, core, "= IDT ========\n");
    V3_Print(core->vm_info, core, "  # | hex | selector | si:ti:rpl |           offset | type | dpl | s | r | p\n");
    for (i = 0; i < NUM_IDT_ENTRIES; i++) {
        uint32_t tmp = entry->selector;
        struct segment_selector * seg = (struct segment_selector *)(&tmp);
        V3_Print(core->vm_info, core, "%3d | %3x |     %04x |   %03x:%x:%x | %08x%04x%04x | %s |   %x | %x | %x | %x\n", i, i,
                entry->selector,
                seg->index, seg->ti, seg->rpl,
                entry->offset_hi, entry->offset_mid, entry->offset_lo,
                types[entry->type], entry->dpl, entry->s,
                entry->s, entry->p);
        entry++;
    }
}

void v3_print_gdt(struct guest_info * core, addr_t gdtr_base) {
    addr_t base_hva;

    if (v3_get_vm_cpu_mode(core) != LONG) {
        V3_Print(core->vm_info, core, "= GDT ========\n");
        V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
        return;
    }

    if (core->mem_mode == PHYSICAL_MEM) {
        if (v3_gpa_to_hva(core,
                          get_addr_linear(core, gdtr_base, &(core->segments.cs)),
                          &base_hva)) {
            PrintError(core->vm_info, core, "Cannot translate address\n");
            return;
        }
    } else if (core->mem_mode == VIRTUAL_MEM) {
        if (v3_gva_to_hva(core,
                          get_addr_linear(core, gdtr_base, &(core->segments.cs)),
                          &base_hva)) {
            PrintError(core->vm_info, core, "Cannot translate address\n");
            return;
        }
    }

    // SANITY CHECK
    if (gdtr_base != get_addr_linear(core, gdtr_base, &(core->segments.cs))) {
        PrintError(core->vm_info, core, "gdtr base address != linear translation, might be something funky with cs\n");
    }

    if (!base_hva) {
        PrintError(core->vm_info, core, "gdtr address does not translate!  skipping.\n");
        return;
    }

    int i;
    char* cd[2] = {"data","code"};
    // TODO: handle possibility of gate/segment descriptor

    struct code_desc_long * entry;
    entry = (struct code_desc_long *)base_hva;
    V3_Print(core->vm_info, core, "= GDT ========\n");
    V3_Print(core->vm_info, core, "  # | hex | limit |     base |  c/d | dpl | p\n");
    for (i = 0; i < NUM_GDT_ENTRIES; i++) {
        V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %02x%02x%04x | %s |   %x | %x\n", i, i,
                entry->limit_hi, entry->limit_lo,
                entry->base_hi, entry->base_mid, entry->base_lo,
                cd[entry->one1], entry->dpl, entry->p);
        entry++;
    }
}

void v3_print_ldt(struct guest_info * core, addr_t ldtr_base) {
    addr_t base_hva;

    if (v3_get_vm_cpu_mode(core) != LONG) {
        V3_Print(core->vm_info, core, "= LDT ========\n");
        V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
        return;
    }

    V3_Print(core->vm_info, core, "= LDT ========\n");

    if (ldtr_base == 0) {
        V3_Print(core->vm_info, core, "        (no LDT is installed)\n");
        return;
    }

    if (core->mem_mode == PHYSICAL_MEM) {
        if (v3_gpa_to_hva(core,
                          get_addr_linear(core, ldtr_base, &(core->segments.cs)),
                          &base_hva)) {
            PrintError(core->vm_info, core, "Cannot translate address\n");
            return;
        }
    } else if (core->mem_mode == VIRTUAL_MEM) {
        if (v3_gva_to_hva(core,
                          get_addr_linear(core, ldtr_base, &(core->segments.cs)),
                          &base_hva)) {
            PrintError(core->vm_info, core, "Cannot translate address\n");
            return;
        }
    }

    // SANITY CHECK
    if (ldtr_base != get_addr_linear(core, ldtr_base, &(core->segments.cs))) {
        PrintError(core->vm_info, core, "ldtr base address != linear translation, might be something funky with cs\n");
    }

    if (!base_hva) {
        PrintError(core->vm_info, core, "ldtr address does not translate!  skipping.\n");
        return;
    }

    int i;
    char* cd[2] = {"data","code"};
    // TODO: handle possibility of gate/segment descriptor

    struct code_desc_long * entry;
    entry = (struct code_desc_long *)base_hva;
    V3_Print(core->vm_info, core, "  # | hex | limit |     base |  c/d | dpl | p\n");
    for (i = 0; i < NUM_LDT_ENTRIES; i++) {
        V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %02x%02x%04x | %s |   %x | %x\n", i, i,
                entry->limit_hi, entry->limit_lo,
                entry->base_hi, entry->base_mid, entry->base_lo,
                cd[entry->one1], entry->dpl, entry->p);
        entry++;
    }
}

void v3_print_tss(struct guest_info * core, addr_t tr_base) {
    addr_t base_hva;
    struct tss_long * t;

    if (v3_get_vm_cpu_mode(core) != LONG) {
        V3_Print(core->vm_info, core, "= TSS ========\n");
        V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
        return;
    }

    V3_Print(core->vm_info, core, "= TSS ========\n");

    if (tr_base == 0) {
        V3_Print(core->vm_info, core, "        (no TSS is installed)\n");
        return;
    }

    if (core->mem_mode == PHYSICAL_MEM) {
        if (v3_gpa_to_hva(core,
                          get_addr_linear(core, tr_base, &(core->segments.cs)),
                          &base_hva)) {
            PrintError(core->vm_info, core, "Cannot translate address\n");
            return;
        }
    } else if (core->mem_mode == VIRTUAL_MEM) {
        if (v3_gva_to_hva(core,
                          get_addr_linear(core, tr_base, &(core->segments.cs)),
                          &base_hva)) {
            PrintError(core->vm_info, core, "Cannot translate address\n");
            return;
        }
    }

    // SANITY CHECK
    if (tr_base != get_addr_linear(core, tr_base, &(core->segments.cs))) {
        PrintError(core->vm_info, core, "tr base address != linear translation, might be something funky with cs\n");
    }

    if (!base_hva) {
        PrintError(core->vm_info, core, "tr address does not translate!  skipping.\n");
        return;
    }

    t = (struct tss_long *)base_hva;

    V3_Print(core->vm_info, core, " res1 : 0x%llx\n", (uint64_t) t->res1);
    V3_Print(core->vm_info, core, " rsp0 : 0x%llx\n", t->rsp0);
    V3_Print(core->vm_info, core, " rsp1 : 0x%llx\n", t->rsp1);
    V3_Print(core->vm_info, core, " rsp2 : 0x%llx\n", t->rsp2);
    V3_Print(core->vm_info, core, " res2 : 0x%llx\n", t->res2);
    V3_Print(core->vm_info, core, " ist1 : 0x%llx\n", t->ist1);
    V3_Print(core->vm_info, core, " ist2 : 0x%llx\n", t->ist2);
    V3_Print(core->vm_info, core, " ist3 : 0x%llx\n", t->ist3);
    V3_Print(core->vm_info, core, " ist4 : 0x%llx\n", t->ist4);
    V3_Print(core->vm_info, core, " ist5 : 0x%llx\n", t->ist5);
    V3_Print(core->vm_info, core, " ist6 : 0x%llx\n", t->ist6);
    V3_Print(core->vm_info, core, " ist7 : 0x%llx\n", t->ist7);
    V3_Print(core->vm_info, core, " res3 : 0x%llx\n", t->res3);
    V3_Print(core->vm_info, core, " res4 : 0x%llx\n", (uint64_t) t->res4);
    V3_Print(core->vm_info, core, " iomap_base : 0x%llx\n", (uint64_t) t->iomap_base);
    V3_Print(core->vm_info, core, " (following io permission bitmap not currently printed)\n");
}

void v3_print_gp_error(struct guest_info * core, addr_t exit_info1) {
    struct selector_error_code * error = (struct selector_error_code *)(&exit_info1);

    if (v3_get_vm_cpu_mode(core) != LONG) {
        V3_Print(core->vm_info, core, "= GP ERROR ===\n");
        V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
        return;
    }

    V3_Print(core->vm_info, core, "      selector index: %x, TI: %x, IDT: %x, EXT: %x (error=%llx)\n",
            error->index, error->ti, error->idt, error->ext,
            (unsigned long long)exit_info1);
}

#endif