Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

Checking out the other branches works the same way.


c2401e878e0ed8ff0b1bfc50f942a893297e6921
[palacios.git] / palacios / src / palacios / vmm_debug.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20
21 #include <palacios/vmm_debug.h>
22 #include <palacios/vmm.h>
23 #include <palacios/vmm_host_events.h>
24 #include <palacios/vm_guest.h>
25 #include <palacios/vmm_decoder.h>
26 #include <palacios/vm_guest_mem.h>
27 #include <palacios/vmm_config.h>
28
29 #define PRINT_TELEMETRY  1
30 #define PRINT_CORE_STATE 2
31 #define PRINT_ARCH_STATE 3
32 #define PRINT_STACK      4
33 #define PRINT_BACKTRACE  5
34
35
36 #define PRINT_ALL        100 // Absolutely everything
37 #define PRINT_STATE      101 // telemetry, core state, arch state
38
39
40
41
42 static int core_handler(struct guest_info * core, uint32_t cmd) {
43
44
45     switch (cmd) {
46 #ifdef V3_CONFIG_TELEMETRY
47         case PRINT_TELEMETRY: 
48             v3_print_core_telemetry(core);
49             break;
50 #endif
51         
52         case PRINT_CORE_STATE:
53             v3_raise_barrier(core->vm_info, NULL);
54
55             v3_print_guest_state(core);
56
57             v3_lower_barrier(core->vm_info);
58             break;
59         case PRINT_ARCH_STATE:
60             v3_raise_barrier(core->vm_info, NULL);
61
62             v3_print_arch_state(core);
63
64             v3_lower_barrier(core->vm_info);
65             break;
66         case PRINT_STACK:
67             v3_raise_barrier(core->vm_info, NULL);
68
69             v3_print_stack(core);
70
71             v3_lower_barrier(core->vm_info);
72             break;
73         case PRINT_BACKTRACE:
74             v3_raise_barrier(core->vm_info, NULL);
75
76             v3_print_backtrace(core);
77             
78             v3_lower_barrier(core->vm_info);
79             break;
80
81         case PRINT_STATE:
82             v3_raise_barrier(core->vm_info, NULL);
83
84 #ifdef V3_CONFIG_TELEMETRY
85             v3_print_core_telemetry(core);
86 #endif
87             v3_print_guest_state(core);
88             v3_print_arch_state(core);
89
90             v3_lower_barrier(core->vm_info);
91             break;
92
93         case PRINT_ALL:
94             v3_raise_barrier(core->vm_info, NULL);
95
96 #ifdef V3_CONFIG_TELEMETRY
97             v3_print_core_telemetry(core);
98 #endif
99             v3_print_guest_state(core);
100             v3_print_arch_state(core);
101         v3_print_stack(core);
102         v3_print_backtrace(core);
103
104             v3_lower_barrier(core->vm_info);
105             break;
106
107     }
108
109     return 0;
110 }
111
112
113 static int evt_handler(struct v3_vm_info * vm, struct v3_debug_event * evt, void * priv_data) {
114
115     V3_Print(vm, VCORE_NONE,"Debug Event Handler for core %d cmd=%x\n", evt->core_id, evt->cmd);
116
117     if (evt->core_id == -1) {
118         int i = 0;
119         for (i = 0; i < vm->num_cores; i++) {
120             core_handler(&(vm->cores[i]), evt->cmd);
121         }
122     } else {
123         return core_handler(&vm->cores[evt->core_id], evt->cmd);
124     }
125
126     
127     return 0;
128 }
129
130
131 int v3_init_vm_debugging(struct v3_vm_info * vm) {
132     v3_hook_host_event(vm, HOST_DEBUG_EVT, 
133                        V3_HOST_EVENT_HANDLER(evt_handler), 
134                        NULL);
135
136
137     return 0;
138 }
139
140
141
142
143
144 void v3_print_segments(struct v3_segments * segs) {
145     int i = 0;
146     struct v3_segment * seg_ptr;
147
148     seg_ptr=(struct v3_segment *)segs;
149   
150     char *seg_names[] = {"CS", "DS" , "ES", "FS", "GS", "SS" , "LDTR", "GDTR", "IDTR", "TR", NULL};
151     V3_Print(VM_NONE, VCORE_NONE, "Segments\n");
152
153     for (i = 0; seg_names[i] != NULL; i++) {
154
155         V3_Print(VM_NONE, VCORE_NONE, "\t%s: selector=0x%x, base=%p, limit=0x%x type=0x%x system=0x%x dpl=0x%x present=0x%x avail=0x%x long_mode=0x%x db=0x%x granularity=0x%x unusable=0x%x\n", 
156                  seg_names[i], 
157                  seg_ptr[i].selector, 
158                  (void *)(addr_t)seg_ptr[i].base, 
159                  seg_ptr[i].limit,
160                  seg_ptr[i].type,
161                  seg_ptr[i].system,
162                  seg_ptr[i].dpl,
163                  seg_ptr[i].present,
164                  seg_ptr[i].avail,
165                  seg_ptr[i].long_mode,
166                  seg_ptr[i].db,
167                  seg_ptr[i].granularity,
168                  seg_ptr[i].unusable);
169     }
170 }
171
172
173
174 void v3_print_ctrl_regs(struct guest_info * core) {
175     struct v3_ctrl_regs * regs = &(core->ctrl_regs);
176     int i = 0;
177     v3_reg_t * reg_ptr;
178     char * reg_names[] = {"CR0", "CR2", "CR3", "CR4", "CR8", "FLAGS", "EFER", NULL};
179    
180
181     reg_ptr = (v3_reg_t *)regs;
182
183     V3_Print(core->vm_info, core,"Ctrl Regs:\n");
184
185     for (i = 0; reg_names[i] != NULL; i++) {
186         V3_Print(core->vm_info, core, "\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));  
187     }
188
189
190 }
191
#if 0
/* Translate a guest linear address to a host virtual address using
 * whichever translation (physical/virtual) matches the core's current
 * memory mode.  Returns 0 on success, -1 on translation failure. */
static int safe_gva_to_hva(struct guest_info * core, addr_t linear_addr, addr_t * host_addr) {
    /* select the proper translation based on guest mode */
    if (core->mem_mode == PHYSICAL_MEM) {
        if (v3_gpa_to_hva(core, linear_addr, host_addr) == -1) return -1;
    } else if (core->mem_mode == VIRTUAL_MEM) {
        if (v3_gva_to_hva(core, linear_addr, host_addr) == -1) return -1;
    }
    return 0;
}

/* Disassemble a window around the guest RIP (64 bytes before through
 * 32 bytes after), resynchronizing at RIP if the stream was misaligned.
 * BUG FIX: the V3_Print calls referenced a nonexistent `info` variable
 * (the parameter is `core`), so this would not compile if re-enabled. */
static int v3_print_disassembly(struct guest_info * core) {
    int passed_rip = 0;
    addr_t rip, rip_linear, rip_host;

    /* we don't know where the instructions preceding RIP start, so we just take
     * a guess and hope the instruction stream synced up with our disassembly
     * some time before RIP; if it has not we correct RIP at that point
     */

    /* start disassembly 64 bytes before current RIP, continue 32 bytes after */
    rip = (addr_t) core->rip - 64;
    while ((int) (rip - core->rip) < 32) {
        V3_Print(core->vm_info, core, "disassembly step\n");

        /* always print RIP, even if the instructions before were bad */
        if (!passed_rip && rip >= core->rip) {
            if (rip != core->rip) {
                V3_Print(core->vm_info, core, "***** bad disassembly up to this point *****\n");
                rip = core->rip;
            }
            passed_rip = 1;
        }

        /* look up host virtual address for this instruction */
        rip_linear = get_addr_linear(core, rip, &(core->segments.cs));
        if (safe_gva_to_hva(core, rip_linear, &rip_host) < 0) {
            rip++;
            continue;
        }

        /* print disassembled instruction (updates rip) */
        if (v3_disasm(core, (void *) rip_host, &rip, rip == core->rip) < 0) {
            rip++;
            continue;
        }

    }

    return 0;
}

#endif
245
/*
 * Dump a full snapshot of one core's state: RIP (raw and linearized
 * through CS), exit count, interrupt/exception delivery state, segments,
 * control registers, shadow-paging guest registers (when shadow paging
 * is active), GPRs, the descriptor tables, the VM memory map, and the
 * guest stack.
 *
 * NOTE(review): takes no barrier itself -- callers (see core_handler)
 * are expected to quiesce the core first.
 */
void v3_print_guest_state(struct guest_info * core) {
    addr_t linear_addr = 0; 

    V3_Print(core->vm_info, core, "RIP: %p\n", (void *)(addr_t)(core->rip));
    // linearize RIP through the CS segment for comparison with the raw value
    linear_addr = get_addr_linear(core, core->rip, &(core->segments.cs));
    V3_Print(core->vm_info, core, "RIP Linear: %p\n", (void *)linear_addr);

    V3_Print(core->vm_info, core, "NumExits: %u\n", (uint32_t)core->num_exits);

    V3_Print(core->vm_info, core, "IRQ STATE: started=%d, pending=%d\n", 
             core->intr_core_state.irq_started, 
             core->intr_core_state.irq_pending);
    V3_Print(core->vm_info, core, "EXCP STATE: err_code_valid=%d, err_code=%x\n", 
             core->excp_state.excp_error_code_valid, 
             core->excp_state.excp_error_code);


    v3_print_segments(&(core->segments));
    v3_print_ctrl_regs(core);

    // the guest's view of the paging registers differs from the hardware
    // view only under shadow paging
    if (core->shdw_pg_mode == SHADOW_PAGING) {
        V3_Print(core->vm_info, core, "Shadow Paging Guest Registers:\n");
        V3_Print(core->vm_info, core, "\tGuest CR0=%p\n", (void *)(addr_t)(core->shdw_pg_state.guest_cr0));
        V3_Print(core->vm_info, core, "\tGuest CR3=%p\n", (void *)(addr_t)(core->shdw_pg_state.guest_cr3));
        V3_Print(core->vm_info, core, "\tGuest EFER=%p\n", (void *)(addr_t)(core->shdw_pg_state.guest_efer.value));
        // CR4
    }
    v3_print_GPRs(core);

    // descriptor tables (the printers below only emit full dumps in long mode)
    v3_print_idt(core,core->segments.idtr.base);
    v3_print_gdt(core,core->segments.gdtr.base);
    v3_print_ldt(core,core->segments.ldtr.base);
    v3_print_tss(core,core->segments.tr.base);

    v3_print_mem_map(core->vm_info);

    v3_print_stack(core);

    //  v3_print_disassembly(core);
}
286
287
/*
 * Architecture-specific (SVM/VMX) state dump.
 * Currently an empty stub -- nothing is printed.
 */
void v3_print_arch_state(struct guest_info * core) {


}
292
293
294 void v3_print_guest_state_all(struct v3_vm_info * vm) {
295     int i = 0;
296
297     V3_Print(vm, VCORE_NONE,"VM Core states for %s\n", vm->name);
298
299     for (i = 0; i < 80; i++) {
300       V3_Print(vm, VCORE_NONE, "-");
301     }
302
303     for (i = 0; i < vm->num_cores; i++) {
304         v3_print_guest_state(&vm->cores[i]);  
305     }
306     
307     for (i = 0; i < 80; i++) {
308         V3_Print(vm, VCORE_NONE, "-");
309     }
310
311     V3_Print(vm, VCORE_NONE, "\n");    
312 }
313
314
315
316 void v3_print_stack(struct guest_info * core) {
317     addr_t linear_addr = 0;
318     addr_t host_addr = 0;
319     int i = 0;
320     v3_cpu_mode_t cpu_mode = v3_get_vm_cpu_mode(core);
321
322     linear_addr = get_addr_linear(core, core->vm_regs.rsp, &(core->segments.ss));
323  
324     V3_Print(core->vm_info, core, "Stack at %p:\n", (void *)linear_addr);
325    
326     if (core->mem_mode == PHYSICAL_MEM) {
327         if (v3_gpa_to_hva(core, linear_addr, &host_addr) == -1) {
328             PrintError(core->vm_info, core, "Could not translate Stack address\n");
329             return;
330         }
331     } else if (core->mem_mode == VIRTUAL_MEM) {
332         if (v3_gva_to_hva(core, linear_addr, &host_addr) == -1) {
333             PrintError(core->vm_info, core, "Could not translate Virtual Stack address\n");
334             return;
335         }
336     }
337     
338     V3_Print(core->vm_info, core, "Host Address of rsp = 0x%p\n", (void *)host_addr);
339  
340     // We start i at one because the current stack pointer points to an unused stack element
341     for (i = 0; i <= 24; i++) {
342
343         if (cpu_mode == REAL) {
344             V3_Print(core->vm_info, core, "\t0x%.4x\n", *((uint16_t *)host_addr + (i * 2)));
345         } else if (cpu_mode == LONG) {
346             V3_Print(core->vm_info, core, "\t%p\n", (void *)*(addr_t *)(host_addr + (i * 8)));
347         } else {
348             // 32 bit stacks...
349             V3_Print(core->vm_info, core, "\t0x%.8x\n", *(uint32_t *)(host_addr + (i * 4)));
350         }
351     }
352
353 }    
354
355
/*
 * Walk the guest's frame-pointer chain starting at SS:RBP and print up
 * to 30 frames (saved RBP, return RIP, and -- when a "System.map" file
 * is attached to the VM config -- the nearest symbol name below RIP).
 *
 * Frame layout assumed: [saved RBP][return RIP], with the word size
 * taken from the guest CPU mode.  Bails out on any translation failure.
 */
void v3_print_backtrace(struct guest_info * core) {
    addr_t gla_rbp = 0;
    int i = 0;
    v3_cpu_mode_t cpu_mode = v3_get_vm_cpu_mode(core);
    // optional guest symbol table (Linux System.map format: "<addr> <type> <name>")
    struct v3_cfg_file * system_map = v3_cfg_get_file(core->vm_info, "System.map");

    V3_Print(core->vm_info, core, "Performing Backtrace for Core %d\n", core->vcpu_id);
    V3_Print(core->vm_info, core, "\tRSP=%p, RBP=%p\n", (void *)core->vm_regs.rsp, (void *)core->vm_regs.rbp);

    gla_rbp = get_addr_linear(core, core->vm_regs.rbp, &(core->segments.ss));


    for (i = 0; i < 30; i++) {
        addr_t hva_rbp = 0; 
        addr_t hva_rip = 0; 
        char * sym_name = NULL;
        addr_t rip_val = 0;

        // translate the current frame pointer to a host address
        if (core->mem_mode == PHYSICAL_MEM) {
            if (v3_gpa_to_hva(core, gla_rbp, &hva_rbp) == -1) {
                PrintError(core->vm_info, core, "Could not translate Stack address\n");
                return;
            }
        } else if (core->mem_mode == VIRTUAL_MEM) {
            if (v3_gva_to_hva(core, gla_rbp, &hva_rbp) == -1) {
                PrintError(core->vm_info, core, "Could not translate Virtual Stack address\n");
                return;
            }
        }


        // the saved return address sits one word above the saved RBP;
        // NOTE(review): assumes v3_get_addr_width() returns the word size
        // in bytes -- confirm against its definition
        hva_rip = hva_rbp + v3_get_addr_width(core);
        
        if (cpu_mode == REAL) {
            rip_val = (addr_t)*(uint16_t *)hva_rip;
        } else if (cpu_mode == LONG) {
            rip_val = (addr_t)*(uint64_t *)hva_rip;
        } else {
            rip_val = (addr_t)*(uint32_t *)hva_rip;
        }

        if (system_map) {
            // linear scan: remember each line's name and report the
            // previous one once an entry's address passes rip_val
            char * tmp_ptr = system_map->data;
            char * sym_ptr = NULL;
            uint64_t file_offset = 0; 
            uint64_t sym_offset = 0;

            // NOTE(review): file_offset is never advanced inside this
            // loop, so termination relies entirely on the sym_offset
            // comparison -- verify the map is sorted and well-formed
            while (file_offset < system_map->size) {
                sym_offset = strtox(tmp_ptr, &tmp_ptr);

                tmp_ptr += 3; // pass over symbol type

                if (sym_offset > rip_val) {
                    // NOTE(review): if the first entry already exceeds
                    // rip_val, sym_ptr is still NULL here and
                    // strchr(NULL, ...) would fault -- worth a guard
                    char * end_ptr = strchr(sym_ptr, '\n');

                    if (end_ptr) {
                        *end_ptr = 0; // null terminate symbol...
                    }

                    sym_name = sym_ptr;
                    break;
                }

                sym_ptr = tmp_ptr;
                { 
                    // advance to the start of the next line
                    char * end_ptr2 = strchr(tmp_ptr, '\n');

                    if (!end_ptr2) {
                        tmp_ptr += strlen(tmp_ptr) + 1;
                    } else {
                        tmp_ptr = end_ptr2 + 1;
                    }
                }
            }
        }

        if (!sym_name) {
            sym_name = "?";
        }

        // print this frame and follow the saved RBP to the caller's frame
        if (cpu_mode == REAL) {
            V3_Print(core->vm_info, core, "Next RBP=0x%.4x, RIP=0x%.4x (%s)\n", 
                     *(uint16_t *)hva_rbp,*(uint16_t *)hva_rip, 
                     sym_name);
            
            gla_rbp = *(uint16_t *)hva_rbp;
        } else if (cpu_mode == LONG) {
            V3_Print(core->vm_info, core, "Next RBP=%p, RIP=%p (%s)\n", 
                     (void *)*(uint64_t *)hva_rbp, (void *)*(uint64_t *)hva_rip,
                     sym_name);
            gla_rbp = *(uint64_t *)hva_rbp;
        } else {
            V3_Print(core->vm_info, core, "Next RBP=0x%.8x, RIP=0x%.8x (%s)\n", 
                     *(uint32_t *)hva_rbp, *(uint32_t *)hva_rip,
                     sym_name);
            gla_rbp = *(uint32_t *)hva_rbp;
        }

    }
}
456
457
458 #ifdef __V3_32BIT__
459
460 void v3_print_GPRs(struct guest_info * core) {
461     struct v3_gprs * regs = &(core->vm_regs);
462     int i = 0;
463     v3_reg_t * reg_ptr;
464     char * reg_names[] = { "RDI", "RSI", "RBP", "RSP", "RBX", "RDX", "RCX", "RAX", NULL};
465
466     reg_ptr = (v3_reg_t *)regs;
467
468     V3_Print(info->vm_info, info, "32 bit GPRs:\n");
469
470     for (i = 0; reg_names[i] != NULL; i++) {
471         V3_Print(info->vm_info, info, "\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));  
472     }
473 }
474
475 void v3_print_idt(struct guest_info * core, addr_t idtr_base) {
476     addr_t base_hva;
477
478     if (v3_get_vm_cpu_mode(core)!=LONG) { 
479         V3_Print(core->vm_info, core, "= IDT ========\n");
480         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
481         return;
482     }
483         
484
485     if (core->mem_mode == PHYSICAL_MEM) {
486         v3_gpa_to_hva(core, 
487                       get_addr_linear(core, idtr_base, &(core->segments.cs)),
488                       &base_hva);
489         PrintError(core->vm_info, core, "Kind of weird that we got here.... physical mem?\n");
490     } else if (core->mem_mode == VIRTUAL_MEM) {
491         v3_gva_to_hva(core, 
492                       get_addr_linear(core, idtr_base, &(core->segments.cs)),
493                       &base_hva);
494     }
495
496     // SANITY CHECK
497     if (idtr_base != get_addr_linear(core, idtr_base, &(core->segments.cs))) {
498         PrintError(core->vm_info, core, "idtr base address != linear translation, might be something funky with cs\n");
499     }
500
501     if (!base_hva) {
502         PrintError(core->vm_info, core "idtr address does not translate!  skipping.\n");
503         return ;
504     }
505
506     int i;
507     char *types[16] = {"  ILGL","aTSS16","   LDT","bTSS16","call16","  task","intr16","trap16",
508         "  ILGL","aTSS32","  ILGL","bTSS32","call32","  ILGL","intr32","trap32"};
509
510     struct int_trap_gate_lgcy * entry;
511     entry = (struct int_trap_gate_lgcy *)base_hva;
512     V3_Print(core->vm_info, core, "= IDT ========\n");
513     V3_Print(core->vm_info, core, "  # | hex | selector | si:ti:rpl |   offset | type | dpl | s | p\n");
514     for (i = 0; i < NUM_IDT_ENTRIES; i++) {
515         uint32_t tmp = entry->selector;
516         struct segment_selector * seg = (struct segment_selector *)(&tmp);
517         V3_Print(core->vm_info, core, "%3d | %3x |     %04x |   %03x:%x:%x | %04x%04x | %s |   %x | %x | %x | %x\n", i, i,
518                 entry->selector,
519                 seg->index, seg->ti, seg->rpl,
520                 entry->offset_hi, entry->offset_lo,
521                 types[entry->type], entry->dpl, entry->s, entry->p);
522         entry++;
523     }
524 }
525
526 void v3_print_gdt(struct guest_info * core, addr_t gdtr_base) {
527     addr_t base_hva;
528
529     if (v3_get_vm_cpu_mode(core)!=LONG) { 
530         V3_Print(core->vm_info, core, "= GDT ========\n");
531         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
532         return;
533     }
534
535     if (core->mem_mode == PHYSICAL_MEM) {
536         v3_gpa_to_hva(core, 
537                       get_addr_linear(core, gdtr_base, &(core->segments.cs)),
538                       &base_hva);
539         PrintError(core->vm_info, core, "Kind of weird that we got here.... physical mem?\n");
540     } else if (core->mem_mode == VIRTUAL_MEM) {
541         v3_gva_to_hva(core, 
542                       get_addr_linear(core, gdtr_base, &(core->segments.cs)),
543                       &base_hva);
544     }
545
546     // SANITY CHECK
547     if (gdtr_base != get_addr_linear(core, gdtr_base, &(core->segments.cs))) {
548         PrintError(core->vm_info, core, "gdtr base address != linear translation, might be something funky with cs\n");
549     }
550
551     if (!base_hva) {
552         PrintError(core->vm_info, core "gdtr address does not translate!  skipping.\n");
553         return ;
554     }
555
556     int i;
557     char* cd[2] = {"data","code"};
558     // TODO: handle possibility of gate/segment descriptor
559
560     struct code_desc_lgcy * entry;
561     entry = (struct code_desc_long *)base_hva;
562     V3_Print(core->vm_info, core, "= GDT ========\n");
563     V3_Print(core->vm_info, core, "  # | hex | limit |     base |  c/d | dpl | p\n");
564     for (i = 0; i < NUM_GDT_ENTRIES; i++) {
565         V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %02x%02x%04x | %s |   %x | %x\n", i, i,
566                 entry->limit_hi, entry->limit_lo,
567                 entry->base_hi, entry->base_mid, entry->base_lo,
568                 cd[entry->one1], entry->dpl, entry->p);
569         entry++;
570     }
571 }
572
573 void v3_print_gp_error(struct guest_info * core, addr_t exit_info1) {
574     struct selector_error_code * error = (struct selector_error_code *)(&exit_info1);
575
576     V3_Print(core->vm_info, core, "      selector index: %x, TI: %x, IDT: %x, EXT: %x (error=%llx)\n",
577             error->index, error->ti, error->idt, error->ext,
578             (unsigned long long)exit_info1);
579 }
580
581 #elif __V3_64BIT__
582
583 void v3_print_GPRs(struct guest_info * core) {
584     struct v3_gprs * regs = &(core->vm_regs);
585     int i = 0;
586     v3_reg_t * reg_ptr;
587     char * reg_names[] = { "RDI", "RSI", "RBP", "RSP", "RBX", "RDX", "RCX", "RAX", \
588                            "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15", NULL};
589
590     reg_ptr = (v3_reg_t *)regs;
591
592     V3_Print(core->vm_info, core, "64 bit GPRs:\n");
593
594     for (i = 0; reg_names[i] != NULL; i++) {
595         V3_Print(core->vm_info, core, "\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));  
596     }
597 }
598
599 void v3_print_idt(struct guest_info * core, addr_t idtr_base) {
600     addr_t base_hva;
601
602     if (v3_get_vm_cpu_mode(core)!=LONG) { 
603         V3_Print(core->vm_info, core, "= IDT ========\n");
604         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
605         return;
606     }
607
608     if (core->mem_mode == PHYSICAL_MEM) {
609         v3_gpa_to_hva(core, 
610                       get_addr_linear(core, idtr_base, &(core->segments.cs)),
611                       &base_hva);
612     } else if (core->mem_mode == VIRTUAL_MEM) {
613         v3_gva_to_hva(core, 
614                       get_addr_linear(core, idtr_base, &(core->segments.cs)),
615                       &base_hva);
616     }
617
618     // SANITY CHECK
619     if (idtr_base != get_addr_linear(core, idtr_base, &(core->segments.cs))) {
620         PrintError(core->vm_info, core, "idtr base address != linear translation, might be something funky with cs\n");
621     }
622
623     if (!base_hva) {
624         PrintError(core->vm_info, core, "idtr address does not translate!  skipping.\n");
625         return ;
626     }
627
628     int i;
629     char *types[16] = {"ILGL","ILGL"," LDT","ILGL","ILGL","ILGL","ILGL","ILGL","ILGL",
630         "aTSS","ILGL","bTSS","call","ILGL","intr","trap"};
631
632     struct int_trap_gate_long * entry;
633     entry = (struct int_trap_gate_long *)base_hva;
634     V3_Print(core->vm_info, core, "= IDT ========\n");
635     V3_Print(core->vm_info, core, "  # | hex | selector | si:ti:rpl |           offset | type | dpl | s | r | p\n");
636     for (i = 0; i < NUM_IDT_ENTRIES; i++) {
637         uint32_t tmp = entry->selector;
638         struct segment_selector * seg = (struct segment_selector *)(&tmp);
639         V3_Print(core->vm_info, core, "%3d | %3x |     %04x |   %03x:%x:%x | %08x%04x%04x | %s |   %x | %x | %x | %x\n", i, i,
640                 entry->selector,
641                 seg->index, seg->ti, seg->rpl,
642                 entry->offset_hi, entry->offset_mid, entry->offset_lo,
643                 types[entry->type], entry->dpl, entry->s,
644                 entry->s, entry->p);
645         entry++;
646     }
647 }
648
649 void v3_print_gdt(struct guest_info * core, addr_t gdtr_base) {
650     addr_t base_hva;
651
652     if (v3_get_vm_cpu_mode(core)!=LONG) { 
653         V3_Print(core->vm_info, core, "= GDT ========\n");
654         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
655         return;
656     }
657
658     if (core->mem_mode == PHYSICAL_MEM) {
659         v3_gpa_to_hva(core, 
660                       get_addr_linear(core, gdtr_base, &(core->segments.cs)),
661                       &base_hva);
662     } else if (core->mem_mode == VIRTUAL_MEM) {
663         v3_gva_to_hva(core, 
664                       get_addr_linear(core, gdtr_base, &(core->segments.cs)),
665                       &base_hva);
666     }
667
668     // SANITY CHECK
669     if (gdtr_base != get_addr_linear(core, gdtr_base, &(core->segments.cs))) {
670         PrintError(core->vm_info, core, "gdtr base address != linear translation, might be something funky with cs\n");
671     }
672
673     if (!base_hva) {
674         PrintError(core->vm_info, core, "gdtr address does not translate!  skipping.\n");
675         return ;
676     }
677
678     int i;
679     char* cd[2] = {"data","code"};
680     // TODO: handle possibility of gate/segment descriptor
681
682     struct code_desc_long * entry;
683     entry = (struct code_desc_long *)base_hva;
684     V3_Print(core->vm_info, core, "= GDT ========\n");
685     V3_Print(core->vm_info, core, "  # | hex | limit |     base |  c/d | dpl | p\n");
686     for (i = 0; i < NUM_GDT_ENTRIES; i++) {
687         V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %02x%02x%04x | %s |   %x | %x\n", i, i,
688                 entry->limit_hi, entry->limit_lo,
689                 entry->base_hi, entry->base_mid, entry->base_lo,
690                 cd[entry->one1], entry->dpl, entry->p);
691         entry++;
692     }
693 }
694
695 void v3_print_ldt(struct guest_info * core, addr_t ldtr_base) {
696     addr_t base_hva;
697
698     if (v3_get_vm_cpu_mode(core)!=LONG) { 
699         V3_Print(core->vm_info, core, "= LDT ========\n");
700         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
701         return;
702     }
703
704     V3_Print(core->vm_info, core, "= LDT ========\n");
705
706     if (ldtr_base == 0) {
707         V3_Print(core->vm_info, core, "        (no LDT is installed)\n");
708         return;
709     } 
710
711     if (core->mem_mode == PHYSICAL_MEM) {
712         v3_gpa_to_hva(core, 
713                       get_addr_linear(core, ldtr_base, &(core->segments.cs)),
714                       &base_hva);
715     } else if (core->mem_mode == VIRTUAL_MEM) {
716         v3_gva_to_hva(core, 
717                       get_addr_linear(core, ldtr_base, &(core->segments.cs)),
718                       &base_hva);
719     }
720
721     // SANITY CHECK
722     if (ldtr_base != get_addr_linear(core, ldtr_base, &(core->segments.cs))) {
723         PrintError(core->vm_info, core, "ldtr base address != linear translation, might be something funky with cs\n");
724     }
725
726     if (!base_hva) {
727         PrintError(core->vm_info, core, "ldtr address does not translate!  skipping.\n");
728         return ;
729     }
730
731     int i;
732     char* cd[2] = {"data","code"};
733     // TODO: handle possibility of gate/segment descriptor
734
735     struct code_desc_long * entry;
736     entry = (struct code_desc_long *)base_hva;
737     V3_Print(core->vm_info, core, "  # | hex | limit |     base |  c/d | dpl | p\n");
738     for (i = 0; i < NUM_LDT_ENTRIES; i++) {
739         V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %02x%02x%04x | %s |   %x | %x\n", i, i,
740                 entry->limit_hi, entry->limit_lo,
741                 entry->base_hi, entry->base_mid, entry->base_lo,
742                 cd[entry->one1], entry->dpl, entry->p);
743         entry++;
744     }
745 }
746
747 void v3_print_tss(struct guest_info * core, addr_t tr_base) {
748     addr_t base_hva;
749     struct tss_long *t;
750
751     if (v3_get_vm_cpu_mode(core)!=LONG) { 
752         V3_Print(core->vm_info, core, "= TSS ========\n");
753         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
754         return;
755     }
756
757     V3_Print(core->vm_info, core, "= TSS ========\n");
758
759     if (tr_base == 0) {
760         V3_Print(core->vm_info, core, "        (no TSS is installed)\n");
761         return;
762     } 
763
764     if (core->mem_mode == PHYSICAL_MEM) {
765         v3_gpa_to_hva(core, 
766                       get_addr_linear(core, tr_base, &(core->segments.cs)),
767                       &base_hva);
768     } else if (core->mem_mode == VIRTUAL_MEM) {
769         v3_gva_to_hva(core, 
770                       get_addr_linear(core, tr_base, &(core->segments.cs)),
771                       &base_hva);
772     }
773
774     // SANITY CHECK
775     if (tr_base != get_addr_linear(core, tr_base, &(core->segments.cs))) {
776         PrintError(core->vm_info, core, "tr base address != linear translation, might be something funky with cs\n");
777     }
778
779     if (!base_hva) {
780         PrintError(core->vm_info, core, "tr address does not translate!  skipping.\n");
781         return ;
782     }
783
784     t=(struct tss_long*)base_hva;
785
786     V3_Print(core->vm_info, core," res1 : 0x%llx\n", (uint64_t) t->res1);
787     V3_Print(core->vm_info, core," rsp0 : 0x%llx\n", t->rsp0);
788     V3_Print(core->vm_info, core," rsp1 : 0x%llx\n", t->rsp1);
789     V3_Print(core->vm_info, core," rsp2 : 0x%llx\n", t->rsp2);
790     V3_Print(core->vm_info, core," res2 : 0x%llx\n", t->res2);
791     V3_Print(core->vm_info, core," ist1 : 0x%llx\n", t->ist1);
792     V3_Print(core->vm_info, core," ist2 : 0x%llx\n", t->ist2);
793     V3_Print(core->vm_info, core," ist3 : 0x%llx\n", t->ist3);
794     V3_Print(core->vm_info, core," ist4 : 0x%llx\n", t->ist4);
795     V3_Print(core->vm_info, core," ist5 : 0x%llx\n", t->ist5);
796     V3_Print(core->vm_info, core," ist6 : 0x%llx\n", t->ist6);
797     V3_Print(core->vm_info, core," ist7 : 0x%llx\n", t->ist7);
798     V3_Print(core->vm_info, core," res3 : 0x%llx\n", t->res3);
799     V3_Print(core->vm_info, core," res4 : 0x%llx\n", (uint64_t) t->res4);
800     V3_Print(core->vm_info, core," iomap_base : 0x%llx\n", (uint64_t) t->iomap_base);
801     V3_Print(core->vm_info, core," (following io permission bitmap not currently printed)\n");
802
803 }
804
805 void v3_print_gp_error(struct guest_info * core, addr_t exit_info1) {
806     struct selector_error_code * error = (struct selector_error_code *)(&exit_info1);
807
808     if (v3_get_vm_cpu_mode(core)!=LONG) { 
809         V3_Print(core->vm_info, core, "= IDT ========\n");
810         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
811         return;
812     }
813
814     V3_Print(core->vm_info, core, "      selector index: %x, TI: %x, IDT: %x, EXT: %x (error=%llx)\n",
815             error->index, error->ti, error->idt, error->ext,
816             (unsigned long long)exit_info1);
817 }
818
819 #endif