Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute:
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


e209d5427768857ce09e26b0ad5f1037b9f6e12e
[palacios.git] / palacios / src / palacios / vmm_debug.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20
21 #include <palacios/vmm_debug.h>
22 #include <palacios/vmm.h>
23 #include <palacios/vmm_host_events.h>
24 #include <palacios/vm_guest.h>
25 #include <palacios/vmm_decoder.h>
26 #include <palacios/vm_guest_mem.h>
27 #include <palacios/vmm_config.h>
28
29 #define PRINT_TELEMETRY  1
30 #define PRINT_CORE_STATE 2
31 #define PRINT_ARCH_STATE 3
32 #define PRINT_STACK      4
33 #define PRINT_BACKTRACE  5
34
35
36 #define PRINT_ALL        100 // Absolutely everything
37 #define PRINT_STATE      101 // telemetry, core state, arch state
38
39
40
41
42 static int core_handler(struct guest_info * core, uint32_t cmd) {
43
44
45     switch (cmd) {
46 #ifdef V3_CONFIG_TELEMETRY
47         case PRINT_TELEMETRY: 
48             v3_print_core_telemetry(core);
49             break;
50 #endif
51         
52         case PRINT_CORE_STATE:
53             v3_raise_barrier(core->vm_info, NULL);
54
55             v3_print_guest_state(core);
56
57             v3_lower_barrier(core->vm_info);
58             break;
59         case PRINT_ARCH_STATE:
60             v3_raise_barrier(core->vm_info, NULL);
61
62             v3_print_arch_state(core);
63
64             v3_lower_barrier(core->vm_info);
65             break;
66         case PRINT_STACK:
67             v3_raise_barrier(core->vm_info, NULL);
68
69             v3_print_stack(core);
70
71             v3_lower_barrier(core->vm_info);
72             break;
73         case PRINT_BACKTRACE:
74             v3_raise_barrier(core->vm_info, NULL);
75
76             v3_print_backtrace(core);
77             
78             v3_lower_barrier(core->vm_info);
79             break;
80
81         case PRINT_STATE:
82             v3_raise_barrier(core->vm_info, NULL);
83
84 #ifdef V3_CONFIG_TELEMETRY
85             v3_print_core_telemetry(core);
86 #endif
87             v3_print_guest_state(core);
88             v3_print_arch_state(core);
89
90             v3_lower_barrier(core->vm_info);
91             break;
92
93         case PRINT_ALL:
94             v3_raise_barrier(core->vm_info, NULL);
95
96 #ifdef V3_CONFIG_TELEMETRY
97             v3_print_core_telemetry(core);
98 #endif
99             v3_print_guest_state(core);
100             v3_print_arch_state(core);
101         v3_print_stack(core);
102         v3_print_backtrace(core);
103
104             v3_lower_barrier(core->vm_info);
105             break;
106
107     }
108
109     return 0;
110 }
111
112
113 static int evt_handler(struct v3_vm_info * vm, struct v3_debug_event * evt, void * priv_data) {
114
115     V3_Print(vm, VCORE_NONE,"Debug Event Handler for core %d cmd=%x\n", evt->core_id, evt->cmd);
116
117     if (evt->core_id == -1) {
118         int i = 0;
119         for (i = 0; i < vm->num_cores; i++) {
120             core_handler(&(vm->cores[i]), evt->cmd);
121         }
122     } else {
123         return core_handler(&vm->cores[evt->core_id], evt->cmd);
124     }
125
126     
127     return 0;
128 }
129
130
131 int v3_init_vm_debugging(struct v3_vm_info * vm) {
132     v3_hook_host_event(vm, HOST_DEBUG_EVT, 
133                        V3_HOST_EVENT_HANDLER(evt_handler), 
134                        NULL);
135
136
137     return 0;
138 }
139
140
141
142
143
144 void v3_print_segments(struct v3_segments * segs) {
145     int i = 0;
146     struct v3_segment * seg_ptr;
147
148     seg_ptr=(struct v3_segment *)segs;
149   
150     char *seg_names[] = {"CS", "DS" , "ES", "FS", "GS", "SS" , "LDTR", "GDTR", "IDTR", "TR", NULL};
151     V3_Print(VM_NONE, VCORE_NONE, "Segments\n");
152
153     for (i = 0; seg_names[i] != NULL; i++) {
154
155         V3_Print(VM_NONE, VCORE_NONE, "\t%s: Sel=%x, base=%p, limit=%x (long_mode=%d, db=%d)\n", seg_names[i], seg_ptr[i].selector, 
156                    (void *)(addr_t)seg_ptr[i].base, seg_ptr[i].limit,
157                    seg_ptr[i].long_mode, seg_ptr[i].db);
158
159     }
160 }
161
162
163
164 void v3_print_ctrl_regs(struct guest_info * core) {
165     struct v3_ctrl_regs * regs = &(core->ctrl_regs);
166     int i = 0;
167     v3_reg_t * reg_ptr;
168     char * reg_names[] = {"CR0", "CR2", "CR3", "CR4", "CR8", "FLAGS", "EFER", NULL};
169    
170
171     reg_ptr = (v3_reg_t *)regs;
172
173     V3_Print(core->vm_info, core,"Ctrl Regs:\n");
174
175     for (i = 0; reg_names[i] != NULL; i++) {
176         V3_Print(core->vm_info, core, "\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));  
177     }
178
179
180 }
181
#if 0
/* Translate a guest linear address to a host virtual address using the
 * translation appropriate for the core's current memory mode.
 * Returns 0 on success, -1 on translation failure. */
static int safe_gva_to_hva(struct guest_info * core, addr_t linear_addr, addr_t * host_addr) {
    /* select the proper translation based on guest mode */
    if (core->mem_mode == PHYSICAL_MEM) {
        if (v3_gpa_to_hva(core, linear_addr, host_addr) == -1) return -1;
    } else if (core->mem_mode == VIRTUAL_MEM) {
        if (v3_gva_to_hva(core, linear_addr, host_addr) == -1) return -1;
    }
    return 0;
}

/* Disassemble the instruction stream around the current guest RIP.
 * (Disabled code; fixed to reference 'core' — the old 'info' name did
 * not exist in this scope and would not have compiled.) */
static int v3_print_disassembly(struct guest_info * core) {
    int passed_rip = 0;
    addr_t rip, rip_linear, rip_host;

    /* we don't know where the instructions preceding RIP start, so we just take
     * a guess and hope the instruction stream synced up with our disassembly
     * some time before RIP; if it has not we correct RIP at that point
     */

    /* start disassembly 64 bytes before current RIP, continue 32 bytes after */
    rip = (addr_t) core->rip - 64;
    while ((int) (rip - core->rip) < 32) {
        V3_Print(core->vm_info, core, "disassembly step\n");

        /* always print RIP, even if the instructions before were bad */
        if (!passed_rip && rip >= core->rip) {
            if (rip != core->rip) {
                V3_Print(core->vm_info, core, "***** bad disassembly up to this point *****\n");
                rip = core->rip;
            }
            passed_rip = 1;
        }

        /* look up host virtual address for this instruction */
        rip_linear = get_addr_linear(core, rip, &(core->segments.cs));
        if (safe_gva_to_hva(core, rip_linear, &rip_host) < 0) {
            rip++;
            continue;
        }

        /* print disassembled instruction (updates rip) */
        if (v3_disasm(core, (void *) rip_host, &rip, rip == core->rip) < 0) {
            rip++;
            continue;
        }

    }

    return 0;
}

#endif
235
/*
 * Print a full snapshot of one core's guest-visible state: RIP (raw and
 * linearized through CS), exit count, pending interrupt/exception state,
 * segments, control registers, shadow-paging registers (if active),
 * GPRs, descriptor tables, the VM memory map, and the guest stack.
 *
 * NOTE(review): callers are expected to quiesce the VM first (see the
 * barrier usage in core_handler) — this function does not raise one.
 */
void v3_print_guest_state(struct guest_info * core) {
    addr_t linear_addr = 0; 

    V3_Print(core->vm_info, core, "RIP: %p\n", (void *)(addr_t)(core->rip));
    linear_addr = get_addr_linear(core, core->rip, &(core->segments.cs));
    V3_Print(core->vm_info, core, "RIP Linear: %p\n", (void *)linear_addr);

    V3_Print(core->vm_info, core, "NumExits: %u\n", (uint32_t)core->num_exits);

    V3_Print(core->vm_info, core, "IRQ STATE: started=%d, pending=%d\n", 
             core->intr_core_state.irq_started, 
             core->intr_core_state.irq_pending);
    V3_Print(core->vm_info, core, "EXCP STATE: err_code_valid=%d, err_code=%x\n", 
             core->excp_state.excp_error_code_valid, 
             core->excp_state.excp_error_code);


    v3_print_segments(&(core->segments));
    v3_print_ctrl_regs(core);

    /* Shadow paging keeps the guest's idea of CR0/CR3/EFER separately
       from the hardware values; print the guest's copies. */
    if (core->shdw_pg_mode == SHADOW_PAGING) {
        V3_Print(core->vm_info, core, "Shadow Paging Guest Registers:\n");
        V3_Print(core->vm_info, core, "\tGuest CR0=%p\n", (void *)(addr_t)(core->shdw_pg_state.guest_cr0));
        V3_Print(core->vm_info, core, "\tGuest CR3=%p\n", (void *)(addr_t)(core->shdw_pg_state.guest_cr3));
        V3_Print(core->vm_info, core, "\tGuest EFER=%p\n", (void *)(addr_t)(core->shdw_pg_state.guest_efer.value));
        // CR4
    }
    v3_print_GPRs(core);

    /* Descriptor tables are dumped from the guest's current table bases. */
    v3_print_idt(core,core->segments.idtr.base);
    v3_print_gdt(core,core->segments.gdtr.base);
    v3_print_ldt(core,core->segments.ldtr.base);
    v3_print_tss(core,core->segments.tr.base);

    v3_print_mem_map(core->vm_info);

    v3_print_stack(core);

    //  v3_print_disassembly(core);
}
276
277
/*
 * Print architecture-specific (SVM/VMX) core state.
 *
 * Currently an empty stub: PRINT_ARCH_STATE, PRINT_STATE and PRINT_ALL
 * all call it, but no architecture backend output is produced here yet.
 */
void v3_print_arch_state(struct guest_info * core) {


}
282
283
284 void v3_print_guest_state_all(struct v3_vm_info * vm) {
285     int i = 0;
286
287     V3_Print(vm, VCORE_NONE,"VM Core states for %s\n", vm->name);
288
289     for (i = 0; i < 80; i++) {
290       V3_Print(vm, VCORE_NONE, "-");
291     }
292
293     for (i = 0; i < vm->num_cores; i++) {
294         v3_print_guest_state(&vm->cores[i]);  
295     }
296     
297     for (i = 0; i < 80; i++) {
298         V3_Print(vm, VCORE_NONE, "-");
299     }
300
301     V3_Print(vm, VCORE_NONE, "\n");    
302 }
303
304
305
306 void v3_print_stack(struct guest_info * core) {
307     addr_t linear_addr = 0;
308     addr_t host_addr = 0;
309     int i = 0;
310     v3_cpu_mode_t cpu_mode = v3_get_vm_cpu_mode(core);
311
312     linear_addr = get_addr_linear(core, core->vm_regs.rsp, &(core->segments.ss));
313  
314     V3_Print(core->vm_info, core, "Stack at %p:\n", (void *)linear_addr);
315    
316     if (core->mem_mode == PHYSICAL_MEM) {
317         if (v3_gpa_to_hva(core, linear_addr, &host_addr) == -1) {
318             PrintError(core->vm_info, core, "Could not translate Stack address\n");
319             return;
320         }
321     } else if (core->mem_mode == VIRTUAL_MEM) {
322         if (v3_gva_to_hva(core, linear_addr, &host_addr) == -1) {
323             PrintError(core->vm_info, core, "Could not translate Virtual Stack address\n");
324             return;
325         }
326     }
327     
328     V3_Print(core->vm_info, core, "Host Address of rsp = 0x%p\n", (void *)host_addr);
329  
330     // We start i at one because the current stack pointer points to an unused stack element
331     for (i = 0; i <= 24; i++) {
332
333         if (cpu_mode == REAL) {
334             V3_Print(core->vm_info, core, "\t0x%.4x\n", *((uint16_t *)host_addr + (i * 2)));
335         } else if (cpu_mode == LONG) {
336             V3_Print(core->vm_info, core, "\t%p\n", (void *)*(addr_t *)(host_addr + (i * 8)));
337         } else {
338             // 32 bit stacks...
339             V3_Print(core->vm_info, core, "\t0x%.8x\n", *(uint32_t *)(host_addr + (i * 4)));
340         }
341     }
342
343 }    
344
345
346 void v3_print_backtrace(struct guest_info * core) {
347     addr_t gla_rbp = 0;
348     int i = 0;
349     v3_cpu_mode_t cpu_mode = v3_get_vm_cpu_mode(core);
350     struct v3_cfg_file * system_map = v3_cfg_get_file(core->vm_info, "System.map");
351
352     V3_Print(core->vm_info, core, "Performing Backtrace for Core %d\n", core->vcpu_id);
353     V3_Print(core->vm_info, core, "\tRSP=%p, RBP=%p\n", (void *)core->vm_regs.rsp, (void *)core->vm_regs.rbp);
354
355     gla_rbp = get_addr_linear(core, core->vm_regs.rbp, &(core->segments.ss));
356
357
358     for (i = 0; i < 30; i++) {
359         addr_t hva_rbp = 0; 
360         addr_t hva_rip = 0; 
361         char * sym_name = NULL;
362         addr_t rip_val = 0;
363
364         if (core->mem_mode == PHYSICAL_MEM) {
365             if (v3_gpa_to_hva(core, gla_rbp, &hva_rbp) == -1) {
366                 PrintError(core->vm_info, core, "Could not translate Stack address\n");
367                 return;
368             }
369         } else if (core->mem_mode == VIRTUAL_MEM) {
370             if (v3_gva_to_hva(core, gla_rbp, &hva_rbp) == -1) {
371                 PrintError(core->vm_info, core, "Could not translate Virtual Stack address\n");
372                 return;
373             }
374         }
375
376
377         hva_rip = hva_rbp + v3_get_addr_width(core);
378         
379         if (cpu_mode == REAL) {
380             rip_val = (addr_t)*(uint16_t *)hva_rip;
381         } else if (cpu_mode == LONG) {
382             rip_val = (addr_t)*(uint64_t *)hva_rip;
383         } else {
384             rip_val = (addr_t)*(uint32_t *)hva_rip;
385         }
386
387         if (system_map) {
388             char * tmp_ptr = system_map->data;
389             char * sym_ptr = NULL;
390             uint64_t file_offset = 0; 
391             uint64_t sym_offset = 0;
392
393             while (file_offset < system_map->size) {
394                 sym_offset = strtox(tmp_ptr, &tmp_ptr);
395
396                 tmp_ptr += 3; // pass over symbol type
397
398                 if (sym_offset > rip_val) {
399                     char * end_ptr = strchr(sym_ptr, '\n');
400
401                     if (end_ptr) {
402                         *end_ptr = 0; // null terminate symbol...
403                     }
404
405                     sym_name = sym_ptr;
406                     break;
407                 }
408
409                 sym_ptr = tmp_ptr;
410                 { 
411                     char * end_ptr2 = strchr(tmp_ptr, '\n');
412
413                     if (!end_ptr2) {
414                         tmp_ptr += strlen(tmp_ptr) + 1;
415                     } else {
416                         tmp_ptr = end_ptr2 + 1;
417                     }
418                 }
419             }
420         }
421
422         if (!sym_name) {
423             sym_name = "?";
424         }
425
426         if (cpu_mode == REAL) {
427             V3_Print(core->vm_info, core, "Next RBP=0x%.4x, RIP=0x%.4x (%s)\n", 
428                      *(uint16_t *)hva_rbp,*(uint16_t *)hva_rip, 
429                      sym_name);
430             
431             gla_rbp = *(uint16_t *)hva_rbp;
432         } else if (cpu_mode == LONG) {
433             V3_Print(core->vm_info, core, "Next RBP=%p, RIP=%p (%s)\n", 
434                      (void *)*(uint64_t *)hva_rbp, (void *)*(uint64_t *)hva_rip,
435                      sym_name);
436             gla_rbp = *(uint64_t *)hva_rbp;
437         } else {
438             V3_Print(core->vm_info, core, "Next RBP=0x%.8x, RIP=0x%.8x (%s)\n", 
439                      *(uint32_t *)hva_rbp, *(uint32_t *)hva_rip,
440                      sym_name);
441             gla_rbp = *(uint32_t *)hva_rbp;
442         }
443
444     }
445 }
446
447
448 #ifdef __V3_32BIT__
449
450 void v3_print_GPRs(struct guest_info * core) {
451     struct v3_gprs * regs = &(core->vm_regs);
452     int i = 0;
453     v3_reg_t * reg_ptr;
454     char * reg_names[] = { "RDI", "RSI", "RBP", "RSP", "RBX", "RDX", "RCX", "RAX", NULL};
455
456     reg_ptr = (v3_reg_t *)regs;
457
458     V3_Print(info->vm_info, info, "32 bit GPRs:\n");
459
460     for (i = 0; reg_names[i] != NULL; i++) {
461         V3_Print(info->vm_info, info, "\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));  
462     }
463 }
464
465 void v3_print_idt(struct guest_info * core, addr_t idtr_base) {
466     addr_t base_hva;
467
468     if (v3_get_vm_cpu_mode(core)!=LONG) { 
469         V3_Print(core->vm_info, core, "= IDT ========\n");
470         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
471         return;
472     }
473         
474
475     if (core->mem_mode == PHYSICAL_MEM) {
476         v3_gpa_to_hva(core, 
477                       get_addr_linear(core, idtr_base, &(core->segments.cs)),
478                       &base_hva);
479         PrintError(core->vm_info, core, "Kind of weird that we got here.... physical mem?\n");
480     } else if (core->mem_mode == VIRTUAL_MEM) {
481         v3_gva_to_hva(core, 
482                       get_addr_linear(core, idtr_base, &(core->segments.cs)),
483                       &base_hva);
484     }
485
486     // SANITY CHECK
487     if (idtr_base != get_addr_linear(core, idtr_base, &(core->segments.cs))) {
488         PrintError(core->vm_info, core, "idtr base address != linear translation, might be something funky with cs\n");
489     }
490
491     if (!base_hva) {
492         PrintError(core->vm_info, core "idtr address does not translate!  skipping.\n");
493         return ;
494     }
495
496     int i;
497     char *types[16] = {"  ILGL","aTSS16","   LDT","bTSS16","call16","  task","intr16","trap16",
498         "  ILGL","aTSS32","  ILGL","bTSS32","call32","  ILGL","intr32","trap32"};
499
500     struct int_trap_gate_lgcy * entry;
501     entry = (struct int_trap_gate_lgcy *)base_hva;
502     V3_Print(core->vm_info, core, "= IDT ========\n");
503     V3_Print(core->vm_info, core, "  # | hex | selector | si:ti:rpl |   offset | type | dpl | s | p\n");
504     for (i = 0; i < NUM_IDT_ENTRIES; i++) {
505         uint32_t tmp = entry->selector;
506         struct segment_selector * seg = (struct segment_selector *)(&tmp);
507         V3_Print(core->vm_info, core, "%3d | %3x |     %04x |   %03x:%x:%x | %04x%04x | %s |   %x | %x | %x | %x\n", i, i,
508                 entry->selector,
509                 seg->index, seg->ti, seg->rpl,
510                 entry->offset_hi, entry->offset_lo,
511                 types[entry->type], entry->dpl, entry->s, entry->p);
512         entry++;
513     }
514 }
515
516 void v3_print_gdt(struct guest_info * core, addr_t gdtr_base) {
517     addr_t base_hva;
518
519     if (v3_get_vm_cpu_mode(core)!=LONG) { 
520         V3_Print(core->vm_info, core, "= GDT ========\n");
521         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
522         return;
523     }
524
525     if (core->mem_mode == PHYSICAL_MEM) {
526         v3_gpa_to_hva(core, 
527                       get_addr_linear(core, gdtr_base, &(core->segments.cs)),
528                       &base_hva);
529         PrintError(core->vm_info, core, "Kind of weird that we got here.... physical mem?\n");
530     } else if (core->mem_mode == VIRTUAL_MEM) {
531         v3_gva_to_hva(core, 
532                       get_addr_linear(core, gdtr_base, &(core->segments.cs)),
533                       &base_hva);
534     }
535
536     // SANITY CHECK
537     if (gdtr_base != get_addr_linear(core, gdtr_base, &(core->segments.cs))) {
538         PrintError(core->vm_info, core, "gdtr base address != linear translation, might be something funky with cs\n");
539     }
540
541     if (!base_hva) {
542         PrintError(core->vm_info, core "gdtr address does not translate!  skipping.\n");
543         return ;
544     }
545
546     int i;
547     char* cd[2] = {"data","code"};
548     // TODO: handle possibility of gate/segment descriptor
549
550     struct code_desc_lgcy * entry;
551     entry = (struct code_desc_long *)base_hva;
552     V3_Print(core->vm_info, core, "= GDT ========\n");
553     V3_Print(core->vm_info, core, "  # | hex | limit |     base |  c/d | dpl | p\n");
554     for (i = 0; i < NUM_GDT_ENTRIES; i++) {
555         V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %02x%02x%04x | %s |   %x | %x\n", i, i,
556                 entry->limit_hi, entry->limit_lo,
557                 entry->base_hi, entry->base_mid, entry->base_lo,
558                 cd[entry->one1], entry->dpl, entry->p);
559         entry++;
560     }
561 }
562
563 void v3_print_gp_error(struct guest_info * core, addr_t exit_info1) {
564     struct selector_error_code * error = (struct selector_error_code *)(&exit_info1);
565
566     V3_Print(core->vm_info, core, "      selector index: %x, TI: %x, IDT: %x, EXT: %x (error=%llx)\n",
567             error->index, error->ti, error->idt, error->ext,
568             (unsigned long long)exit_info1);
569 }
570
571 #elif __V3_64BIT__
572
573 void v3_print_GPRs(struct guest_info * core) {
574     struct v3_gprs * regs = &(core->vm_regs);
575     int i = 0;
576     v3_reg_t * reg_ptr;
577     char * reg_names[] = { "RDI", "RSI", "RBP", "RSP", "RBX", "RDX", "RCX", "RAX", \
578                            "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15", NULL};
579
580     reg_ptr = (v3_reg_t *)regs;
581
582     V3_Print(core->vm_info, core, "64 bit GPRs:\n");
583
584     for (i = 0; reg_names[i] != NULL; i++) {
585         V3_Print(core->vm_info, core, "\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));  
586     }
587 }
588
589 void v3_print_idt(struct guest_info * core, addr_t idtr_base) {
590     addr_t base_hva;
591
592     if (v3_get_vm_cpu_mode(core)!=LONG) { 
593         V3_Print(core->vm_info, core, "= IDT ========\n");
594         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
595         return;
596     }
597
598     if (core->mem_mode == PHYSICAL_MEM) {
599         v3_gpa_to_hva(core, 
600                       get_addr_linear(core, idtr_base, &(core->segments.cs)),
601                       &base_hva);
602     } else if (core->mem_mode == VIRTUAL_MEM) {
603         v3_gva_to_hva(core, 
604                       get_addr_linear(core, idtr_base, &(core->segments.cs)),
605                       &base_hva);
606     }
607
608     // SANITY CHECK
609     if (idtr_base != get_addr_linear(core, idtr_base, &(core->segments.cs))) {
610         PrintError(core->vm_info, core, "idtr base address != linear translation, might be something funky with cs\n");
611     }
612
613     if (!base_hva) {
614         PrintError(core->vm_info, core, "idtr address does not translate!  skipping.\n");
615         return ;
616     }
617
618     int i;
619     char *types[16] = {"ILGL","ILGL"," LDT","ILGL","ILGL","ILGL","ILGL","ILGL","ILGL",
620         "aTSS","ILGL","bTSS","call","ILGL","intr","trap"};
621
622     struct int_trap_gate_long * entry;
623     entry = (struct int_trap_gate_long *)base_hva;
624     V3_Print(core->vm_info, core, "= IDT ========\n");
625     V3_Print(core->vm_info, core, "  # | hex | selector | si:ti:rpl |           offset | type | dpl | s | r | p\n");
626     for (i = 0; i < NUM_IDT_ENTRIES; i++) {
627         uint32_t tmp = entry->selector;
628         struct segment_selector * seg = (struct segment_selector *)(&tmp);
629         V3_Print(core->vm_info, core, "%3d | %3x |     %04x |   %03x:%x:%x | %08x%04x%04x | %s |   %x | %x | %x | %x\n", i, i,
630                 entry->selector,
631                 seg->index, seg->ti, seg->rpl,
632                 entry->offset_hi, entry->offset_mid, entry->offset_lo,
633                 types[entry->type], entry->dpl, entry->s,
634                 entry->s, entry->p);
635         entry++;
636     }
637 }
638
639 void v3_print_gdt(struct guest_info * core, addr_t gdtr_base) {
640     addr_t base_hva;
641
642     if (v3_get_vm_cpu_mode(core)!=LONG) { 
643         V3_Print(core->vm_info, core, "= GDT ========\n");
644         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
645         return;
646     }
647
648     if (core->mem_mode == PHYSICAL_MEM) {
649         v3_gpa_to_hva(core, 
650                       get_addr_linear(core, gdtr_base, &(core->segments.cs)),
651                       &base_hva);
652     } else if (core->mem_mode == VIRTUAL_MEM) {
653         v3_gva_to_hva(core, 
654                       get_addr_linear(core, gdtr_base, &(core->segments.cs)),
655                       &base_hva);
656     }
657
658     // SANITY CHECK
659     if (gdtr_base != get_addr_linear(core, gdtr_base, &(core->segments.cs))) {
660         PrintError(core->vm_info, core, "gdtr base address != linear translation, might be something funky with cs\n");
661     }
662
663     if (!base_hva) {
664         PrintError(core->vm_info, core, "gdtr address does not translate!  skipping.\n");
665         return ;
666     }
667
668     int i;
669     char* cd[2] = {"data","code"};
670     // TODO: handle possibility of gate/segment descriptor
671
672     struct code_desc_long * entry;
673     entry = (struct code_desc_long *)base_hva;
674     V3_Print(core->vm_info, core, "= GDT ========\n");
675     V3_Print(core->vm_info, core, "  # | hex | limit |     base |  c/d | dpl | p\n");
676     for (i = 0; i < NUM_GDT_ENTRIES; i++) {
677         V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %02x%02x%04x | %s |   %x | %x\n", i, i,
678                 entry->limit_hi, entry->limit_lo,
679                 entry->base_hi, entry->base_mid, entry->base_lo,
680                 cd[entry->one1], entry->dpl, entry->p);
681         entry++;
682     }
683 }
684
685 void v3_print_ldt(struct guest_info * core, addr_t ldtr_base) {
686     addr_t base_hva;
687
688     if (v3_get_vm_cpu_mode(core)!=LONG) { 
689         V3_Print(core->vm_info, core, "= LDT ========\n");
690         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
691         return;
692     }
693
694     V3_Print(core->vm_info, core, "= LDT ========\n");
695
696     if (ldtr_base == 0) {
697         V3_Print(core->vm_info, core, "        (no LDT is installed)\n");
698         return;
699     } 
700
701     if (core->mem_mode == PHYSICAL_MEM) {
702         v3_gpa_to_hva(core, 
703                       get_addr_linear(core, ldtr_base, &(core->segments.cs)),
704                       &base_hva);
705     } else if (core->mem_mode == VIRTUAL_MEM) {
706         v3_gva_to_hva(core, 
707                       get_addr_linear(core, ldtr_base, &(core->segments.cs)),
708                       &base_hva);
709     }
710
711     // SANITY CHECK
712     if (ldtr_base != get_addr_linear(core, ldtr_base, &(core->segments.cs))) {
713         PrintError(core->vm_info, core, "ldtr base address != linear translation, might be something funky with cs\n");
714     }
715
716     if (!base_hva) {
717         PrintError(core->vm_info, core, "ldtr address does not translate!  skipping.\n");
718         return ;
719     }
720
721     int i;
722     char* cd[2] = {"data","code"};
723     // TODO: handle possibility of gate/segment descriptor
724
725     struct code_desc_long * entry;
726     entry = (struct code_desc_long *)base_hva;
727     V3_Print(core->vm_info, core, "  # | hex | limit |     base |  c/d | dpl | p\n");
728     for (i = 0; i < NUM_LDT_ENTRIES; i++) {
729         V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %02x%02x%04x | %s |   %x | %x\n", i, i,
730                 entry->limit_hi, entry->limit_lo,
731                 entry->base_hi, entry->base_mid, entry->base_lo,
732                 cd[entry->one1], entry->dpl, entry->p);
733         entry++;
734     }
735 }
736
737 void v3_print_tss(struct guest_info * core, addr_t tr_base) {
738     addr_t base_hva;
739     struct tss_long *t;
740
741     if (v3_get_vm_cpu_mode(core)!=LONG) { 
742         V3_Print(core->vm_info, core, "= TSS ========\n");
743         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
744         return;
745     }
746
747     V3_Print(core->vm_info, core, "= TSS ========\n");
748
749     if (tr_base == 0) {
750         V3_Print(core->vm_info, core, "        (no TSS is installed)\n");
751         return;
752     } 
753
754     if (core->mem_mode == PHYSICAL_MEM) {
755         v3_gpa_to_hva(core, 
756                       get_addr_linear(core, tr_base, &(core->segments.cs)),
757                       &base_hva);
758     } else if (core->mem_mode == VIRTUAL_MEM) {
759         v3_gva_to_hva(core, 
760                       get_addr_linear(core, tr_base, &(core->segments.cs)),
761                       &base_hva);
762     }
763
764     // SANITY CHECK
765     if (tr_base != get_addr_linear(core, tr_base, &(core->segments.cs))) {
766         PrintError(core->vm_info, core, "tr base address != linear translation, might be something funky with cs\n");
767     }
768
769     if (!base_hva) {
770         PrintError(core->vm_info, core, "tr address does not translate!  skipping.\n");
771         return ;
772     }
773
774     t=(struct tss_long*)base_hva;
775
776     V3_Print(core->vm_info, core," res1 : 0x%llx\n", (uint64_t) t->res1);
777     V3_Print(core->vm_info, core," rsp0 : 0x%llx\n", t->rsp0);
778     V3_Print(core->vm_info, core," rsp1 : 0x%llx\n", t->rsp1);
779     V3_Print(core->vm_info, core," rsp2 : 0x%llx\n", t->rsp2);
780     V3_Print(core->vm_info, core," res2 : 0x%llx\n", t->res2);
781     V3_Print(core->vm_info, core," ist1 : 0x%llx\n", t->ist1);
782     V3_Print(core->vm_info, core," ist2 : 0x%llx\n", t->ist2);
783     V3_Print(core->vm_info, core," ist3 : 0x%llx\n", t->ist3);
784     V3_Print(core->vm_info, core," ist4 : 0x%llx\n", t->ist4);
785     V3_Print(core->vm_info, core," ist5 : 0x%llx\n", t->ist5);
786     V3_Print(core->vm_info, core," ist6 : 0x%llx\n", t->ist6);
787     V3_Print(core->vm_info, core," ist7 : 0x%llx\n", t->ist7);
788     V3_Print(core->vm_info, core," res3 : 0x%llx\n", t->res3);
789     V3_Print(core->vm_info, core," res4 : 0x%llx\n", (uint64_t) t->res4);
790     V3_Print(core->vm_info, core," iomap_base : 0x%llx\n", (uint64_t) t->iomap_base);
791     V3_Print(core->vm_info, core," (following io permission bitmap not currently printed)\n");
792
793 }
794
795 void v3_print_gp_error(struct guest_info * core, addr_t exit_info1) {
796     struct selector_error_code * error = (struct selector_error_code *)(&exit_info1);
797
798     if (v3_get_vm_cpu_mode(core)!=LONG) { 
799         V3_Print(core->vm_info, core, "= IDT ========\n");
800         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
801         return;
802     }
803
804     V3_Print(core->vm_info, core, "      selector index: %x, TI: %x, IDT: %x, EXT: %x (error=%llx)\n",
805             error->index, error->ti, error->idt, error->ext,
806             (unsigned long long)exit_info1);
807 }
808
809 #endif