Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


cc31a524d6febd92297e38c40e9d7faa6a0615f3
[palacios.git] / palacios / src / palacios / vmm_debug.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20
21 #include <palacios/vmm_debug.h>
22 #include <palacios/vmm.h>
23 #include <palacios/vmm_host_events.h>
24 #include <palacios/vm_guest.h>
25 #include <palacios/vmm_decoder.h>
26 #include <palacios/vm_guest_mem.h>
27 #include <palacios/vmm_config.h>
28
29 #define PRINT_TELEMETRY  1
30 #define PRINT_CORE_STATE 2
31 #define PRINT_ARCH_STATE 3
32 #define PRINT_STACK      4
33 #define PRINT_BACKTRACE  5
34
35
36 #define PRINT_ALL        100 // Absolutely everything
37 #define PRINT_STATE      101 // telemetry, core state, arch state
38
39
40
41
42 static int core_handler(struct guest_info * core, uint32_t cmd) {
43
44
45     switch (cmd) {
46 #ifdef V3_CONFIG_TELEMETRY
47         case PRINT_TELEMETRY: 
48             v3_print_core_telemetry(core);
49             break;
50 #endif
51         
52         case PRINT_CORE_STATE:
53             v3_raise_barrier(core->vm_info, NULL);
54
55             v3_print_guest_state(core);
56
57             v3_lower_barrier(core->vm_info);
58             break;
59         case PRINT_ARCH_STATE:
60             v3_raise_barrier(core->vm_info, NULL);
61
62             v3_print_arch_state(core);
63
64             v3_lower_barrier(core->vm_info);
65             break;
66         case PRINT_STACK:
67             v3_raise_barrier(core->vm_info, NULL);
68
69             v3_print_stack(core);
70
71             v3_lower_barrier(core->vm_info);
72             break;
73         case PRINT_BACKTRACE:
74             v3_raise_barrier(core->vm_info, NULL);
75
76             v3_print_backtrace(core);
77             
78             v3_lower_barrier(core->vm_info);
79             break;
80
81         case PRINT_STATE:
82             v3_raise_barrier(core->vm_info, NULL);
83
84 #ifdef V3_CONFIG_TELEMETRY
85             v3_print_core_telemetry(core);
86 #endif
87             v3_print_guest_state(core);
88             v3_print_arch_state(core);
89
90             v3_lower_barrier(core->vm_info);
91             break;
92
93         case PRINT_ALL:
94             v3_raise_barrier(core->vm_info, NULL);
95
96 #ifdef V3_CONFIG_TELEMETRY
97             v3_print_core_telemetry(core);
98 #endif
99             v3_print_guest_state(core);
100             v3_print_arch_state(core);
101         v3_print_stack(core);
102         v3_print_backtrace(core);
103
104             v3_lower_barrier(core->vm_info);
105             break;
106
107     }
108
109     return 0;
110 }
111
112
113 static int evt_handler(struct v3_vm_info * vm, struct v3_debug_event * evt, void * priv_data) {
114
115     V3_Print(vm, VCORE_NONE,"Debug Event Handler for core %d cmd=%x\n", evt->core_id, evt->cmd);
116
117     if (evt->core_id == -1) {
118         int i = 0;
119         for (i = 0; i < vm->num_cores; i++) {
120             core_handler(&(vm->cores[i]), evt->cmd);
121         }
122     } else {
123         return core_handler(&vm->cores[evt->core_id], evt->cmd);
124     }
125
126     
127     return 0;
128 }
129
130
131 int v3_init_vm_debugging(struct v3_vm_info * vm) {
132     v3_hook_host_event(vm, HOST_DEBUG_EVT, 
133                        V3_HOST_EVENT_HANDLER(evt_handler), 
134                        NULL);
135
136
137     return 0;
138 }
139
140
141
142
143
144 void v3_print_segments(struct v3_segments * segs) {
145     int i = 0;
146     struct v3_segment * seg_ptr;
147
148     seg_ptr=(struct v3_segment *)segs;
149   
150     char *seg_names[] = {"CS", "DS" , "ES", "FS", "GS", "SS" , "LDTR", "GDTR", "IDTR", "TR", NULL};
151     V3_Print(VM_NONE, VCORE_NONE, "Segments\n");
152
153     for (i = 0; seg_names[i] != NULL; i++) {
154
155         V3_Print(VM_NONE, VCORE_NONE, "\t%s: Sel=%x, base=%p, limit=%x (long_mode=%d, db=%d)\n", seg_names[i], seg_ptr[i].selector, 
156                    (void *)(addr_t)seg_ptr[i].base, seg_ptr[i].limit,
157                    seg_ptr[i].long_mode, seg_ptr[i].db);
158
159     }
160 }
161
162
163
164 void v3_print_ctrl_regs(struct guest_info * core) {
165     struct v3_ctrl_regs * regs = &(core->ctrl_regs);
166     int i = 0;
167     v3_reg_t * reg_ptr;
168     char * reg_names[] = {"CR0", "CR2", "CR3", "CR4", "CR8", "FLAGS", "EFER", NULL};
169    
170
171     reg_ptr = (v3_reg_t *)regs;
172
173     V3_Print(core->vm_info, core,"Ctrl Regs:\n");
174
175     for (i = 0; reg_names[i] != NULL; i++) {
176         V3_Print(core->vm_info, core, "\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));  
177     }
178
179
180 }
181
#if 0
/* Translate a guest linear address to a host virtual address, using the
 * translation appropriate for the core's current memory mode. */
static int safe_gva_to_hva(struct guest_info * core, addr_t linear_addr, addr_t * host_addr) {
    /* select the proper translation based on guest mode */
    if (core->mem_mode == PHYSICAL_MEM) {
        if (v3_gpa_to_hva(core, linear_addr, host_addr) == -1) return -1;
    } else if (core->mem_mode == VIRTUAL_MEM) {
        if (v3_gva_to_hva(core, linear_addr, host_addr) == -1) return -1;
    }
    return 0;
}

/* Disassemble the instruction stream around the current guest RIP.
 * BUGFIX: the V3_Print calls referenced a nonexistent 'info' variable
 * (the parameter is 'core'), so re-enabling this #if 0 block would not
 * have compiled; corrected to use 'core'. */
static int v3_print_disassembly(struct guest_info * core) {
    int passed_rip = 0;
    addr_t rip, rip_linear, rip_host;

    /* we don't know where the instructions preceding RIP start, so we just take
     * a guess and hope the instruction stream synced up with our disassembly
     * some time before RIP; if it has not we correct RIP at that point
     */

    /* start disassembly 64 bytes before current RIP, continue 32 bytes after */
    rip = (addr_t) core->rip - 64;
    while ((int) (rip - core->rip) < 32) {
        V3_Print(core->vm_info, core, "disassembly step\n");

        /* always print RIP, even if the instructions before were bad */
        if (!passed_rip && rip >= core->rip) {
            if (rip != core->rip) {
                V3_Print(core->vm_info, core, "***** bad disassembly up to this point *****\n");
                rip = core->rip;
            }
            passed_rip = 1;
        }

        /* look up host virtual address for this instruction */
        rip_linear = get_addr_linear(core, rip, &(core->segments.cs));
        if (safe_gva_to_hva(core, rip_linear, &rip_host) < 0) {
            rip++;
            continue;
        }

        /* print disassembled instruction (updates rip) */
        if (v3_disasm(core, (void *) rip_host, &rip, rip == core->rip) < 0) {
            rip++;
            continue;
        }

    }

    return 0;
}

#endif
235
/*
 * Print a comprehensive snapshot of one core's state: RIP (raw and
 * linearized through CS), exit count, pending interrupt/exception
 * state, segments, control registers, GPRs, descriptor tables, the
 * VM memory map, and the guest stack.
 *
 * NOTE(review): this function does not quiesce the VM itself; callers
 * that need a consistent snapshot raise a barrier first (see
 * core_handler above).
 */
void v3_print_guest_state(struct guest_info * core) {
    addr_t linear_addr = 0; 

    V3_Print(core->vm_info, core, "RIP: %p\n", (void *)(addr_t)(core->rip));
    // Linearize RIP through the CS segment before printing it again
    linear_addr = get_addr_linear(core, core->rip, &(core->segments.cs));
    V3_Print(core->vm_info, core, "RIP Linear: %p\n", (void *)linear_addr);

    V3_Print(core->vm_info, core, "NumExits: %u\n", (uint32_t)core->num_exits);

    V3_Print(core->vm_info, core, "IRQ STATE: started=%d, pending=%d\n", 
             core->intr_core_state.irq_started, 
             core->intr_core_state.irq_pending);
    V3_Print(core->vm_info, core, "EXCP STATE: err_code_valid=%d, err_code=%x\n", 
             core->excp_state.excp_error_code_valid, 
             core->excp_state.excp_error_code);


    v3_print_segments(&(core->segments));
    v3_print_ctrl_regs(core);

    // Under shadow paging the guest's view of the paging registers is
    // tracked separately from the hardware values; print that view too.
    if (core->shdw_pg_mode == SHADOW_PAGING) {
        V3_Print(core->vm_info, core, "Shadow Paging Guest Registers:\n");
        V3_Print(core->vm_info, core, "\tGuest CR0=%p\n", (void *)(addr_t)(core->shdw_pg_state.guest_cr0));
        V3_Print(core->vm_info, core, "\tGuest CR3=%p\n", (void *)(addr_t)(core->shdw_pg_state.guest_cr3));
        V3_Print(core->vm_info, core, "\tGuest EFER=%p\n", (void *)(addr_t)(core->shdw_pg_state.guest_efer.value));
        // CR4
    }
    v3_print_GPRs(core);

    // Descriptor tables are read from the guest's current table bases
    v3_print_idt(core,core->segments.idtr.base);
    v3_print_gdt(core,core->segments.gdtr.base);
    v3_print_ldt(core,core->segments.ldtr.base);
    v3_print_tss(core,core->segments.tr.base);

    v3_print_mem_map(core->vm_info);

    v3_print_stack(core);

    //  v3_print_disassembly(core);
}
276
277
/* Architecture-specific state dump.
 * Currently a stub: nothing SVM/VMX-specific is printed yet. */
void v3_print_arch_state(struct guest_info * core) {
    /* intentionally empty */
}
282
283
284 void v3_print_guest_state_all(struct v3_vm_info * vm) {
285     int i = 0;
286
287     V3_Print(vm, VCORE_NONE,"VM Core states for %s\n", vm->name);
288
289     for (i = 0; i < 80; i++) {
290       V3_Print(vm, VCORE_NONE, "-");
291     }
292
293     for (i = 0; i < vm->num_cores; i++) {
294         v3_print_guest_state(&vm->cores[i]);  
295     }
296     
297     for (i = 0; i < 80; i++) {
298         V3_Print(vm, VCORE_NONE, "-");
299     }
300
301     V3_Print(vm, VCORE_NONE, "\n");    
302 }
303
304
305
306 void v3_print_stack(struct guest_info * core) {
307     addr_t linear_addr = 0;
308     addr_t host_addr = 0;
309     int i = 0;
310     v3_cpu_mode_t cpu_mode = v3_get_vm_cpu_mode(core);
311
312     linear_addr = get_addr_linear(core, core->vm_regs.rsp, &(core->segments.ss));
313  
314     V3_Print(core->vm_info, core, "Stack at %p:\n", (void *)linear_addr);
315    
316     if (core->mem_mode == PHYSICAL_MEM) {
317         if (v3_gpa_to_hva(core, linear_addr, &host_addr) == -1) {
318             PrintError(core->vm_info, core, "Could not translate Stack address\n");
319             return;
320         }
321     } else if (core->mem_mode == VIRTUAL_MEM) {
322         if (v3_gva_to_hva(core, linear_addr, &host_addr) == -1) {
323             PrintError(core->vm_info, core, "Could not translate Virtual Stack address\n");
324             return;
325         }
326     }
327     
328     V3_Print(core->vm_info, core, "Host Address of rsp = 0x%p\n", (void *)host_addr);
329  
330     // We start i at one because the current stack pointer points to an unused stack element
331     for (i = 0; i <= 24; i++) {
332
333         if (cpu_mode == REAL) {
334             V3_Print(core->vm_info, core, "\t0x%.4x\n", *((uint16_t *)host_addr + (i * 2)));
335         } else if (cpu_mode == LONG) {
336             V3_Print(core->vm_info, core, "\t%p\n", (void *)*(addr_t *)(host_addr + (i * 8)));
337         } else {
338             // 32 bit stacks...
339             V3_Print(core->vm_info, core, "\t0x%.8x\n", *(uint32_t *)(host_addr + (i * 4)));
340         }
341     }
342
343 }    
344
345
/*
 * Walk the guest's frame-pointer chain and print a backtrace for this core.
 *
 * Starting from RBP (linearized through SS), each of up to 30 iterations
 * translates the current frame pointer to a host virtual address, reads
 * the saved RBP and the return RIP stored one word above it, and — if a
 * "System.map" file was supplied in the VM configuration — resolves the
 * RIP to the nearest preceding symbol name. Stops early if a frame
 * pointer cannot be translated.
 *
 * NOTE(review): assumes the guest code maintains frame pointers; without
 * them the chain read here is garbage. Also two latent issues to confirm:
 * (1) if the first System.map entry's offset already exceeds rip_val,
 * sym_ptr is still NULL when passed to strchr; (2) file_offset is never
 * advanced in the scan loop, so termination relies entirely on the
 * sym_offset > rip_val break.
 */
void v3_print_backtrace(struct guest_info * core) {
    addr_t gla_rbp = 0;
    int i = 0;
    v3_cpu_mode_t cpu_mode = v3_get_vm_cpu_mode(core);
    // System.map is optional; sym resolution is skipped when absent
    struct v3_cfg_file * system_map = v3_cfg_get_file(core->vm_info, "System.map");

    V3_Print(core->vm_info, core, "Performing Backtrace for Core %d\n", core->vcpu_id);
    V3_Print(core->vm_info, core, "\tRSP=%p, RBP=%p\n", (void *)core->vm_regs.rsp, (void *)core->vm_regs.rbp);

    gla_rbp = get_addr_linear(core, core->vm_regs.rbp, &(core->segments.ss));


    for (i = 0; i < 30; i++) {
        addr_t hva_rbp = 0; 
        addr_t hva_rip = 0; 
        char * sym_name = NULL;
        addr_t rip_val = 0;

        // Translate the current frame pointer to a host virtual address
        if (core->mem_mode == PHYSICAL_MEM) {
            if (v3_gpa_to_hva(core, gla_rbp, &hva_rbp) == -1) {
                PrintError(core->vm_info, core, "Could not translate Stack address\n");
                return;
            }
        } else if (core->mem_mode == VIRTUAL_MEM) {
            if (v3_gva_to_hva(core, gla_rbp, &hva_rbp) == -1) {
                PrintError(core->vm_info, core, "Could not translate Virtual Stack address\n");
                return;
            }
        }


        // The return address sits one machine word above the saved RBP
        hva_rip = hva_rbp + v3_get_addr_width(core);
        
        // Read the return address at the width implied by the CPU mode
        if (cpu_mode == REAL) {
            rip_val = (addr_t)*(uint16_t *)hva_rip;
        } else if (cpu_mode == LONG) {
            rip_val = (addr_t)*(uint64_t *)hva_rip;
        } else {
            rip_val = (addr_t)*(uint32_t *)hva_rip;
        }

        // Linear scan of System.map for the last symbol at or below rip_val
        if (system_map) {
            char * tmp_ptr = system_map->data;
            char * sym_ptr = NULL;
            uint64_t file_offset = 0; 
            uint64_t sym_offset = 0;

            while (file_offset < system_map->size) {
                // strtox: presumably a project hex-parser advancing tmp_ptr — confirm
                sym_offset = strtox(tmp_ptr, &tmp_ptr);

                tmp_ptr += 3; // pass over symbol type

                if (sym_offset > rip_val) {
                    // Previous entry (sym_ptr) is the symbol containing rip_val
                    char * end_ptr = strchr(sym_ptr, '\n');

                    if (end_ptr) {
                        *end_ptr = 0; // null terminate symbol...
                    }

                    sym_name = sym_ptr;
                    break;
                }

                // Remember the start of this entry's name, then advance
                // to the next line of the map
                sym_ptr = tmp_ptr;
                { 
                    char * end_ptr2 = strchr(tmp_ptr, '\n');

                    if (!end_ptr2) {
                        tmp_ptr += strlen(tmp_ptr) + 1;
                    } else {
                        tmp_ptr = end_ptr2 + 1;
                    }
                }
            }
        }

        if (!sym_name) {
            sym_name = "?";
        }

        // Print this frame and follow the saved RBP to the next one
        if (cpu_mode == REAL) {
            V3_Print(core->vm_info, core, "Next RBP=0x%.4x, RIP=0x%.4x (%s)\n", 
                     *(uint16_t *)hva_rbp,*(uint16_t *)hva_rip, 
                     sym_name);
            
            gla_rbp = *(uint16_t *)hva_rbp;
        } else if (cpu_mode == LONG) {
            V3_Print(core->vm_info, core, "Next RBP=%p, RIP=%p (%s)\n", 
                     (void *)*(uint64_t *)hva_rbp, (void *)*(uint64_t *)hva_rip,
                     sym_name);
            gla_rbp = *(uint64_t *)hva_rbp;
        } else {
            V3_Print(core->vm_info, core, "Next RBP=0x%.8x, RIP=0x%.8x (%s)\n", 
                     *(uint32_t *)hva_rbp, *(uint32_t *)hva_rip,
                     sym_name);
            gla_rbp = *(uint32_t *)hva_rbp;
        }

    }
}
446
447
448 #ifdef __V3_32BIT__
449
450 void v3_print_GPRs(struct guest_info * core) {
451     struct v3_gprs * regs = &(core->vm_regs);
452     int i = 0;
453     v3_reg_t * reg_ptr;
454     char * reg_names[] = { "RDI", "RSI", "RBP", "RSP", "RBX", "RDX", "RCX", "RAX", NULL};
455
456     reg_ptr = (v3_reg_t *)regs;
457
458     V3_Print(info->vm_info, info, "32 bit GPRs:\n");
459
460     for (i = 0; reg_names[i] != NULL; i++) {
461         V3_Print(info->vm_info, info, "\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));  
462     }
463 }
464
465 void v3_print_idt(struct guest_info * core, addr_t idtr_base) {
466     addr_t base_hva;
467
468     if (v3_get_vm_cpu_mode(core)!=LONG) { 
469         V3_Print(core->vm_info, core, "= IDT ========\n");
470         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
471         return;
472     }
473         
474
475     if (core->mem_mode == PHYSICAL_MEM) {
476         v3_gpa_to_hva(core, 
477                       get_addr_linear(core, idtr_base, &(core->segments.cs)),
478                       &base_hva);
479         PrintError(core->vm_info, core, "Kind of weird that we got here.... physical mem?\n");
480     } else if (core->mem_mode == VIRTUAL_MEM) {
481         v3_gva_to_hva(core, 
482                       get_addr_linear(core, idtr_base, &(core->segments.cs)),
483                       &base_hva);
484     }
485
486     // SANITY CHECK
487     if (idtr_base != get_addr_linear(core, idtr_base, &(core->segments.cs))) {
488         PrintError(core->vm_info, core, "idtr base address != linear translation, might be something funky with cs\n");
489     }
490
491     int i;
492     char *types[16] = {"  ILGL","aTSS16","   LDT","bTSS16","call16","  task","intr16","trap16",
493         "  ILGL","aTSS32","  ILGL","bTSS32","call32","  ILGL","intr32","trap32"};
494
495     struct int_trap_gate_lgcy * entry;
496     entry = (struct int_trap_gate_lgcy *)base_hva;
497     V3_Print(core->vm_info, core, "= IDT ========\n");
498     V3_Print(core->vm_info, core, "  # | hex | selector | si:ti:rpl |   offset | type | dpl | s | p\n");
499     for (i = 0; i < NUM_IDT_ENTRIES; i++) {
500         uint32_t tmp = entry->selector;
501         struct segment_selector * seg = (struct segment_selector *)(&tmp);
502         V3_Print(core->vm_info, core, "%3d | %3x |     %04x |   %03x:%x:%x | %04x%04x | %s |   %x | %x | %x | %x\n", i, i,
503                 entry->selector,
504                 seg->index, seg->ti, seg->rpl,
505                 entry->offset_hi, entry->offset_lo,
506                 types[entry->type], entry->dpl, entry->s, entry->p);
507         entry++;
508     }
509 }
510
511 void v3_print_gdt(struct guest_info * core, addr_t gdtr_base) {
512     addr_t base_hva;
513
514     if (v3_get_vm_cpu_mode(core)!=LONG) { 
515         V3_Print(core->vm_info, core, "= GDT ========\n");
516         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
517         return;
518     }
519
520     if (core->mem_mode == PHYSICAL_MEM) {
521         v3_gpa_to_hva(core, 
522                       get_addr_linear(core, gdtr_base, &(core->segments.cs)),
523                       &base_hva);
524         PrintError(core->vm_info, core, "Kind of weird that we got here.... physical mem?\n");
525     } else if (core->mem_mode == VIRTUAL_MEM) {
526         v3_gva_to_hva(core, 
527                       get_addr_linear(core, gdtr_base, &(core->segments.cs)),
528                       &base_hva);
529     }
530
531     // SANITY CHECK
532     if (gdtr_base != get_addr_linear(core, gdtr_base, &(core->segments.cs))) {
533         PrintError(core->vm_info, core, "gdtr base address != linear translation, might be something funky with cs\n");
534     }
535
536     int i;
537     char* cd[2] = {"data","code"};
538     // TODO: handle possibility of gate/segment descriptor
539
540     struct code_desc_lgcy * entry;
541     entry = (struct code_desc_long *)base_hva;
542     V3_Print(core->vm_info, core, "= GDT ========\n");
543     V3_Print(core->vm_info, core, "  # | hex | limit |     base |  c/d | dpl | p\n");
544     for (i = 0; i < NUM_GDT_ENTRIES; i++) {
545         V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %02x%02x%04x | %s |   %x | %x\n", i, i,
546                 entry->limit_hi, entry->limit_lo,
547                 entry->base_hi, entry->base_mid, entry->base_lo,
548                 cd[entry->one1], entry->dpl, entry->p);
549         entry++;
550     }
551 }
552
553 void v3_print_gp_error(struct guest_info * core, addr_t exit_info1) {
554     struct selector_error_code * error = (struct selector_error_code *)(&exit_info1);
555
556     V3_Print(core->vm_info, core, "      selector index: %x, TI: %x, IDT: %x, EXT: %x (error=%llx)\n",
557             error->index, error->ti, error->idt, error->ext,
558             (unsigned long long)exit_info1);
559 }
560
561 #elif __V3_64BIT__
562
563 void v3_print_GPRs(struct guest_info * core) {
564     struct v3_gprs * regs = &(core->vm_regs);
565     int i = 0;
566     v3_reg_t * reg_ptr;
567     char * reg_names[] = { "RDI", "RSI", "RBP", "RSP", "RBX", "RDX", "RCX", "RAX", \
568                            "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15", NULL};
569
570     reg_ptr = (v3_reg_t *)regs;
571
572     V3_Print(core->vm_info, core, "64 bit GPRs:\n");
573
574     for (i = 0; reg_names[i] != NULL; i++) {
575         V3_Print(core->vm_info, core, "\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));  
576     }
577 }
578
/*
 * Print the guest IDT as long-mode interrupt/trap gates (64-bit build).
 * Only supported when the guest is in long mode; otherwise prints a
 * notice and returns.
 *
 * NOTE(review): base_hva is left uninitialized if mem_mode matches
 * neither PHYSICAL_MEM nor VIRTUAL_MEM — confirm those are the only
 * possible modes, or initialize it.
 */
void v3_print_idt(struct guest_info * core, addr_t idtr_base) {
    addr_t base_hva;

    if (v3_get_vm_cpu_mode(core)!=LONG) { 
        V3_Print(core->vm_info, core, "= IDT ========\n");
        V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
        return;
    }

    // Translate the IDT base (linearized through CS) to a host virtual address
    if (core->mem_mode == PHYSICAL_MEM) {
        v3_gpa_to_hva(core, 
                      get_addr_linear(core, idtr_base, &(core->segments.cs)),
                      &base_hva);
    } else if (core->mem_mode == VIRTUAL_MEM) {
        v3_gva_to_hva(core, 
                      get_addr_linear(core, idtr_base, &(core->segments.cs)),
                      &base_hva);
    }

    // SANITY CHECK
    if (idtr_base != get_addr_linear(core, idtr_base, &(core->segments.cs))) {
        PrintError(core->vm_info, core, "idtr base address != linear translation, might be something funky with cs\n");
    }

    int i;
    // Long-mode gate type nibble -> human-readable name
    char *types[16] = {"ILGL","ILGL"," LDT","ILGL","ILGL","ILGL","ILGL","ILGL","ILGL",
        "aTSS","ILGL","bTSS","call","ILGL","intr","trap"};

    struct int_trap_gate_long * entry;
    entry = (struct int_trap_gate_long *)base_hva;
    V3_Print(core->vm_info, core, "= IDT ========\n");
    V3_Print(core->vm_info, core, "  # | hex | selector | si:ti:rpl |           offset | type | dpl | s | r | p\n");
    for (i = 0; i < NUM_IDT_ENTRIES; i++) {
        // Copy the selector so it can be reinterpreted through
        // struct segment_selector without aliasing the packed entry
        uint32_t tmp = entry->selector;
        struct segment_selector * seg = (struct segment_selector *)(&tmp);
        // NOTE(review): entry->s is passed twice below; the header's
        // "s | r" columns suggest the second should be a different
        // (reserved?) field — confirm against struct int_trap_gate_long.
        V3_Print(core->vm_info, core, "%3d | %3x |     %04x |   %03x:%x:%x | %08x%04x%04x | %s |   %x | %x | %x | %x\n", i, i,
                entry->selector,
                seg->index, seg->ti, seg->rpl,
                entry->offset_hi, entry->offset_mid, entry->offset_lo,
                types[entry->type], entry->dpl, entry->s,
                entry->s, entry->p);
        entry++;
    }
}
623
624 void v3_print_gdt(struct guest_info * core, addr_t gdtr_base) {
625     addr_t base_hva;
626
627     if (v3_get_vm_cpu_mode(core)!=LONG) { 
628         V3_Print(core->vm_info, core, "= GDT ========\n");
629         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
630         return;
631     }
632
633     if (core->mem_mode == PHYSICAL_MEM) {
634         v3_gpa_to_hva(core, 
635                       get_addr_linear(core, gdtr_base, &(core->segments.cs)),
636                       &base_hva);
637     } else if (core->mem_mode == VIRTUAL_MEM) {
638         v3_gva_to_hva(core, 
639                       get_addr_linear(core, gdtr_base, &(core->segments.cs)),
640                       &base_hva);
641     }
642
643     // SANITY CHECK
644     if (gdtr_base != get_addr_linear(core, gdtr_base, &(core->segments.cs))) {
645         PrintError(core->vm_info, core, "gdtr base address != linear translation, might be something funky with cs\n");
646     }
647
648     int i;
649     char* cd[2] = {"data","code"};
650     // TODO: handle possibility of gate/segment descriptor
651
652     struct code_desc_long * entry;
653     entry = (struct code_desc_long *)base_hva;
654     V3_Print(core->vm_info, core, "= GDT ========\n");
655     V3_Print(core->vm_info, core, "  # | hex | limit |     base |  c/d | dpl | p\n");
656     for (i = 0; i < NUM_GDT_ENTRIES; i++) {
657         V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %02x%02x%04x | %s |   %x | %x\n", i, i,
658                 entry->limit_hi, entry->limit_lo,
659                 entry->base_hi, entry->base_mid, entry->base_lo,
660                 cd[entry->one1], entry->dpl, entry->p);
661         entry++;
662     }
663 }
664
665 void v3_print_ldt(struct guest_info * core, addr_t ldtr_base) {
666     addr_t base_hva;
667
668     if (v3_get_vm_cpu_mode(core)!=LONG) { 
669         V3_Print(core->vm_info, core, "= LDT ========\n");
670         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
671         return;
672     }
673
674     V3_Print(core->vm_info, core, "= LDT ========\n");
675
676     if (ldtr_base == 0) {
677         V3_Print(core->vm_info, core, "        (no LDT is installed)\n");
678         return;
679     } 
680
681     if (core->mem_mode == PHYSICAL_MEM) {
682         v3_gpa_to_hva(core, 
683                       get_addr_linear(core, ldtr_base, &(core->segments.cs)),
684                       &base_hva);
685     } else if (core->mem_mode == VIRTUAL_MEM) {
686         v3_gva_to_hva(core, 
687                       get_addr_linear(core, ldtr_base, &(core->segments.cs)),
688                       &base_hva);
689     }
690
691     // SANITY CHECK
692     if (ldtr_base != get_addr_linear(core, ldtr_base, &(core->segments.cs))) {
693         PrintError(core->vm_info, core, "ldtr base address != linear translation, might be something funky with cs\n");
694     }
695
696     int i;
697     char* cd[2] = {"data","code"};
698     // TODO: handle possibility of gate/segment descriptor
699
700     struct code_desc_long * entry;
701     entry = (struct code_desc_long *)base_hva;
702     V3_Print(core->vm_info, core, "  # | hex | limit |     base |  c/d | dpl | p\n");
703     for (i = 0; i < NUM_LDT_ENTRIES; i++) {
704         V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %02x%02x%04x | %s |   %x | %x\n", i, i,
705                 entry->limit_hi, entry->limit_lo,
706                 entry->base_hi, entry->base_mid, entry->base_lo,
707                 cd[entry->one1], entry->dpl, entry->p);
708         entry++;
709     }
710 }
711
/*
 * Print the guest's 64-bit TSS fields (64-bit build): the privilege-level
 * stack pointers (rsp0-2), the interrupt stack table entries (ist1-7),
 * reserved fields, and the I/O permission bitmap base. Long mode only;
 * prints a notice and returns when no TSS is installed (tr base == 0).
 *
 * NOTE(review): base_hva is left uninitialized if mem_mode matches
 * neither PHYSICAL_MEM nor VIRTUAL_MEM — confirm those are the only
 * possible modes, or initialize it.
 */
void v3_print_tss(struct guest_info * core, addr_t tr_base) {
    addr_t base_hva;
    struct tss_long *t;

    if (v3_get_vm_cpu_mode(core)!=LONG) { 
        V3_Print(core->vm_info, core, "= TSS ========\n");
        V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
        return;
    }

    V3_Print(core->vm_info, core, "= TSS ========\n");

    if (tr_base == 0) {
        V3_Print(core->vm_info, core, "        (no TSS is installed)\n");
        return;
    } 

    // Translate the TSS base (linearized through CS) to a host virtual address
    if (core->mem_mode == PHYSICAL_MEM) {
        v3_gpa_to_hva(core, 
                      get_addr_linear(core, tr_base, &(core->segments.cs)),
                      &base_hva);
    } else if (core->mem_mode == VIRTUAL_MEM) {
        v3_gva_to_hva(core, 
                      get_addr_linear(core, tr_base, &(core->segments.cs)),
                      &base_hva);
    }

    // SANITY CHECK
    if (tr_base != get_addr_linear(core, tr_base, &(core->segments.cs))) {
        PrintError(core->vm_info, core, "tr base address != linear translation, might be something funky with cs\n");
    }
    t=(struct tss_long*)base_hva;

    V3_Print(core->vm_info, core," res1 : 0x%llx\n", (uint64_t) t->res1);
    V3_Print(core->vm_info, core," rsp0 : 0x%llx\n", t->rsp0);
    V3_Print(core->vm_info, core," rsp1 : 0x%llx\n", t->rsp1);
    V3_Print(core->vm_info, core," rsp2 : 0x%llx\n", t->rsp2);
    V3_Print(core->vm_info, core," res2 : 0x%llx\n", t->res2);
    V3_Print(core->vm_info, core," ist1 : 0x%llx\n", t->ist1);
    V3_Print(core->vm_info, core," ist2 : 0x%llx\n", t->ist2);
    V3_Print(core->vm_info, core," ist3 : 0x%llx\n", t->ist3);
    V3_Print(core->vm_info, core," ist4 : 0x%llx\n", t->ist4);
    V3_Print(core->vm_info, core," ist5 : 0x%llx\n", t->ist5);
    V3_Print(core->vm_info, core," ist6 : 0x%llx\n", t->ist6);
    V3_Print(core->vm_info, core," ist7 : 0x%llx\n", t->ist7);
    V3_Print(core->vm_info, core," res3 : 0x%llx\n", t->res3);
    V3_Print(core->vm_info, core," res4 : 0x%llx\n", (uint64_t) t->res4);
    V3_Print(core->vm_info, core," iomap_base : 0x%llx\n", (uint64_t) t->iomap_base);
    V3_Print(core->vm_info, core," (following io permission bitmap not currently printed)\n");

}
763
764 void v3_print_gp_error(struct guest_info * core, addr_t exit_info1) {
765     struct selector_error_code * error = (struct selector_error_code *)(&exit_info1);
766
767     if (v3_get_vm_cpu_mode(core)!=LONG) { 
768         V3_Print(core->vm_info, core, "= IDT ========\n");
769         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
770         return;
771     }
772
773     V3_Print(core->vm_info, core, "      selector index: %x, TI: %x, IDT: %x, EXT: %x (error=%llx)\n",
774             error->index, error->ti, error->idt, error->ext,
775             (unsigned long long)exit_info1);
776 }
777
778 #endif