Palacios Public Git Repository

To checkout Palacios execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute:
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


85bd8d22300ee734d5f6a593a2d0f496b7ad5f83
[palacios.git] / palacios / src / palacios / vmm_debug.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20
21 #include <palacios/vmm_debug.h>
22 #include <palacios/vmm.h>
23 #include <palacios/vmm_host_events.h>
24 #include <palacios/vm_guest.h>
25 #include <palacios/vmm_decoder.h>
26 #include <palacios/vm_guest_mem.h>
27 #include <palacios/vmm_config.h>
28
29 #define PRINT_TELEMETRY  1
30 #define PRINT_CORE_STATE 2
31 #define PRINT_ARCH_STATE 3
32 #define PRINT_STACK      4
33 #define PRINT_BACKTRACE  5
34
35
36 #define PRINT_ALL        100 // Absolutely everything
37 #define PRINT_STATE      101 // telemetry, core state, arch state
38
39
40
41
42 static int core_handler(struct guest_info * core, uint32_t cmd) {
43
44
45     switch (cmd) {
46 #ifdef V3_CONFIG_TELEMETRY
47         case PRINT_TELEMETRY: 
48             v3_print_core_telemetry(core);
49             break;
50 #endif
51         
52         case PRINT_CORE_STATE:
53             v3_raise_barrier(core->vm_info, NULL);
54
55             v3_print_guest_state(core);
56
57             v3_lower_barrier(core->vm_info);
58             break;
59         case PRINT_ARCH_STATE:
60             v3_raise_barrier(core->vm_info, NULL);
61
62             v3_print_arch_state(core);
63
64             v3_lower_barrier(core->vm_info);
65             break;
66         case PRINT_STACK:
67             v3_raise_barrier(core->vm_info, NULL);
68
69             v3_print_stack(core);
70
71             v3_lower_barrier(core->vm_info);
72             break;
73         case PRINT_BACKTRACE:
74             v3_raise_barrier(core->vm_info, NULL);
75
76             v3_print_backtrace(core);
77             
78             v3_lower_barrier(core->vm_info);
79             break;
80
81         case PRINT_STATE:
82             v3_raise_barrier(core->vm_info, NULL);
83
84 #ifdef V3_CONFIG_TELEMETRY
85             v3_print_core_telemetry(core);
86 #endif
87             v3_print_guest_state(core);
88             v3_print_arch_state(core);
89
90             v3_lower_barrier(core->vm_info);
91             break;
92
93         case PRINT_ALL:
94             v3_raise_barrier(core->vm_info, NULL);
95
96 #ifdef V3_CONFIG_TELEMETRY
97             v3_print_core_telemetry(core);
98 #endif
99             v3_print_guest_state(core);
100             v3_print_arch_state(core);
101         v3_print_stack(core);
102         v3_print_backtrace(core);
103
104             v3_lower_barrier(core->vm_info);
105             break;
106
107     }
108
109     return 0;
110 }
111
112
113 static int evt_handler(struct v3_vm_info * vm, struct v3_debug_event * evt, void * priv_data) {
114
115     V3_Print(vm, VCORE_NONE,"Debug Event Handler for core %d cmd=%x\n", evt->core_id, evt->cmd);
116
117     if (evt->core_id == -1) {
118         int i = 0;
119         for (i = 0; i < vm->num_cores; i++) {
120             core_handler(&(vm->cores[i]), evt->cmd);
121         }
122     } else {
123         return core_handler(&vm->cores[evt->core_id], evt->cmd);
124     }
125
126     
127     return 0;
128 }
129
130
131 int v3_init_vm_debugging(struct v3_vm_info * vm) {
132     v3_hook_host_event(vm, HOST_DEBUG_EVT, 
133                        V3_HOST_EVENT_HANDLER(evt_handler), 
134                        NULL);
135
136
137     return 0;
138 }
139
140
141
142
143
144 void v3_print_segments(struct v3_segments * segs) {
145     int i = 0;
146     struct v3_segment * seg_ptr;
147
148     seg_ptr=(struct v3_segment *)segs;
149   
150     char *seg_names[] = {"CS", "DS" , "ES", "FS", "GS", "SS" , "LDTR", "GDTR", "IDTR", "TR", NULL};
151     V3_Print(VM_NONE, VCORE_NONE, "Segments\n");
152
153     for (i = 0; seg_names[i] != NULL; i++) {
154
155         V3_Print(VM_NONE, VCORE_NONE, "\t%s: selector=0x%x, base=%p, limit=0x%x type=0x%x system=0x%x dpl=0x%x present=0x%x avail=0x%x long_mode=0x%x db=0x%x granularity=0x%x unusable=0x%x\n", 
156                  seg_names[i], 
157                  seg_ptr[i].selector, 
158                  (void *)(addr_t)seg_ptr[i].base, 
159                  seg_ptr[i].limit,
160                  seg_ptr[i].type,
161                  seg_ptr[i].system,
162                  seg_ptr[i].dpl,
163                  seg_ptr[i].present,
164                  seg_ptr[i].avail,
165                  seg_ptr[i].long_mode,
166                  seg_ptr[i].db,
167                  seg_ptr[i].granularity,
168                  seg_ptr[i].unusable);
169     }
170 }
171
172
173
174 void v3_print_ctrl_regs(struct guest_info * core) {
175     struct v3_ctrl_regs * regs = &(core->ctrl_regs);
176     int i = 0;
177     v3_reg_t * reg_ptr;
178     char * reg_names[] = {"CR0", "CR2", "CR3", "CR4", "CR8", "FLAGS", "EFER", NULL};
179    
180
181     reg_ptr = (v3_reg_t *)regs;
182
183     V3_Print(core->vm_info, core,"Ctrl Regs:\n");
184
185     for (i = 0; reg_names[i] != NULL; i++) {
186         V3_Print(core->vm_info, core, "\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));  
187     }
188
189
190 }
191
#if 0
/*
 * Translate a guest linear address to a host virtual address using
 * whichever translation matches the guest's current memory mode.
 * Returns 0 on success, -1 on translation failure.
 *
 * NOTE(review): if mem_mode is neither PHYSICAL_MEM nor VIRTUAL_MEM,
 * *host_addr is left untouched and 0 is returned -- confirm that cannot
 * happen before re-enabling this code.
 */
static int safe_gva_to_hva(struct guest_info * core, addr_t linear_addr, addr_t * host_addr) {
    /* select the proper translation based on guest mode */
    if (core->mem_mode == PHYSICAL_MEM) {
        if (v3_gpa_to_hva(core, linear_addr, host_addr) == -1) return -1;
    } else if (core->mem_mode == VIRTUAL_MEM) {
        if (v3_gva_to_hva(core, linear_addr, host_addr) == -1) return -1;
    }
    return 0;
}

/*
 * Disassemble the guest instruction stream around the current RIP.
 *
 * BUGFIX: the V3_Print calls referenced the undeclared identifier
 * 'info'; they now use 'core', so this compiles if re-enabled.
 */
static int v3_print_disassembly(struct guest_info * core) {
    int passed_rip = 0;
    addr_t rip, rip_linear, rip_host;

    /* we don't know where the instructions preceding RIP start, so we just take
     * a guess and hope the instruction stream synced up with our disassembly
     * some time before RIP; if it has not we correct RIP at that point
     */

    /* start disassembly 64 bytes before current RIP, continue 32 bytes after */
    rip = (addr_t) core->rip - 64;
    while ((int) (rip - core->rip) < 32) {
        V3_Print(core->vm_info, core, "disassembly step\n");

        /* always print RIP, even if the instructions before were bad */
        if (!passed_rip && rip >= core->rip) {
            if (rip != core->rip) {
                V3_Print(core->vm_info, core, "***** bad disassembly up to this point *****\n");
                rip = core->rip;
            }
            passed_rip = 1;
        }

        /* look up host virtual address for this instruction */
        rip_linear = get_addr_linear(core, rip, &(core->segments.cs));
        if (safe_gva_to_hva(core, rip_linear, &rip_host) < 0) {
            rip++;
            continue;
        }

        /* print disassembled instruction (updates rip) */
        if (v3_disasm(core, (void *) rip_host, &rip, rip == core->rip) < 0) {
            rip++;
            continue;
        }

    }

    return 0;
}

#endif
245
246 void v3_print_guest_state(struct guest_info * core) {
247     addr_t linear_addr = 0; 
248
249     V3_Print(core->vm_info, core, "RIP: %p\n", (void *)(addr_t)(core->rip));
250     linear_addr = get_addr_linear(core, core->rip, &(core->segments.cs));
251     V3_Print(core->vm_info, core, "RIP Linear: %p\n", (void *)linear_addr);
252
253     V3_Print(core->vm_info, core, "NumExits: %u\n", (uint32_t)core->num_exits);
254
255     V3_Print(core->vm_info, core, "IRQ STATE: started=%d, pending=%d\n", 
256              core->intr_core_state.irq_started, 
257              core->intr_core_state.irq_pending);
258     V3_Print(core->vm_info, core, "EXCP STATE: err_code_valid=%d, err_code=%x\n", 
259              core->excp_state.excp_error_code_valid, 
260              core->excp_state.excp_error_code);
261
262
263     v3_print_segments(&(core->segments));
264     v3_print_ctrl_regs(core);
265
266     if (core->shdw_pg_mode == SHADOW_PAGING) {
267         V3_Print(core->vm_info, core, "Shadow Paging Guest Registers:\n");
268         V3_Print(core->vm_info, core, "\tGuest CR0=%p\n", (void *)(addr_t)(core->shdw_pg_state.guest_cr0));
269         V3_Print(core->vm_info, core, "\tGuest CR3=%p\n", (void *)(addr_t)(core->shdw_pg_state.guest_cr3));
270         V3_Print(core->vm_info, core, "\tGuest EFER=%p\n", (void *)(addr_t)(core->shdw_pg_state.guest_efer.value));
271         // CR4
272     }
273     v3_print_GPRs(core);
274
275     v3_print_idt(core,core->segments.idtr.base);
276     v3_print_gdt(core,core->segments.gdtr.base);
277     v3_print_ldt(core,core->segments.ldtr.base);
278     v3_print_tss(core,core->segments.tr.base);
279
280     v3_print_mem_map(core->vm_info);
281
282     v3_print_stack(core);
283
284     //  v3_print_disassembly(core);
285 }
286
287
/* Architecture-specific state dump -- currently an unimplemented stub
 * (intentionally prints nothing). */
void v3_print_arch_state(struct guest_info * core) {


}
292
293
294 void v3_print_guest_state_all(struct v3_vm_info * vm) {
295     int i = 0;
296
297     V3_Print(vm, VCORE_NONE,"VM Core states for %s\n", vm->name);
298
299     for (i = 0; i < 80; i++) {
300       V3_Print(vm, VCORE_NONE, "-");
301     }
302
303     for (i = 0; i < vm->num_cores; i++) {
304         v3_print_guest_state(&vm->cores[i]);  
305     }
306     
307     for (i = 0; i < 80; i++) {
308         V3_Print(vm, VCORE_NONE, "-");
309     }
310
311     V3_Print(vm, VCORE_NONE, "\n");    
312 }
313
314
315
316 void v3_print_stack(struct guest_info * core) {
317     addr_t linear_addr = 0;
318     addr_t host_addr = 0;
319     int i = 0;
320     v3_cpu_mode_t cpu_mode = v3_get_vm_cpu_mode(core);
321
322     linear_addr = get_addr_linear(core, core->vm_regs.rsp, &(core->segments.ss));
323  
324     V3_Print(core->vm_info, core, "Stack at %p:\n", (void *)linear_addr);
325    
326     if (core->mem_mode == PHYSICAL_MEM) {
327         if (v3_gpa_to_hva(core, linear_addr, &host_addr) == -1) {
328             PrintError(core->vm_info, core, "Could not translate Stack address\n");
329             return;
330         }
331     } else if (core->mem_mode == VIRTUAL_MEM) {
332         if (v3_gva_to_hva(core, linear_addr, &host_addr) == -1) {
333             PrintError(core->vm_info, core, "Could not translate Virtual Stack address\n");
334             return;
335         }
336     }
337     
338     V3_Print(core->vm_info, core, "Host Address of rsp = 0x%p\n", (void *)host_addr);
339  
340     // We start i at one because the current stack pointer points to an unused stack element
341     for (i = 0; i <= 24; i++) {
342
343         if (cpu_mode == REAL) {
344             V3_Print(core->vm_info, core, "\t0x%.4x\n", *((uint16_t *)host_addr + (i * 2)));
345         } else if (cpu_mode == LONG) {
346             V3_Print(core->vm_info, core, "\t%p\n", (void *)*(addr_t *)(host_addr + (i * 8)));
347         } else {
348             // 32 bit stacks...
349             V3_Print(core->vm_info, core, "\t0x%.8x\n", *(uint32_t *)(host_addr + (i * 4)));
350         }
351     }
352
353 }    
354
355
/*
 * Walk the guest's frame-pointer chain and print a backtrace for a core.
 *
 * Starting at guest RBP (linearized through SS), each iteration
 * translates the frame pointer to a host virtual address, reads the
 * saved RBP/RIP pair at the width implied by the CPU mode, and -- when a
 * "System.map" file is attached to the VM configuration -- resolves the
 * RIP to the nearest preceding symbol name.  At most 30 frames are
 * printed.
 *
 * NOTE(review): the walk trusts guest memory; a corrupt or cyclic RBP
 * chain produces up to 30 garbage frames rather than terminating early.
 */
void v3_print_backtrace(struct guest_info * core) {
    addr_t gla_rbp = 0;
    int i = 0;
    v3_cpu_mode_t cpu_mode = v3_get_vm_cpu_mode(core);
    struct v3_cfg_file * system_map = v3_cfg_get_file(core->vm_info, "System.map");

    V3_Print(core->vm_info, core, "Performing Backtrace for Core %d\n", core->vcpu_id);
    V3_Print(core->vm_info, core, "\tRSP=%p, RBP=%p\n", (void *)core->vm_regs.rsp, (void *)core->vm_regs.rbp);

    gla_rbp = get_addr_linear(core, core->vm_regs.rbp, &(core->segments.ss));


    for (i = 0; i < 30; i++) {
        addr_t hva_rbp = 0; 
        addr_t hva_rip = 0; 
        char * sym_name = NULL;
        addr_t rip_val = 0;

        /* translate the current guest frame pointer to a host address */
        if (core->mem_mode == PHYSICAL_MEM) {
            if (v3_gpa_to_hva(core, gla_rbp, &hva_rbp) == -1) {
                PrintError(core->vm_info, core, "Could not translate Stack address\n");
                return;
            }
        } else if (core->mem_mode == VIRTUAL_MEM) {
            if (v3_gva_to_hva(core, gla_rbp, &hva_rbp) == -1) {
                PrintError(core->vm_info, core, "Could not translate Virtual Stack address\n");
                return;
            }
        }


        /* the saved return address sits one word above the saved RBP */
        hva_rip = hva_rbp + v3_get_addr_width(core);
        
        if (cpu_mode == REAL) {
            rip_val = (addr_t)*(uint16_t *)hva_rip;
        } else if (cpu_mode == LONG) {
            rip_val = (addr_t)*(uint64_t *)hva_rip;
        } else {
            rip_val = (addr_t)*(uint32_t *)hva_rip;
        }

        if (system_map) {
            /* Linear scan of the System.map text.  Each line appears to be
             * "<hex addr> <type> <name>"; we keep the last symbol whose
             * address does not exceed rip_val. */
            char * tmp_ptr = system_map->data;
            char * sym_ptr = NULL;
            uint64_t file_offset = 0; 
            uint64_t sym_offset = 0;

            /* NOTE(review): file_offset is never advanced, so this loop is
             * bounded only by the (sym_offset > rip_val) break below --
             * confirm the map always contains a symbol above any RIP that
             * can be printed. */
            while (file_offset < system_map->size) {
                sym_offset = strtox(tmp_ptr, &tmp_ptr);  /* presumably parses a hex address -- project helper, verify */

                tmp_ptr += 3; // pass over symbol type

                if (sym_offset > rip_val) {
                    if (sym_ptr) {
                        char * end_ptr = strchr(sym_ptr, '\n');
                        
                        if (end_ptr) {
                            *end_ptr = 0; // null terminate symbol...
                        }
                        sym_name = sym_ptr;
                    } else {
                        sym_name = NULL;
                    }
                    break;
                }

                sym_ptr = tmp_ptr;

                { 
                    /* advance to the start of the next line */
                    char * end_ptr2 = strchr(tmp_ptr, '\n');

                    if (!end_ptr2) {
                        tmp_ptr += strlen(tmp_ptr) + 1;
                    } else {
                        tmp_ptr = end_ptr2 + 1;
                    }
                }
            }
        }

        if (!sym_name) {
            sym_name = "?";
        }

        /* print this frame, then follow the saved RBP to the next one */
        if (cpu_mode == REAL) {
            V3_Print(core->vm_info, core, "Next RBP=0x%.4x, RIP=0x%.4x (%s)\n", 
                     *(uint16_t *)hva_rbp,*(uint16_t *)hva_rip, 
                     sym_name);
            
            gla_rbp = *(uint16_t *)hva_rbp;
        } else if (cpu_mode == LONG) {
            V3_Print(core->vm_info, core, "Next RBP=%p, RIP=%p (%s)\n", 
                     (void *)*(uint64_t *)hva_rbp, (void *)*(uint64_t *)hva_rip,
                     sym_name);
            gla_rbp = *(uint64_t *)hva_rbp;
        } else {
            V3_Print(core->vm_info, core, "Next RBP=0x%.8x, RIP=0x%.8x (%s)\n", 
                     *(uint32_t *)hva_rbp, *(uint32_t *)hva_rip,
                     sym_name);
            gla_rbp = *(uint32_t *)hva_rbp;
        }

    }
}
460
461
462 #ifdef __V3_32BIT__
463
464 void v3_print_GPRs(struct guest_info * core) {
465     struct v3_gprs * regs = &(core->vm_regs);
466     int i = 0;
467     v3_reg_t * reg_ptr;
468     char * reg_names[] = { "RDI", "RSI", "RBP", "RSP", "RBX", "RDX", "RCX", "RAX", NULL};
469
470     reg_ptr = (v3_reg_t *)regs;
471
472     V3_Print(info->vm_info, info, "32 bit GPRs:\n");
473
474     for (i = 0; reg_names[i] != NULL; i++) {
475         V3_Print(info->vm_info, info, "\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));  
476     }
477 }
478
479 void v3_print_idt(struct guest_info * core, addr_t idtr_base) {
480     addr_t base_hva;
481
482     if (v3_get_vm_cpu_mode(core)!=LONG) { 
483         V3_Print(core->vm_info, core, "= IDT ========\n");
484         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
485         return;
486     }
487         
488
489     if (core->mem_mode == PHYSICAL_MEM) {
490         v3_gpa_to_hva(core, 
491                       get_addr_linear(core, idtr_base, &(core->segments.cs)),
492                       &base_hva);
493         PrintError(core->vm_info, core, "Kind of weird that we got here.... physical mem?\n");
494     } else if (core->mem_mode == VIRTUAL_MEM) {
495         v3_gva_to_hva(core, 
496                       get_addr_linear(core, idtr_base, &(core->segments.cs)),
497                       &base_hva);
498     }
499
500     // SANITY CHECK
501     if (idtr_base != get_addr_linear(core, idtr_base, &(core->segments.cs))) {
502         PrintError(core->vm_info, core, "idtr base address != linear translation, might be something funky with cs\n");
503     }
504
505     if (!base_hva) {
506         PrintError(core->vm_info, core "idtr address does not translate!  skipping.\n");
507         return ;
508     }
509
510     int i;
511     char *types[16] = {"  ILGL","aTSS16","   LDT","bTSS16","call16","  task","intr16","trap16",
512         "  ILGL","aTSS32","  ILGL","bTSS32","call32","  ILGL","intr32","trap32"};
513
514     struct int_trap_gate_lgcy * entry;
515     entry = (struct int_trap_gate_lgcy *)base_hva;
516     V3_Print(core->vm_info, core, "= IDT ========\n");
517     V3_Print(core->vm_info, core, "  # | hex | selector | si:ti:rpl |   offset | type | dpl | s | p\n");
518     for (i = 0; i < NUM_IDT_ENTRIES; i++) {
519         uint32_t tmp = entry->selector;
520         struct segment_selector * seg = (struct segment_selector *)(&tmp);
521         V3_Print(core->vm_info, core, "%3d | %3x |     %04x |   %03x:%x:%x | %04x%04x | %s |   %x | %x | %x | %x\n", i, i,
522                 entry->selector,
523                 seg->index, seg->ti, seg->rpl,
524                 entry->offset_hi, entry->offset_lo,
525                 types[entry->type], entry->dpl, entry->s, entry->p);
526         entry++;
527     }
528 }
529
530 void v3_print_gdt(struct guest_info * core, addr_t gdtr_base) {
531     addr_t base_hva;
532
533     if (v3_get_vm_cpu_mode(core)!=LONG) { 
534         V3_Print(core->vm_info, core, "= GDT ========\n");
535         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
536         return;
537     }
538
539     if (core->mem_mode == PHYSICAL_MEM) {
540         v3_gpa_to_hva(core, 
541                       get_addr_linear(core, gdtr_base, &(core->segments.cs)),
542                       &base_hva);
543         PrintError(core->vm_info, core, "Kind of weird that we got here.... physical mem?\n");
544     } else if (core->mem_mode == VIRTUAL_MEM) {
545         v3_gva_to_hva(core, 
546                       get_addr_linear(core, gdtr_base, &(core->segments.cs)),
547                       &base_hva);
548     }
549
550     // SANITY CHECK
551     if (gdtr_base != get_addr_linear(core, gdtr_base, &(core->segments.cs))) {
552         PrintError(core->vm_info, core, "gdtr base address != linear translation, might be something funky with cs\n");
553     }
554
555     if (!base_hva) {
556         PrintError(core->vm_info, core "gdtr address does not translate!  skipping.\n");
557         return ;
558     }
559
560     int i;
561     char* cd[2] = {"data","code"};
562     // TODO: handle possibility of gate/segment descriptor
563
564     struct code_desc_lgcy * entry;
565     entry = (struct code_desc_long *)base_hva;
566     V3_Print(core->vm_info, core, "= GDT ========\n");
567     V3_Print(core->vm_info, core, "  # | hex | limit |     base |  c/d | dpl | p\n");
568     for (i = 0; i < NUM_GDT_ENTRIES; i++) {
569         V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %02x%02x%04x | %s |   %x | %x\n", i, i,
570                 entry->limit_hi, entry->limit_lo,
571                 entry->base_hi, entry->base_mid, entry->base_lo,
572                 cd[entry->one1], entry->dpl, entry->p);
573         entry++;
574     }
575 }
576
577 void v3_print_gp_error(struct guest_info * core, addr_t exit_info1) {
578     struct selector_error_code * error = (struct selector_error_code *)(&exit_info1);
579
580     V3_Print(core->vm_info, core, "      selector index: %x, TI: %x, IDT: %x, EXT: %x (error=%llx)\n",
581             error->index, error->ti, error->idt, error->ext,
582             (unsigned long long)exit_info1);
583 }
584
585 #elif __V3_64BIT__
586
587 void v3_print_GPRs(struct guest_info * core) {
588     struct v3_gprs * regs = &(core->vm_regs);
589     int i = 0;
590     v3_reg_t * reg_ptr;
591     char * reg_names[] = { "RDI", "RSI", "RBP", "RSP", "RBX", "RDX", "RCX", "RAX", \
592                            "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15", NULL};
593
594     reg_ptr = (v3_reg_t *)regs;
595
596     V3_Print(core->vm_info, core, "64 bit GPRs:\n");
597
598     for (i = 0; reg_names[i] != NULL; i++) {
599         V3_Print(core->vm_info, core, "\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));  
600     }
601 }
602
603 void v3_print_idt(struct guest_info * core, addr_t idtr_base) {
604     addr_t base_hva;
605
606     if (v3_get_vm_cpu_mode(core)!=LONG) { 
607         V3_Print(core->vm_info, core, "= IDT ========\n");
608         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
609         return;
610     }
611
612     if (core->mem_mode == PHYSICAL_MEM) {
613         v3_gpa_to_hva(core, 
614                       get_addr_linear(core, idtr_base, &(core->segments.cs)),
615                       &base_hva);
616     } else if (core->mem_mode == VIRTUAL_MEM) {
617         v3_gva_to_hva(core, 
618                       get_addr_linear(core, idtr_base, &(core->segments.cs)),
619                       &base_hva);
620     }
621
622     // SANITY CHECK
623     if (idtr_base != get_addr_linear(core, idtr_base, &(core->segments.cs))) {
624         PrintError(core->vm_info, core, "idtr base address != linear translation, might be something funky with cs\n");
625     }
626
627     if (!base_hva) {
628         PrintError(core->vm_info, core, "idtr address does not translate!  skipping.\n");
629         return ;
630     }
631
632     int i;
633     char *types[16] = {"ILGL","ILGL"," LDT","ILGL","ILGL","ILGL","ILGL","ILGL","ILGL",
634         "aTSS","ILGL","bTSS","call","ILGL","intr","trap"};
635
636     struct int_trap_gate_long * entry;
637     entry = (struct int_trap_gate_long *)base_hva;
638     V3_Print(core->vm_info, core, "= IDT ========\n");
639     V3_Print(core->vm_info, core, "  # | hex | selector | si:ti:rpl |           offset | type | dpl | s | r | p\n");
640     for (i = 0; i < NUM_IDT_ENTRIES; i++) {
641         uint32_t tmp = entry->selector;
642         struct segment_selector * seg = (struct segment_selector *)(&tmp);
643         V3_Print(core->vm_info, core, "%3d | %3x |     %04x |   %03x:%x:%x | %08x%04x%04x | %s |   %x | %x | %x | %x\n", i, i,
644                 entry->selector,
645                 seg->index, seg->ti, seg->rpl,
646                 entry->offset_hi, entry->offset_mid, entry->offset_lo,
647                 types[entry->type], entry->dpl, entry->s,
648                 entry->s, entry->p);
649         entry++;
650     }
651 }
652
653 void v3_print_gdt(struct guest_info * core, addr_t gdtr_base) {
654     addr_t base_hva;
655
656     if (v3_get_vm_cpu_mode(core)!=LONG) { 
657         V3_Print(core->vm_info, core, "= GDT ========\n");
658         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
659         return;
660     }
661
662     if (core->mem_mode == PHYSICAL_MEM) {
663         v3_gpa_to_hva(core, 
664                       get_addr_linear(core, gdtr_base, &(core->segments.cs)),
665                       &base_hva);
666     } else if (core->mem_mode == VIRTUAL_MEM) {
667         v3_gva_to_hva(core, 
668                       get_addr_linear(core, gdtr_base, &(core->segments.cs)),
669                       &base_hva);
670     }
671
672     // SANITY CHECK
673     if (gdtr_base != get_addr_linear(core, gdtr_base, &(core->segments.cs))) {
674         PrintError(core->vm_info, core, "gdtr base address != linear translation, might be something funky with cs\n");
675     }
676
677     if (!base_hva) {
678         PrintError(core->vm_info, core, "gdtr address does not translate!  skipping.\n");
679         return ;
680     }
681
682     int i;
683     char* cd[2] = {"data","code"};
684     // TODO: handle possibility of gate/segment descriptor
685
686     struct code_desc_long * entry;
687     entry = (struct code_desc_long *)base_hva;
688     V3_Print(core->vm_info, core, "= GDT ========\n");
689     V3_Print(core->vm_info, core, "  # | hex | limit |     base |  c/d | dpl | p\n");
690     for (i = 0; i < NUM_GDT_ENTRIES; i++) {
691         V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %02x%02x%04x | %s |   %x | %x\n", i, i,
692                 entry->limit_hi, entry->limit_lo,
693                 entry->base_hi, entry->base_mid, entry->base_lo,
694                 cd[entry->one1], entry->dpl, entry->p);
695         entry++;
696     }
697 }
698
699 void v3_print_ldt(struct guest_info * core, addr_t ldtr_base) {
700     addr_t base_hva;
701
702     if (v3_get_vm_cpu_mode(core)!=LONG) { 
703         V3_Print(core->vm_info, core, "= LDT ========\n");
704         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
705         return;
706     }
707
708     V3_Print(core->vm_info, core, "= LDT ========\n");
709
710     if (ldtr_base == 0) {
711         V3_Print(core->vm_info, core, "        (no LDT is installed)\n");
712         return;
713     } 
714
715     if (core->mem_mode == PHYSICAL_MEM) {
716         v3_gpa_to_hva(core, 
717                       get_addr_linear(core, ldtr_base, &(core->segments.cs)),
718                       &base_hva);
719     } else if (core->mem_mode == VIRTUAL_MEM) {
720         v3_gva_to_hva(core, 
721                       get_addr_linear(core, ldtr_base, &(core->segments.cs)),
722                       &base_hva);
723     }
724
725     // SANITY CHECK
726     if (ldtr_base != get_addr_linear(core, ldtr_base, &(core->segments.cs))) {
727         PrintError(core->vm_info, core, "ldtr base address != linear translation, might be something funky with cs\n");
728     }
729
730     if (!base_hva) {
731         PrintError(core->vm_info, core, "ldtr address does not translate!  skipping.\n");
732         return ;
733     }
734
735     int i;
736     char* cd[2] = {"data","code"};
737     // TODO: handle possibility of gate/segment descriptor
738
739     struct code_desc_long * entry;
740     entry = (struct code_desc_long *)base_hva;
741     V3_Print(core->vm_info, core, "  # | hex | limit |     base |  c/d | dpl | p\n");
742     for (i = 0; i < NUM_LDT_ENTRIES; i++) {
743         V3_Print(core->vm_info, core, "%3d | %3x | %x%04x | %02x%02x%04x | %s |   %x | %x\n", i, i,
744                 entry->limit_hi, entry->limit_lo,
745                 entry->base_hi, entry->base_mid, entry->base_lo,
746                 cd[entry->one1], entry->dpl, entry->p);
747         entry++;
748     }
749 }
750
751 void v3_print_tss(struct guest_info * core, addr_t tr_base) {
752     addr_t base_hva;
753     struct tss_long *t;
754
755     if (v3_get_vm_cpu_mode(core)!=LONG) { 
756         V3_Print(core->vm_info, core, "= TSS ========\n");
757         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
758         return;
759     }
760
761     V3_Print(core->vm_info, core, "= TSS ========\n");
762
763     if (tr_base == 0) {
764         V3_Print(core->vm_info, core, "        (no TSS is installed)\n");
765         return;
766     } 
767
768     if (core->mem_mode == PHYSICAL_MEM) {
769         v3_gpa_to_hva(core, 
770                       get_addr_linear(core, tr_base, &(core->segments.cs)),
771                       &base_hva);
772     } else if (core->mem_mode == VIRTUAL_MEM) {
773         v3_gva_to_hva(core, 
774                       get_addr_linear(core, tr_base, &(core->segments.cs)),
775                       &base_hva);
776     }
777
778     // SANITY CHECK
779     if (tr_base != get_addr_linear(core, tr_base, &(core->segments.cs))) {
780         PrintError(core->vm_info, core, "tr base address != linear translation, might be something funky with cs\n");
781     }
782
783     if (!base_hva) {
784         PrintError(core->vm_info, core, "tr address does not translate!  skipping.\n");
785         return ;
786     }
787
788     t=(struct tss_long*)base_hva;
789
790     V3_Print(core->vm_info, core," res1 : 0x%llx\n", (uint64_t) t->res1);
791     V3_Print(core->vm_info, core," rsp0 : 0x%llx\n", t->rsp0);
792     V3_Print(core->vm_info, core," rsp1 : 0x%llx\n", t->rsp1);
793     V3_Print(core->vm_info, core," rsp2 : 0x%llx\n", t->rsp2);
794     V3_Print(core->vm_info, core," res2 : 0x%llx\n", t->res2);
795     V3_Print(core->vm_info, core," ist1 : 0x%llx\n", t->ist1);
796     V3_Print(core->vm_info, core," ist2 : 0x%llx\n", t->ist2);
797     V3_Print(core->vm_info, core," ist3 : 0x%llx\n", t->ist3);
798     V3_Print(core->vm_info, core," ist4 : 0x%llx\n", t->ist4);
799     V3_Print(core->vm_info, core," ist5 : 0x%llx\n", t->ist5);
800     V3_Print(core->vm_info, core," ist6 : 0x%llx\n", t->ist6);
801     V3_Print(core->vm_info, core," ist7 : 0x%llx\n", t->ist7);
802     V3_Print(core->vm_info, core," res3 : 0x%llx\n", t->res3);
803     V3_Print(core->vm_info, core," res4 : 0x%llx\n", (uint64_t) t->res4);
804     V3_Print(core->vm_info, core," iomap_base : 0x%llx\n", (uint64_t) t->iomap_base);
805     V3_Print(core->vm_info, core," (following io permission bitmap not currently printed)\n");
806
807 }
808
809 void v3_print_gp_error(struct guest_info * core, addr_t exit_info1) {
810     struct selector_error_code * error = (struct selector_error_code *)(&exit_info1);
811
812     if (v3_get_vm_cpu_mode(core)!=LONG) { 
813         V3_Print(core->vm_info, core, "= IDT ========\n");
814         V3_Print(core->vm_info, core, "(currently only supported in long mode)\n");
815         return;
816     }
817
818     V3_Print(core->vm_info, core, "      selector index: %x, TI: %x, IDT: %x, EXT: %x (error=%llx)\n",
819             error->index, error->ti, error->idt, error->ext,
820             (unsigned long long)exit_info1);
821 }
822
823 #endif