Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


more 64 bit guest support
[palacios.git] / palacios / src / palacios / vmm_paging.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20 #include <palacios/vmm_paging.h>
21
22 #include <palacios/vmm.h>
23
24 #include <palacios/vm_guest_mem.h>
25
26
27
28
29   
30
31
32 void delete_page_tables_32(pde32_t * pde) {
33   int i;
34
35   if (pde == NULL) { 
36     return;
37   }
38
39   for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
40     if (pde[i].present) {
41       // We double cast, first to an addr_t to handle 64 bit issues, then to the pointer
42       PrintDebug("PTE base addr %x \n", pde[i].pt_base_addr);
43       pte32_t * pte = (pte32_t *)((addr_t)(uint_t)(pde[i].pt_base_addr << PAGE_POWER));
44
45       PrintDebug("Deleting PTE %d (%p)\n", i, pte);
46       V3_FreePage(pte);
47     }
48   }
49
50   PrintDebug("Deleting PDE (%p)\n", pde);
51   V3_FreePage(V3_PAddr(pde));
52 }
53
// Free a 32 bit PAE page table hierarchy. Stub: not yet implemented.
void delete_page_tables_32PAE(pdpe32pae_t * pdpe) { 
  PrintError("Unimplemented function\n");
}
57
// Free a 64 bit (4-level) page table hierarchy. Stub: not yet implemented.
void delete_page_tables_64(pml4e64_t * pml4) {
  PrintError("Unimplemented function\n");
}
61
62
63
64
65
66 int pt32_lookup(pde32_t * pd, addr_t vaddr, addr_t * paddr) {
67   addr_t pde_entry;
68   pde32_entry_type_t pde_entry_type;
69
70   if (pd == 0) {
71     return -1;
72   }
73
74   pde_entry_type = pde32_lookup(pd, vaddr, &pde_entry);
75
76   if (pde_entry_type == PDE32_ENTRY_PTE32) {
77     return pte32_lookup((pte32_t *)pde_entry, vaddr, paddr);
78   } else if (pde_entry_type == PDE32_ENTRY_LARGE_PAGE) {
79     *paddr = pde_entry;
80     return 0;
81   }
82
83   return -1;
84 }
85
86
87
88 /* We can't do a full lookup because we don't know what context the page tables are in...
89  * The entry addresses could be pointing to either guest physical memory or host physical memory
90  * Instead we just return the entry address, and a flag to show if it points to a pte or a large page...
91  */
92 pde32_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry) {
93   pde32_t * pde_entry = &(pd[PDE32_INDEX(addr)]);
94
95   if (!pde_entry->present) {
96     *entry = 0;
97     return PDE32_ENTRY_NOT_PRESENT;
98   } else  {
99
100     if (pde_entry->large_page) {
101       pde32_4MB_t * large_pde = (pde32_4MB_t *)pde_entry;
102
103       *entry = PDE32_4MB_T_ADDR(*large_pde);
104       *entry += PD32_4MB_PAGE_OFFSET(addr);
105       return PDE32_ENTRY_LARGE_PAGE;
106     } else {
107       *entry = PDE32_T_ADDR(*pde_entry);
108       return PDE32_ENTRY_PTE32;
109     }
110   }  
111   return PDE32_ENTRY_NOT_PRESENT;
112 }
113
114
115
116 /* Takes a virtual addr (addr) and returns the physical addr (entry) as defined in the page table
117  */
118 int pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry) {
119   pte32_t * pte_entry = &(pt[PTE32_INDEX(addr)]);
120
121   if (!pte_entry->present) {
122     *entry = 0;
123     PrintDebug("Lookup at non present page (index=%d)\n", PTE32_INDEX(addr));
124     return -1;
125   } else {
126     *entry = PTE32_T_ADDR(*pte_entry) + PT32_PAGE_OFFSET(addr);
127     return 0;
128   }
129
130   return -1;
131 }
132
133
134
135 pt_access_status_t can_access_pde32(pde32_t * pde, addr_t addr, pf_error_t access_type) {
136   pde32_t * entry = &pde[PDE32_INDEX(addr)];
137
138   if (entry->present == 0) {
139     return PT_ENTRY_NOT_PRESENT;
140   } else if ((entry->writable == 0) && (access_type.write == 1)) {
141     return PT_WRITE_ERROR;
142   } else if ((entry->user_page == 0) && (access_type.user == 1)) {
143     // Check CR0.WP?
144     return PT_USER_ERROR;
145   }
146
147   return PT_ACCESS_OK;
148 }
149
150
151 pt_access_status_t can_access_pte32(pte32_t * pte, addr_t addr, pf_error_t access_type) {
152   pte32_t * entry = &pte[PTE32_INDEX(addr)];
153
154   if (entry->present == 0) {
155     return PT_ENTRY_NOT_PRESENT;
156   } else if ((entry->writable == 0) && (access_type.write == 1)) {
157     return PT_WRITE_ERROR;
158   } else if ((entry->user_page == 0) && (access_type.user == 1)) {
159     // Check CR0.WP?
160     return PT_USER_ERROR;
161   }
162
163   return PT_ACCESS_OK;
164 }
165
166
167
168
/* We generate a page table to correspond to a given memory layout
 * pulling pages from the mem_list when necessary
 * If there are any gaps in the layout, we add them as unmapped pages
 *
 * Builds a 2-level (non-PAE) identity-style map from guest physical to host
 * physical addresses. Returns the page directory (host virtual address), or
 * NULL if a guest->host translation fails.
 */
pde32_t * create_passthrough_pts_32(struct guest_info * guest_info) {
  addr_t current_page_addr = 0;  /* guest physical address currently being mapped */
  int i, j;
  struct shadow_map * map = &(guest_info->mem_map);

  /* One 4KB page for the page directory.
   * NOTE(review): allocation result is unchecked -- confirm V3_AllocPages
   * cannot fail here. Every PDE is written below, so not zeroing the page
   * (unlike the PAE variant) is benign. */
  pde32_t * pde = V3_VAddr(V3_AllocPages(1));

  for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
    int pte_present = 0;  /* set if any entry of this page table maps memory */
    pte32_t * pte = V3_VAddr(V3_AllocPages(1));
    

    for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
      struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);

      /* Regions without directly-mappable host memory are left unmapped so
       * accesses fault back into the VMM. */
      if (!region || 
          (region->host_type == HOST_REGION_HOOK) || 
          (region->host_type == HOST_REGION_UNALLOCATED) || 
          (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) || 
          (region->host_type == HOST_REGION_REMOTE) ||
          (region->host_type == HOST_REGION_SWAPPED)) {
        pte[j].present = 0;
        pte[j].writable = 0;
        pte[j].user_page = 0;
        pte[j].write_through = 0;
        pte[j].cache_disable = 0;
        pte[j].accessed = 0;
        pte[j].dirty = 0;
        pte[j].pte_attr = 0;
        pte[j].global_page = 0;
        pte[j].vmm_info = 0;
        pte[j].page_base_addr = 0;
      } else {
        addr_t host_addr;
        pte[j].present = 1;
        pte[j].writable = 1;
        pte[j].user_page = 1;
        pte[j].write_through = 0;
        pte[j].cache_disable = 0;
        pte[j].accessed = 0;
        pte[j].dirty = 0;
        pte[j].pte_attr = 0;
        pte[j].global_page = 0;
        pte[j].vmm_info = 0;

        if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
          // BIG ERROR
          // PANIC
          /* NOTE(review): this error path leaks all pages allocated so far */
          return NULL;
        }
        
        pte[j].page_base_addr = host_addr >> 12;  /* store the 4KB frame number */
        
        pte_present = 1;
      }

      current_page_addr += PAGE_SIZE;
    }

    if (pte_present == 0) { 
      /* Nothing in this 4MB range was mapped: drop the page table and leave
       * the directory entry not-present. */
      V3_FreePage(V3_PAddr(pte));

      pde[i].present = 0;
      pde[i].writable = 0;
      pde[i].user_page = 0;
      pde[i].write_through = 0;
      pde[i].cache_disable = 0;
      pde[i].accessed = 0;
      pde[i].reserved = 0;
      pde[i].large_page = 0;
      pde[i].global_page = 0;
      pde[i].vmm_info = 0;
      pde[i].pt_base_addr = 0;
    } else {
      pde[i].present = 1;
      pde[i].writable = 1;
      pde[i].user_page = 1;
      pde[i].write_through = 0;
      pde[i].cache_disable = 0;
      pde[i].accessed = 0;
      pde[i].reserved = 0;
      pde[i].large_page = 0;
      pde[i].global_page = 0;
      pde[i].vmm_info = 0;
      /* Directory entries hold the host PHYSICAL frame of the page table */
      pde[i].pt_base_addr = PAGE_ALIGNED_ADDR((addr_t)V3_PAddr(pte));
    }

  }

  return pde;
}
264
265
/* We generate a page table to correspond to a given memory layout
 * pulling pages from the mem_list when necessary
 * If there are any gaps in the layout, we add them as unmapped pages
 *
 * 3-level PAE variant of create_passthrough_pts_32. Returns the page
 * directory pointer table (host virtual address), or NULL if a guest->host
 * translation fails.
 */
pdpe32pae_t * create_passthrough_pts_32PAE(struct guest_info * guest_info) {
  addr_t current_page_addr = 0;  /* guest physical address currently being mapped */
  int i, j, k;
  struct shadow_map * map = &(guest_info->mem_map);

  /* NOTE(review): allocation results from V3_AllocPages are unchecked
   * throughout -- confirm the allocator cannot fail here. */
  pdpe32pae_t * pdpe = V3_VAddr(V3_AllocPages(1));
  memset(pdpe, 0, PAGE_SIZE);

  for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
    int pde_present = 0;  /* set if any directory entry below ends up present */
    pde32pae_t * pde = V3_VAddr(V3_AllocPages(1));

    for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {


      int pte_present = 0;  /* set if any entry of this page table maps memory */
      pte32pae_t * pte = V3_VAddr(V3_AllocPages(1));
      
      
      for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
        struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
        
        /* Regions without directly-mappable host memory are left unmapped so
         * accesses fault back into the VMM. */
        if (!region || 
            (region->host_type == HOST_REGION_HOOK) || 
            (region->host_type == HOST_REGION_UNALLOCATED) || 
            (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) || 
            (region->host_type == HOST_REGION_REMOTE) ||
            (region->host_type == HOST_REGION_SWAPPED)) {
          pte[k].present = 0;
          pte[k].writable = 0;
          pte[k].user_page = 0;
          pte[k].write_through = 0;
          pte[k].cache_disable = 0;
          pte[k].accessed = 0;
          pte[k].dirty = 0;
          pte[k].pte_attr = 0;
          pte[k].global_page = 0;
          pte[k].vmm_info = 0;
          pte[k].page_base_addr = 0;
          pte[k].rsvd = 0;
        } else {
          addr_t host_addr;
          pte[k].present = 1;
          pte[k].writable = 1;
          pte[k].user_page = 1;
          pte[k].write_through = 0;
          pte[k].cache_disable = 0;
          pte[k].accessed = 0;
          pte[k].dirty = 0;
          pte[k].pte_attr = 0;
          pte[k].global_page = 0;
          pte[k].vmm_info = 0;
          
          if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
            // BIG ERROR
            // PANIC
            /* NOTE(review): this error path leaks all pages allocated so far */
            return NULL;
          }
          
          pte[k].page_base_addr = host_addr >> 12;  /* store the 4KB frame number */
          pte[k].rsvd = 0;

          pte_present = 1;
        }
        
        current_page_addr += PAGE_SIZE;
      }
      
      if (pte_present == 0) { 
        /* Nothing in this 2MB range was mapped: drop the page table and leave
         * the directory entry not-present. */
        V3_FreePage(V3_PAddr(pte));
        
        pde[j].present = 0;
        pde[j].writable = 0;
        pde[j].user_page = 0;
        pde[j].write_through = 0;
        pde[j].cache_disable = 0;
        pde[j].accessed = 0;
        pde[j].avail = 0;
        pde[j].large_page = 0;
        pde[j].global_page = 0;
        pde[j].vmm_info = 0;
        pde[j].pt_base_addr = 0;
        pde[j].rsvd = 0;
      } else {
        pde[j].present = 1;
        pde[j].writable = 1;
        pde[j].user_page = 1;
        pde[j].write_through = 0;
        pde[j].cache_disable = 0;
        pde[j].accessed = 0;
        pde[j].avail = 0;
        pde[j].large_page = 0;
        pde[j].global_page = 0;
        pde[j].vmm_info = 0;
        /* Directory entries hold the host PHYSICAL frame of the page table */
        pde[j].pt_base_addr = PAGE_ALIGNED_ADDR((addr_t)V3_PAddr(pte));
        pde[j].rsvd = 0;

        pde_present = 1;
      }
      
    }
    
    if (pde_present == 0) { 
      /* No mappings under this directory: free it and leave the PDPE empty */
      V3_FreePage(V3_PAddr(pde));
      
      pdpe[i].present = 0;
      pdpe[i].rsvd = 0;
      pdpe[i].write_through = 0;
      pdpe[i].cache_disable = 0;
      pdpe[i].accessed = 0;
      pdpe[i].avail = 0;
      pdpe[i].rsvd2 = 0;
      pdpe[i].vmm_info = 0;
      pdpe[i].pd_base_addr = 0;
      pdpe[i].rsvd3 = 0;
    } else {
      pdpe[i].present = 1;
      pdpe[i].rsvd = 0;
      pdpe[i].write_through = 0;
      pdpe[i].cache_disable = 0;
      pdpe[i].accessed = 0;
      pdpe[i].avail = 0;
      pdpe[i].rsvd2 = 0;
      pdpe[i].vmm_info = 0;
      pdpe[i].pd_base_addr = PAGE_ALIGNED_ADDR((addr_t)V3_PAddr(pde));
      pdpe[i].rsvd3 = 0;
    }
    
  }


  return pdpe;
}
403
404
405
406
407
408
/* 4-level long-mode variant of create_passthrough_pts_32.
 * Builds a passthrough map from guest physical to host physical addresses.
 * Returns the PML4 table (host virtual address), or NULL if a guest->host
 * translation fails.
 */
pml4e64_t * create_passthrough_pts_64(struct guest_info * info) {
  addr_t current_page_addr = 0;  /* guest physical address currently being mapped */
  int i, j, k, m;
  struct shadow_map * map = &(info->mem_map);
  
  /* NOTE(review): allocation results from V3_AllocPages are unchecked
   * throughout -- confirm the allocator cannot fail here. */
  pml4e64_t * pml = V3_VAddr(V3_AllocPages(1));

  /* NOTE(review): only 1 PML4 entry and 20 PDPE entries are populated --
   * presumably a work-in-progress limit on mappable guest memory; confirm
   * against the intended guest physical address space size. */
  for (i = 0; i < 1; i++) {
    int pdpe_present = 0;  /* set if any PDPE below ends up present */
    pdpe64_t * pdpe = V3_VAddr(V3_AllocPages(1));

    for (j = 0; j < 20; j++) {
      int pde_present = 0;  /* set if any PDE below ends up present */
      pde64_t * pde = V3_VAddr(V3_AllocPages(1));

      for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
        int pte_present = 0;  /* set if any entry of this page table maps memory */
        pte64_t * pte = V3_VAddr(V3_AllocPages(1));


        for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
          struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
          

          /* Regions without directly-mappable host memory are left unmapped
           * so accesses fault back into the VMM. */
          if (!region || 
              (region->host_type == HOST_REGION_HOOK) || 
              (region->host_type == HOST_REGION_UNALLOCATED) || 
              (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) || 
              (region->host_type == HOST_REGION_REMOTE) ||
              (region->host_type == HOST_REGION_SWAPPED)) {
            pte[m].present = 0;
            pte[m].writable = 0;
            pte[m].user_page = 0;
            pte[m].write_through = 0;
            pte[m].cache_disable = 0;
            pte[m].accessed = 0;
            pte[m].dirty = 0;
            pte[m].pte_attr = 0;
            pte[m].global_page = 0;
            pte[m].vmm_info = 0;
            pte[m].page_base_addr = 0;
          } else {
            addr_t host_addr;
            pte[m].present = 1;
            pte[m].writable = 1;
            pte[m].user_page = 1;
            pte[m].write_through = 0;
            pte[m].cache_disable = 0;
            pte[m].accessed = 0;
            pte[m].dirty = 0;
            pte[m].pte_attr = 0;
            pte[m].global_page = 0;
            pte[m].vmm_info = 0;
            
            if (guest_pa_to_host_pa(info, current_page_addr, &host_addr) == -1) {
              // BIG ERROR
              // PANIC
              /* NOTE(review): this error path leaks all pages allocated so far */
              return NULL;
            }

            pte[m].page_base_addr = PTE64_BASE_ADDR(host_addr);

            //PrintPTE64(current_page_addr, &(pte[m]));

            pte_present = 1;      
          }




          current_page_addr += PAGE_SIZE;
        }
        
        if (pte_present == 0) {
          /* Nothing in this 2MB range was mapped: drop the page table */
          V3_FreePage(V3_PAddr(pte));

          pde[k].present = 0;
          pde[k].writable = 0;
          pde[k].user_page = 0;
          pde[k].write_through = 0;
          pde[k].cache_disable = 0;
          pde[k].accessed = 0;
          pde[k].reserved = 0;
          pde[k].large_page = 0;
          //pde[k].global_page = 0;
          pde[k].vmm_info = 0;
          pde[k].pt_base_addr = 0;
        } else {
          pde[k].present = 1;
          pde[k].writable = 1;
          pde[k].user_page = 1;
          pde[k].write_through = 0;
          pde[k].cache_disable = 0;
          pde[k].accessed = 0;
          pde[k].reserved = 0;
          pde[k].large_page = 0;
          //pde[k].global_page = 0;
          pde[k].vmm_info = 0;
          /* Entries hold the host PHYSICAL frame of the next level */
          pde[k].pt_base_addr = PAGE_ALIGNED_ADDR((addr_t)V3_PAddr(pte));

          pde_present = 1;
        }
      }

      if (pde_present == 0) {
        /* No mappings under this directory: free it and leave the PDPE empty */
        V3_FreePage(V3_PAddr(pde));
        
        pdpe[j].present = 0;
        pdpe[j].writable = 0;
        pdpe[j].user_page = 0;
        pdpe[j].write_through = 0;
        pdpe[j].cache_disable = 0;
        pdpe[j].accessed = 0;
        pdpe[j].reserved = 0;
        pdpe[j].large_page = 0;
        //pdpe[j].global_page = 0;
        pdpe[j].vmm_info = 0;
        pdpe[j].pd_base_addr = 0;
      } else {
        pdpe[j].present = 1;
        pdpe[j].writable = 1;
        pdpe[j].user_page = 1;
        pdpe[j].write_through = 0;
        pdpe[j].cache_disable = 0;
        pdpe[j].accessed = 0;
        pdpe[j].reserved = 0;
        pdpe[j].large_page = 0;
        //pdpe[j].global_page = 0;
        pdpe[j].vmm_info = 0;
        pdpe[j].pd_base_addr = PAGE_ALIGNED_ADDR((addr_t)V3_PAddr(pde));


        pdpe_present = 1;
      }

    }

    PrintDebug("PML index=%d\n", i);

    if (pdpe_present == 0) {
      /* No mappings under this PDPT: free it and leave the PML4 entry empty */
      V3_FreePage(V3_PAddr(pdpe));
      
      pml[i].present = 0;
      pml[i].writable = 0;
      pml[i].user_page = 0;
      pml[i].write_through = 0;
      pml[i].cache_disable = 0;
      pml[i].accessed = 0;
      pml[i].reserved = 0;
      //pml[i].large_page = 0;
      //pml[i].global_page = 0;
      pml[i].vmm_info = 0;
      pml[i].pdp_base_addr = 0;
    } else {
      pml[i].present = 1;
      pml[i].writable = 1;
      pml[i].user_page = 1;
      pml[i].write_through = 0;
      pml[i].cache_disable = 0;
      pml[i].accessed = 0;
      pml[i].reserved = 0;
      //pml[i].large_page = 0;
      //pml[i].global_page = 0;
      pml[i].vmm_info = 0;
      pml[i].pdp_base_addr = PAGE_ALIGNED_ADDR((addr_t)V3_PAddr(pdpe));
    }
  }

  return pml;
}
580
581
582
583
584
585
586
587
588 void PrintPDE32(addr_t virtual_address, pde32_t * pde)
589 {
590   PrintDebug("PDE %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, reserved=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
591              (void *)virtual_address,
592              (void *)(addr_t) (pde->pt_base_addr << PAGE_POWER),
593              pde->present,
594              pde->writable,
595              pde->user_page, 
596              pde->write_through,
597              pde->cache_disable,
598              pde->accessed,
599              pde->reserved,
600              pde->large_page,
601              pde->global_page,
602              pde->vmm_info);
603 }
604
605   
606 void PrintPTE32(addr_t virtual_address, pte32_t * pte)
607 {
608   PrintDebug("PTE %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, dirty=%x, pteAttribute=%x, globalPage=%x, vmm_info=%x\n",
609              (void *)virtual_address,
610              (void*)(addr_t)(pte->page_base_addr << PAGE_POWER),
611              pte->present,
612              pte->writable,
613              pte->user_page,
614              pte->write_through,
615              pte->cache_disable,
616              pte->accessed,
617              pte->dirty,
618              pte->pte_attr,
619              pte->global_page,
620              pte->vmm_info);
621 }
622
623
624
625
626
627
628
629
630
631
632 void PrintPD32(pde32_t * pde)
633 {
634   int i;
635
636   PrintDebug("Page Directory at %p:\n", pde);
637   for (i = 0; (i < MAX_PDE32_ENTRIES); i++) { 
638     if ( pde[i].present) {
639       PrintPDE32((addr_t)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), &(pde[i]));
640     }
641   }
642 }
643
644 void PrintPT32(addr_t starting_address, pte32_t * pte) 
645 {
646   int i;
647
648   PrintDebug("Page Table at %p:\n", pte);
649   for (i = 0; (i < MAX_PTE32_ENTRIES) ; i++) { 
650     if (pte[i].present) {
651       PrintPTE32(starting_address + (PAGE_SIZE * i), &(pte[i]));
652     }
653   }
654 }
655
656
657
658
659
660
661
662 void PrintDebugPageTables(pde32_t * pde)
663 {
664   int i;
665   
666   PrintDebug("Dumping the pages starting with the pde page at %p\n", pde);
667
668   for (i = 0; (i < MAX_PDE32_ENTRIES); i++) { 
669     if (pde[i].present) {
670       PrintPDE32((addr_t)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), &(pde[i]));
671       PrintPT32((addr_t)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), (pte32_t *)V3_VAddr((void *)(addr_t)(pde[i].pt_base_addr << PAGE_POWER)));
672     }
673   }
674 }
675     
676
677
678
679
680
681
682
683 void PrintPDPE32PAE(addr_t virtual_address, pdpe32pae_t * pdpe)
684 {
685   PrintDebug("PDPE %p -> %p : present=%x, wt=%x, cd=%x, accessed=%x, kernelInfo=%x\n",
686              (void *)virtual_address,
687              (void *)(addr_t) (pdpe->pd_base_addr << PAGE_POWER),
688              pdpe->present,
689              pdpe->write_through,
690              pdpe->cache_disable,
691              pdpe->accessed,
692              pdpe->vmm_info);
693 }
694
695 void PrintPDE32PAE(addr_t virtual_address, pde32pae_t * pde)
696 {
697   PrintDebug("PDE %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
698              (void *)virtual_address,
699              (void *)(addr_t) (pde->pt_base_addr << PAGE_POWER),
700              pde->present,
701              pde->writable,
702              pde->user_page, 
703              pde->write_through,
704              pde->cache_disable,
705              pde->accessed,
706              pde->large_page,
707              pde->global_page,
708              pde->vmm_info);
709 }
710
711   
712 void PrintPTE32PAE(addr_t virtual_address, pte32pae_t * pte)
713 {
714   PrintDebug("PTE %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, dirty=%x, pteAttribute=%x, globalPage=%x, vmm_info=%x\n",
715              (void *)virtual_address,
716              (void*)(addr_t)(pte->page_base_addr << PAGE_POWER),
717              pte->present,
718              pte->writable,
719              pte->user_page,
720              pte->write_through,
721              pte->cache_disable,
722              pte->accessed,
723              pte->dirty,
724              pte->pte_attr,
725              pte->global_page,
726              pte->vmm_info);
727 }
728
729
730
731
732
733
/* Dump a full 32 bit PAE hierarchy (PDPT -> PD -> PT), printing every present
 * entry. virtual_addr tracks the guest virtual address covered so far; the
 * skip branches advance it past entire non-present sub-trees so the printed
 * addresses stay correct. */
void PrintDebugPageTables32PAE(pdpe32pae_t * pdpe)
{
  int i, j, k;
  pde32pae_t * pde;
  pte32pae_t * pte;
  addr_t virtual_addr = 0;  /* start of the range covered by the next entry */

  PrintDebug("Dumping the pages starting with the pde page at %p\n", pdpe);

  for (i = 0; (i < MAX_PDPE32PAE_ENTRIES); i++) { 

    if (pdpe[i].present) {
      pde = (pde32pae_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(pdpe[i].pd_base_addr));

      PrintPDPE32PAE(virtual_addr, &(pdpe[i]));

      for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {

        if (pde[j].present) {
          pte = (pte32pae_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(pde[j].pt_base_addr));

          PrintPDE32PAE(virtual_addr, &(pde[j]));

          for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
            if (pte[k].present) {
              PrintPTE32PAE(virtual_addr, &(pte[k]));
            }

            virtual_addr += PAGE_SIZE;
          }
        } else {
          /* Skip the 2MB range covered by a non-present PDE */
          virtual_addr += PAGE_SIZE * MAX_PTE32PAE_ENTRIES;
        }
      }
    } else {
      /* Skip the 1GB range covered by a non-present PDPE */
      virtual_addr += PAGE_SIZE * MAX_PDE32PAE_ENTRIES * MAX_PTE32PAE_ENTRIES;
    }
  }
}
773     
774
775
776 void PrintPML4e64(addr_t virtual_address, pml4e64_t * pml)
777 {
778   PrintDebug("PML4e64 %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, reserved=%x, kernelInfo=%x\n",
779              (void *)virtual_address,
780              (void *)(addr_t) (BASE_TO_PAGE_ADDR(pml->pdp_base_addr)),
781              pml->present,
782              pml->writable,
783              pml->user_page, 
784              pml->write_through,
785              pml->cache_disable,
786              pml->accessed,
787              pml->reserved,
788              pml->vmm_info);
789 }
790
791 void PrintPDPE64(addr_t virtual_address, pdpe64_t * pdpe)
792 {
793   PrintDebug("PDPE64 %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, reserved=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
794              (void *)virtual_address,
795              (void *)(addr_t) (BASE_TO_PAGE_ADDR(pdpe->pd_base_addr)),
796              pdpe->present,
797              pdpe->writable,
798              pdpe->user_page, 
799              pdpe->write_through,
800              pdpe->cache_disable,
801              pdpe->accessed,
802              pdpe->reserved,
803              pdpe->large_page,
804              0,//pdpe->global_page,
805              pdpe->vmm_info);
806 }
807
808
809
810 void PrintPDE64(addr_t virtual_address, pde64_t * pde)
811 {
812   PrintDebug("PDE64 %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, reserved=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
813              (void *)virtual_address,
814              (void *)(addr_t) (BASE_TO_PAGE_ADDR(pde->pt_base_addr)),
815              pde->present,
816              pde->writable,
817              pde->user_page, 
818              pde->write_through,
819              pde->cache_disable,
820              pde->accessed,
821              pde->reserved,
822              pde->large_page,
823              0,//pde->global_page,
824              pde->vmm_info);
825 }
826
827   
828 void PrintPTE64(addr_t virtual_address, pte64_t * pte)
829 {
830   PrintDebug("PTE64 %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, dirty=%x, pteAttribute=%x, globalPage=%x, vmm_info=%x\n",
831              (void *)virtual_address,
832              (void*)(addr_t)(BASE_TO_PAGE_ADDR(pte->page_base_addr)),
833              pte->present,
834              pte->writable,
835              pte->user_page,
836              pte->write_through,
837              pte->cache_disable,
838              pte->accessed,
839              pte->dirty,
840              pte->pte_attr,
841              pte->global_page,
842              pte->vmm_info);
843 }
844
845   
846
847
848
849 void PrintPageTree_64(addr_t virtual_addr, pml4e64_t * pml) {
850   uint_t pml4_index = PML4E64_INDEX(virtual_addr);
851   uint_t pdpe_index = PDPE64_INDEX(virtual_addr);
852   uint_t pde_index = PDE64_INDEX(virtual_addr);
853   uint_t pte_index = PTE64_INDEX(virtual_addr);
854
855   PrintPML4e64(virtual_addr, &(pml[pml4_index]));
856   if (pml[pml4_index].present) {
857     pdpe64_t * pdpe = (pdpe64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(pml[pml4_index].pdp_base_addr));
858     PrintPDPE64(virtual_addr, &(pdpe[pdpe_index]));
859
860     if (pdpe[pdpe_index].present) {
861       pde64_t * pde = (pde64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(pdpe[pdpe_index].pd_base_addr));
862       PrintPDE64(virtual_addr, &(pde[pde_index]));
863       
864       if (pde[pde_index].present) {
865         pte64_t * pte = (pte64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(pde[pde_index].pt_base_addr));
866         PrintPTE64(virtual_addr, &(pte[pte_index]));
867       }
868
869     }
870
871   }
872
873 }
874
875
876
877
878 void PrintPageTree(v3_vm_cpu_mode_t cpu_mode, addr_t virtual_addr, addr_t cr3) {
879   switch (cpu_mode) {
880   case LONG:
881   case LONG_32_COMPAT:
882   case LONG_16_COMPAT:
883     PrintPageTree_64(virtual_addr, CR3_TO_PML4E64(cr3));
884     break;
885   default:
886     PrintError("Unsupported CPU MODE %d\n", cpu_mode);
887     break;
888   }
889 }