2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm_paging.h>
22 #include <palacios/vmm.h>
24 #include <palacios/vm_guest_mem.h>
27 static pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry);
28 static pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry);
30 static pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry);
31 static pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry);
32 static pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry);
34 static pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry);
35 static pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry);
36 static pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry);
37 static pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry);
42 #define USE_VMM_PAGING_DEBUG
43 // All of the debug functions defined in vmm_paging.h are implemented in this file
44 #include "vmm_paging_debug.h"
45 #undef USE_VMM_PAGING_DEBUG
// Compile out PrintDebug unless DEBUG_SHADOW_PAGING is defined.
// NOTE(review): the matching #endif is missing from this extract — verify it
// exists in the full source.
49 #ifndef DEBUG_SHADOW_PAGING
51 #define PrintDebug(fmt, args...)
// Free a 32-bit non-PAE page-table hierarchy: walk the PD, free each PTE page,
// then free the PD page itself.
// NOTE(review): this extract is missing lines (the loop index declaration, the
// present/large-page guard around each entry, and the V3_FreePage of each pte
// page) — verify the full loop body against the complete source.
56 void delete_page_tables_32(pde32_t * pde) {
62   PrintDebug("Deleting Page Tables -- PDE (%p)\n", pde);
64   for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
66       // We double cast, first to an addr_t to handle 64 bit issues, then to the pointer
68       pte32_t * pte = (pte32_t *)((addr_t)(uint_t)(pde[i].pt_base_addr << PAGE_POWER));
76   V3_FreePage(V3_PAddr(pde));
79 void delete_page_tables_32PAE(pdpe32pae_t * pdpe) {
80 PrintError("Unimplemented function\n");
83 void delete_page_tables_64(pml4e64_t * pml4) {
84 PrintError("Unimplemented function\n");
90 static int translate_pt_32_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
91 addr_t * paddr = (addr_t *)private_data;
98 *paddr = page_pa + PAGE_OFFSET_4MB(vaddr);
101 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
104 PrintError("Inavlid page type (%s) in tranlate pt 32 callback\n", v3_page_type_to_str(type));
109 static int translate_pt_32pae_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
110 addr_t * paddr = (addr_t *)private_data;
118 *paddr = page_pa + PAGE_OFFSET_2MB(vaddr);
121 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
124 PrintError("Inavlid page type (%s) in translate pt 32pae callback\n", v3_page_type_to_str(type));
129 static int translate_pt_64_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
130 addr_t * paddr = (addr_t *)private_data;
139 *paddr = page_pa + PAGE_OFFSET_1GB(vaddr);
142 *paddr = page_pa + PAGE_OFFSET_2MB(vaddr);
145 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
148 PrintError("Inavlid page type (%s) in translate pt 64 callback\n", v3_page_type_to_str(type));
154 int v3_translate_host_pt_32(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
155 return v3_drill_host_pt_32(info, host_cr3, vaddr, translate_pt_32_cb, paddr);
157 int v3_translate_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
158 return v3_drill_guest_pt_32(info, guest_cr3, vaddr, translate_pt_32_cb, paddr);
162 int v3_translate_host_pt_32pae(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
163 return v3_drill_host_pt_32pae(info, host_cr3, vaddr, translate_pt_32pae_cb, paddr);
165 int v3_translate_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
166 return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, translate_pt_32pae_cb, paddr);
170 int v3_translate_host_pt_64(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
171 return v3_drill_host_pt_64(info, host_cr3, vaddr, translate_pt_64_cb, paddr);
173 int v3_translate_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
174 return v3_drill_guest_pt_64(info, guest_cr3, vaddr, translate_pt_64_cb, paddr);
179 struct pt_find_data {
181 addr_t * pt_page_ptr;
185 static int find_pt_cb(struct guest_info * info, page_type_t type, addr_t vaddr,
186 addr_t page_ptr, addr_t page_pa, void * private_data) {
187 struct pt_find_data * pt_data = (struct pt_find_data *)private_data;
189 PrintDebug("FIND_PT Type=%s, page_pa = %p\n",
190 v3_page_type_to_str(type),
193 if (type == pt_data->type) {
194 *(pt_data->pt_page_ptr) = page_ptr;
195 *(pt_data->pt_page_pa) = page_pa;
203 int v3_find_host_pt_32_page(struct guest_info * info, v3_reg_t host_cr3, page_type_t type, addr_t vaddr,
204 addr_t * page_ptr, addr_t * page_pa) {
205 struct pt_find_data data;
208 data.pt_page_ptr = page_ptr;
209 data.pt_page_pa = page_pa;
211 return v3_drill_host_pt_32(info, host_cr3, vaddr, find_pt_cb, &data);
214 int v3_find_host_pt_32pae_page(struct guest_info * info, v3_reg_t host_cr3, page_type_t type, addr_t vaddr,
215 addr_t * page_ptr, addr_t * page_pa) {
216 struct pt_find_data data;
219 data.pt_page_ptr = page_ptr;
220 data.pt_page_pa = page_pa;
222 return v3_drill_host_pt_32pae(info, host_cr3, vaddr, find_pt_cb, &data);
225 int v3_find_host_pt_64_page(struct guest_info * info, v3_reg_t host_cr3, page_type_t type, addr_t vaddr,
226 addr_t * page_ptr, addr_t * page_pa) {
227 struct pt_find_data data;
230 data.pt_page_ptr = page_ptr;
231 data.pt_page_pa = page_pa;
233 return v3_drill_host_pt_64(info, host_cr3, vaddr, find_pt_cb, &data);
235 int v3_find_guest_pt_32_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr,
236 addr_t * page_ptr, addr_t * page_pa) {
237 struct pt_find_data data;
240 data.pt_page_ptr = page_ptr;
241 data.pt_page_pa = page_pa;
243 return v3_drill_guest_pt_32(info, guest_cr3, vaddr, find_pt_cb, &data);
246 int v3_find_guest_pt_32pae_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr,
247 addr_t * page_ptr, addr_t * page_pa) {
248 struct pt_find_data data;
251 data.pt_page_ptr = page_ptr;
252 data.pt_page_pa = page_pa;
254 return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, find_pt_cb, &data);
257 int v3_find_guest_pt_64_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr,
258 addr_t * page_ptr, addr_t * page_pa) {
259 struct pt_find_data data;
262 data.pt_page_ptr = page_ptr;
263 data.pt_page_pa = page_pa;
265 return v3_drill_guest_pt_64(info, guest_cr3, vaddr, find_pt_cb, &data);
271 * Page Table Access Checks
276 struct pt_check_data {
277 pf_error_t access_type;
278 pt_access_status_t * access_status;
281 static int check_pt_32_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
282 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
286 *(chk_data->access_status) = v3_can_access_pde32((pde32_t *)page_ptr, vaddr, chk_data->access_type);
289 *(chk_data->access_status) = v3_can_access_pte32((pte32_t *)page_ptr, vaddr, chk_data->access_type);
295 PrintError("Inavlid page type (%s) in check pt 32 callback\n", v3_page_type_to_str(type));
299 if (chk_data->access_status != PT_ACCESS_OK) {
307 static int check_pt_32pae_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
308 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
312 *(chk_data->access_status) = v3_can_access_pdpe32pae((pdpe32pae_t *)page_ptr, vaddr, chk_data->access_type);
315 *(chk_data->access_status) = v3_can_access_pde32pae((pde32pae_t *)page_ptr, vaddr, chk_data->access_type);
318 *(chk_data->access_status) = v3_can_access_pte32pae((pte32pae_t *)page_ptr, vaddr, chk_data->access_type);
324 PrintError("Inavlid page type (%s) in check pt 32pae callback\n", v3_page_type_to_str(type));
328 if (chk_data->access_status != PT_ACCESS_OK) {
336 static int check_pt_64_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
337 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
341 *(chk_data->access_status) = v3_can_access_pml4e64((pml4e64_t *)page_ptr, vaddr, chk_data->access_type);
344 *(chk_data->access_status) = v3_can_access_pdpe64((pdpe64_t *)page_ptr, vaddr, chk_data->access_type);
347 *(chk_data->access_status) = v3_can_access_pde64((pde64_t *)page_ptr, vaddr, chk_data->access_type);
350 *(chk_data->access_status) = v3_can_access_pte64((pte64_t *)page_ptr, vaddr, chk_data->access_type);
357 PrintError("Inavlid page type (%s) in check pt 64 callback\n", v3_page_type_to_str(type));
361 if (chk_data->access_status != PT_ACCESS_OK) {
370 int v3_check_host_pt_32(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
371 struct pt_check_data access_data;
373 access_data.access_type = access_type;
374 access_data.access_status = access_status;
376 return v3_drill_host_pt_32(info, host_cr3, vaddr, check_pt_32_cb, &access_data);
379 int v3_check_host_pt_32pae(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
380 struct pt_check_data access_data;
382 access_data.access_type = access_type;
383 access_data.access_status = access_status;
385 return v3_drill_host_pt_32pae(info, host_cr3, vaddr, check_pt_32pae_cb, &access_data);
390 int v3_check_host_pt_64(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
391 struct pt_check_data access_data;
393 access_data.access_type = access_type;
394 access_data.access_status = access_status;
396 return v3_drill_host_pt_64(info, host_cr3, vaddr, check_pt_64_cb, &access_data);
401 int v3_check_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
402 pf_error_t access_type, pt_access_status_t * access_status) {
403 struct pt_check_data access_data;
405 access_data.access_type = access_type;
406 access_data.access_status = access_status;
408 return v3_drill_guest_pt_32(info, guest_cr3, vaddr, check_pt_32_cb, &access_data);
415 int v3_check_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
416 pf_error_t access_type, pt_access_status_t * access_status) {
417 struct pt_check_data access_data;
419 access_data.access_type = access_type;
420 access_data.access_status = access_status;
422 return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, check_pt_32pae_cb, &access_data);
427 int v3_check_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
428 pf_error_t access_type, pt_access_status_t * access_status) {
429 struct pt_check_data access_data;
431 access_data.access_type = access_type;
432 access_data.access_status = access_status;
434 return v3_drill_guest_pt_64(info, guest_cr3, vaddr, check_pt_64_cb, &access_data);
// Drill callback used by the v3_get_*_data_page_type_* wrappers below.
438 static int get_data_page_type_cb(struct guest_info * info, page_type_t type,
439 				 addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
// NOTE(review): the entire body (original lines 440-452) is missing from this
// extract. Presumably it returns 'type' for data pages (4KB/2MB/4MB/1GB) so
// the wrappers can return it as a page_type_t, and 0 for table levels —
// confirm against the full source.
453 page_type_t v3_get_guest_data_page_type_32(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
454 return v3_drill_guest_pt_32(info, cr3, vaddr, get_data_page_type_cb, NULL);
456 page_type_t v3_get_guest_data_page_type_32pae(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
457 return v3_drill_guest_pt_32pae(info, cr3, vaddr, get_data_page_type_cb, NULL);
459 page_type_t v3_get_guest_data_page_type_64(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
460 return v3_drill_guest_pt_64(info, cr3, vaddr, get_data_page_type_cb, NULL);
462 page_type_t v3_get_host_data_page_type_32(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
463 return v3_drill_host_pt_32(info, cr3, vaddr, get_data_page_type_cb, NULL);
465 page_type_t v3_get_host_data_page_type_32pae(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
466 return v3_drill_host_pt_32pae(info, cr3, vaddr, get_data_page_type_cb, NULL);
468 page_type_t v3_get_host_data_page_type_64(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
469 return v3_drill_host_pt_64(info, cr3, vaddr, get_data_page_type_cb, NULL);
474 * PAGE TABLE LOOKUP FUNCTIONS
476 * The value of entry is a return type:
477 * Page not present: *entry = 0
482 * 32 bit Page Table lookup functions
486 static pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry) {
487 pde32_t * pde_entry = &(pd[PDE32_INDEX(addr)]);
489 if (!pde_entry->present) {
491 return PT_ENTRY_NOT_PRESENT;
492 } else if (pde_entry->large_page) {
493 pde32_4MB_t * large_pde = (pde32_4MB_t *)pde_entry;
495 *entry = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
497 return PT_ENTRY_LARGE_PAGE;
499 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
500 return PT_ENTRY_PAGE;
506 /* Takes a virtual addr (addr) and returns the physical addr (entry) as defined in the page table
508 static pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry) {
509 pte32_t * pte_entry = &(pt[PTE32_INDEX(addr)]);
511 if (!pte_entry->present) {
513 // PrintDebug("Lookup at non present page (index=%d)\n", PTE32_INDEX(addr));
514 return PT_ENTRY_NOT_PRESENT;
516 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
518 return PT_ENTRY_PAGE;
527 * 32 bit PAE Page Table lookup functions
530 static pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry) {
531 pdpe32pae_t * pdpe_entry = &(pdp[PDPE32PAE_INDEX(addr)]);
533 if (!pdpe_entry->present) {
535 return PT_ENTRY_NOT_PRESENT;
537 *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
538 return PT_ENTRY_PAGE;
542 static pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry) {
543 pde32pae_t * pde_entry = &(pd[PDE32PAE_INDEX(addr)]);
545 if (!pde_entry->present) {
547 return PT_ENTRY_NOT_PRESENT;
548 } else if (pde_entry->large_page) {
549 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)pde_entry;
551 *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
553 return PT_ENTRY_LARGE_PAGE;
555 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
556 return PT_ENTRY_PAGE;
560 static pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry) {
561 pte32pae_t * pte_entry = &(pt[PTE32PAE_INDEX(addr)]);
563 if (!pte_entry->present) {
565 return PT_ENTRY_NOT_PRESENT;
567 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
568 return PT_ENTRY_PAGE;
576 * 64 bit Page Table lookup functions
579 static pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry) {
580 pml4e64_t * pml_entry = &(pml[PML4E64_INDEX(addr)]);
582 if (!pml_entry->present) {
584 return PT_ENTRY_NOT_PRESENT;
586 *entry = BASE_TO_PAGE_ADDR(pml_entry->pdp_base_addr);
587 return PT_ENTRY_PAGE;
591 static pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry) {
592 pdpe64_t * pdpe_entry = &(pdp[PDPE64_INDEX(addr)]);
594 if (!pdpe_entry->present) {
596 return PT_ENTRY_NOT_PRESENT;
597 } else if (pdpe_entry->large_page) {
598 PrintError("1 Gigabyte pages not supported\n");
602 *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
603 return PT_ENTRY_PAGE;
607 static pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry) {
608 pde64_t * pde_entry = &(pd[PDE64_INDEX(addr)]);
610 if (!pde_entry->present) {
612 return PT_ENTRY_NOT_PRESENT;
613 } else if (pde_entry->large_page) {
614 pde64_2MB_t * large_pde = (pde64_2MB_t *)pde_entry;
616 *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
618 return PT_ENTRY_LARGE_PAGE;
620 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
621 return PT_ENTRY_PAGE;
625 static pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry) {
626 pte64_t * pte_entry = &(pt[PTE64_INDEX(addr)]);
628 if (!pte_entry->present) {
630 return PT_ENTRY_NOT_PRESENT;
632 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
633 return PT_ENTRY_PAGE;
640 static pt_access_status_t can_access_pt_entry(gen_pt_t * pt, pf_error_t access_type) {
641 if (pt->present == 0) {
642 return PT_ACCESS_NOT_PRESENT;
643 } else if ((pt->writable == 0) && (access_type.write == 1)) {
644 return PT_ACCESS_WRITE_ERROR;
645 } else if ((pt->user_page == 0) && (access_type.user == 1)) {
647 return PT_ACCESS_USER_ERROR;
656 * 32 bit access checks
658 pt_access_status_t inline v3_can_access_pde32(pde32_t * pde, addr_t addr, pf_error_t access_type) {
659 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32_INDEX(addr)];
660 return can_access_pt_entry(entry, access_type);
663 pt_access_status_t inline v3_can_access_pte32(pte32_t * pte, addr_t addr, pf_error_t access_type) {
664 gen_pt_t * entry = (gen_pt_t *)&pte[PTE32_INDEX(addr)];
665 return can_access_pt_entry(entry, access_type);
670 * 32 bit PAE access checks
672 pt_access_status_t inline v3_can_access_pdpe32pae(pdpe32pae_t * pdpe, addr_t addr, pf_error_t access_type) {
673 gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE32PAE_INDEX(addr)];
674 return can_access_pt_entry(entry, access_type);
677 pt_access_status_t inline v3_can_access_pde32pae(pde32pae_t * pde, addr_t addr, pf_error_t access_type) {
678 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32PAE_INDEX(addr)];
679 return can_access_pt_entry(entry, access_type);
682 pt_access_status_t inline v3_can_access_pte32pae(pte32pae_t * pte, addr_t addr, pf_error_t access_type) {
683 gen_pt_t * entry = (gen_pt_t *)&pte[PTE32PAE_INDEX(addr)];
684 return can_access_pt_entry(entry, access_type);
688 * 64 Bit access checks
690 pt_access_status_t inline v3_can_access_pml4e64(pml4e64_t * pmle, addr_t addr, pf_error_t access_type) {
691 gen_pt_t * entry = (gen_pt_t *)&pmle[PML4E64_INDEX(addr)];
692 return can_access_pt_entry(entry, access_type);
695 pt_access_status_t inline v3_can_access_pdpe64(pdpe64_t * pdpe, addr_t addr, pf_error_t access_type) {
696 gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE64_INDEX(addr)];
697 return can_access_pt_entry(entry, access_type);
700 pt_access_status_t inline v3_can_access_pde64(pde64_t * pde, addr_t addr, pf_error_t access_type) {
701 gen_pt_t * entry = (gen_pt_t *)&pde[PDE64_INDEX(addr)];
702 return can_access_pt_entry(entry, access_type);
705 pt_access_status_t inline v3_can_access_pte64(pte64_t * pte, addr_t addr, pf_error_t access_type) {
706 gen_pt_t * entry = (gen_pt_t *)&pte[PTE64_INDEX(addr)];
707 return can_access_pt_entry(entry, access_type);
719 /* We generate a page table to correspond to a given memory layout
720  * pulling pages from the mem_list when necessary
721  * If there are any gaps in the layout, we add them as unmapped pages
// NOTE(review): this extract drops many interior lines of this function (loop
// index declarations, the region-null/hook guards, several field assignments,
// braces and the final return of the PD's physical address). The visible lines
// show the shape: for each PDE, build a PTE page mapping present shadow
// regions (write hooks marked read-only), free the PTE page if nothing was
// present, and wire it into the PDE otherwise. Verify details against the
// full source before modifying.
723 pde32_t * create_passthrough_pts_32(struct guest_info * guest_info) {
724   addr_t current_page_addr = 0;
727   pde32_t * pde = V3_VAddr(V3_AllocPages(1));
729   for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
731     pte32_t * pte = V3_VAddr(V3_AllocPages(1));
734     for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
735       struct v3_shadow_region * region = v3_get_shadow_region(guest_info, current_page_addr);
738 	  (region->host_type == SHDW_REGION_FULL_HOOK)) {
741 	pte[j].user_page = 0;
742 	pte[j].write_through = 0;
743 	pte[j].cache_disable = 0;
747 	pte[j].global_page = 0;
749 	pte[j].page_base_addr = 0;
754 	if (region->host_type == SHDW_REGION_WRITE_HOOK) {
756 	  PrintDebug("Marking Write hook host_addr %p as RO\n", (void *)current_page_addr);
761 	pte[j].user_page = 1;
762 	pte[j].write_through = 0;
763 	pte[j].cache_disable = 0;
767 	pte[j].global_page = 0;
770 	if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
776 	pte[j].page_base_addr = host_addr >> 12;
781       current_page_addr += PAGE_SIZE;
784     if (pte_present == 0) {
785       V3_FreePage(V3_PAddr(pte));
789       pde[i].user_page = 0;
790       pde[i].write_through = 0;
791       pde[i].cache_disable = 0;
794       pde[i].large_page = 0;
795       pde[i].global_page = 0;
797       pde[i].pt_base_addr = 0;
801       pde[i].user_page = 1;
802       pde[i].write_through = 0;
803       pde[i].cache_disable = 0;
806       pde[i].large_page = 0;
807       pde[i].global_page = 0;
809       pde[i].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
818 /* We generate a page table to correspond to a given memory layout
819  * pulling pages from the mem_list when necessary
820  * If there are any gaps in the layout, we add them as unmapped pages
// NOTE(review): as with the 32-bit variant above, this extract drops many
// interior lines (loop declarations, region guards, present/writable
// assignments, braces, the final return). Structure: PDPE -> per-entry PD ->
// per-entry PTE page; empty PTE/PD pages are freed instead of linked. Verify
// against the full source before modifying.
822 pdpe32pae_t * create_passthrough_pts_32PAE(struct guest_info * guest_info) {
823   addr_t current_page_addr = 0;
826   pdpe32pae_t * pdpe = V3_VAddr(V3_AllocPages(1));
827   memset(pdpe, 0, PAGE_SIZE);
829   for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
831     pde32pae_t * pde = V3_VAddr(V3_AllocPages(1));
833     for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
837       pte32pae_t * pte = V3_VAddr(V3_AllocPages(1));
840       for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
841 	struct v3_shadow_region * region = v3_get_shadow_region(guest_info, current_page_addr);
844 	    (region->host_type == SHDW_REGION_FULL_HOOK)) {
847 	  pte[k].user_page = 0;
848 	  pte[k].write_through = 0;
849 	  pte[k].cache_disable = 0;
853 	  pte[k].global_page = 0;
855 	  pte[k].page_base_addr = 0;
861 	  if (region->host_type == SHDW_REGION_WRITE_HOOK) {
867 	  pte[k].user_page = 1;
868 	  pte[k].write_through = 0;
869 	  pte[k].cache_disable = 0;
873 	  pte[k].global_page = 0;
876 	  if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
882 	  pte[k].page_base_addr = host_addr >> 12;
888 	current_page_addr += PAGE_SIZE;
891       if (pte_present == 0) {
892 	V3_FreePage(V3_PAddr(pte));
896 	pde[j].user_page = 0;
897 	pde[j].write_through = 0;
898 	pde[j].cache_disable = 0;
901 	pde[j].large_page = 0;
902 	pde[j].global_page = 0;
904 	pde[j].pt_base_addr = 0;
909 	pde[j].user_page = 1;
910 	pde[j].write_through = 0;
911 	pde[j].cache_disable = 0;
914 	pde[j].large_page = 0;
915 	pde[j].global_page = 0;
917 	pde[j].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
925     if (pde_present == 0) {
926       V3_FreePage(V3_PAddr(pde));
930       pdpe[i].write_through = 0;
931       pdpe[i].cache_disable = 0;
932       pdpe[i].accessed = 0;
935       pdpe[i].vmm_info = 0;
936       pdpe[i].pd_base_addr = 0;
941       pdpe[i].write_through = 0;
942       pdpe[i].cache_disable = 0;
943       pdpe[i].accessed = 0;
946       pdpe[i].vmm_info = 0;
947       pdpe[i].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
// Build a passthrough 64-bit (long mode) page-table hierarchy mirroring the
// guest's shadow-region memory map: PML4 -> PDP -> PD -> PT, mapping each
// present region's pages and marking write-hooked regions read-only.
// NOTE(review): interior lines are missing from this extract (loop
// declarations, region guards, several assignments, braces, the final return).
// Note the loop bounds visible here cover only 1 PML4 entry x 20 PDP entries
// rather than the full tables — presumably an intentional address-space cap;
// confirm against the full source.
962 pml4e64_t * create_passthrough_pts_64(struct guest_info * info) {
963   addr_t current_page_addr = 0;
966   pml4e64_t * pml = V3_VAddr(V3_AllocPages(1));
968   for (i = 0; i < 1; i++) {
969     int pdpe_present = 0;
970     pdpe64_t * pdpe = V3_VAddr(V3_AllocPages(1));
972     for (j = 0; j < 20; j++) {
974       pde64_t * pde = V3_VAddr(V3_AllocPages(1));
976       for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
978 	pte64_t * pte = V3_VAddr(V3_AllocPages(1));
981 	for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
982 	  struct v3_shadow_region * region = v3_get_shadow_region(info, current_page_addr);
987 	      (region->host_type == SHDW_REGION_FULL_HOOK)) {
990 	    pte[m].user_page = 0;
991 	    pte[m].write_through = 0;
992 	    pte[m].cache_disable = 0;
996 	    pte[m].global_page = 0;
998 	    pte[m].page_base_addr = 0;
1003 	    if (region->host_type == SHDW_REGION_WRITE_HOOK) {
1004 	      pte[m].writable = 0;
1006 	      pte[m].writable = 1;
1009 	    pte[m].user_page = 1;
1010 	    pte[m].write_through = 0;
1011 	    pte[m].cache_disable = 0;
1012 	    pte[m].accessed = 0;
1014 	    pte[m].pte_attr = 0;
1015 	    pte[m].global_page = 0;
1016 	    pte[m].vmm_info = 0;
1018 	    if (guest_pa_to_host_pa(info, current_page_addr, &host_addr) == -1) {
1024 	    pte[m].page_base_addr = PAGE_BASE_ADDR(host_addr);
1026 	    //PrintPTE64(current_page_addr, &(pte[m]));
1034 	  current_page_addr += PAGE_SIZE;
1037 	if (pte_present == 0) {
1038 	  V3_FreePage(V3_PAddr(pte));
1041 	  pde[k].writable = 0;
1042 	  pde[k].user_page = 0;
1043 	  pde[k].write_through = 0;
1044 	  pde[k].cache_disable = 0;
1045 	  pde[k].accessed = 0;
1047 	  pde[k].large_page = 0;
1048 	  //pde[k].global_page = 0;
1049 	  pde[k].vmm_info = 0;
1050 	  pde[k].pt_base_addr = 0;
1053 	  pde[k].writable = 1;
1054 	  pde[k].user_page = 1;
1055 	  pde[k].write_through = 0;
1056 	  pde[k].cache_disable = 0;
1057 	  pde[k].accessed = 0;
1059 	  pde[k].large_page = 0;
1060 	  //pde[k].global_page = 0;
1061 	  pde[k].vmm_info = 0;
1062 	  pde[k].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
1068       if (pde_present == 0) {
1069 	V3_FreePage(V3_PAddr(pde));
1071 	pdpe[j].present = 0;
1072 	pdpe[j].writable = 0;
1073 	pdpe[j].user_page = 0;
1074 	pdpe[j].write_through = 0;
1075 	pdpe[j].cache_disable = 0;
1076 	pdpe[j].accessed = 0;
1078 	pdpe[j].large_page = 0;
1079 	//pdpe[j].global_page = 0;
1080 	pdpe[j].vmm_info = 0;
1081 	pdpe[j].pd_base_addr = 0;
1083 	pdpe[j].present = 1;
1084 	pdpe[j].writable = 1;
1085 	pdpe[j].user_page = 1;
1086 	pdpe[j].write_through = 0;
1087 	pdpe[j].cache_disable = 0;
1088 	pdpe[j].accessed = 0;
1090 	pdpe[j].large_page = 0;
1091 	//pdpe[j].global_page = 0;
1092 	pdpe[j].vmm_info = 0;
1093 	pdpe[j].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
1101     PrintDebug("PML index=%d\n", i);
1103     if (pdpe_present == 0) {
1104       V3_FreePage(V3_PAddr(pdpe));
1107       pml[i].writable = 0;
1108       pml[i].user_page = 0;
1109       pml[i].write_through = 0;
1110       pml[i].cache_disable = 0;
1111       pml[i].accessed = 0;
1112       pml[i].reserved = 0;
1113       //pml[i].large_page = 0;
1114       //pml[i].global_page = 0;
1115       pml[i].vmm_info = 0;
1116       pml[i].pdp_base_addr = 0;
1119       pml[i].writable = 1;
1120       pml[i].user_page = 1;
1121       pml[i].write_through = 0;
1122       pml[i].cache_disable = 0;
1123       pml[i].accessed = 0;
1124       pml[i].reserved = 0;
1125       //pml[i].large_page = 0;
1126       //pml[i].global_page = 0;
1127       pml[i].vmm_info = 0;
1128       pml[i].pdp_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pdpe));
// Walk the host's 32-bit page tables for vaddr, invoking callback at each
// level (PD, then large page or PT, then 4KB page). A nonzero callback return
// stops the walk: -1 propagates as -1, any other nonzero maps to the page
// type reached. NOTE(review): interior lines (declarations of ret/page_pa,
// the not-present returns, switch scaffolding, final return) are missing from
// this extract.
1136 int v3_drill_host_pt_32(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr,
1137 			int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1138 			void * private_data) {
1139   pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
1140   addr_t host_pde_pa = CR3_TO_PDE32_PA(host_cr3);
1141   addr_t host_pte_pa = 0;
1145   if ((ret = callback(info, PAGE_PD32, vaddr, (addr_t)host_pde, host_pde_pa, private_data)) != 0) {
1146     return (ret == -1) ? -1 : PAGE_PD32;
1149   switch (pde32_lookup(host_pde, vaddr, &host_pte_pa)) {
1150     case PT_ENTRY_NOT_PRESENT:
1152     case PT_ENTRY_LARGE_PAGE:
// BUG(review): 'ret ==' below should be 'ret =' — the callback result is
// discarded and ret is compared while holding a stale value.
1153       if ((ret == callback(info, PAGE_4MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
1154 	return (ret == -1) ? -1 : PAGE_4MB;
// BUG(review): misplaced paren below — '(ret = callback(...) != 0)' assigns
// the 0/1 comparison result to ret, so the '(ret == -1)' test can never fire.
// Should be '((ret = callback(...)) != 0)'.
1158   if ((ret = callback(info, PAGE_PT32, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
1159     return (ret == -1) ? -1 : PAGE_PT32;
1162   if (pte32_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1165   if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
1166     return (ret == -1) ? -1 : PAGE_4KB;
// Walk the host's 32-bit PAE page tables for vaddr, invoking callback at each
// level (PDP, PD, then large page or PT, then 4KB page). NOTE(review):
// interior lines are missing from this extract; same per-level return
// convention as v3_drill_host_pt_32.
1176 int v3_drill_host_pt_32pae(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr,
1177 			   int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1178 			   void * private_data) {
1179   pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
1180   addr_t host_pdpe_pa = CR3_TO_PDPE32PAE_PA(host_cr3);
1181   addr_t host_pde_pa = 0;
1182   addr_t host_pte_pa = 0;
1186   if ((ret = callback(info, PAGE_PDP32PAE, vaddr, (addr_t)host_pdpe, host_pdpe_pa, private_data)) != 0) {
1187     return (ret == -1) ? -1 : PAGE_PDP32PAE;
1190   switch (pdpe32pae_lookup(host_pdpe, vaddr, &host_pde_pa)) {
1191     case PT_ENTRY_NOT_PRESENT:
// BUG(review): misplaced paren — should be '((ret = callback(...)) != 0)'.
1195       if ((ret = callback(info, PAGE_PD32PAE, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data) != 0)) {
1196 	return (ret == -1) ? -1 : PAGE_PD32PAE;
1199       switch (pde32pae_lookup(V3_VAddr((void *)host_pde_pa), vaddr, &host_pte_pa)) {
1200 	case PT_ENTRY_NOT_PRESENT:
1202 	case PT_ENTRY_LARGE_PAGE:
// BUG(review): 'ret ==' should be 'ret =' — callback result discarded.
1203 	  if ((ret == callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
1204 	    return (ret == -1) ? -1 : PAGE_2MB;
// BUG(review): misplaced paren — should be '((ret = callback(...)) != 0)'.
1208 	  if ((ret = callback(info, PAGE_PT32PAE, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
1209 	    return (ret == -1) ? -1 : PAGE_PT32PAE;
1212 	  if (pte32pae_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1215 	  if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
1216 	    return (ret == -1) ? -1 : PAGE_4KB;
1225   // should never get here
// Walk the host's 64-bit page tables for vaddr, invoking callback at each
// level (PML4, PDP, then 1GB page or PD, then 2MB page or PT, then 4KB page).
// NOTE(review): interior lines are missing from this extract; same per-level
// return convention as the 32-bit drill functions.
1230 int v3_drill_host_pt_64(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr,
1231 			int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1232 			void * private_data) {
1233   pml4e64_t * host_pmle = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
1234   addr_t host_pmle_pa = CR3_TO_PML4E64_PA(host_cr3);
1235   addr_t host_pdpe_pa = 0;
1236   addr_t host_pde_pa = 0;
1237   addr_t host_pte_pa = 0;
1241   if ((ret = callback(info, PAGE_PML464, vaddr, (addr_t)host_pmle, host_pmle_pa, private_data)) != 0) {
1242     return (ret == -1) ? -1 : PAGE_PML464;
1245   switch(pml4e64_lookup(host_pmle, vaddr, &host_pdpe_pa)) {
1246     case PT_ENTRY_NOT_PRESENT:
1250       if ((ret = callback(info, PAGE_PDP64, vaddr, (addr_t)V3_VAddr((void *)host_pdpe_pa), host_pdpe_pa, private_data)) != 0) {
1251 	return (ret == -1) ? -1 : PAGE_PDP64;
1254       switch(pdpe64_lookup(V3_VAddr((void *)host_pdpe_pa), vaddr, &host_pde_pa)) {
1255 	case PT_ENTRY_NOT_PRESENT:
1257 	case PT_ENTRY_LARGE_PAGE:
// BUG(review): 'ret ==' should be 'ret =' — callback result discarded. Also
// unreachable/dead path given pdpe64_lookup rejects 1GB pages (see error below).
1258 	  if ((ret == callback(info, PAGE_1GB, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data)) != 0) {
1259 	    return (ret == -1) ? -1 : PAGE_1GB;
1261 	  PrintError("1 Gigabyte Pages not supported\n");
// BUG(review): misplaced paren — should be '((ret = callback(...)) != 0)'.
1265 	  if ((ret = callback(info, PAGE_PD64, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data) != 0)) {
1266 	    return (ret == -1) ? -1 : PAGE_PD64;
1269 	  switch (pde64_lookup(V3_VAddr((void *)host_pde_pa), vaddr, &host_pte_pa)) {
1270 	    case PT_ENTRY_NOT_PRESENT:
1272 	    case PT_ENTRY_LARGE_PAGE:
// BUG(review): 'ret ==' should be 'ret ='.
1273 	      if ((ret == callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
1274 		return (ret == -1) ? -1 : PAGE_2MB;
// BUG(review): misplaced paren — should be '((ret = callback(...)) != 0)'.
1279 	      if ((ret = callback(info, PAGE_PT64, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
1280 		return (ret == -1) ? -1 : PAGE_PT64;
1283 	      if (pte64_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1286 	      if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
1287 		return (ret == -1) ? -1 : PAGE_4KB;
1296   // should never get here
// Walk the GUEST's 32-bit page tables for vaddr. Unlike the host drills,
// every guest-physical table/page address must be converted to a host virtual
// address via guest_pa_to_host_va before it can be touched. NOTE(review):
// interior lines (declarations, not-present returns, braces) are missing from
// this extract.
1306 int v3_drill_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1307 			 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1308 			 void * private_data) {
1309   addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
1310   pde32_t * guest_pde = NULL;
1311   addr_t guest_pte_pa = 0;
1315   if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1316     PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
1317 	       (void *)guest_pde_pa);
1321   if ((ret = callback(info, PAGE_PD32, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1322     return (ret == -1) ? -1 : PAGE_PD32;
1325   switch (pde32_lookup(guest_pde, vaddr, &guest_pte_pa)) {
1326     case PT_ENTRY_NOT_PRESENT:
1328     case PT_ENTRY_LARGE_PAGE:
1330 	addr_t large_page_pa = (addr_t)guest_pte_pa;
1331 	addr_t large_page_va = 0;
1333 	if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
// NOTE(review): this error message prints large_page_va (still 0 on failure);
// large_page_pa would be the informative value.
1334 	  PrintError("Could not get virtual address of Guest Page 4MB (PA=%p)\n",
1335 		     (void *)large_page_va);
// BUG(review): 'ret ==' below should be 'ret =' — callback result discarded.
1340 	if ((ret == callback(info, PAGE_4MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1341 	  return (ret == -1) ? -1 : PAGE_4MB;
1347 	pte32_t * guest_pte = NULL;
1350 	if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t*)&guest_pte) == -1) {
1351 	  PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
1352 		     (void *)guest_pte_pa);
// BUG(review): misplaced paren — should be '((ret = callback(...)) != 0)'.
1356 	if ((ret = callback(info, PAGE_PT32, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data) != 0)) {
1357 	  return (ret == -1) ? -1 : PAGE_PT32;
1360 	if (pte32_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1365 	if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1366 	  PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
1371 	if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1372 	  return (ret == -1) ? -1 : PAGE_4KB;
1379   // should never get here
1380   PrintError("End of drill function (guest 32)... Should never have gotten here...\n");
1386 int v3_drill_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1387 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1388 void * private_data) {
1389 addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
1390 pdpe32pae_t * guest_pdpe = 0;
1391 addr_t guest_pde_pa = 0;
1394 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t*)&guest_pdpe) == -1) {
1395 PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
1396 (void *)guest_pdpe_pa);
1400 if ((ret = callback(info, PAGE_PDP32PAE, vaddr, (addr_t)guest_pdpe, guest_pdpe_pa, private_data)) != 0) {
1401 return (ret == -1) ? -1 : PAGE_PDP32PAE;
1404 switch (pdpe32pae_lookup(guest_pdpe, vaddr, &guest_pde_pa))
1406 case PT_ENTRY_NOT_PRESENT:
1410 pde32pae_t * guest_pde = NULL;
1411 addr_t guest_pte_pa = 0;
1413 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1414 PrintError("Could not get virtual Address of Guest PDE32PAE (PA=%p)\n",
1415 (void *)guest_pde_pa);
1419 if ((ret = callback(info, PAGE_PD32PAE, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1420 return (ret == -1) ? -1 : PAGE_PD32PAE;
1423 switch (pde32pae_lookup(guest_pde, vaddr, &guest_pte_pa))
1425 case PT_ENTRY_NOT_PRESENT:
1427 case PT_ENTRY_LARGE_PAGE:
1429 addr_t large_page_pa = (addr_t)guest_pte_pa;
1430 addr_t large_page_va = 0;
1432 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1433 PrintDebug("Could not get virtual address of Guest Page 2MB (PA=%p)\n",
1434 (void *)large_page_va);
1438 if ((ret == callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1439 return (ret == -1) ? -1 : PAGE_2MB;
1445 pte32pae_t * guest_pte = NULL;
1448 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
1449 PrintError("Could not get virtual Address of Guest PTE32PAE (PA=%p)\n",
1450 (void *)guest_pte_pa);
1454 if ((ret = callback(info, PAGE_PT32PAE, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data) != 0)) {
1455 return (ret == -1) ? -1 : PAGE_PT32PAE;
1458 if (pte32pae_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1463 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1464 PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
1469 if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1470 return (ret == -1) ? -1 : PAGE_4KB;
1478 PrintError("Invalid page type for PD32PAE\n");
1482 // should never get here
1483 PrintError("End of drill function (guest 32pae)... Should never have gotten here...\n");
1487 int v3_drill_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1488 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1489 void * private_data) {
1490 addr_t guest_pml4_pa = CR3_TO_PML4E64_PA(guest_cr3);
1491 pml4e64_t * guest_pmle = 0;
1492 addr_t guest_pdpe_pa = 0;
1495 if (guest_pa_to_host_va(info, guest_pml4_pa, (addr_t*)&guest_pmle) == -1) {
1496 PrintError("Could not get virtual address of Guest PML4E64 (PA=%p)\n",
1497 (void *)guest_pml4_pa);
1501 if ((ret = callback(info, PAGE_PML464, vaddr, (addr_t)guest_pmle, guest_pml4_pa, private_data)) != 0) {
1502 return (ret == -1) ? -1 : PAGE_PML464;
1505 switch (pml4e64_lookup(guest_pmle, vaddr, &guest_pdpe_pa)) {
1506 case PT_ENTRY_NOT_PRESENT:
1510 pdpe64_t * guest_pdp = NULL;
1511 addr_t guest_pde_pa = 0;
1513 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t *)&guest_pdp) == -1) {
1514 PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
1515 (void *)guest_pdpe_pa);
1519 if ((ret = callback(info, PAGE_PDP64, vaddr, (addr_t)guest_pdp, guest_pdpe_pa, private_data)) != 0) {
1520 return (ret == -1) ? -1 : PAGE_PDP64;
1523 switch (pdpe64_lookup(guest_pdp, vaddr, &guest_pde_pa)) {
1524 case PT_ENTRY_NOT_PRESENT:
1526 case PT_ENTRY_LARGE_PAGE:
1528 addr_t large_page_pa = (addr_t)guest_pde_pa;
1529 addr_t large_page_va = 0;
1531 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1532 PrintDebug("Could not get virtual address of Guest Page 1GB (PA=%p)\n",
1533 (void *)large_page_va);
1537 if ((ret == callback(info, PAGE_1GB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1538 return (ret == -1) ? -1 : PAGE_1GB;
1540 PrintError("1 Gigabyte Pages not supported\n");
1545 pde64_t * guest_pde = NULL;
1546 addr_t guest_pte_pa = 0;
1548 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1549 PrintError("Could not get virtual address of guest PDE64 (PA=%p)\n",
1550 (void *)guest_pde_pa);
1554 if ((ret = callback(info, PAGE_PD64, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1555 return (ret == -1) ? -1 : PAGE_PD64;
1558 switch (pde64_lookup(guest_pde, vaddr, &guest_pte_pa)) {
1559 case PT_ENTRY_NOT_PRESENT:
1561 case PT_ENTRY_LARGE_PAGE:
1563 addr_t large_page_pa = (addr_t)guest_pte_pa;
1564 addr_t large_page_va = 0;
1566 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1567 PrintDebug("Could not get virtual address of Guest Page 2MB (PA=%p)\n",
1568 (void *)large_page_va);
1572 if ((ret == callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1573 return (ret == -1) ? -1 : PAGE_2MB;
1579 pte64_t * guest_pte = NULL;
1582 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
1583 PrintError("Could not get virtual address of guest PTE64 (PA=%p)\n",
1584 (void *)guest_pte_pa);
1588 if ((ret = callback(info, PAGE_PT64, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data) != 0)) {
1589 return (ret == -1) ? -1 : PAGE_PT64;
1592 if (pte64_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1597 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1598 PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
1603 if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1604 return (ret == -1) ? -1 : PAGE_4KB;
1618 // should never get here
1619 PrintError("End of drill function (guest 64)... Should never have gotten here...\n");
// Walk every present mapping in the guest's 32-bit (non-PAE) page tables
// rooted at guest_cr3, invoking 'callback' once for the PD, once per
// present PT, and once per present 4MB / 4KB data page.  Guest physical
// frames are translated to host virtual addresses via guest_pa_to_host_va
// before being handed to the callback; 'vaddr' tracks the guest virtual
// address covered so far.
1626 int v3_walk_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3,
1627 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1628 void * private_data) {
1629 addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
1630 pde32_t * guest_pde = NULL;
// A walk with no callback is meaningless, so it is rejected up front.
1636 PrintError("Call back was not specified\n");
1640 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1641 PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
1642 (void *)guest_pde_pa);
1646 if ((ret = callback(info, PAGE_PD32, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1650 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
1651 if (guest_pde[i].present) {
// Large-page PDE: a single 4MB page, no PT level underneath.
1652 if (guest_pde[i].large_page) {
1653 pde32_4MB_t * large_pde = (pde32_4MB_t *)&(guest_pde[i]);
1654 addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
1655 addr_t large_page_va = 0;
// Translation failure here is non-fatal: the page may legitimately be
// unmapped or hooked, so the callback still runs (with a zero VA).
1657 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1658 PrintDebug("Could not get virtual address of Guest 4MB Page (PA=%p)\n",
1659 (void *)large_page_pa);
1660 // We'll let it through for data pages because they may be unmapped or hooked
1664 if ((ret = callback(info, PAGE_4MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1668 vaddr += PAGE_SIZE_4MB;
// Normal PDE: translate the PT it points to and visit each present 4KB page.
1670 addr_t pte_pa = BASE_TO_PAGE_ADDR(guest_pde[i].pt_base_addr);
1671 pte32_t * tmp_pte = NULL;
1673 if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1674 PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
1679 if ((ret = callback(info, PAGE_PT32, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
1683 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
1684 if (tmp_pte[j].present) {
1685 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);
1688 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1689 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1691 // We'll let it through for data pages because they may be unmapped or hooked
1695 if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1700 vaddr += PAGE_SIZE_4KB;
// A not-present PDE still advances vaddr by the full 4MB region it covers.
1704 vaddr += PAGE_SIZE_4MB;
// Walk every present mapping in the guest's 32-bit PAE page tables
// (PDP -> PD -> PT), invoking 'callback' for each table and each present
// 2MB / 4KB data page.  Guest physical frames are translated to host
// virtual addresses with guest_pa_to_host_va; 'vaddr' tracks the guest
// virtual address covered so far.
1711 int v3_walk_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3,
1712 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1713 void * private_data) {
1714 addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
1715 pdpe32pae_t * guest_pdpe = NULL;
// Reject a walk with no callback up front.
1721 PrintError("Call back was not specified\n");
1725 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t *)&guest_pdpe) == -1) {
1726 PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
1727 (void *)guest_pdpe_pa);
1732 if ((ret = callback(info, PAGE_PDP32PAE, vaddr, (addr_t)guest_pdpe, guest_pdpe_pa, private_data)) != 0) {
1736 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
1737 if (guest_pdpe[i].present) {
1738 addr_t pde_pa = BASE_TO_PAGE_ADDR(guest_pdpe[i].pd_base_addr);
1739 pde32pae_t * tmp_pde = NULL;
1741 if (guest_pa_to_host_va(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
1742 PrintError("Could not get virtual address of Guest PDE32PAE (PA=%p)\n",
1747 if ((ret = callback(info, PAGE_PD32PAE, vaddr, (addr_t)tmp_pde, pde_pa, private_data)) != 0) {
1751 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
1752 if (tmp_pde[j].present) {
// Large-page PDE: a single 2MB page, no PT level underneath.
1753 if (tmp_pde[j].large_page) {
1754 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)&(tmp_pde[j]);
1755 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1756 addr_t large_page_va = 0;
// Translation failure is non-fatal for data pages (may be unmapped/hooked).
1758 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1759 PrintDebug("Could not get virtual address of Guest 2MB Page (PA=%p)\n",
1760 (void *)large_page_pa);
1761 // We'll let it through for data pages because they may be unmapped or hooked
1765 if ((ret = callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1769 vaddr += PAGE_SIZE_2MB;
// Normal PDE: translate the PT and visit each present 4KB page.
1771 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
1772 pte32pae_t * tmp_pte = NULL;
1774 if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1775 PrintError("Could not get virtual address of Guest PTE32PAE (PA=%p)\n",
1780 if ((ret = callback(info, PAGE_PT32PAE, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
1784 for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
1785 if (tmp_pte[k].present) {
1786 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);
1789 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1790 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1792 // We'll let it through for data pages because they may be unmapped or hooked
1796 if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1801 vaddr += PAGE_SIZE_4KB;
// A not-present PDE still covers a 2MB span of the address space.
1805 vaddr += PAGE_SIZE_2MB;
// A not-present PDPE covers an entire PD's worth of 2MB spans.
1809 vaddr += PAGE_SIZE_2MB * MAX_PDE32PAE_ENTRIES;
// Walk every present mapping in the guest's 64-bit (long mode) page
// tables (PML4 -> PDP -> PD -> PT), invoking 'callback' for each table
// and each present 1GB / 2MB / 4KB data page.  Guest physical frames are
// translated to host virtual addresses with guest_pa_to_host_va; 'vaddr'
// tracks the guest virtual address covered so far.
1818 int v3_walk_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3,
1819 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1820 void * private_data) {
1821 addr_t guest_pml_pa = CR3_TO_PML4E64_PA(guest_cr3);
1822 pml4e64_t * guest_pml = NULL;
// Reject a walk with no callback up front.
1828 PrintError("Call back was not specified\n");
1832 if (guest_pa_to_host_va(info, guest_pml_pa, (addr_t *)&guest_pml) == -1) {
1833 PrintError("Could not get virtual address of Guest PML464 (PA=%p)\n",
1839 if ((ret = callback(info, PAGE_PML464, vaddr, (addr_t)guest_pml, guest_pml_pa, private_data)) != 0) {
1843 for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
1844 if (guest_pml[i].present) {
1845 addr_t pdpe_pa = BASE_TO_PAGE_ADDR(guest_pml[i].pdp_base_addr);
1846 pdpe64_t * tmp_pdpe = NULL;
1849 if (guest_pa_to_host_va(info, pdpe_pa, (addr_t *)&tmp_pdpe) == -1) {
1850 PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
1855 if ((ret = callback(info, PAGE_PDP64, vaddr, (addr_t)tmp_pdpe, pdpe_pa, private_data)) != 0) {
1859 for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
1860 if (tmp_pdpe[j].present) {
// Large-page PDPE: a single 1GB page, no PD/PT levels underneath.
1861 if (tmp_pdpe[j].large_page) {
1862 pdpe64_1GB_t * large_pdpe = (pdpe64_1GB_t *)&(tmp_pdpe[j]);
1863 addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdpe->page_base_addr);
1864 addr_t large_page_va = 0;
// Translation failure is non-fatal for data pages (may be unmapped/hooked).
1866 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1867 PrintDebug("Could not get virtual address of Guest 1GB page (PA=%p)\n",
1868 (void *)large_page_pa);
1869 // We'll let it through for data pages because they may be unmapped or hooked
1873 if ((ret = callback(info, PAGE_1GB, vaddr, (addr_t)large_page_va, large_page_pa, private_data)) != 0) {
1877 vaddr += PAGE_SIZE_1GB;
// Normal PDPE: descend into the PD it points to.
1879 addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
1880 pde64_t * tmp_pde = NULL;
1882 if (guest_pa_to_host_va(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
1883 PrintError("Could not get virtual address of Guest PDE64 (PA=%p)\n",
1888 if ((ret = callback(info, PAGE_PD64, vaddr, (addr_t)tmp_pde, pde_pa, private_data)) != 0) {
1892 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
1893 if (tmp_pde[k].present) {
// Large-page PDE: a single 2MB page, no PT level underneath.
1894 if (tmp_pde[k].large_page) {
1895 pde64_2MB_t * large_pde = (pde64_2MB_t *)&(tmp_pde[k]);
1896 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1897 addr_t large_page_va = 0;
1899 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1900 PrintDebug("Could not get virtual address of Guest 2MB page (PA=%p)\n",
1901 (void *)large_page_pa);
1902 // We'll let it through for data pages because they may be unmapped or hooked
1906 if ((ret = callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1910 vaddr += PAGE_SIZE_2MB;
// Normal PDE: translate the PT and visit each present 4KB page.
1912 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
1913 pte64_t * tmp_pte = NULL;
1915 if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1916 PrintError("Could not get virtual address of Guest PTE64 (PA=%p)\n",
1921 if ((ret = callback(info, PAGE_PT64, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
1925 for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
1926 if (tmp_pte[m].present) {
1927 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);
1930 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1931 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1933 // We'll let it through for data pages because they may be unmapped or hooked
1937 if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1942 vaddr += PAGE_SIZE_4KB;
// Not-present PDE: skip the 2MB span it covers.
1946 vaddr += PAGE_SIZE_2MB;
// Not-present PDPE: skip the 1GB span it covers.
1951 vaddr += PAGE_SIZE_1GB;
// Not-present PML4E: skip an entire PDP's worth of 1GB spans; the cast to
// ullong_t keeps the multiplication from overflowing on 32-bit builds.
1955 vaddr += ((ullong_t)PAGE_SIZE_1GB * (ullong_t)MAX_PDPE64_ENTRIES);
// Walk every present mapping in a HOST 32-bit (non-PAE) page table rooted
// at host_cr3.  Unlike the guest walkers, host physical frames are mapped
// with V3_VAddr / CR3_TO_PDE32_VA (identity-style host mapping) instead of
// guest_pa_to_host_va, and no translation can fail.
1961 int v3_walk_host_pt_32(struct guest_info * info, v3_reg_t host_cr3,
1962 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1963 void * private_data) {
1964 pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
1965 addr_t pde_pa = CR3_TO_PDE32_PA(host_cr3);
// Reject a walk with no callback up front.
1971 PrintError("Call back was not specified\n");
1975 if ((ret = callback(info, PAGE_PD32, vaddr, (addr_t)host_pde, pde_pa, private_data)) != 0) {
1979 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
1980 if (host_pde[i].present) {
// Large-page PDE: a single 4MB page, no PT level underneath.
1981 if (host_pde[i].large_page) {
1982 pde32_4MB_t * large_pde = (pde32_4MB_t *)&(host_pde[i]);
1983 addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
1985 if ((ret = callback(info, PAGE_4MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data)) != 0) {
1989 vaddr += PAGE_SIZE_4MB;
// Normal PDE: visit the PT and each present 4KB page.
1991 addr_t pte_pa = BASE_TO_PAGE_ADDR(host_pde[i].pt_base_addr);
1992 pte32_t * tmp_pte = (pte32_t *)V3_VAddr((void *)pte_pa);
1994 if ((ret = callback(info, PAGE_PT32, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
1998 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
1999 if (tmp_pte[j].present) {
2000 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);
2001 if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data)) != 0) {
2006 vaddr += PAGE_SIZE_4KB;
// Not-present PDE still advances vaddr by the 4MB span it covers.
2010 vaddr += PAGE_SIZE_4MB;
// Walk every present mapping in a HOST 32-bit PAE page table
// (PDP -> PD -> PT) rooted at host_cr3, using direct host virtual
// mappings (V3_VAddr) rather than guest_pa_to_host_va.
2020 int v3_walk_host_pt_32pae(struct guest_info * info, v3_reg_t host_cr3,
2021 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
2022 void * private_data) {
2023 pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
2024 addr_t pdpe_pa = CR3_TO_PDPE32PAE_PA(host_cr3);
// Reject a walk with no callback up front.
2030 PrintError("Callback was not specified\n");
2034 if ((ret = callback(info, PAGE_PDP32PAE, vaddr, (addr_t)host_pdpe, pdpe_pa, private_data)) != 0) {
2038 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
2039 if (host_pdpe[i].present) {
2040 addr_t pde_pa = BASE_TO_PAGE_ADDR(host_pdpe[i].pd_base_addr);
2041 pde32pae_t * tmp_pde = (pde32pae_t *)V3_VAddr((void *)pde_pa);
2043 if ((ret = callback(info, PAGE_PD32PAE, vaddr, (addr_t)tmp_pde, pde_pa, private_data)) != 0) {
2047 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
2048 if (tmp_pde[j].present) {
// Large-page PDE: a single 2MB page, no PT level underneath.
2050 if (tmp_pde[j].large_page) {
2051 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)&(tmp_pde[j]);
2052 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
2054 if ((ret = callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data)) != 0) {
2058 vaddr += PAGE_SIZE_2MB;
// Normal PDE: visit the PT and each present 4KB page.
2060 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
2061 pte32pae_t * tmp_pte = (pte32pae_t *)V3_VAddr((void *)pte_pa);
2063 if ((ret = callback(info, PAGE_PT32PAE, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
2067 for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
2068 if (tmp_pte[k].present) {
2069 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);
2070 if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data)) != 0) {
2075 vaddr += PAGE_SIZE_4KB;
// Not-present PDE: skip the 2MB span it covers.
2079 vaddr += PAGE_SIZE_2MB;
// Not-present PDPE: skip an entire PD's worth of 2MB spans.
2083 vaddr += PAGE_SIZE_2MB * MAX_PDE32PAE_ENTRIES;
// Walk every present mapping in a HOST 64-bit page table
// (PML4 -> PDP -> PD -> PT) rooted at host_cr3, using direct host virtual
// mappings (V3_VAddr) rather than guest_pa_to_host_va.
2090 int v3_walk_host_pt_64(struct guest_info * info, v3_reg_t host_cr3,
2091 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
2092 void * private_data) {
2093 pml4e64_t * host_pml = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
2094 addr_t pml_pa = CR3_TO_PML4E64_PA(host_cr3);
// Reject a walk with no callback up front.
2100 PrintError("Callback was not specified\n");
2104 if ((ret = callback(info, PAGE_PML464, vaddr, (addr_t)host_pml, pml_pa, private_data)) != 0) {
2108 for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
2109 if (host_pml[i].present) {
2110 addr_t pdpe_pa = BASE_TO_PAGE_ADDR(host_pml[i].pdp_base_addr);
2111 pdpe64_t * tmp_pdpe = (pdpe64_t *)V3_VAddr((void *)pdpe_pa);
2113 if ((ret = callback(info, PAGE_PDP64, vaddr, (addr_t)tmp_pdpe, pdpe_pa, private_data)) != 0) {
2117 for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
2118 if (tmp_pdpe[j].present) {
// Large-page PDPE: a single 1GB page, no PD/PT levels underneath.
2119 if (tmp_pdpe[j].large_page) {
2120 pdpe64_1GB_t * large_pdp = (pdpe64_1GB_t *)&(tmp_pdpe[j]);
2121 addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdp->page_base_addr);
2123 if ((ret = callback(info, PAGE_1GB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data)) != 0) {
2127 vaddr += PAGE_SIZE_1GB;
// Normal PDPE: descend into the PD it points to.
2129 addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
2130 pde64_t * tmp_pde = (pde64_t *)V3_VAddr((void *)pde_pa);
2132 if ((ret = callback(info, PAGE_PD64, vaddr, (addr_t)tmp_pde, pde_pa, private_data)) != 0) {
2136 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
2137 if (tmp_pde[k].present) {
// Large-page PDE: a single 2MB page, no PT level underneath.
2138 if (tmp_pde[k].large_page) {
2139 pde64_2MB_t * large_pde = (pde64_2MB_t *)&(tmp_pde[k]);
2140 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
2142 if ((ret = callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data)) != 0) {
2146 vaddr += PAGE_SIZE_2MB;
// Normal PDE: visit the PT and each present 4KB page.
2148 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
2149 pte64_t * tmp_pte = (pte64_t *)V3_VAddr((void *)pte_pa);
2151 if ((ret = callback(info, PAGE_PT64, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
2155 for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
2156 if (tmp_pte[m].present) {
2157 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);
2158 if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data)) != 0) {
2162 vaddr += PAGE_SIZE_4KB;
// Not-present PDE: skip the 2MB span it covers.
2166 vaddr += PAGE_SIZE_2MB;
// Not-present PDPE: skip the 1GB span it covers.
2171 vaddr += PAGE_SIZE_1GB;
// Not-present PML4E: skip an entire PDP's worth of 1GB spans; the casts
// keep the multiplication from overflowing on 32-bit builds.
2175 vaddr += (ullong_t)PAGE_SIZE_1GB * (ullong_t)MAX_PDPE64_ENTRIES;
// Human-readable names for each page_type_t value, returned by
// v3_page_type_to_str().  File-scope constants so callers may hold the
// returned pointer indefinitely.
2183 static const uchar_t PAGE_4KB_STR[] = "4KB_PAGE";
2184 static const uchar_t PAGE_2MB_STR[] = "2MB_PAGE";
2185 static const uchar_t PAGE_4MB_STR[] = "4MB_PAGE";
2186 static const uchar_t PAGE_1GB_STR[] = "1GB_PAGE";
2187 static const uchar_t PAGE_PT32_STR[] = "32 Bit PT";
2188 static const uchar_t PAGE_PD32_STR[] = "32 Bit PD";
2189 static const uchar_t PAGE_PDP32PAE_STR[] = "32 Bit PAE PDP";
2190 static const uchar_t PAGE_PD32PAE_STR[] = "32 Bit PAE PD";
2191 static const uchar_t PAGE_PT32PAE_STR[] = "32 Bit PAE PT";
2192 static const uchar_t PAGE_PML464_STR[] = "64 Bit PML4";
2193 static const uchar_t PAGE_PDP64_STR[] = "64 Bit PDP";
2194 static const uchar_t PAGE_PD64_STR[] = "64 Bit PD";
2195 static const uchar_t PAGE_PT64_STR[] = "64 Bit PT";
2198 const uchar_t * v3_page_type_to_str(page_type_t type) {
2201 return PAGE_4KB_STR;
2203 return PAGE_2MB_STR;
2205 return PAGE_4MB_STR;
2207 return PAGE_1GB_STR;
2209 return PAGE_PT32_STR;
2211 return PAGE_PD32_STR;
2213 return PAGE_PDP32PAE_STR;
2215 return PAGE_PD32PAE_STR;
2217 return PAGE_PT32PAE_STR;
2219 return PAGE_PML464_STR;
2221 return PAGE_PDP64_STR;
2223 return PAGE_PD64_STR;
2225 return PAGE_PT64_STR;