2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm_paging.h>
22 #include <palacios/vmm.h>
24 #include <palacios/vm_guest_mem.h>
27 static pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry);
28 static pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry);
30 static pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry);
31 static pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry);
32 static pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry);
34 static pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry);
35 static pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry);
36 static pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry);
37 static pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry);
42 #define USE_VMM_PAGING_DEBUG
43 // All of the debug functions defined in vmm_paging.h are implemented in this file
44 #include "vmm_paging_debug.h"
45 #undef USE_VMM_PAGING_DEBUG
49 #ifndef CONFIG_DEBUG_SHADOW_PAGING
51 #define PrintDebug(fmt, args...)
56 void delete_page_tables_32(pde32_t * pde) {
63 PrintDebug("Deleting Page Tables (32) -- PDE (%p)\n", pde);
65 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
66 if ((pde[i].present == 1) && (pde[i].large_page == 0)) {
67 // We double cast, first to an addr_t to handle 64 bit issues, then to the pointer
69 PrintDebug("Deleting PT Page %d (%p)\n", i, (void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pde[i].pt_base_addr));
70 V3_FreePages((void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pde[i].pt_base_addr), 1);
74 V3_FreePages(V3_PAddr(pde), 1);
77 void delete_page_tables_32pae(pdpe32pae_t * pdpe) {
84 PrintDebug("Deleting Page Tables (32 PAE) -- PDPE (%p)\n", pdpe);
86 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
87 if (pdpe[i].present == 0) {
91 pde32pae_t * pde = (pde32pae_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pdpe[i].pd_base_addr));
93 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
95 if ((pde[j].present == 0) || (pde[j].large_page == 1)) {
99 V3_FreePages((void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pde[j].pt_base_addr), 1);
102 V3_FreePages(V3_PAddr(pde), 1);
105 V3_FreePages(V3_PAddr(pdpe), 1);
108 void delete_page_tables_64(pml4e64_t * pml4) {
115 PrintDebug("Deleting Page Tables (64) -- PML4 (%p)\n", pml4);
117 for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
118 if (pml4[i].present == 0) {
122 pdpe64_t * pdpe = (pdpe64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pml4[i].pdp_base_addr));
124 for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
125 if ((pdpe[j].present == 0) || (pdpe[j].large_page == 1)) {
129 pde64_t * pde = (pde64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pdpe[j].pd_base_addr));
131 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
132 if ((pde[k].present == 0) || (pde[k].large_page == 1)) {
136 V3_FreePages((void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pde[k].pt_base_addr), 1);
139 V3_FreePages(V3_PAddr(pde), 1);
142 V3_FreePages(V3_PAddr(pdpe), 1);
145 V3_FreePages(V3_PAddr(pml4), 1);
151 static int translate_pt_32_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
152 addr_t * paddr = (addr_t *)private_data;
159 *paddr = page_pa + PAGE_OFFSET_4MB(vaddr);
162 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
165 PrintError("Invalid page type (%s) in translate pt 32 callback\n", v3_page_type_to_str(type));
170 static int translate_pt_32pae_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
171 addr_t * paddr = (addr_t *)private_data;
179 *paddr = page_pa + PAGE_OFFSET_2MB(vaddr);
182 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
185 PrintError("Invalid page type (%s) in translate pt 32pae callback\n", v3_page_type_to_str(type));
190 static int translate_pt_64_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
191 addr_t * paddr = (addr_t *)private_data;
200 *paddr = page_pa + PAGE_OFFSET_1GB(vaddr);
203 *paddr = page_pa + PAGE_OFFSET_2MB(vaddr);
206 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
209 PrintError("Invalid page type (%s) in translate pt 64 callback\n", v3_page_type_to_str(type));
215 int v3_translate_host_pt_32(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
216 return v3_drill_host_pt_32(info, host_cr3, vaddr, translate_pt_32_cb, paddr);
218 int v3_translate_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
219 return v3_drill_guest_pt_32(info, guest_cr3, vaddr, translate_pt_32_cb, paddr);
223 int v3_translate_host_pt_32pae(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
224 return v3_drill_host_pt_32pae(info, host_cr3, vaddr, translate_pt_32pae_cb, paddr);
226 int v3_translate_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
227 return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, translate_pt_32pae_cb, paddr);
231 int v3_translate_host_pt_64(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
232 return v3_drill_host_pt_64(info, host_cr3, vaddr, translate_pt_64_cb, paddr);
234 int v3_translate_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
235 return v3_drill_guest_pt_64(info, guest_cr3, vaddr, translate_pt_64_cb, paddr);
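/*
 * Illustrative usage sketch (hypothetical helper, not part of the Palacios API):
 * shows how a caller might drive the translation wrappers above. It assumes the
 * drill helpers return -1 when the walk fails, as their not-present and error
 * paths suggest.
 */
static void example_translate_guest_va_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr) {
    addr_t paddr = 0;

    if (v3_translate_guest_pt_32(info, guest_cr3, vaddr, &paddr) == -1) {
	PrintError("Translation of guest VA %p failed\n", (void *)vaddr);
	return;
    }

    PrintDebug("Guest VA %p -> guest PA %p\n", (void *)vaddr, (void *)paddr);
}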
240 struct pt_find_data {
242 addr_t * pt_page_ptr;
246 static int find_pt_cb(struct guest_info * info, page_type_t type, addr_t vaddr,
247 addr_t page_ptr, addr_t page_pa, void * private_data) {
248 struct pt_find_data * pt_data = (struct pt_find_data *)private_data;
250 PrintDebug("FIND_PT Type=%s, page_pa = %p\n",
251 v3_page_type_to_str(type),
254 if (type == pt_data->type) {
255 *(pt_data->pt_page_ptr) = page_ptr;
256 *(pt_data->pt_page_pa) = page_pa;
264 int v3_find_host_pt_32_page(struct guest_info * info, v3_reg_t host_cr3, page_type_t type, addr_t vaddr,
265 addr_t * page_ptr, addr_t * page_pa) {
266 struct pt_find_data data;
269 data.pt_page_ptr = page_ptr;
270 data.pt_page_pa = page_pa;
272 return v3_drill_host_pt_32(info, host_cr3, vaddr, find_pt_cb, &data);
275 int v3_find_host_pt_32pae_page(struct guest_info * info, v3_reg_t host_cr3, page_type_t type, addr_t vaddr,
276 addr_t * page_ptr, addr_t * page_pa) {
277 struct pt_find_data data;
280 data.pt_page_ptr = page_ptr;
281 data.pt_page_pa = page_pa;
283 return v3_drill_host_pt_32pae(info, host_cr3, vaddr, find_pt_cb, &data);
286 int v3_find_host_pt_64_page(struct guest_info * info, v3_reg_t host_cr3, page_type_t type, addr_t vaddr,
287 addr_t * page_ptr, addr_t * page_pa) {
288 struct pt_find_data data;
291 data.pt_page_ptr = page_ptr;
292 data.pt_page_pa = page_pa;
294 return v3_drill_host_pt_64(info, host_cr3, vaddr, find_pt_cb, &data);
296 int v3_find_guest_pt_32_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr,
297 addr_t * page_ptr, addr_t * page_pa) {
298 struct pt_find_data data;
301 data.pt_page_ptr = page_ptr;
302 data.pt_page_pa = page_pa;
304 return v3_drill_guest_pt_32(info, guest_cr3, vaddr, find_pt_cb, &data);
307 int v3_find_guest_pt_32pae_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr,
308 addr_t * page_ptr, addr_t * page_pa) {
309 struct pt_find_data data;
312 data.pt_page_ptr = page_ptr;
313 data.pt_page_pa = page_pa;
315 return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, find_pt_cb, &data);
318 int v3_find_guest_pt_64_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr,
319 addr_t * page_ptr, addr_t * page_pa) {
320 struct pt_find_data data;
323 data.pt_page_ptr = page_ptr;
324 data.pt_page_pa = page_pa;
326 return v3_drill_guest_pt_64(info, guest_cr3, vaddr, find_pt_cb, &data);
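/*
 * Illustrative usage sketch (hypothetical helper, not part of the Palacios API):
 * locates the 32-bit page table page that backs a guest virtual address using the
 * find wrappers above. A return of -1 indicates the walk itself failed.
 */
static void example_find_guest_pt32_page(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr) {
    addr_t pt_va = 0;	// host virtual address of the guest's page table page
    addr_t pt_pa = 0;	// guest physical address of that page

    if (v3_find_guest_pt_32_page(info, guest_cr3, PAGE_PT32, vaddr, &pt_va, &pt_pa) == -1) {
	PrintError("Page table walk failed for guest VA %p\n", (void *)vaddr);
	return;
    }

    PrintDebug("PT32 page for VA %p: ptr=%p, pa=%p\n", (void *)vaddr, (void *)pt_va, (void *)pt_pa);
}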
332 * Page Table Access Checks
337 struct pt_check_data {
338 pf_error_t access_type;
339 pt_access_status_t * access_status;
342 static int check_pt_32_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
343 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
347 *(chk_data->access_status) = v3_can_access_pde32((pde32_t *)page_ptr, vaddr, chk_data->access_type);
350 *(chk_data->access_status) = v3_can_access_pte32((pte32_t *)page_ptr, vaddr, chk_data->access_type);
356 PrintError("Invalid page type (%s) in check pt 32 callback\n", v3_page_type_to_str(type));
360 if (*(chk_data->access_status) != PT_ACCESS_OK) {
368 static int check_pt_32pae_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
369 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
373 *(chk_data->access_status) = v3_can_access_pdpe32pae((pdpe32pae_t *)page_ptr, vaddr, chk_data->access_type);
376 *(chk_data->access_status) = v3_can_access_pde32pae((pde32pae_t *)page_ptr, vaddr, chk_data->access_type);
379 *(chk_data->access_status) = v3_can_access_pte32pae((pte32pae_t *)page_ptr, vaddr, chk_data->access_type);
385 PrintError("Invalid page type (%s) in check pt 32pae callback\n", v3_page_type_to_str(type));
389 if (*(chk_data->access_status) != PT_ACCESS_OK) {
397 static int check_pt_64_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
398 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
402 *(chk_data->access_status) = v3_can_access_pml4e64((pml4e64_t *)page_ptr, vaddr, chk_data->access_type);
405 *(chk_data->access_status) = v3_can_access_pdpe64((pdpe64_t *)page_ptr, vaddr, chk_data->access_type);
408 *(chk_data->access_status) = v3_can_access_pde64((pde64_t *)page_ptr, vaddr, chk_data->access_type);
411 *(chk_data->access_status) = v3_can_access_pte64((pte64_t *)page_ptr, vaddr, chk_data->access_type);
418 PrintError("Invalid page type (%s) in check pt 64 callback\n", v3_page_type_to_str(type));
422 if (*(chk_data->access_status) != PT_ACCESS_OK) {
431 int v3_check_host_pt_32(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
432 struct pt_check_data access_data;
434 access_data.access_type = access_type;
435 access_data.access_status = access_status;
437 return v3_drill_host_pt_32(info, host_cr3, vaddr, check_pt_32_cb, &access_data);
440 int v3_check_host_pt_32pae(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
441 struct pt_check_data access_data;
443 access_data.access_type = access_type;
444 access_data.access_status = access_status;
446 return v3_drill_host_pt_32pae(info, host_cr3, vaddr, check_pt_32pae_cb, &access_data);
451 int v3_check_host_pt_64(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
452 struct pt_check_data access_data;
454 access_data.access_type = access_type;
455 access_data.access_status = access_status;
457 return v3_drill_host_pt_64(info, host_cr3, vaddr, check_pt_64_cb, &access_data);
462 int v3_check_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
463 pf_error_t access_type, pt_access_status_t * access_status) {
464 struct pt_check_data access_data;
466 access_data.access_type = access_type;
467 access_data.access_status = access_status;
469 return v3_drill_guest_pt_32(info, guest_cr3, vaddr, check_pt_32_cb, &access_data);
476 int v3_check_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
477 pf_error_t access_type, pt_access_status_t * access_status) {
478 struct pt_check_data access_data;
480 access_data.access_type = access_type;
481 access_data.access_status = access_status;
483 return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, check_pt_32pae_cb, &access_data);
488 int v3_check_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
489 pf_error_t access_type, pt_access_status_t * access_status) {
490 struct pt_check_data access_data;
492 access_data.access_type = access_type;
493 access_data.access_status = access_status;
495 return v3_drill_guest_pt_64(info, guest_cr3, vaddr, check_pt_64_cb, &access_data);
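/*
 * Illustrative usage sketch (hypothetical helper, not part of the Palacios API):
 * asks whether a supervisor-mode write to a guest virtual address would be allowed
 * by the guest's own 32-bit page tables. It assumes pf_error_t can be zero-initialized
 * and that its write/user bits can be set directly, matching how can_access_pt_entry()
 * reads them below.
 */
static void example_check_guest_write_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr) {
    pf_error_t access_type = {0};
    pt_access_status_t access_status = PT_ACCESS_OK;

    access_type.write = 1;	// simulate a write...
    access_type.user = 0;	// ...from supervisor mode

    if (v3_check_guest_pt_32(info, guest_cr3, vaddr, access_type, &access_status) == -1) {
	PrintError("Access check walk failed for guest VA %p\n", (void *)vaddr);
	return;
    }

    if (access_status != PT_ACCESS_OK) {
	PrintDebug("A guest write to VA %p would fault\n", (void *)vaddr);
    }
}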
499 static int get_data_page_type_cb(struct guest_info * info, page_type_t type,
500 addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
514 page_type_t v3_get_guest_data_page_type_32(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
515 return v3_drill_guest_pt_32(info, cr3, vaddr, get_data_page_type_cb, NULL);
517 page_type_t v3_get_guest_data_page_type_32pae(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
518 return v3_drill_guest_pt_32pae(info, cr3, vaddr, get_data_page_type_cb, NULL);
520 page_type_t v3_get_guest_data_page_type_64(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
521 return v3_drill_guest_pt_64(info, cr3, vaddr, get_data_page_type_cb, NULL);
523 page_type_t v3_get_host_data_page_type_32(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
524 return v3_drill_host_pt_32(info, cr3, vaddr, get_data_page_type_cb, NULL);
526 page_type_t v3_get_host_data_page_type_32pae(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
527 return v3_drill_host_pt_32pae(info, cr3, vaddr, get_data_page_type_cb, NULL);
529 page_type_t v3_get_host_data_page_type_64(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
530 return v3_drill_host_pt_64(info, cr3, vaddr, get_data_page_type_cb, NULL);
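/*
 * Illustrative sketch (hypothetical helper, not part of the Palacios API): the data-page-type
 * wrappers above can be used to distinguish large and small guest mappings. This assumes the
 * (elided) get_data_page_type_cb makes the drill return the type of the backing data page.
 */
static int example_guest_page_is_large_32(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
    page_type_t type = v3_get_guest_data_page_type_32(info, cr3, vaddr);

    return (type == PAGE_4MB);
}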
535 * PAGE TABLE LOOKUP FUNCTIONS
537 * Each lookup writes an output address into *entry, depending on the result:
538 * Page not present: *entry = 0
543 * 32 bit Page Table lookup functions
547 static pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry) {
548 pde32_t * pde_entry = &(pd[PDE32_INDEX(addr)]);
550 if (!pde_entry->present) {
552 return PT_ENTRY_NOT_PRESENT;
553 } else if (pde_entry->large_page) {
554 pde32_4MB_t * large_pde = (pde32_4MB_t *)pde_entry;
556 *entry = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
558 return PT_ENTRY_LARGE_PAGE;
560 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
561 return PT_ENTRY_PAGE;
567 /* Takes a virtual addr (addr) and writes the corresponding physical addr into *entry, as defined in the page table
569 static pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry) {
570 pte32_t * pte_entry = &(pt[PTE32_INDEX(addr)]);
572 if (!pte_entry->present) {
574 // PrintDebug("Lookup at non present page (index=%d)\n", PTE32_INDEX(addr));
575 return PT_ENTRY_NOT_PRESENT;
577 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
579 return PT_ENTRY_PAGE;
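/*
 * For reference (added worked example, standard x86 non-PAE paging): a 32-bit virtual
 * address splits into a 10-bit page directory index (bits 31-22), a 10-bit page table
 * index (bits 21-12), and a 12-bit page offset. For addr = 0x00401234:
 *     directory index (PDE32_INDEX)     = 0x00401234 >> 22           = 1
 *     table index     (PTE32_INDEX)     = (0x00401234 >> 12) & 0x3ff = 1
 *     page offset     (PAGE_OFFSET_4KB) = 0x00401234 & 0xfff         = 0x234
 * If the PDE maps a 4MB large page, the low 22 bits (0x001234) form the offset into
 * that page instead, which is what the 4MB path above relies on.
 */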
588 * 32 bit PAE Page Table lookup functions
591 static pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry) {
592 pdpe32pae_t * pdpe_entry = &(pdp[PDPE32PAE_INDEX(addr)]);
594 if (!pdpe_entry->present) {
596 return PT_ENTRY_NOT_PRESENT;
598 *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
599 return PT_ENTRY_PAGE;
603 static pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry) {
604 pde32pae_t * pde_entry = &(pd[PDE32PAE_INDEX(addr)]);
606 if (!pde_entry->present) {
608 return PT_ENTRY_NOT_PRESENT;
609 } else if (pde_entry->large_page) {
610 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)pde_entry;
612 *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
614 return PT_ENTRY_LARGE_PAGE;
616 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
617 return PT_ENTRY_PAGE;
621 static pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry) {
622 pte32pae_t * pte_entry = &(pt[PTE32PAE_INDEX(addr)]);
624 if (!pte_entry->present) {
626 return PT_ENTRY_NOT_PRESENT;
628 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
629 return PT_ENTRY_PAGE;
637 * 64 bit Page Table lookup functions
640 static pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry) {
641 pml4e64_t * pml_entry = &(pml[PML4E64_INDEX(addr)]);
643 if (!pml_entry->present) {
645 return PT_ENTRY_NOT_PRESENT;
647 *entry = BASE_TO_PAGE_ADDR(pml_entry->pdp_base_addr);
648 return PT_ENTRY_PAGE;
652 static pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry) {
653 pdpe64_t * pdpe_entry = &(pdp[PDPE64_INDEX(addr)]);
655 if (!pdpe_entry->present) {
657 return PT_ENTRY_NOT_PRESENT;
658 } else if (pdpe_entry->large_page) {
659 PrintError("1 Gigabyte pages not supported\n");
663 *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
664 return PT_ENTRY_PAGE;
668 static pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry) {
669 pde64_t * pde_entry = &(pd[PDE64_INDEX(addr)]);
671 if (!pde_entry->present) {
673 return PT_ENTRY_NOT_PRESENT;
674 } else if (pde_entry->large_page) {
675 pde64_2MB_t * large_pde = (pde64_2MB_t *)pde_entry;
677 *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
679 return PT_ENTRY_LARGE_PAGE;
681 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
682 return PT_ENTRY_PAGE;
686 static pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry) {
687 pte64_t * pte_entry = &(pt[PTE64_INDEX(addr)]);
689 if (!pte_entry->present) {
691 return PT_ENTRY_NOT_PRESENT;
693 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
694 return PT_ENTRY_PAGE;
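/*
 * For reference (added worked example, standard x86-64 long-mode paging): a 48-bit
 * canonical address splits into four 9-bit indices and a 12-bit offset:
 * PML4 = bits 47-39, PDP = bits 38-30, PD = bits 29-21, PT = bits 20-12, offset = bits 11-0.
 * For addr = 0x00007f1200345678:
 *     PML4 index = 254, PDP index = 72, PD index = 1, PT index = 0x145, offset = 0x678
 * A 2MB large page consumes the PT index and offset together (low 21 bits); a 1GB page
 * would consume the low 30 bits, but pdpe64_lookup() above rejects 1GB pages.
 */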
701 static pt_access_status_t can_access_pt_entry(gen_pt_t * pt, pf_error_t access_type) {
702 if (pt->present == 0) {
703 return PT_ACCESS_NOT_PRESENT;
704 } else if ((pt->writable == 0) && (access_type.write == 1)) {
705 return PT_ACCESS_WRITE_ERROR;
706 } else if ((pt->user_page == 0) && (access_type.user == 1)) {
708 return PT_ACCESS_USER_ERROR;
717 * 32 bit access checks
719 pt_access_status_t inline v3_can_access_pde32(pde32_t * pde, addr_t addr, pf_error_t access_type) {
720 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32_INDEX(addr)];
721 return can_access_pt_entry(entry, access_type);
724 pt_access_status_t inline v3_can_access_pte32(pte32_t * pte, addr_t addr, pf_error_t access_type) {
725 gen_pt_t * entry = (gen_pt_t *)&pte[PTE32_INDEX(addr)];
726 return can_access_pt_entry(entry, access_type);
731 * 32 bit PAE access checks
733 pt_access_status_t inline v3_can_access_pdpe32pae(pdpe32pae_t * pdpe, addr_t addr, pf_error_t access_type) {
734 gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE32PAE_INDEX(addr)];
735 return can_access_pt_entry(entry, access_type);
738 pt_access_status_t inline v3_can_access_pde32pae(pde32pae_t * pde, addr_t addr, pf_error_t access_type) {
739 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32PAE_INDEX(addr)];
740 return can_access_pt_entry(entry, access_type);
743 pt_access_status_t inline v3_can_access_pte32pae(pte32pae_t * pte, addr_t addr, pf_error_t access_type) {
744 gen_pt_t * entry = (gen_pt_t *)&pte[PTE32PAE_INDEX(addr)];
745 return can_access_pt_entry(entry, access_type);
749 * 64 Bit access checks
751 pt_access_status_t inline v3_can_access_pml4e64(pml4e64_t * pmle, addr_t addr, pf_error_t access_type) {
752 gen_pt_t * entry = (gen_pt_t *)&pmle[PML4E64_INDEX(addr)];
753 return can_access_pt_entry(entry, access_type);
756 pt_access_status_t inline v3_can_access_pdpe64(pdpe64_t * pdpe, addr_t addr, pf_error_t access_type) {
757 gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE64_INDEX(addr)];
758 return can_access_pt_entry(entry, access_type);
761 pt_access_status_t inline v3_can_access_pde64(pde64_t * pde, addr_t addr, pf_error_t access_type) {
762 gen_pt_t * entry = (gen_pt_t *)&pde[PDE64_INDEX(addr)];
763 return can_access_pt_entry(entry, access_type);
766 pt_access_status_t inline v3_can_access_pte64(pte64_t * pte, addr_t addr, pf_error_t access_type) {
767 gen_pt_t * entry = (gen_pt_t *)&pte[PTE64_INDEX(addr)];
768 return can_access_pt_entry(entry, access_type);
776 int v3_drill_host_pt_32(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr,
777 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
778 void * private_data) {
779 pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
780 addr_t host_pde_pa = CR3_TO_PDE32_PA(host_cr3);
781 addr_t host_pte_pa = 0;
785 if ((ret = callback(info, PAGE_PD32, vaddr, (addr_t)host_pde, host_pde_pa, private_data)) != 0) {
786 return (ret == -1) ? -1 : PAGE_PD32;
789 switch (pde32_lookup(host_pde, vaddr, &host_pte_pa)) {
790 case PT_ENTRY_NOT_PRESENT:
792 case PT_ENTRY_LARGE_PAGE:
793 if ((ret = callback(info, PAGE_4MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
794 return (ret == -1) ? -1 : PAGE_4MB;
798 if ((ret = callback(info, PAGE_PT32, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
799 return (ret == -1) ? -1 : PAGE_PT32;
802 if (pte32_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
805 if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
806 return (ret == -1) ? -1 : PAGE_4KB;
816 int v3_drill_host_pt_32pae(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr,
817 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
818 void * private_data) {
819 pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
820 addr_t host_pdpe_pa = CR3_TO_PDPE32PAE_PA(host_cr3);
821 addr_t host_pde_pa = 0;
822 addr_t host_pte_pa = 0;
826 if ((ret = callback(info, PAGE_PDP32PAE, vaddr, (addr_t)host_pdpe, host_pdpe_pa, private_data)) != 0) {
827 return (ret == -1) ? -1 : PAGE_PDP32PAE;
830 switch (pdpe32pae_lookup(host_pdpe, vaddr, &host_pde_pa)) {
831 case PT_ENTRY_NOT_PRESENT:
835 if ((ret = callback(info, PAGE_PD32PAE, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data)) != 0) {
836 return (ret == -1) ? -1 : PAGE_PD32PAE;
839 switch (pde32pae_lookup(V3_VAddr((void *)host_pde_pa), vaddr, &host_pte_pa)) {
840 case PT_ENTRY_NOT_PRESENT:
842 case PT_ENTRY_LARGE_PAGE:
843 if ((ret = callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
844 return (ret == -1) ? -1 : PAGE_2MB;
848 if ((ret = callback(info, PAGE_PT32PAE, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
849 return (ret == -1) ? -1 : PAGE_PT32PAE;
852 if (pte32pae_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
855 if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
856 return (ret == -1) ? -1 : PAGE_4KB;
865 // should never get here
870 int v3_drill_host_pt_64(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr,
871 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
872 void * private_data) {
873 pml4e64_t * host_pmle = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
874 addr_t host_pmle_pa = CR3_TO_PML4E64_PA(host_cr3);
875 addr_t host_pdpe_pa = 0;
876 addr_t host_pde_pa = 0;
877 addr_t host_pte_pa = 0;
881 if ((ret = callback(info, PAGE_PML464, vaddr, (addr_t)host_pmle, host_pmle_pa, private_data)) != 0) {
882 return (ret == -1) ? -1 : PAGE_PML464;
885 switch(pml4e64_lookup(host_pmle, vaddr, &host_pdpe_pa)) {
886 case PT_ENTRY_NOT_PRESENT:
890 if ((ret = callback(info, PAGE_PDP64, vaddr, (addr_t)V3_VAddr((void *)host_pdpe_pa), host_pdpe_pa, private_data)) != 0) {
891 return (ret == -1) ? -1 : PAGE_PDP64;
894 switch(pdpe64_lookup(V3_VAddr((void *)host_pdpe_pa), vaddr, &host_pde_pa)) {
895 case PT_ENTRY_NOT_PRESENT:
897 case PT_ENTRY_LARGE_PAGE:
898 if ((ret = callback(info, PAGE_1GB, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data)) != 0) {
899 return (ret == -1) ? -1 : PAGE_1GB;
901 PrintError("1 Gigabyte Pages not supported\n");
905 if ((ret = callback(info, PAGE_PD64, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data)) != 0) {
906 return (ret == -1) ? -1 : PAGE_PD64;
909 switch (pde64_lookup(V3_VAddr((void *)host_pde_pa), vaddr, &host_pte_pa)) {
910 case PT_ENTRY_NOT_PRESENT:
912 case PT_ENTRY_LARGE_PAGE:
913 if ((ret = callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
914 return (ret == -1) ? -1 : PAGE_2MB;
919 if ((ret = callback(info, PAGE_PT64, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
920 return (ret == -1) ? -1 : PAGE_PT64;
923 if (pte64_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
926 if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
927 return (ret == -1) ? -1 : PAGE_4KB;
936 // should never get here
946 int v3_drill_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
947 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
948 void * private_data) {
949 addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
950 pde32_t * guest_pde = NULL;
951 addr_t guest_pte_pa = 0;
955 if (v3_gpa_to_hva(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
956 PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
957 (void *)guest_pde_pa);
961 if ((ret = callback(info, PAGE_PD32, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
962 return (ret == -1) ? -1 : PAGE_PD32;
965 switch (pde32_lookup(guest_pde, vaddr, &guest_pte_pa)) {
966 case PT_ENTRY_NOT_PRESENT:
968 case PT_ENTRY_LARGE_PAGE:
970 addr_t large_page_pa = (addr_t)guest_pte_pa;
971 addr_t large_page_va = 0;
973 if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
974 PrintError("Could not get virtual address of Guest Page 4MB (PA=%p)\n",
975 (void *)large_page_pa);
980 if ((ret = callback(info, PAGE_4MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
981 return (ret == -1) ? -1 : PAGE_4MB;
987 pte32_t * guest_pte = NULL;
990 if (v3_gpa_to_hva(info, guest_pte_pa, (addr_t*)&guest_pte) == -1) {
991 PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
992 (void *)guest_pte_pa);
996 if ((ret = callback(info, PAGE_PT32, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data)) != 0) {
997 return (ret == -1) ? -1 : PAGE_PT32;
1000 if (pte32_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1005 if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
1006 PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
1011 if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1012 return (ret == -1) ? -1 : PAGE_4KB;
1019 // should never get here
1020 PrintError("End of drill function (guest 32)... Should never have gotten here...\n");
1026 int v3_drill_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1027 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1028 void * private_data) {
1029 addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
1030 pdpe32pae_t * guest_pdpe = 0;
1031 addr_t guest_pde_pa = 0;
1034 if (v3_gpa_to_hva(info, guest_pdpe_pa, (addr_t*)&guest_pdpe) == -1) {
1035 PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
1036 (void *)guest_pdpe_pa);
1040 if ((ret = callback(info, PAGE_PDP32PAE, vaddr, (addr_t)guest_pdpe, guest_pdpe_pa, private_data)) != 0) {
1041 return (ret == -1) ? -1 : PAGE_PDP32PAE;
1044 switch (pdpe32pae_lookup(guest_pdpe, vaddr, &guest_pde_pa))
1046 case PT_ENTRY_NOT_PRESENT:
1050 pde32pae_t * guest_pde = NULL;
1051 addr_t guest_pte_pa = 0;
1053 if (v3_gpa_to_hva(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1054 PrintError("Could not get virtual address of Guest PDE32PAE (PA=%p)\n",
1055 (void *)guest_pde_pa);
1059 if ((ret = callback(info, PAGE_PD32PAE, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1060 return (ret == -1) ? -1 : PAGE_PD32PAE;
1063 switch (pde32pae_lookup(guest_pde, vaddr, &guest_pte_pa))
1065 case PT_ENTRY_NOT_PRESENT:
1067 case PT_ENTRY_LARGE_PAGE:
1069 addr_t large_page_pa = (addr_t)guest_pte_pa;
1070 addr_t large_page_va = 0;
1072 if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
1073 PrintDebug("Could not get virtual address of Guest Page 2MB (PA=%p)\n",
1074 (void *)large_page_pa);
1078 if ((ret = callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1079 return (ret == -1) ? -1 : PAGE_2MB;
1085 pte32pae_t * guest_pte = NULL;
1088 if (v3_gpa_to_hva(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
1089 PrintError("Could not get virtual address of Guest PTE32PAE (PA=%p)\n",
1090 (void *)guest_pte_pa);
1094 if ((ret = callback(info, PAGE_PT32PAE, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data)) != 0) {
1095 return (ret == -1) ? -1 : PAGE_PT32PAE;
1098 if (pte32pae_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1103 if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
1104 PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
1109 if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1110 return (ret == -1) ? -1 : PAGE_4KB;
1118 PrintError("Invalid page type for PD32PAE\n");
1122 // should never get here
1123 PrintError("End of drill function (guest 32pae)... Should never have gotten here...\n");
1127 int v3_drill_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1128 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1129 void * private_data) {
1130 addr_t guest_pml4_pa = CR3_TO_PML4E64_PA(guest_cr3);
1131 pml4e64_t * guest_pmle = 0;
1132 addr_t guest_pdpe_pa = 0;
1135 if (v3_gpa_to_hva(info, guest_pml4_pa, (addr_t*)&guest_pmle) == -1) {
1136 PrintError("Could not get virtual address of Guest PML4E64 (PA=%p)\n",
1137 (void *)guest_pml4_pa);
1141 if ((ret = callback(info, PAGE_PML464, vaddr, (addr_t)guest_pmle, guest_pml4_pa, private_data)) != 0) {
1142 return (ret == -1) ? -1 : PAGE_PML464;
1145 switch (pml4e64_lookup(guest_pmle, vaddr, &guest_pdpe_pa)) {
1146 case PT_ENTRY_NOT_PRESENT:
1150 pdpe64_t * guest_pdp = NULL;
1151 addr_t guest_pde_pa = 0;
1153 if (v3_gpa_to_hva(info, guest_pdpe_pa, (addr_t *)&guest_pdp) == -1) {
1154 PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
1155 (void *)guest_pdpe_pa);
1159 if ((ret = callback(info, PAGE_PDP64, vaddr, (addr_t)guest_pdp, guest_pdpe_pa, private_data)) != 0) {
1160 return (ret == -1) ? -1 : PAGE_PDP64;
1163 switch (pdpe64_lookup(guest_pdp, vaddr, &guest_pde_pa)) {
1164 case PT_ENTRY_NOT_PRESENT:
1166 case PT_ENTRY_LARGE_PAGE:
1168 addr_t large_page_pa = (addr_t)guest_pde_pa;
1169 addr_t large_page_va = 0;
1171 if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
1172 PrintDebug("Could not get virtual address of Guest Page 1GB (PA=%p)\n",
1173 (void *)large_page_pa);
1177 if ((ret = callback(info, PAGE_1GB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1178 return (ret == -1) ? -1 : PAGE_1GB;
1180 PrintError("1 Gigabyte Pages not supported\n");
1185 pde64_t * guest_pde = NULL;
1186 addr_t guest_pte_pa = 0;
1188 if (v3_gpa_to_hva(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1189 PrintError("Could not get virtual address of guest PDE64 (PA=%p)\n",
1190 (void *)guest_pde_pa);
1194 if ((ret = callback(info, PAGE_PD64, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1195 return (ret == -1) ? -1 : PAGE_PD64;
1198 switch (pde64_lookup(guest_pde, vaddr, &guest_pte_pa)) {
1199 case PT_ENTRY_NOT_PRESENT:
1201 case PT_ENTRY_LARGE_PAGE:
1203 addr_t large_page_pa = (addr_t)guest_pte_pa;
1204 addr_t large_page_va = 0;
1206 if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
1207 PrintDebug("Could not get virtual address of Guest Page 2MB (PA=%p)\n",
1208 (void *)large_page_pa);
1212 if ((ret = callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1213 return (ret == -1) ? -1 : PAGE_2MB;
1219 pte64_t * guest_pte = NULL;
1222 if (v3_gpa_to_hva(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
1223 PrintError("Could not get virtual address of guest PTE64 (PA=%p)\n",
1224 (void *)guest_pte_pa);
1228 if ((ret = callback(info, PAGE_PT64, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data)) != 0) {
1229 return (ret == -1) ? -1 : PAGE_PT64;
1232 if (pte64_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1237 if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
1238 PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
1243 if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1244 return (ret == -1) ? -1 : PAGE_4KB;
1258 // should never get here
1259 PrintError("End of drill function (guest 64)... Should never have gotten here...\n");
1266 int v3_walk_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3,
1267 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1268 void * private_data) {
1269 addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
1270 pde32_t * guest_pde = NULL;
1276 PrintError("Callback was not specified\n");
1280 if (v3_gpa_to_hva(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1281 PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
1282 (void *)guest_pde_pa);
1286 if ((ret = callback(info, PAGE_PD32, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1290 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
1291 if (guest_pde[i].present) {
1292 if (guest_pde[i].large_page) {
1293 pde32_4MB_t * large_pde = (pde32_4MB_t *)&(guest_pde[i]);
1294 addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
1295 addr_t large_page_va = 0;
1297 if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
1298 PrintDebug("Could not get virtual address of Guest 4MB Page (PA=%p)\n",
1299 (void *)large_page_pa);
1300 // We'll let it through for data pages because they may be unmapped or hooked
1304 if ((ret = callback(info, PAGE_4MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1308 vaddr += PAGE_SIZE_4MB;
1310 addr_t pte_pa = BASE_TO_PAGE_ADDR(guest_pde[i].pt_base_addr);
1311 pte32_t * tmp_pte = NULL;
1313 if (v3_gpa_to_hva(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1314 PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
1319 if ((ret = callback(info, PAGE_PT32, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
1323 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
1324 if (tmp_pte[j].present) {
1325 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);
1328 if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
1329 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1331 // We'll let it through for data pages because they may be unmapped or hooked
1335 if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1340 vaddr += PAGE_SIZE_4KB;
1344 vaddr += PAGE_SIZE_4MB;
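/*
 * Illustrative walker callback (hypothetical, not part of the Palacios API) for the
 * v3_walk_* functions in this file: it counts the 4KB and 4MB data pages mapped by a
 * 32-bit guest page table and returns 0 so the walk continues.
 * Usage sketch: v3_walk_guest_pt_32(info, guest_cr3, example_count_pages_cb, &counts);
 */
struct example_page_counts {
    int num_4kb;
    int num_4mb;
};

static int example_count_pages_cb(struct guest_info * info, page_type_t type, addr_t vaddr,
				  addr_t page_ptr, addr_t page_pa, void * private_data) {
    struct example_page_counts * counts = (struct example_page_counts *)private_data;

    if (type == PAGE_4KB) {
	counts->num_4kb++;
    } else if (type == PAGE_4MB) {
	counts->num_4mb++;
    }

    return 0;	// a nonzero return would stop the walk at this entry
}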
1351 int v3_walk_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3,
1352 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1353 void * private_data) {
1354 addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
1355 pdpe32pae_t * guest_pdpe = NULL;
1361 PrintError("Callback was not specified\n");
1365 if (v3_gpa_to_hva(info, guest_pdpe_pa, (addr_t *)&guest_pdpe) == -1) {
1366 PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
1367 (void *)guest_pdpe_pa);
1372 if ((ret = callback(info, PAGE_PDP32PAE, vaddr, (addr_t)guest_pdpe, guest_pdpe_pa, private_data)) != 0) {
1376 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
1377 if (guest_pdpe[i].present) {
1378 addr_t pde_pa = BASE_TO_PAGE_ADDR(guest_pdpe[i].pd_base_addr);
1379 pde32pae_t * tmp_pde = NULL;
1381 if (v3_gpa_to_hva(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
1382 PrintError("Could not get virtual address of Guest PDE32PAE (PA=%p)\n",
1387 if ((ret = callback(info, PAGE_PD32PAE, vaddr, (addr_t)tmp_pde, pde_pa, private_data)) != 0) {
1391 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
1392 if (tmp_pde[j].present) {
1393 if (tmp_pde[j].large_page) {
1394 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)&(tmp_pde[j]);
1395 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1396 addr_t large_page_va = 0;
1398 if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
1399 PrintDebug("Could not get virtual address of Guest 2MB Page (PA=%p)\n",
1400 (void *)large_page_pa);
1401 // We'll let it through for data pages because they may be unmapped or hooked
1405 if ((ret = callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1409 vaddr += PAGE_SIZE_2MB;
1411 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
1412 pte32pae_t * tmp_pte = NULL;
1414 if (v3_gpa_to_hva(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1415 PrintError("Could not get virtual address of Guest PTE32PAE (PA=%p)\n",
1420 if ((ret = callback(info, PAGE_PT32PAE, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
1424 for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
1425 if (tmp_pte[k].present) {
1426 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);
1429 if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
1430 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1432 // We'll let it through for data pages because they may be unmapped or hooked
1436 if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1441 vaddr += PAGE_SIZE_4KB;
1445 vaddr += PAGE_SIZE_2MB;
1449 vaddr += PAGE_SIZE_2MB * MAX_PDE32PAE_ENTRIES;
1458 int v3_walk_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3,
1459 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1460 void * private_data) {
1461 addr_t guest_pml_pa = CR3_TO_PML4E64_PA(guest_cr3);
1462 pml4e64_t * guest_pml = NULL;
1468 PrintError("Callback was not specified\n");
1472 if (v3_gpa_to_hva(info, guest_pml_pa, (addr_t *)&guest_pml) == -1) {
1473 PrintError("Could not get virtual address of Guest PML464 (PA=%p)\n",
1479 if ((ret = callback(info, PAGE_PML464, vaddr, (addr_t)guest_pml, guest_pml_pa, private_data)) != 0) {
1483 for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
1484 if (guest_pml[i].present) {
1485 addr_t pdpe_pa = BASE_TO_PAGE_ADDR(guest_pml[i].pdp_base_addr);
1486 pdpe64_t * tmp_pdpe = NULL;
1489 if (v3_gpa_to_hva(info, pdpe_pa, (addr_t *)&tmp_pdpe) == -1) {
1490 PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
1495 if ((ret = callback(info, PAGE_PDP64, vaddr, (addr_t)tmp_pdpe, pdpe_pa, private_data)) != 0) {
1499 for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
1500 if (tmp_pdpe[j].present) {
1501 if (tmp_pdpe[j].large_page) {
1502 pdpe64_1GB_t * large_pdpe = (pdpe64_1GB_t *)&(tmp_pdpe[j]);
1503 addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdpe->page_base_addr);
1504 addr_t large_page_va = 0;
1506 if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
1507 PrintDebug("Could not get virtual address of Guest 1GB page (PA=%p)\n",
1508 (void *)large_page_pa);
1509 // We'll let it through for data pages because they may be unmapped or hooked
1513 if ((ret = callback(info, PAGE_1GB, vaddr, (addr_t)large_page_va, large_page_pa, private_data)) != 0) {
1517 vaddr += PAGE_SIZE_1GB;
1519 addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
1520 pde64_t * tmp_pde = NULL;
1522 if (v3_gpa_to_hva(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
1523 PrintError("Could not get virtual address of Guest PDE64 (PA=%p)\n",
1528 if ((ret = callback(info, PAGE_PD64, vaddr, (addr_t)tmp_pde, pde_pa, private_data)) != 0) {
1532 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
1533 if (tmp_pde[k].present) {
1534 if (tmp_pde[k].large_page) {
1535 pde64_2MB_t * large_pde = (pde64_2MB_t *)&(tmp_pde[k]);
1536 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1537 addr_t large_page_va = 0;
1539 if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
1540 PrintDebug("Could not get virtual address of Guest 2MB page (PA=%p)\n",
1541 (void *)large_page_pa);
1542 // We'll let it through for data pages because they may be unmapped or hooked
1546 if ((ret = callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1550 vaddr += PAGE_SIZE_2MB;
1552 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
1553 pte64_t * tmp_pte = NULL;
1555 if (v3_gpa_to_hva(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1556 PrintError("Could not get virtual address of Guest PTE64 (PA=%p)\n",
1561 if ((ret = callback(info, PAGE_PT64, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
1565 for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
1566 if (tmp_pte[m].present) {
1567 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);
1570 if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
1571 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1573 // We'll let it through for data pages because they may be unmapped or hooked
1577 if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1582 vaddr += PAGE_SIZE_4KB;
1586 vaddr += PAGE_SIZE_2MB;
1591 vaddr += PAGE_SIZE_1GB;
1595 vaddr += ((ullong_t)PAGE_SIZE_1GB * (ullong_t)MAX_PDPE64_ENTRIES);
1601 int v3_walk_host_pt_32(struct guest_info * info, v3_reg_t host_cr3,
1602 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1603 void * private_data) {
1604 pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
1605 addr_t pde_pa = CR3_TO_PDE32_PA(host_cr3);
1611 PrintError("Callback was not specified\n");
1615 if ((ret = callback(info, PAGE_PD32, vaddr, (addr_t)host_pde, pde_pa, private_data)) != 0) {
1619 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
1620 if (host_pde[i].present) {
1621 if (host_pde[i].large_page) {
1622 pde32_4MB_t * large_pde = (pde32_4MB_t *)&(host_pde[i]);
1623 addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
1625 if ((ret = callback(info, PAGE_4MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data)) != 0) {
1629 vaddr += PAGE_SIZE_4MB;
1631 addr_t pte_pa = BASE_TO_PAGE_ADDR(host_pde[i].pt_base_addr);
1632 pte32_t * tmp_pte = (pte32_t *)V3_VAddr((void *)pte_pa);
1634 if ((ret = callback(info, PAGE_PT32, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
1638 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
1639 if (tmp_pte[j].present) {
1640 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);
1641 if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data)) != 0) {
1646 vaddr += PAGE_SIZE_4KB;
1650 vaddr += PAGE_SIZE_4MB;
1660 int v3_walk_host_pt_32pae(struct guest_info * info, v3_reg_t host_cr3,
1661 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1662 void * private_data) {
1663 pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
1664 addr_t pdpe_pa = CR3_TO_PDPE32PAE_PA(host_cr3);
1670 PrintError("Callback was not specified\n");
1674 if ((ret = callback(info, PAGE_PDP32PAE, vaddr, (addr_t)host_pdpe, pdpe_pa, private_data)) != 0) {
1678 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
1679 if (host_pdpe[i].present) {
1680 addr_t pde_pa = BASE_TO_PAGE_ADDR(host_pdpe[i].pd_base_addr);
1681 pde32pae_t * tmp_pde = (pde32pae_t *)V3_VAddr((void *)pde_pa);
1683 if ((ret = callback(info, PAGE_PD32PAE, vaddr, (addr_t)tmp_pde, pde_pa, private_data)) != 0) {
1687 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
1688 if (tmp_pde[j].present) {
1690 if (tmp_pde[j].large_page) {
1691 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)&(tmp_pde[j]);
1692 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1694 if ((ret = callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data)) != 0) {
1698 vaddr += PAGE_SIZE_2MB;
1700 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
1701 pte32pae_t * tmp_pte = (pte32pae_t *)V3_VAddr((void *)pte_pa);
1703 if ((ret = callback(info, PAGE_PT32PAE, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
1707 for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
1708 if (tmp_pte[k].present) {
1709 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);
1710 if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data)) != 0) {
1715 vaddr += PAGE_SIZE_4KB;
1719 vaddr += PAGE_SIZE_2MB;
1723 vaddr += PAGE_SIZE_2MB * MAX_PDE32PAE_ENTRIES;
1730 int v3_walk_host_pt_64(struct guest_info * info, v3_reg_t host_cr3,
1731 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1732 void * private_data) {
1733 pml4e64_t * host_pml = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
1734 addr_t pml_pa = CR3_TO_PML4E64_PA(host_cr3);
1740 PrintError("Callback was not specified\n");
1744 if ((ret = callback(info, PAGE_PML464, vaddr, (addr_t)host_pml, pml_pa, private_data)) != 0) {
1748 for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
1749 if (host_pml[i].present) {
1750 addr_t pdpe_pa = BASE_TO_PAGE_ADDR(host_pml[i].pdp_base_addr);
1751 pdpe64_t * tmp_pdpe = (pdpe64_t *)V3_VAddr((void *)pdpe_pa);
1753 if ((ret = callback(info, PAGE_PDP64, vaddr, (addr_t)tmp_pdpe, pdpe_pa, private_data)) != 0) {
1757 for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
1758 if (tmp_pdpe[j].present) {
1759 if (tmp_pdpe[j].large_page) {
1760 pdpe64_1GB_t * large_pdp = (pdpe64_1GB_t *)&(tmp_pdpe[j]);
1761 addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdp->page_base_addr);
1763 if ((ret = callback(info, PAGE_1GB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data)) != 0) {
1767 vaddr += PAGE_SIZE_1GB;
1769 addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
1770 pde64_t * tmp_pde = (pde64_t *)V3_VAddr((void *)pde_pa);
1772 if ((ret = callback(info, PAGE_PD64, vaddr, (addr_t)tmp_pde, pde_pa, private_data)) != 0) {
1776 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
1777 if (tmp_pde[k].present) {
1778 if (tmp_pde[k].large_page) {
1779 pde64_2MB_t * large_pde = (pde64_2MB_t *)&(tmp_pde[k]);
1780 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1782 if ((ret = callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data)) != 0) {
1786 vaddr += PAGE_SIZE_2MB;
1788 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
1789 pte64_t * tmp_pte = (pte64_t *)V3_VAddr((void *)pte_pa);
1791 if ((ret = callback(info, PAGE_PT64, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
1795 for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
1796 if (tmp_pte[m].present) {
1797 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);
1798 if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data)) != 0) {
1802 vaddr += PAGE_SIZE_4KB;
1806 vaddr += PAGE_SIZE_2MB;
1811 vaddr += PAGE_SIZE_1GB;
1815 vaddr += (ullong_t)PAGE_SIZE_1GB * (ullong_t)MAX_PDPE64_ENTRIES;
1823 static const uchar_t PAGE_4KB_STR[] = "4KB_PAGE";
1824 static const uchar_t PAGE_2MB_STR[] = "2MB_PAGE";
1825 static const uchar_t PAGE_4MB_STR[] = "4MB_PAGE";
1826 static const uchar_t PAGE_1GB_STR[] = "1GB_PAGE";
1827 static const uchar_t PAGE_PT32_STR[] = "32 Bit PT";
1828 static const uchar_t PAGE_PD32_STR[] = "32 Bit PD";
1829 static const uchar_t PAGE_PDP32PAE_STR[] = "32 Bit PAE PDP";
1830 static const uchar_t PAGE_PD32PAE_STR[] = "32 Bit PAE PD";
1831 static const uchar_t PAGE_PT32PAE_STR[] = "32 Bit PAE PT";
1832 static const uchar_t PAGE_PML464_STR[] = "64 Bit PML4";
1833 static const uchar_t PAGE_PDP64_STR[] = "64 Bit PDP";
1834 static const uchar_t PAGE_PD64_STR[] = "64 Bit PD";
1835 static const uchar_t PAGE_PT64_STR[] = "64 Bit PT";
1838 const uchar_t * v3_page_type_to_str(page_type_t type) {
1841 return PAGE_4KB_STR;
1843 return PAGE_2MB_STR;
1845 return PAGE_4MB_STR;
1847 return PAGE_1GB_STR;
1849 return PAGE_PT32_STR;
1851 return PAGE_PD32_STR;
1853 return PAGE_PDP32PAE_STR;
1855 return PAGE_PD32PAE_STR;
1857 return PAGE_PT32PAE_STR;
1859 return PAGE_PML464_STR;
1861 return PAGE_PDP64_STR;
1863 return PAGE_PD64_STR;
1865 return PAGE_PT64_STR;