2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm_paging.h>
22 #include <palacios/vmm.h>
24 #include <palacios/vm_guest_mem.h>
27 static pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry);
28 static pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry);
30 static pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry);
31 static pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry);
32 static pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry);
34 static pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry);
35 static pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry);
36 static pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry);
37 static pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry);
42 #define USE_VMM_PAGING_DEBUG
43 // All of the debug functions defined in vmm_paging.h are implemented in this file
44 #include "vmm_paging_debug.h"
45 #undef USE_VMM_PAGING_DEBUG
49 #ifndef V3_CONFIG_DEBUG_SHADOW_PAGING
51 #define PrintDebug(fmt, args...)
56 void delete_page_tables_32(pde32_t * pde) {
63 PrintDebug("Deleting Page Tables (32) -- PDE (%p)\n", pde);
65 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
66 if ((pde[i].present == 1) && (pde[i].large_page == 0)) {
67 // We double cast, first to an addr_t to handle 64 bit issues, then to the pointer
69 PrintDebug("Deleting PT Page %d (%p)\n", i, (void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pde[i].pt_base_addr));
70 V3_FreePages((void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pde[i].pt_base_addr), 1);
74 V3_FreePages(V3_PAddr(pde), 1);
77 void delete_page_tables_32pae(pdpe32pae_t * pdpe) {
84 PrintDebug("Deleting Page Tables (32 PAE) -- PDPE (%p)\n", pdpe);
86 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
87 if (pdpe[i].present == 0) {
91 pde32pae_t * pde = (pde32pae_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pdpe[i].pd_base_addr));
93 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
95 if ((pde[j].present == 0) || (pde[j].large_page == 1)) {
99 V3_FreePages((void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pde[j].pt_base_addr), 1);
102 V3_FreePages(V3_PAddr(pde), 1);
105 V3_FreePages(V3_PAddr(pdpe), 1);
108 void delete_page_tables_64(pml4e64_t * pml4) {
115 PrintDebug("Deleting Page Tables (64) -- PML4 (%p)\n", pml4);
117 for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
118 if (pml4[i].present == 0) {
122 pdpe64_t * pdpe = (pdpe64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pml4[i].pdp_base_addr));
124 for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
125 if ((pdpe[j].present == 0) || (pdpe[j].large_page == 1)) {
129 pde64_t * pde = (pde64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pdpe[j].pd_base_addr));
131 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
132 if ((pde[k].present == 0) || (pde[k].large_page == 1)) {
136 V3_FreePages((void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pde[k].pt_base_addr), 1);
139 V3_FreePages(V3_PAddr(pde), 1);
142 V3_FreePages(V3_PAddr(pdpe), 1);
145 V3_FreePages(V3_PAddr(pml4), 1);
151 static int translate_pt_32_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
152 addr_t * paddr = (addr_t *)private_data;
159 *paddr = page_pa + PAGE_OFFSET_4MB(vaddr);
162 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
165 PrintError("Inavlid page type (%s) in tranlate pt 32 callback\n", v3_page_type_to_str(type));
170 static int translate_pt_32pae_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
171 addr_t * paddr = (addr_t *)private_data;
179 *paddr = page_pa + PAGE_OFFSET_2MB(vaddr);
182 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
185 PrintError("Inavlid page type (%s) in translate pt 32pae callback\n", v3_page_type_to_str(type));
190 static int translate_pt_64_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
191 addr_t * paddr = (addr_t *)private_data;
200 *paddr = page_pa + PAGE_OFFSET_1GB(vaddr);
203 *paddr = page_pa + PAGE_OFFSET_2MB(vaddr);
206 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
209 PrintError("Inavlid page type (%s) in translate pt 64 callback\n", v3_page_type_to_str(type));
215 int v3_translate_host_pt_32(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
216 return v3_drill_host_pt_32(info, host_cr3, vaddr, translate_pt_32_cb, paddr);
218 int v3_translate_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
219 return v3_drill_guest_pt_32(info, guest_cr3, vaddr, translate_pt_32_cb, paddr);
223 int v3_translate_host_pt_32pae(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
224 return v3_drill_host_pt_32pae(info, host_cr3, vaddr, translate_pt_32pae_cb, paddr);
226 int v3_translate_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
227 return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, translate_pt_32pae_cb, paddr);
231 int v3_translate_host_pt_64(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
232 return v3_drill_host_pt_64(info, host_cr3, vaddr, translate_pt_64_cb, paddr);
234 int v3_translate_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
235 return v3_drill_guest_pt_64(info, guest_cr3, vaddr, translate_pt_64_cb, paddr);
240 struct pt_find_data {
242 addr_t * pt_page_ptr;
246 static int find_pt_cb(struct guest_info * info, page_type_t type, addr_t vaddr,
247 addr_t page_ptr, addr_t page_pa, void * private_data) {
248 struct pt_find_data * pt_data = (struct pt_find_data *)private_data;
250 PrintDebug("FIND_PT Type=%s, page_pa = %p\n",
251 v3_page_type_to_str(type),
254 if (type == pt_data->type) {
255 *(pt_data->pt_page_ptr) = page_ptr;
256 *(pt_data->pt_page_pa) = page_pa;
264 int v3_find_host_pt_32_page(struct guest_info * info, v3_reg_t host_cr3, page_type_t type, addr_t vaddr,
265 addr_t * page_ptr, addr_t * page_pa) {
266 struct pt_find_data data;
269 data.pt_page_ptr = page_ptr;
270 data.pt_page_pa = page_pa;
272 return v3_drill_host_pt_32(info, host_cr3, vaddr, find_pt_cb, &data);
275 int v3_find_host_pt_32pae_page(struct guest_info * info, v3_reg_t host_cr3, page_type_t type, addr_t vaddr,
276 addr_t * page_ptr, addr_t * page_pa) {
277 struct pt_find_data data;
280 data.pt_page_ptr = page_ptr;
281 data.pt_page_pa = page_pa;
283 return v3_drill_host_pt_32pae(info, host_cr3, vaddr, find_pt_cb, &data);
286 int v3_find_host_pt_64_page(struct guest_info * info, v3_reg_t host_cr3, page_type_t type, addr_t vaddr,
287 addr_t * page_ptr, addr_t * page_pa) {
288 struct pt_find_data data;
291 data.pt_page_ptr = page_ptr;
292 data.pt_page_pa = page_pa;
294 return v3_drill_host_pt_64(info, host_cr3, vaddr, find_pt_cb, &data);
296 int v3_find_guest_pt_32_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr,
297 addr_t * page_ptr, addr_t * page_pa) {
298 struct pt_find_data data;
301 data.pt_page_ptr = page_ptr;
302 data.pt_page_pa = page_pa;
304 return v3_drill_guest_pt_32(info, guest_cr3, vaddr, find_pt_cb, &data);
307 int v3_find_guest_pt_32pae_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr,
308 addr_t * page_ptr, addr_t * page_pa) {
309 struct pt_find_data data;
312 data.pt_page_ptr = page_ptr;
313 data.pt_page_pa = page_pa;
315 return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, find_pt_cb, &data);
318 int v3_find_guest_pt_64_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr,
319 addr_t * page_ptr, addr_t * page_pa) {
320 struct pt_find_data data;
323 data.pt_page_ptr = page_ptr;
324 data.pt_page_pa = page_pa;
326 return v3_drill_guest_pt_64(info, guest_cr3, vaddr, find_pt_cb, &data);
332 * Page Table Access Checks
337 struct pt_check_data {
338 pf_error_t access_type;
339 pt_access_status_t * access_status;
342 static int check_pt_32_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
343 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
347 *(chk_data->access_status) = v3_can_access_pde32((pde32_t *)page_ptr, vaddr, chk_data->access_type);
350 *(chk_data->access_status) = v3_can_access_pte32((pte32_t *)page_ptr, vaddr, chk_data->access_type);
356 PrintError("Inavlid page type (%s) in check pt 32 callback\n", v3_page_type_to_str(type));
360 if (chk_data->access_status != PT_ACCESS_OK) {
368 static int check_pt_32pae_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
369 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
373 *(chk_data->access_status) = v3_can_access_pdpe32pae((pdpe32pae_t *)page_ptr, vaddr, chk_data->access_type);
376 *(chk_data->access_status) = v3_can_access_pde32pae((pde32pae_t *)page_ptr, vaddr, chk_data->access_type);
379 *(chk_data->access_status) = v3_can_access_pte32pae((pte32pae_t *)page_ptr, vaddr, chk_data->access_type);
385 PrintError("Inavlid page type (%s) in check pt 32pae callback\n", v3_page_type_to_str(type));
389 if (chk_data->access_status != PT_ACCESS_OK) {
397 static int check_pt_64_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
398 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
402 *(chk_data->access_status) = v3_can_access_pml4e64((pml4e64_t *)page_ptr, vaddr, chk_data->access_type);
405 *(chk_data->access_status) = v3_can_access_pdpe64((pdpe64_t *)page_ptr, vaddr, chk_data->access_type);
408 *(chk_data->access_status) = v3_can_access_pde64((pde64_t *)page_ptr, vaddr, chk_data->access_type);
411 *(chk_data->access_status) = v3_can_access_pte64((pte64_t *)page_ptr, vaddr, chk_data->access_type);
418 PrintError("Inavlid page type (%s) in check pt 64 callback\n", v3_page_type_to_str(type));
422 if (chk_data->access_status != PT_ACCESS_OK) {
431 int v3_check_host_pt_32(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
432 struct pt_check_data access_data;
434 access_data.access_type = access_type;
435 access_data.access_status = access_status;
437 return v3_drill_host_pt_32(info, host_cr3, vaddr, check_pt_32_cb, &access_data);
440 int v3_check_host_pt_32pae(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
441 struct pt_check_data access_data;
443 access_data.access_type = access_type;
444 access_data.access_status = access_status;
446 return v3_drill_host_pt_32pae(info, host_cr3, vaddr, check_pt_32pae_cb, &access_data);
451 int v3_check_host_pt_64(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
452 struct pt_check_data access_data;
454 access_data.access_type = access_type;
455 access_data.access_status = access_status;
457 return v3_drill_host_pt_64(info, host_cr3, vaddr, check_pt_64_cb, &access_data);
462 int v3_check_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
463 pf_error_t access_type, pt_access_status_t * access_status) {
464 struct pt_check_data access_data;
466 access_data.access_type = access_type;
467 access_data.access_status = access_status;
469 return v3_drill_guest_pt_32(info, guest_cr3, vaddr, check_pt_32_cb, &access_data);
476 int v3_check_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
477 pf_error_t access_type, pt_access_status_t * access_status) {
478 struct pt_check_data access_data;
480 access_data.access_type = access_type;
481 access_data.access_status = access_status;
483 return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, check_pt_32pae_cb, &access_data);
488 int v3_check_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
489 pf_error_t access_type, pt_access_status_t * access_status) {
490 struct pt_check_data access_data;
492 access_data.access_type = access_type;
493 access_data.access_status = access_status;
495 return v3_drill_guest_pt_64(info, guest_cr3, vaddr, check_pt_64_cb, &access_data);
499 static int get_data_page_type_cb(struct guest_info * info, page_type_t type,
500 addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
514 page_type_t v3_get_guest_data_page_type_32(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
515 return v3_drill_guest_pt_32(info, cr3, vaddr, get_data_page_type_cb, NULL);
517 page_type_t v3_get_guest_data_page_type_32pae(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
518 return v3_drill_guest_pt_32pae(info, cr3, vaddr, get_data_page_type_cb, NULL);
520 page_type_t v3_get_guest_data_page_type_64(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
521 return v3_drill_guest_pt_64(info, cr3, vaddr, get_data_page_type_cb, NULL);
523 page_type_t v3_get_host_data_page_type_32(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
524 return v3_drill_host_pt_32(info, cr3, vaddr, get_data_page_type_cb, NULL);
526 page_type_t v3_get_host_data_page_type_32pae(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
527 return v3_drill_host_pt_32pae(info, cr3, vaddr, get_data_page_type_cb, NULL);
529 page_type_t v3_get_host_data_page_type_64(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
530 return v3_drill_host_pt_64(info, cr3, vaddr, get_data_page_type_cb, NULL);
535 * PAGE TABLE LOOKUP FUNCTIONS
537 * The value of entry is a return type:
538 * Page not present: *entry = 0
543 * 32 bit Page Table lookup functions
547 static pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry) {
548 pde32_t * pde_entry = &(pd[PDE32_INDEX(addr)]);
550 if (!pde_entry->present) {
552 return PT_ENTRY_NOT_PRESENT;
553 } else if (pde_entry->large_page) {
554 pde32_4MB_t * large_pde = (pde32_4MB_t *)pde_entry;
556 *entry = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
558 return PT_ENTRY_LARGE_PAGE;
560 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
561 return PT_ENTRY_PAGE;
567 /* Takes a virtual addr (addr) and returns the physical addr (entry) as defined in the page table
569 static pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry) {
570 pte32_t * pte_entry = &(pt[PTE32_INDEX(addr)]);
572 if (!pte_entry->present) {
574 // PrintDebug("Lookup at non present page (index=%d)\n", PTE32_INDEX(addr));
575 return PT_ENTRY_NOT_PRESENT;
577 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
579 return PT_ENTRY_PAGE;
588 * 32 bit PAE Page Table lookup functions
591 static pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry) {
592 pdpe32pae_t * pdpe_entry = &(pdp[PDPE32PAE_INDEX(addr)]);
594 if (!pdpe_entry->present) {
596 return PT_ENTRY_NOT_PRESENT;
598 *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
599 return PT_ENTRY_PAGE;
603 static pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry) {
604 pde32pae_t * pde_entry = &(pd[PDE32PAE_INDEX(addr)]);
606 if (!pde_entry->present) {
608 return PT_ENTRY_NOT_PRESENT;
609 } else if (pde_entry->large_page) {
610 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)pde_entry;
612 *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
614 return PT_ENTRY_LARGE_PAGE;
616 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
617 return PT_ENTRY_PAGE;
621 static pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry) {
622 pte32pae_t * pte_entry = &(pt[PTE32PAE_INDEX(addr)]);
624 if (!pte_entry->present) {
626 return PT_ENTRY_NOT_PRESENT;
628 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
629 return PT_ENTRY_PAGE;
637 * 64 bit Page Table lookup functions
640 static pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry) {
641 pml4e64_t * pml_entry = &(pml[PML4E64_INDEX(addr)]);
643 if (!pml_entry->present) {
645 return PT_ENTRY_NOT_PRESENT;
647 *entry = BASE_TO_PAGE_ADDR(pml_entry->pdp_base_addr);
648 return PT_ENTRY_PAGE;
652 static pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry) {
653 pdpe64_t * pdpe_entry = &(pdp[PDPE64_INDEX(addr)]);
655 if (!pdpe_entry->present) {
657 return PT_ENTRY_NOT_PRESENT;
658 } else if (pdpe_entry->large_page) {
659 pdpe64_1GB_t * large_pdp = (pdpe64_1GB_t *)pdpe_entry;
661 *entry = BASE_TO_PAGE_ADDR_1GB(large_pdp->page_base_addr);
663 return PT_ENTRY_LARGE_PAGE;
665 *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
666 return PT_ENTRY_PAGE;
670 static pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry) {
671 pde64_t * pde_entry = &(pd[PDE64_INDEX(addr)]);
673 if (!pde_entry->present) {
675 return PT_ENTRY_NOT_PRESENT;
676 } else if (pde_entry->large_page) {
677 pde64_2MB_t * large_pde = (pde64_2MB_t *)pde_entry;
679 *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
681 return PT_ENTRY_LARGE_PAGE;
683 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
684 return PT_ENTRY_PAGE;
688 static pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry) {
689 pte64_t * pte_entry = &(pt[PTE64_INDEX(addr)]);
691 if (!pte_entry->present) {
693 return PT_ENTRY_NOT_PRESENT;
695 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
696 return PT_ENTRY_PAGE;
703 static pt_access_status_t can_access_pt_entry(gen_pt_t * pt, pf_error_t access_type) {
704 if (pt->present == 0) {
705 return PT_ACCESS_NOT_PRESENT;
706 } else if ((pt->writable == 0) && (access_type.write == 1)) {
707 return PT_ACCESS_WRITE_ERROR;
708 } else if ((pt->user_page == 0) && (access_type.user == 1)) {
710 return PT_ACCESS_USER_ERROR;
719 * 32 bit access checks
721 pt_access_status_t inline v3_can_access_pde32(pde32_t * pde, addr_t addr, pf_error_t access_type) {
722 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32_INDEX(addr)];
723 return can_access_pt_entry(entry, access_type);
726 pt_access_status_t inline v3_can_access_pte32(pte32_t * pte, addr_t addr, pf_error_t access_type) {
727 gen_pt_t * entry = (gen_pt_t *)&pte[PTE32_INDEX(addr)];
728 return can_access_pt_entry(entry, access_type);
733 * 32 bit PAE access checks
735 pt_access_status_t inline v3_can_access_pdpe32pae(pdpe32pae_t * pdpe, addr_t addr, pf_error_t access_type) {
736 gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE32PAE_INDEX(addr)];
737 return can_access_pt_entry(entry, access_type);
740 pt_access_status_t inline v3_can_access_pde32pae(pde32pae_t * pde, addr_t addr, pf_error_t access_type) {
741 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32PAE_INDEX(addr)];
742 return can_access_pt_entry(entry, access_type);
745 pt_access_status_t inline v3_can_access_pte32pae(pte32pae_t * pte, addr_t addr, pf_error_t access_type) {
746 gen_pt_t * entry = (gen_pt_t *)&pte[PTE32PAE_INDEX(addr)];
747 return can_access_pt_entry(entry, access_type);
751 * 64 Bit access checks
753 pt_access_status_t inline v3_can_access_pml4e64(pml4e64_t * pmle, addr_t addr, pf_error_t access_type) {
754 gen_pt_t * entry = (gen_pt_t *)&pmle[PML4E64_INDEX(addr)];
755 return can_access_pt_entry(entry, access_type);
758 pt_access_status_t inline v3_can_access_pdpe64(pdpe64_t * pdpe, addr_t addr, pf_error_t access_type) {
759 gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE64_INDEX(addr)];
760 return can_access_pt_entry(entry, access_type);
763 pt_access_status_t inline v3_can_access_pde64(pde64_t * pde, addr_t addr, pf_error_t access_type) {
764 gen_pt_t * entry = (gen_pt_t *)&pde[PDE64_INDEX(addr)];
765 return can_access_pt_entry(entry, access_type);
768 pt_access_status_t inline v3_can_access_pte64(pte64_t * pte, addr_t addr, pf_error_t access_type) {
769 gen_pt_t * entry = (gen_pt_t *)&pte[PTE64_INDEX(addr)];
770 return can_access_pt_entry(entry, access_type);
778 int v3_drill_host_pt_32(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr,
779 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
780 void * private_data) {
781 pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
782 addr_t host_pde_pa = CR3_TO_PDE32_PA(host_cr3);
783 addr_t host_pte_pa = 0;
787 if ((ret = callback(info, PAGE_PD32, vaddr, (addr_t)host_pde, host_pde_pa, private_data)) != 0) {
788 return (ret == -1) ? -1 : PAGE_PD32;
791 switch (pde32_lookup(host_pde, vaddr, &host_pte_pa)) {
792 case PT_ENTRY_NOT_PRESENT:
794 case PT_ENTRY_LARGE_PAGE:
795 if ((ret == callback(info, PAGE_4MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
796 return (ret == -1) ? -1 : PAGE_4MB;
800 if ((ret = callback(info, PAGE_PT32, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
801 return (ret == -1) ? -1 : PAGE_PT32;
804 if (pte32_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
807 if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
808 return (ret == -1) ? -1 : PAGE_4KB;
818 int v3_drill_host_pt_32pae(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr,
819 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
820 void * private_data) {
821 pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
822 addr_t host_pdpe_pa = CR3_TO_PDPE32PAE_PA(host_cr3);
823 addr_t host_pde_pa = 0;
824 addr_t host_pte_pa = 0;
828 if ((ret = callback(info, PAGE_PDP32PAE, vaddr, (addr_t)host_pdpe, host_pdpe_pa, private_data)) != 0) {
829 return (ret == -1) ? -1 : PAGE_PDP32PAE;
832 switch (pdpe32pae_lookup(host_pdpe, vaddr, &host_pde_pa)) {
833 case PT_ENTRY_NOT_PRESENT:
837 if ((ret = callback(info, PAGE_PD32PAE, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data) != 0)) {
838 return (ret == -1) ? -1 : PAGE_PD32PAE;
841 switch (pde32pae_lookup(V3_VAddr((void *)host_pde_pa), vaddr, &host_pte_pa)) {
842 case PT_ENTRY_NOT_PRESENT:
844 case PT_ENTRY_LARGE_PAGE:
845 if ((ret == callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
846 return (ret == -1) ? -1 : PAGE_2MB;
850 if ((ret = callback(info, PAGE_PT32PAE, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
851 return (ret == -1) ? -1 : PAGE_PT32PAE;
854 if (pte32pae_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
857 if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
858 return (ret == -1) ? -1 : PAGE_4KB;
867 // should never get here
872 int v3_drill_host_pt_64(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr,
873 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
874 void * private_data) {
875 pml4e64_t * host_pmle = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
876 addr_t host_pmle_pa = CR3_TO_PML4E64_PA(host_cr3);
877 addr_t host_pdpe_pa = 0;
878 addr_t host_pde_pa = 0;
879 addr_t host_pte_pa = 0;
883 if ((ret = callback(info, PAGE_PML464, vaddr, (addr_t)host_pmle, host_pmle_pa, private_data)) != 0) {
884 return (ret == -1) ? -1 : PAGE_PML464;
887 switch(pml4e64_lookup(host_pmle, vaddr, &host_pdpe_pa)) {
888 case PT_ENTRY_NOT_PRESENT:
892 if ((ret = callback(info, PAGE_PDP64, vaddr, (addr_t)V3_VAddr((void *)host_pdpe_pa), host_pdpe_pa, private_data)) != 0) {
893 return (ret == -1) ? -1 : PAGE_PDP64;
896 switch(pdpe64_lookup(V3_VAddr((void *)host_pdpe_pa), vaddr, &host_pde_pa)) {
897 case PT_ENTRY_NOT_PRESENT:
899 case PT_ENTRY_LARGE_PAGE:
900 if ((ret == callback(info, PAGE_1GB, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data)) != 0) {
901 return (ret == -1) ? -1 : PAGE_1GB;
903 PrintError("1 Gigabyte Pages not supported\n");
907 if ((ret = callback(info, PAGE_PD64, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data) != 0)) {
908 return (ret == -1) ? -1 : PAGE_PD64;
911 switch (pde64_lookup(V3_VAddr((void *)host_pde_pa), vaddr, &host_pte_pa)) {
912 case PT_ENTRY_NOT_PRESENT:
914 case PT_ENTRY_LARGE_PAGE:
915 if ((ret == callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
916 return (ret == -1) ? -1 : PAGE_2MB;
921 if ((ret = callback(info, PAGE_PT64, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
922 return (ret == -1) ? -1 : PAGE_PT64;
925 if (pte64_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
928 if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
929 return (ret == -1) ? -1 : PAGE_4KB;
938 // should never get here
948 int v3_drill_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
949 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
950 void * private_data) {
951 addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
952 pde32_t * guest_pde = NULL;
953 addr_t guest_pte_pa = 0;
957 if (v3_gpa_to_hva(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
958 PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
959 (void *)guest_pde_pa);
963 if ((ret = callback(info, PAGE_PD32, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
964 return (ret == -1) ? -1 : PAGE_PD32;
967 switch (pde32_lookup(guest_pde, vaddr, &guest_pte_pa)) {
968 case PT_ENTRY_NOT_PRESENT:
970 case PT_ENTRY_LARGE_PAGE:
972 addr_t large_page_pa = (addr_t)guest_pte_pa;
973 addr_t large_page_va = 0;
975 if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
980 if ((ret == callback(info, PAGE_4MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
981 return (ret == -1) ? -1 : PAGE_4MB;
987 pte32_t * guest_pte = NULL;
990 if (v3_gpa_to_hva(info, guest_pte_pa, (addr_t*)&guest_pte) == -1) {
991 PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
992 (void *)guest_pte_pa);
996 if ((ret = callback(info, PAGE_PT32, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data) != 0)) {
997 return (ret == -1) ? -1 : PAGE_PT32;
1000 if (pte32_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1005 if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
1009 if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1010 return (ret == -1) ? -1 : PAGE_4KB;
1017 // should never get here
1018 PrintError("End of drill function (guest 32)... Should never have gotten here...\n");
1024 int v3_drill_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1025 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1026 void * private_data) {
1027 addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
1028 pdpe32pae_t * guest_pdpe = 0;
1029 addr_t guest_pde_pa = 0;
1032 if (v3_gpa_to_hva(info, guest_pdpe_pa, (addr_t*)&guest_pdpe) == -1) {
1033 PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
1034 (void *)guest_pdpe_pa);
1038 if ((ret = callback(info, PAGE_PDP32PAE, vaddr, (addr_t)guest_pdpe, guest_pdpe_pa, private_data)) != 0) {
1039 return (ret == -1) ? -1 : PAGE_PDP32PAE;
1042 switch (pdpe32pae_lookup(guest_pdpe, vaddr, &guest_pde_pa))
1044 case PT_ENTRY_NOT_PRESENT:
1048 pde32pae_t * guest_pde = NULL;
1049 addr_t guest_pte_pa = 0;
1051 if (v3_gpa_to_hva(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1052 PrintError("Could not get virtual Address of Guest PDE32PAE (PA=%p)\n",
1053 (void *)guest_pde_pa);
1057 if ((ret = callback(info, PAGE_PD32PAE, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1058 return (ret == -1) ? -1 : PAGE_PD32PAE;
1061 switch (pde32pae_lookup(guest_pde, vaddr, &guest_pte_pa))
1063 case PT_ENTRY_NOT_PRESENT:
1065 case PT_ENTRY_LARGE_PAGE:
1067 addr_t large_page_pa = (addr_t)guest_pte_pa;
1068 addr_t large_page_va = 0;
1070 if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
1074 if ((ret == callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1075 return (ret == -1) ? -1 : PAGE_2MB;
1081 pte32pae_t * guest_pte = NULL;
1084 if (v3_gpa_to_hva(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
1085 PrintError("Could not get virtual Address of Guest PTE32PAE (PA=%p)\n",
1086 (void *)guest_pte_pa);
1090 if ((ret = callback(info, PAGE_PT32PAE, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data) != 0)) {
1091 return (ret == -1) ? -1 : PAGE_PT32PAE;
1094 if (pte32pae_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1099 if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
1103 if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1104 return (ret == -1) ? -1 : PAGE_4KB;
1112 PrintError("Invalid page type for PD32PAE\n");
1116 // should never get here
1117 PrintError("End of drill function (guest 32pae)... Should never have gotten here...\n");
1121 int v3_drill_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1122 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1123 void * private_data) {
1124 addr_t guest_pml4_pa = CR3_TO_PML4E64_PA(guest_cr3);
1125 pml4e64_t * guest_pmle = 0;
1126 addr_t guest_pdpe_pa = 0;
1129 if (v3_gpa_to_hva(info, guest_pml4_pa, (addr_t*)&guest_pmle) == -1) {
1130 PrintError("Could not get virtual address of Guest PML4E64 (PA=%p)\n",
1131 (void *)guest_pml4_pa);
1135 if ((ret = callback(info, PAGE_PML464, vaddr, (addr_t)guest_pmle, guest_pml4_pa, private_data)) != 0) {
1136 return (ret == -1) ? -1 : PAGE_PML464;
1139 switch (pml4e64_lookup(guest_pmle, vaddr, &guest_pdpe_pa)) {
1140 case PT_ENTRY_NOT_PRESENT:
1144 pdpe64_t * guest_pdp = NULL;
1145 addr_t guest_pde_pa = 0;
1147 if (v3_gpa_to_hva(info, guest_pdpe_pa, (addr_t *)&guest_pdp) == -1) {
1148 PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
1149 (void *)guest_pdpe_pa);
1153 if ((ret = callback(info, PAGE_PDP64, vaddr, (addr_t)guest_pdp, guest_pdpe_pa, private_data)) != 0) {
1154 return (ret == -1) ? -1 : PAGE_PDP64;
1157 switch (pdpe64_lookup(guest_pdp, vaddr, &guest_pde_pa)) {
1158 case PT_ENTRY_NOT_PRESENT:
1160 case PT_ENTRY_LARGE_PAGE:
1162 addr_t large_page_pa = (addr_t)guest_pde_pa;
1163 addr_t large_page_va = 0;
1165 if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
1169 if ((ret == callback(info, PAGE_1GB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1170 return (ret == -1) ? -1 : PAGE_1GB;
1172 PrintError("1 Gigabyte Pages not supported\n");
1177 pde64_t * guest_pde = NULL;
1178 addr_t guest_pte_pa = 0;
1180 if (v3_gpa_to_hva(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1181 PrintError("Could not get virtual address of guest PDE64 (PA=%p)\n",
1182 (void *)guest_pde_pa);
1186 if ((ret = callback(info, PAGE_PD64, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1187 return (ret == -1) ? -1 : PAGE_PD64;
1190 switch (pde64_lookup(guest_pde, vaddr, &guest_pte_pa)) {
1191 case PT_ENTRY_NOT_PRESENT:
1193 case PT_ENTRY_LARGE_PAGE:
1195 addr_t large_page_pa = (addr_t)guest_pte_pa;
1196 addr_t large_page_va = 0;
1198 if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
1202 if ((ret == callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1203 return (ret == -1) ? -1 : PAGE_2MB;
1209 pte64_t * guest_pte = NULL;
1212 if (v3_gpa_to_hva(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
1213 PrintError("Could not get virtual address of guest PTE64 (PA=%p)\n",
1214 (void *)guest_pte_pa);
1218 if ((ret = callback(info, PAGE_PT64, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data) != 0)) {
1219 return (ret == -1) ? -1 : PAGE_PT64;
1222 if (pte64_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1227 if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
1231 if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1232 return (ret == -1) ? -1 : PAGE_4KB;
1246 // should never get here
1247 PrintError("End of drill function (guest 64)... Should never have gotten here...\n");
/*
 * Walk a guest's 32-bit (non-PAE) page hierarchy rooted at guest_cr3.
 * 'callback' is invoked for the page directory itself, for each present
 * page table, and for each mapped page (4MB large pages and 4KB pages),
 * receiving the page type, the guest virtual address the entry covers,
 * a host-virtual pointer to the structure/page data, and its
 * guest-physical address.  A non-zero callback return (captured in
 * 'ret') terminates the walk early.
 */
int v3_walk_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3,
int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
void * private_data) {
addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3); // guest-physical base of the page directory
pde32_t * guest_pde = NULL;                       // host-virtual view of the page directory
// A walk with no visitor is meaningless.
PrintError("Call back was not specified\n");
// Map the guest-physical PD frame into host virtual space so it can be read.
if (v3_gpa_to_hva(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
(void *)guest_pde_pa);
// Visit the page directory before any of its entries.
if ((ret = callback(info, PAGE_PD32, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
if (guest_pde[i].present) {
if (guest_pde[i].large_page) {
// 4MB large page: the PDE maps the frame directly, no PT level.
pde32_4MB_t * large_pde = (pde32_4MB_t *)&(guest_pde[i]);
addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
addr_t large_page_va = 0;
// Translation failure is tolerated for data pages (see comment below);
// the callback is still invoked, with large_page_va left as 0.
if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
PrintDebug("Could not get virtual address of Guest 4MB Page (PA=%p)\n",
(void *)large_page_pa);
// We'll let it through for data pages because they may be unmapped or hooked
if ((ret = callback(info, PAGE_4MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
// One 4MB PDE covers 4MB of guest virtual address space.
vaddr += PAGE_SIZE_4MB;
addr_t pte_pa = BASE_TO_PAGE_ADDR(guest_pde[i].pt_base_addr);
pte32_t * tmp_pte = NULL;
// Page tables, unlike data pages, must be readable -- the walk
// cannot continue without them.
if (v3_gpa_to_hva(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
// Visit the page table before scanning its entries.
if ((ret = callback(info, PAGE_PT32, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
if (tmp_pte[j].present) {
addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);
if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
// We'll let it through for data pages because they may be unmapped or hooked
if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
// Advance one 4KB page per PTE slot (present or not).
vaddr += PAGE_SIZE_4KB;
// A non-present PDE still accounts for 4MB of address space.
vaddr += PAGE_SIZE_4MB;
/*
 * Walk a guest's 32-bit PAE page hierarchy rooted at guest_cr3:
 * PDPT -> PD -> PT -> 4KB pages, with 2MB large pages possible at the
 * PDE level.  'callback' is invoked for every paging structure and
 * every mapped page, with the guest virtual address ('vaddr'), a
 * host-virtual pointer, and the guest-physical address.  A non-zero
 * callback return (captured in 'ret') terminates the walk early.
 */
int v3_walk_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3,
int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
void * private_data) {
addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3); // guest-physical base of the PDPT
pdpe32pae_t * guest_pdpe = NULL;                       // host-virtual view of the PDPT
// A walk with no visitor is meaningless.
PrintError("Call back was not specified\n");
// Map the guest-physical PDPT into host virtual space.
if (v3_gpa_to_hva(info, guest_pdpe_pa, (addr_t *)&guest_pdpe) == -1) {
PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
(void *)guest_pdpe_pa);
// Visit the PDPT before descending into it.
if ((ret = callback(info, PAGE_PDP32PAE, vaddr, (addr_t)guest_pdpe, guest_pdpe_pa, private_data)) != 0) {
for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
if (guest_pdpe[i].present) {
addr_t pde_pa = BASE_TO_PAGE_ADDR(guest_pdpe[i].pd_base_addr);
pde32pae_t * tmp_pde = NULL;
// Paging structures must translate; the walk cannot proceed otherwise.
if (v3_gpa_to_hva(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
PrintError("Could not get virtual address of Guest PDE32PAE (PA=%p)\n",
if ((ret = callback(info, PAGE_PD32PAE, vaddr, (addr_t)tmp_pde, pde_pa, private_data)) != 0) {
for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
if (tmp_pde[j].present) {
if (tmp_pde[j].large_page) {
// 2MB large page mapped directly by the PDE.
pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)&(tmp_pde[j]);
addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
addr_t large_page_va = 0;
// Data pages may legitimately be unmapped/hooked, so a failed
// translation only downgrades large_page_va to 0.
if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
PrintDebug("Could not get virtual address of Guest 2MB Page (PA=%p)\n",
(void *)large_page_pa);
// We'll let it through for data pages because they may be unmapped or hooked
if ((ret = callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
// One 2MB PDE covers 2MB of guest virtual address space.
vaddr += PAGE_SIZE_2MB;
addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
pte32pae_t * tmp_pte = NULL;
if (v3_gpa_to_hva(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
PrintError("Could not get virtual address of Guest PTE32PAE (PA=%p)\n",
if ((ret = callback(info, PAGE_PT32PAE, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
if (tmp_pte[k].present) {
addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);
if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
// We'll let it through for data pages because they may be unmapped or hooked
if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
// Advance one 4KB page per PTE slot.
vaddr += PAGE_SIZE_4KB;
// Non-present PDE: skip the 2MB region it would have mapped.
vaddr += PAGE_SIZE_2MB;
// Non-present PDPE: skip a full PD's worth (512 * 2MB = 1GB) of address space.
vaddr += PAGE_SIZE_2MB * MAX_PDE32PAE_ENTRIES;
/*
 * Walk a guest's 4-level 64-bit page hierarchy rooted at guest_cr3:
 * PML4 -> PDP -> PD -> PT -> 4KB pages, with 1GB large pages possible
 * at the PDPE level and 2MB large pages at the PDE level.  'callback'
 * is invoked for every paging structure and every mapped page.  A
 * non-zero callback return (captured in 'ret') terminates the walk.
 *
 * NOTE(review): 'vaddr' appears to be an addr_t; on a 32-bit host
 * build the 64-bit address accounting below would wrap -- confirm
 * addr_t is 64-bit wherever this walk is compiled.
 */
int v3_walk_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3,
int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
void * private_data) {
addr_t guest_pml_pa = CR3_TO_PML4E64_PA(guest_cr3); // guest-physical base of the PML4
pml4e64_t * guest_pml = NULL;                       // host-virtual view of the PML4
// A walk with no visitor is meaningless.
PrintError("Call back was not specified\n");
// Map the guest-physical PML4 into host virtual space.
if (v3_gpa_to_hva(info, guest_pml_pa, (addr_t *)&guest_pml) == -1) {
PrintError("Could not get virtual address of Guest PML464 (PA=%p)\n",
// Visit the PML4 before descending into it.
if ((ret = callback(info, PAGE_PML464, vaddr, (addr_t)guest_pml, guest_pml_pa, private_data)) != 0) {
for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
if (guest_pml[i].present) {
addr_t pdpe_pa = BASE_TO_PAGE_ADDR(guest_pml[i].pdp_base_addr);
pdpe64_t * tmp_pdpe = NULL;
// Paging structures must translate; the walk cannot proceed otherwise.
if (v3_gpa_to_hva(info, pdpe_pa, (addr_t *)&tmp_pdpe) == -1) {
PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
if ((ret = callback(info, PAGE_PDP64, vaddr, (addr_t)tmp_pdpe, pdpe_pa, private_data)) != 0) {
for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
if (tmp_pdpe[j].present) {
if (tmp_pdpe[j].large_page) {
// 1GB large page mapped directly by the PDPE.
pdpe64_1GB_t * large_pdpe = (pdpe64_1GB_t *)&(tmp_pdpe[j]);
addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdpe->page_base_addr);
addr_t large_page_va = 0;
// Failed data-page translations are tolerated (va stays 0).
if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
PrintDebug("Could not get virtual address of Guest 1GB page (PA=%p)\n",
(void *)large_page_pa);
// We'll let it through for data pages because they may be unmapped or hooked
if ((ret = callback(info, PAGE_1GB, vaddr, (addr_t)large_page_va, large_page_pa, private_data)) != 0) {
vaddr += PAGE_SIZE_1GB;
addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
pde64_t * tmp_pde = NULL;
if (v3_gpa_to_hva(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
PrintError("Could not get virtual address of Guest PDE64 (PA=%p)\n",
if ((ret = callback(info, PAGE_PD64, vaddr, (addr_t)tmp_pde, pde_pa, private_data)) != 0) {
for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
if (tmp_pde[k].present) {
if (tmp_pde[k].large_page) {
// 2MB large page mapped directly by the PDE.
pde64_2MB_t * large_pde = (pde64_2MB_t *)&(tmp_pde[k]);
addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
addr_t large_page_va = 0;
if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
PrintDebug("Could not get virtual address of Guest 2MB page (PA=%p)\n",
(void *)large_page_pa);
// We'll let it through for data pages because they may be unmapped or hooked
if ((ret = callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
vaddr += PAGE_SIZE_2MB;
addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
pte64_t * tmp_pte = NULL;
if (v3_gpa_to_hva(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
PrintError("Could not get virtual address of Guest PTE64 (PA=%p)\n",
if ((ret = callback(info, PAGE_PT64, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
if (tmp_pte[m].present) {
addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);
if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
// We'll let it through for data pages because they may be unmapped or hooked
if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
// Advance one 4KB page per PTE slot.
vaddr += PAGE_SIZE_4KB;
// Non-present PDE: skip its 2MB region.
vaddr += PAGE_SIZE_2MB;
// Non-present PDPE: skip its 1GB region.
vaddr += PAGE_SIZE_1GB;
// Non-present PML4E: skip 512GB; widened to ullong_t to avoid 32-bit overflow.
vaddr += ((ullong_t)PAGE_SIZE_1GB * (ullong_t)MAX_PDPE64_ENTRIES);
/*
 * Walk a HOST 32-bit (non-PAE) page hierarchy rooted at host_cr3.
 * Unlike the guest walks above, host page frames are directly
 * addressable via V3_VAddr()/CR3_TO_*_VA(), so no gpa->hva translation
 * (and no translation-failure handling) is needed.  'callback' is
 * invoked for the PD, each present PT, and each mapped 4MB/4KB page;
 * a non-zero return (captured in 'ret') terminates the walk.
 */
int v3_walk_host_pt_32(struct guest_info * info, v3_reg_t host_cr3,
int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
void * private_data) {
pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3); // direct host-virtual view of the PD
addr_t pde_pa = CR3_TO_PDE32_PA(host_cr3);                 // physical base of the PD
// A walk with no visitor is meaningless.
PrintError("Call back was not specified\n");
// Visit the page directory before its entries.
if ((ret = callback(info, PAGE_PD32, vaddr, (addr_t)host_pde, pde_pa, private_data)) != 0) {
for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
if (host_pde[i].present) {
if (host_pde[i].large_page) {
// 4MB large page mapped directly by the PDE.
pde32_4MB_t * large_pde = (pde32_4MB_t *)&(host_pde[i]);
addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
if ((ret = callback(info, PAGE_4MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data)) != 0) {
vaddr += PAGE_SIZE_4MB;
addr_t pte_pa = BASE_TO_PAGE_ADDR(host_pde[i].pt_base_addr);
pte32_t * tmp_pte = (pte32_t *)V3_VAddr((void *)pte_pa); // direct mapping, no translation step
if ((ret = callback(info, PAGE_PT32, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
if (tmp_pte[j].present) {
addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);
if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data)) != 0) {
// Advance one 4KB page per PTE slot.
vaddr += PAGE_SIZE_4KB;
// Non-present PDE still accounts for 4MB of address space.
vaddr += PAGE_SIZE_4MB;
/*
 * Walk a HOST 32-bit PAE page hierarchy rooted at host_cr3:
 * PDPT -> PD -> PT -> 4KB pages, with 2MB large pages at the PDE
 * level.  Host frames are directly addressable via V3_VAddr(), so no
 * translation-failure paths exist here (contrast with the guest
 * walks).  A non-zero callback return (captured in 'ret') terminates
 * the walk early.
 */
int v3_walk_host_pt_32pae(struct guest_info * info, v3_reg_t host_cr3,
int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
void * private_data) {
pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3); // direct host-virtual view of the PDPT
addr_t pdpe_pa = CR3_TO_PDPE32PAE_PA(host_cr3);                         // physical base of the PDPT
// A walk with no visitor is meaningless.
PrintError("Callback was not specified\n");
// Visit the PDPT before descending into it.
if ((ret = callback(info, PAGE_PDP32PAE, vaddr, (addr_t)host_pdpe, pdpe_pa, private_data)) != 0) {
for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
if (host_pdpe[i].present) {
addr_t pde_pa = BASE_TO_PAGE_ADDR(host_pdpe[i].pd_base_addr);
pde32pae_t * tmp_pde = (pde32pae_t *)V3_VAddr((void *)pde_pa);
if ((ret = callback(info, PAGE_PD32PAE, vaddr, (addr_t)tmp_pde, pde_pa, private_data)) != 0) {
for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
if (tmp_pde[j].present) {
if (tmp_pde[j].large_page) {
// 2MB large page mapped directly by the PDE.
pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)&(tmp_pde[j]);
addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
if ((ret = callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data)) != 0) {
vaddr += PAGE_SIZE_2MB;
addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
pte32pae_t * tmp_pte = (pte32pae_t *)V3_VAddr((void *)pte_pa);
if ((ret = callback(info, PAGE_PT32PAE, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
if (tmp_pte[k].present) {
addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);
if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data)) != 0) {
// Advance one 4KB page per PTE slot.
vaddr += PAGE_SIZE_4KB;
// Non-present PDE: skip its 2MB region.
vaddr += PAGE_SIZE_2MB;
// Non-present PDPE: skip a full PD's worth (512 * 2MB = 1GB).
vaddr += PAGE_SIZE_2MB * MAX_PDE32PAE_ENTRIES;
/*
 * Walk a HOST 4-level 64-bit page hierarchy rooted at host_cr3:
 * PML4 -> PDP -> PD -> PT -> 4KB pages, with 1GB large pages at the
 * PDPE level and 2MB large pages at the PDE level.  Host frames are
 * directly addressable via V3_VAddr(), so no translation-failure
 * paths exist (contrast with v3_walk_guest_pt_64).  A non-zero
 * callback return (captured in 'ret') terminates the walk early.
 */
int v3_walk_host_pt_64(struct guest_info * info, v3_reg_t host_cr3,
int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
void * private_data) {
pml4e64_t * host_pml = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3); // direct host-virtual view of the PML4
addr_t pml_pa = CR3_TO_PML4E64_PA(host_cr3);                     // physical base of the PML4
// A walk with no visitor is meaningless.
PrintError("Callback was not specified\n");
// Visit the PML4 before descending into it.
if ((ret = callback(info, PAGE_PML464, vaddr, (addr_t)host_pml, pml_pa, private_data)) != 0) {
for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
if (host_pml[i].present) {
addr_t pdpe_pa = BASE_TO_PAGE_ADDR(host_pml[i].pdp_base_addr);
pdpe64_t * tmp_pdpe = (pdpe64_t *)V3_VAddr((void *)pdpe_pa);
if ((ret = callback(info, PAGE_PDP64, vaddr, (addr_t)tmp_pdpe, pdpe_pa, private_data)) != 0) {
for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
if (tmp_pdpe[j].present) {
if (tmp_pdpe[j].large_page) {
// 1GB large page mapped directly by the PDPE.
pdpe64_1GB_t * large_pdp = (pdpe64_1GB_t *)&(tmp_pdpe[j]);
addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdp->page_base_addr);
if ((ret = callback(info, PAGE_1GB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data)) != 0) {
vaddr += PAGE_SIZE_1GB;
addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
pde64_t * tmp_pde = (pde64_t *)V3_VAddr((void *)pde_pa);
if ((ret = callback(info, PAGE_PD64, vaddr, (addr_t)tmp_pde, pde_pa, private_data)) != 0) {
for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
if (tmp_pde[k].present) {
if (tmp_pde[k].large_page) {
// 2MB large page mapped directly by the PDE.
pde64_2MB_t * large_pde = (pde64_2MB_t *)&(tmp_pde[k]);
addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
if ((ret = callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data)) != 0) {
vaddr += PAGE_SIZE_2MB;
addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
pte64_t * tmp_pte = (pte64_t *)V3_VAddr((void *)pte_pa);
if ((ret = callback(info, PAGE_PT64, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
if (tmp_pte[m].present) {
addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);
if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data)) != 0) {
// Advance one 4KB page per PTE slot.
vaddr += PAGE_SIZE_4KB;
// Non-present PDE: skip its 2MB region.
vaddr += PAGE_SIZE_2MB;
// Non-present PDPE: skip its 1GB region.
vaddr += PAGE_SIZE_1GB;
// Non-present PML4E: skip 512GB; widened to ullong_t to avoid 32-bit overflow.
vaddr += (ullong_t)PAGE_SIZE_1GB * (ullong_t)MAX_PDPE64_ENTRIES;
/*
 * Human-readable names for each page_type_t value, handed out by
 * v3_page_type_to_str() below.  File-scope constants so callers
 * receive stable pointers that never go out of scope.
 */
static const uchar_t PAGE_4KB_STR[] = "4KB_PAGE";
static const uchar_t PAGE_2MB_STR[] = "2MB_PAGE";
static const uchar_t PAGE_4MB_STR[] = "4MB_PAGE";
static const uchar_t PAGE_1GB_STR[] = "1GB_PAGE";
static const uchar_t PAGE_PT32_STR[] = "32 Bit PT";
static const uchar_t PAGE_PD32_STR[] = "32 Bit PD";
static const uchar_t PAGE_PDP32PAE_STR[] = "32 Bit PAE PDP";
static const uchar_t PAGE_PD32PAE_STR[] = "32 Bit PAE PD";
static const uchar_t PAGE_PT32PAE_STR[] = "32 Bit PAE PT";
static const uchar_t PAGE_PML464_STR[] = "64 Bit PML4";
static const uchar_t PAGE_PDP64_STR[] = "64 Bit PDP";
static const uchar_t PAGE_PD64_STR[] = "64 Bit PD";
static const uchar_t PAGE_PT64_STR[] = "64 Bit PT";
1826 const uchar_t * v3_page_type_to_str(page_type_t type) {
1829 return PAGE_4KB_STR;
1831 return PAGE_2MB_STR;
1833 return PAGE_4MB_STR;
1835 return PAGE_1GB_STR;
1837 return PAGE_PT32_STR;
1839 return PAGE_PD32_STR;
1841 return PAGE_PDP32PAE_STR;
1843 return PAGE_PD32PAE_STR;
1845 return PAGE_PT32PAE_STR;
1847 return PAGE_PML464_STR;
1849 return PAGE_PDP64_STR;
1851 return PAGE_PD64_STR;
1853 return PAGE_PT64_STR;