2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm_paging.h>
22 #include <palacios/vmm.h>
24 #include <palacios/vm_guest_mem.h>
27 static pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry);
28 static pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry);
30 static pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry);
31 static pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry);
32 static pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry);
34 static pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry);
35 static pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry);
36 static pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry);
37 static pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry);
42 #define USE_VMM_PAGING_DEBUG
43 // All of the debug functions declared in vmm_paging.h are implemented in this file
44 #include "vmm_paging_debug.h"
45 #undef USE_VMM_PAGING_DEBUG
49 #ifndef V3_CONFIG_DEBUG_SHADOW_PAGING
51 #define PrintDebug(fmt, args...)
56 void delete_page_tables_32(pde32_t * pde) {
63 PrintDebug("Deleting Page Tables (32) -- PDE (%p)\n", pde);
65 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
66 if ((pde[i].present == 1) && (pde[i].large_page == 0)) {
67 // We double cast, first to an addr_t to handle 64 bit issues, then to the pointer
69 PrintDebug("Deleting PT Page %d (%p)\n", i, (void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pde[i].pt_base_addr));
70 V3_FreePages((void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pde[i].pt_base_addr), 1);
74 V3_FreePages(V3_PAddr(pde), 1);
77 void delete_page_tables_32pae(pdpe32pae_t * pdpe) {
84 PrintDebug("Deleting Page Tables (32 PAE) -- PDPE (%p)\n", pdpe);
86 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
87 if (pdpe[i].present == 0) {
91 pde32pae_t * pde = (pde32pae_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pdpe[i].pd_base_addr));
93 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
95 if ((pde[j].present == 0) || (pde[j].large_page == 1)) {
99 V3_FreePages((void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pde[j].pt_base_addr), 1);
102 V3_FreePages(V3_PAddr(pde), 1);
105 V3_FreePages(V3_PAddr(pdpe), 1);
108 void delete_page_tables_64(pml4e64_t * pml4) {
115 PrintDebug("Deleting Page Tables (64) -- PML4 (%p)\n", pml4);
117 for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
118 if (pml4[i].present == 0) {
122 pdpe64_t * pdpe = (pdpe64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pml4[i].pdp_base_addr));
124 for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
125 if ((pdpe[j].present == 0) || (pdpe[j].large_page == 1)) {
129 pde64_t * pde = (pde64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pdpe[j].pd_base_addr));
131 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
132 if ((pde[k].present == 0) || (pde[k].large_page == 1)) {
136 V3_FreePages((void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pde[k].pt_base_addr), 1);
139 V3_FreePages(V3_PAddr(pde), 1);
142 V3_FreePages(V3_PAddr(pdpe), 1);
145 V3_FreePages(V3_PAddr(pml4), 1);
151 static int translate_pt_32_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
152 addr_t * paddr = (addr_t *)private_data;
159 *paddr = page_pa + PAGE_OFFSET_4MB(vaddr);
162 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
165 PrintError("Invalid page type (%s) in translate pt 32 callback\n", v3_page_type_to_str(type));
170 static int translate_pt_32pae_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
171 addr_t * paddr = (addr_t *)private_data;
179 *paddr = page_pa + PAGE_OFFSET_2MB(vaddr);
182 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
185 PrintError("Invalid page type (%s) in translate pt 32pae callback\n", v3_page_type_to_str(type));
190 static int translate_pt_64_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
191 addr_t * paddr = (addr_t *)private_data;
200 *paddr = page_pa + PAGE_OFFSET_1GB(vaddr);
203 *paddr = page_pa + PAGE_OFFSET_2MB(vaddr);
206 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
209 PrintError("Invalid page type (%s) in translate pt 64 callback\n", v3_page_type_to_str(type));
215 int v3_translate_host_pt_32(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
216 return v3_drill_host_pt_32(info, host_cr3, vaddr, translate_pt_32_cb, paddr);
218 int v3_translate_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
219 return v3_drill_guest_pt_32(info, guest_cr3, vaddr, translate_pt_32_cb, paddr);
223 int v3_translate_host_pt_32pae(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
224 return v3_drill_host_pt_32pae(info, host_cr3, vaddr, translate_pt_32pae_cb, paddr);
226 int v3_translate_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
227 return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, translate_pt_32pae_cb, paddr);
231 int v3_translate_host_pt_64(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
232 return v3_drill_host_pt_64(info, host_cr3, vaddr, translate_pt_64_cb, paddr);
234 int v3_translate_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
235 return v3_drill_guest_pt_64(info, guest_cr3, vaddr, translate_pt_64_cb, paddr);
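/*
 * Illustrative sketch (not part of the original file): one way a caller might
 * use the translation wrappers above to resolve a guest virtual address to a
 * guest physical address. The helper name and the -1-on-failure check are
 * assumptions based on the drill functions later in this file.
 */
static int example_translate_gva_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t gva) {
    addr_t gpa = 0;

    // Walk the guest's own 32 bit page tables rooted at guest_cr3
    if (v3_translate_guest_pt_32(info, guest_cr3, gva, &gpa) == -1) {
	PrintError("Could not translate GVA %p\n", (void *)gva);
	return -1;
    }

    PrintDebug("GVA %p maps to GPA %p\n", (void *)gva, (void *)gpa);
    return 0;
}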
240 struct pt_find_data {
242 addr_t * pt_page_ptr;
246 static int find_pt_cb(struct guest_info * info, page_type_t type, addr_t vaddr,
247 addr_t page_ptr, addr_t page_pa, void * private_data) {
248 struct pt_find_data * pt_data = (struct pt_find_data *)private_data;
250 PrintDebug("FIND_PT Type=%s, page_pa = %p\n",
251 v3_page_type_to_str(type),
254 if (type == pt_data->type) {
255 *(pt_data->pt_page_ptr) = page_ptr;
256 *(pt_data->pt_page_pa) = page_pa;
264 int v3_find_host_pt_32_page(struct guest_info * info, v3_reg_t host_cr3, page_type_t type, addr_t vaddr,
265 addr_t * page_ptr, addr_t * page_pa) {
266 struct pt_find_data data;
269 data.pt_page_ptr = page_ptr;
270 data.pt_page_pa = page_pa;
272 return v3_drill_host_pt_32(info, host_cr3, vaddr, find_pt_cb, &data);
275 int v3_find_host_pt_32pae_page(struct guest_info * info, v3_reg_t host_cr3, page_type_t type, addr_t vaddr,
276 addr_t * page_ptr, addr_t * page_pa) {
277 struct pt_find_data data;
280 data.pt_page_ptr = page_ptr;
281 data.pt_page_pa = page_pa;
283 return v3_drill_host_pt_32pae(info, host_cr3, vaddr, find_pt_cb, &data);
286 int v3_find_host_pt_64_page(struct guest_info * info, v3_reg_t host_cr3, page_type_t type, addr_t vaddr,
287 addr_t * page_ptr, addr_t * page_pa) {
288 struct pt_find_data data;
291 data.pt_page_ptr = page_ptr;
292 data.pt_page_pa = page_pa;
294 return v3_drill_host_pt_64(info, host_cr3, vaddr, find_pt_cb, &data);
296 int v3_find_guest_pt_32_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr,
297 addr_t * page_ptr, addr_t * page_pa) {
298 struct pt_find_data data;
301 data.pt_page_ptr = page_ptr;
302 data.pt_page_pa = page_pa;
304 return v3_drill_guest_pt_32(info, guest_cr3, vaddr, find_pt_cb, &data);
307 int v3_find_guest_pt_32pae_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr,
308 addr_t * page_ptr, addr_t * page_pa) {
309 struct pt_find_data data;
312 data.pt_page_ptr = page_ptr;
313 data.pt_page_pa = page_pa;
315 return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, find_pt_cb, &data);
318 int v3_find_guest_pt_64_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr,
319 addr_t * page_ptr, addr_t * page_pa) {
320 struct pt_find_data data;
323 data.pt_page_ptr = page_ptr;
324 data.pt_page_pa = page_pa;
326 return v3_drill_guest_pt_64(info, guest_cr3, vaddr, find_pt_cb, &data);
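/*
 * Illustrative sketch (hypothetical helper, not in the original source): using
 * the find wrappers above to locate the 32 bit page table page that maps a
 * given guest virtual address. Treating a return of -1 as a failed walk is an
 * assumption based on the drill functions later in this file.
 */
static int example_find_guest_pt32_page(struct guest_info * info, v3_reg_t guest_cr3, addr_t gva) {
    addr_t pt_hva = 0;   // host virtual address of the page table page
    addr_t pt_gpa = 0;   // guest physical address of the page table page

    if (v3_find_guest_pt_32_page(info, guest_cr3, PAGE_PT32, gva, &pt_hva, &pt_gpa) == -1) {
	PrintError("Could not find the PT32 page mapping GVA %p\n", (void *)gva);
	return -1;
    }

    PrintDebug("PT32 page for GVA %p: ptr=%p, PA=%p\n",
	       (void *)gva, (void *)pt_hva, (void *)pt_gpa);
    return 0;
}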
332 * Page Table Access Checks
337 struct pt_check_data {
338 pf_error_t access_type;
339 pt_access_status_t * access_status;
342 static int check_pt_32_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
343 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
347 *(chk_data->access_status) = v3_can_access_pde32((pde32_t *)page_ptr, vaddr, chk_data->access_type);
350 *(chk_data->access_status) = v3_can_access_pte32((pte32_t *)page_ptr, vaddr, chk_data->access_type);
356 PrintError("Invalid page type (%s) in check pt 32 callback\n", v3_page_type_to_str(type));
360 if (*(chk_data->access_status) != PT_ACCESS_OK) {
368 static int check_pt_32pae_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
369 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
373 *(chk_data->access_status) = v3_can_access_pdpe32pae((pdpe32pae_t *)page_ptr, vaddr, chk_data->access_type);
376 *(chk_data->access_status) = v3_can_access_pde32pae((pde32pae_t *)page_ptr, vaddr, chk_data->access_type);
379 *(chk_data->access_status) = v3_can_access_pte32pae((pte32pae_t *)page_ptr, vaddr, chk_data->access_type);
385 PrintError("Invalid page type (%s) in check pt 32pae callback\n", v3_page_type_to_str(type));
389 if (*(chk_data->access_status) != PT_ACCESS_OK) {
397 static int check_pt_64_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
398 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
402 *(chk_data->access_status) = v3_can_access_pml4e64((pml4e64_t *)page_ptr, vaddr, chk_data->access_type);
405 *(chk_data->access_status) = v3_can_access_pdpe64((pdpe64_t *)page_ptr, vaddr, chk_data->access_type);
408 *(chk_data->access_status) = v3_can_access_pde64((pde64_t *)page_ptr, vaddr, chk_data->access_type);
411 *(chk_data->access_status) = v3_can_access_pte64((pte64_t *)page_ptr, vaddr, chk_data->access_type);
418 PrintError("Invalid page type (%s) in check pt 64 callback\n", v3_page_type_to_str(type));
422 if (*(chk_data->access_status) != PT_ACCESS_OK) {
431 int v3_check_host_pt_32(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
432 struct pt_check_data access_data;
434 access_data.access_type = access_type;
435 access_data.access_status = access_status;
437 return v3_drill_host_pt_32(info, host_cr3, vaddr, check_pt_32_cb, &access_data);
440 int v3_check_host_pt_32pae(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
441 struct pt_check_data access_data;
443 access_data.access_type = access_type;
444 access_data.access_status = access_status;
446 return v3_drill_host_pt_32pae(info, host_cr3, vaddr, check_pt_32pae_cb, &access_data);
451 int v3_check_host_pt_64(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
452 struct pt_check_data access_data;
454 access_data.access_type = access_type;
455 access_data.access_status = access_status;
457 return v3_drill_host_pt_64(info, host_cr3, vaddr, check_pt_64_cb, &access_data);
462 int v3_check_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
463 pf_error_t access_type, pt_access_status_t * access_status) {
464 struct pt_check_data access_data;
466 access_data.access_type = access_type;
467 access_data.access_status = access_status;
469 return v3_drill_guest_pt_32(info, guest_cr3, vaddr, check_pt_32_cb, &access_data);
476 int v3_check_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
477 pf_error_t access_type, pt_access_status_t * access_status) {
478 struct pt_check_data access_data;
480 access_data.access_type = access_type;
481 access_data.access_status = access_status;
483 return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, check_pt_32pae_cb, &access_data);
488 int v3_check_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
489 pf_error_t access_type, pt_access_status_t * access_status) {
490 struct pt_check_data access_data;
492 access_data.access_type = access_type;
493 access_data.access_status = access_status;
495 return v3_drill_guest_pt_64(info, guest_cr3, vaddr, check_pt_64_cb, &access_data);
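/*
 * Illustrative sketch (hypothetical, not in the original source): using the
 * access check wrappers above to ask whether a user-mode write to a guest
 * virtual address would be allowed by the guest's own 32 bit page tables.
 * Only the pf_error_t fields consulted by can_access_pt_entry() below (write,
 * user) are set; zero-initializing pf_error_t with {0} is an assumption.
 */
static int example_check_user_write_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t gva) {
    pf_error_t access_type = {0};
    pt_access_status_t access_status = PT_ACCESS_OK;

    access_type.write = 1;
    access_type.user = 1;

    if (v3_check_guest_pt_32(info, guest_cr3, gva, access_type, &access_status) == -1) {
	PrintError("Access check walk failed for GVA %p\n", (void *)gva);
	return -1;
    }

    if (access_status != PT_ACCESS_OK) {
	PrintDebug("User write to %p would fault (status=%d)\n", (void *)gva, access_status);
    }

    return 0;
}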
499 static int get_data_page_type_cb(struct guest_info * info, page_type_t type,
500 addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
514 page_type_t v3_get_guest_data_page_type_32(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
515 return v3_drill_guest_pt_32(info, cr3, vaddr, get_data_page_type_cb, NULL);
517 page_type_t v3_get_guest_data_page_type_32pae(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
518 return v3_drill_guest_pt_32pae(info, cr3, vaddr, get_data_page_type_cb, NULL);
520 page_type_t v3_get_guest_data_page_type_64(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
521 return v3_drill_guest_pt_64(info, cr3, vaddr, get_data_page_type_cb, NULL);
523 page_type_t v3_get_host_data_page_type_32(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
524 return v3_drill_host_pt_32(info, cr3, vaddr, get_data_page_type_cb, NULL);
526 page_type_t v3_get_host_data_page_type_32pae(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
527 return v3_drill_host_pt_32pae(info, cr3, vaddr, get_data_page_type_cb, NULL);
529 page_type_t v3_get_host_data_page_type_64(struct guest_info * info, v3_reg_t cr3, addr_t vaddr) {
530 return v3_drill_host_pt_64(info, cr3, vaddr, get_data_page_type_cb, NULL);
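/*
 * Illustrative sketch (hypothetical, not in the original source): querying the
 * type of page backing a guest virtual address and printing it via
 * v3_page_type_to_str(). Treating a negative return as a failed walk is an
 * assumption based on the drill functions below.
 */
static void example_print_data_page_type_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t gva) {
    page_type_t type = v3_get_guest_data_page_type_32(info, guest_cr3, gva);

    if ((int)type < 0) {
	PrintError("Could not determine the page type backing GVA %p\n", (void *)gva);
	return;
    }

    PrintDebug("GVA %p is backed by a %s\n", (void *)gva, v3_page_type_to_str(type));
}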
535 * PAGE TABLE LOOKUP FUNCTIONS
537 * The looked-up page (or next level table) address is returned through *entry:
538 * Page not present: *entry = 0
543 * 32 bit Page Table lookup functions
547 static pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry) {
548 pde32_t * pde_entry = &(pd[PDE32_INDEX(addr)]);
550 if (!pde_entry->present) {
552 return PT_ENTRY_NOT_PRESENT;
553 } else if (pde_entry->large_page) {
554 pde32_4MB_t * large_pde = (pde32_4MB_t *)pde_entry;
556 *entry = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
558 return PT_ENTRY_LARGE_PAGE;
560 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
561 return PT_ENTRY_PAGE;
567 /* Takes a virtual addr (addr) and returns the physical addr (entry) as defined in the page table
569 static pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry) {
570 pte32_t * pte_entry = &(pt[PTE32_INDEX(addr)]);
572 if (!pte_entry->present) {
574 // PrintDebug("Lookup at non present page (index=%d)\n", PTE32_INDEX(addr));
575 return PT_ENTRY_NOT_PRESENT;
577 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
579 return PT_ENTRY_PAGE;
588 * 32 bit PAE Page Table lookup functions
591 static pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry) {
592 pdpe32pae_t * pdpe_entry = &(pdp[PDPE32PAE_INDEX(addr)]);
594 if (!pdpe_entry->present) {
596 return PT_ENTRY_NOT_PRESENT;
598 *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
599 return PT_ENTRY_PAGE;
603 static pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry) {
604 pde32pae_t * pde_entry = &(pd[PDE32PAE_INDEX(addr)]);
606 if (!pde_entry->present) {
608 return PT_ENTRY_NOT_PRESENT;
609 } else if (pde_entry->large_page) {
610 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)pde_entry;
612 *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
614 return PT_ENTRY_LARGE_PAGE;
616 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
617 return PT_ENTRY_PAGE;
621 static pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry) {
622 pte32pae_t * pte_entry = &(pt[PTE32PAE_INDEX(addr)]);
624 if (!pte_entry->present) {
626 return PT_ENTRY_NOT_PRESENT;
628 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
629 return PT_ENTRY_PAGE;
637 * 64 bit Page Table lookup functions
640 static pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry) {
641 pml4e64_t * pml_entry = &(pml[PML4E64_INDEX(addr)]);
643 if (!pml_entry->present) {
645 return PT_ENTRY_NOT_PRESENT;
647 *entry = BASE_TO_PAGE_ADDR(pml_entry->pdp_base_addr);
648 return PT_ENTRY_PAGE;
652 static pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry) {
653 pdpe64_t * pdpe_entry = &(pdp[PDPE64_INDEX(addr)]);
655 if (!pdpe_entry->present) {
657 return PT_ENTRY_NOT_PRESENT;
658 } else if (pdpe_entry->large_page) {
659 PrintError("1 Gigabyte pages not supported\n");
663 *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
664 return PT_ENTRY_PAGE;
668 static pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry) {
669 pde64_t * pde_entry = &(pd[PDE64_INDEX(addr)]);
671 if (!pde_entry->present) {
673 return PT_ENTRY_NOT_PRESENT;
674 } else if (pde_entry->large_page) {
675 pde64_2MB_t * large_pde = (pde64_2MB_t *)pde_entry;
677 *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
679 return PT_ENTRY_LARGE_PAGE;
681 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
682 return PT_ENTRY_PAGE;
686 static pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry) {
687 pte64_t * pte_entry = &(pt[PTE64_INDEX(addr)]);
689 if (!pte_entry->present) {
691 return PT_ENTRY_NOT_PRESENT;
693 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
694 return PT_ENTRY_PAGE;
701 static pt_access_status_t can_access_pt_entry(gen_pt_t * pt, pf_error_t access_type) {
702 if (pt->present == 0) {
703 return PT_ACCESS_NOT_PRESENT;
704 } else if ((pt->writable == 0) && (access_type.write == 1)) {
705 return PT_ACCESS_WRITE_ERROR;
706 } else if ((pt->user_page == 0) && (access_type.user == 1)) {
708 return PT_ACCESS_USER_ERROR;
717 * 32 bit access checks
719 pt_access_status_t inline v3_can_access_pde32(pde32_t * pde, addr_t addr, pf_error_t access_type) {
720 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32_INDEX(addr)];
721 return can_access_pt_entry(entry, access_type);
724 pt_access_status_t inline v3_can_access_pte32(pte32_t * pte, addr_t addr, pf_error_t access_type) {
725 gen_pt_t * entry = (gen_pt_t *)&pte[PTE32_INDEX(addr)];
726 return can_access_pt_entry(entry, access_type);
731 * 32 bit PAE access checks
733 pt_access_status_t inline v3_can_access_pdpe32pae(pdpe32pae_t * pdpe, addr_t addr, pf_error_t access_type) {
734 gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE32PAE_INDEX(addr)];
735 return can_access_pt_entry(entry, access_type);
738 pt_access_status_t inline v3_can_access_pde32pae(pde32pae_t * pde, addr_t addr, pf_error_t access_type) {
739 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32PAE_INDEX(addr)];
740 return can_access_pt_entry(entry, access_type);
743 pt_access_status_t inline v3_can_access_pte32pae(pte32pae_t * pte, addr_t addr, pf_error_t access_type) {
744 gen_pt_t * entry = (gen_pt_t *)&pte[PTE32PAE_INDEX(addr)];
745 return can_access_pt_entry(entry, access_type);
749 * 64 Bit access checks
751 pt_access_status_t inline v3_can_access_pml4e64(pml4e64_t * pmle, addr_t addr, pf_error_t access_type) {
752 gen_pt_t * entry = (gen_pt_t *)&pmle[PML4E64_INDEX(addr)];
753 return can_access_pt_entry(entry, access_type);
756 pt_access_status_t inline v3_can_access_pdpe64(pdpe64_t * pdpe, addr_t addr, pf_error_t access_type) {
757 gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE64_INDEX(addr)];
758 return can_access_pt_entry(entry, access_type);
761 pt_access_status_t inline v3_can_access_pde64(pde64_t * pde, addr_t addr, pf_error_t access_type) {
762 gen_pt_t * entry = (gen_pt_t *)&pde[PDE64_INDEX(addr)];
763 return can_access_pt_entry(entry, access_type);
766 pt_access_status_t inline v3_can_access_pte64(pte64_t * pte, addr_t addr, pf_error_t access_type) {
767 gen_pt_t * entry = (gen_pt_t *)&pte[PTE64_INDEX(addr)];
768 return can_access_pt_entry(entry, access_type);
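/*
 * Illustrative sketch (hypothetical, not in the original source): a callback in
 * the form expected by the v3_drill_*_pt_* functions below. Returning 0 lets
 * the drill continue to the next level; any nonzero return stops the walk there.
 */
static int example_trace_drill_cb(struct guest_info * info, page_type_t type, addr_t vaddr,
				  addr_t page_ptr, addr_t page_pa, void * private_data) {
    // Log each paging structure the drill visits on the way to vaddr
    PrintDebug("Drill for %p reached %s (page PA=%p)\n",
	       (void *)vaddr, v3_page_type_to_str(type), (void *)page_pa);
    return 0;
}

/* A caller would hand it to one of the drill functions, e.g.:
 *   v3_drill_guest_pt_32(info, guest_cr3, vaddr, example_trace_drill_cb, NULL);
 */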
776 int v3_drill_host_pt_32(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr,
777 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
778 void * private_data) {
779 pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
780 addr_t host_pde_pa = CR3_TO_PDE32_PA(host_cr3);
781 addr_t host_pte_pa = 0;
785 if ((ret = callback(info, PAGE_PD32, vaddr, (addr_t)host_pde, host_pde_pa, private_data)) != 0) {
786 return (ret == -1) ? -1 : PAGE_PD32;
789 switch (pde32_lookup(host_pde, vaddr, &host_pte_pa)) {
790 case PT_ENTRY_NOT_PRESENT:
792 case PT_ENTRY_LARGE_PAGE:
793 if ((ret = callback(info, PAGE_4MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
794 return (ret == -1) ? -1 : PAGE_4MB;
798 if ((ret = callback(info, PAGE_PT32, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
799 return (ret == -1) ? -1 : PAGE_PT32;
802 if (pte32_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
805 if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
806 return (ret == -1) ? -1 : PAGE_4KB;
816 int v3_drill_host_pt_32pae(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr,
817 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
818 void * private_data) {
819 pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
820 addr_t host_pdpe_pa = CR3_TO_PDPE32PAE_PA(host_cr3);
821 addr_t host_pde_pa = 0;
822 addr_t host_pte_pa = 0;
826 if ((ret = callback(info, PAGE_PDP32PAE, vaddr, (addr_t)host_pdpe, host_pdpe_pa, private_data)) != 0) {
827 return (ret == -1) ? -1 : PAGE_PDP32PAE;
830 switch (pdpe32pae_lookup(host_pdpe, vaddr, &host_pde_pa)) {
831 case PT_ENTRY_NOT_PRESENT:
835 if ((ret = callback(info, PAGE_PD32PAE, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data)) != 0) {
836 return (ret == -1) ? -1 : PAGE_PD32PAE;
839 switch (pde32pae_lookup(V3_VAddr((void *)host_pde_pa), vaddr, &host_pte_pa)) {
840 case PT_ENTRY_NOT_PRESENT:
842 case PT_ENTRY_LARGE_PAGE:
843 if ((ret = callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
844 return (ret == -1) ? -1 : PAGE_2MB;
848 if ((ret = callback(info, PAGE_PT32PAE, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
849 return (ret == -1) ? -1 : PAGE_PT32PAE;
852 if (pte32pae_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
855 if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
856 return (ret == -1) ? -1 : PAGE_4KB;
865 // should never get here
870 int v3_drill_host_pt_64(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr,
871 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
872 void * private_data) {
873 pml4e64_t * host_pmle = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
874 addr_t host_pmle_pa = CR3_TO_PML4E64_PA(host_cr3);
875 addr_t host_pdpe_pa = 0;
876 addr_t host_pde_pa = 0;
877 addr_t host_pte_pa = 0;
881 if ((ret = callback(info, PAGE_PML464, vaddr, (addr_t)host_pmle, host_pmle_pa, private_data)) != 0) {
882 return (ret == -1) ? -1 : PAGE_PML464;
885 switch(pml4e64_lookup(host_pmle, vaddr, &host_pdpe_pa)) {
886 case PT_ENTRY_NOT_PRESENT:
890 if ((ret = callback(info, PAGE_PDP64, vaddr, (addr_t)V3_VAddr((void *)host_pdpe_pa), host_pdpe_pa, private_data)) != 0) {
891 return (ret == -1) ? -1 : PAGE_PDP64;
894 switch(pdpe64_lookup(V3_VAddr((void *)host_pdpe_pa), vaddr, &host_pde_pa)) {
895 case PT_ENTRY_NOT_PRESENT:
897 case PT_ENTRY_LARGE_PAGE:
898 if ((ret = callback(info, PAGE_1GB, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data)) != 0) {
899 return (ret == -1) ? -1 : PAGE_1GB;
901 PrintError("1 Gigabyte Pages not supported\n");
905 if ((ret = callback(info, PAGE_PD64, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data)) != 0) {
906 return (ret == -1) ? -1 : PAGE_PD64;
909 switch (pde64_lookup(V3_VAddr((void *)host_pde_pa), vaddr, &host_pte_pa)) {
910 case PT_ENTRY_NOT_PRESENT:
912 case PT_ENTRY_LARGE_PAGE:
913 if ((ret = callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
914 return (ret == -1) ? -1 : PAGE_2MB;
919 if ((ret = callback(info, PAGE_PT64, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
920 return (ret == -1) ? -1 : PAGE_PT64;
923 if (pte64_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
926 if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
927 return (ret == -1) ? -1 : PAGE_4KB;
936 // should never get here
946 int v3_drill_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
947 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
948 void * private_data) {
949 addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
950 pde32_t * guest_pde = NULL;
951 addr_t guest_pte_pa = 0;
955 if (v3_gpa_to_hva(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
956 PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
957 (void *)guest_pde_pa);
961 if ((ret = callback(info, PAGE_PD32, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
962 return (ret == -1) ? -1 : PAGE_PD32;
965 switch (pde32_lookup(guest_pde, vaddr, &guest_pte_pa)) {
966 case PT_ENTRY_NOT_PRESENT:
968 case PT_ENTRY_LARGE_PAGE:
970 addr_t large_page_pa = (addr_t)guest_pte_pa;
971 addr_t large_page_va = 0;
973 if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
978 if ((ret = callback(info, PAGE_4MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
979 return (ret == -1) ? -1 : PAGE_4MB;
985 pte32_t * guest_pte = NULL;
988 if (v3_gpa_to_hva(info, guest_pte_pa, (addr_t*)&guest_pte) == -1) {
989 PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
990 (void *)guest_pte_pa);
994 if ((ret = callback(info, PAGE_PT32, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data)) != 0) {
995 return (ret == -1) ? -1 : PAGE_PT32;
998 if (pte32_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1003 if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
1007 if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1008 return (ret == -1) ? -1 : PAGE_4KB;
1015 // should never get here
1016 PrintError("End of drill function (guest 32)... Should never have gotten here...\n");
1022 int v3_drill_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1023 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1024 void * private_data) {
1025 addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
1026 pdpe32pae_t * guest_pdpe = 0;
1027 addr_t guest_pde_pa = 0;
1030 if (v3_gpa_to_hva(info, guest_pdpe_pa, (addr_t*)&guest_pdpe) == -1) {
1031 PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
1032 (void *)guest_pdpe_pa);
1036 if ((ret = callback(info, PAGE_PDP32PAE, vaddr, (addr_t)guest_pdpe, guest_pdpe_pa, private_data)) != 0) {
1037 return (ret == -1) ? -1 : PAGE_PDP32PAE;
1040 switch (pdpe32pae_lookup(guest_pdpe, vaddr, &guest_pde_pa))
1042 case PT_ENTRY_NOT_PRESENT:
1046 pde32pae_t * guest_pde = NULL;
1047 addr_t guest_pte_pa = 0;
1049 if (v3_gpa_to_hva(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1050 PrintError("Could not get virtual Address of Guest PDE32PAE (PA=%p)\n",
1051 (void *)guest_pde_pa);
1055 if ((ret = callback(info, PAGE_PD32PAE, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1056 return (ret == -1) ? -1 : PAGE_PD32PAE;
1059 switch (pde32pae_lookup(guest_pde, vaddr, &guest_pte_pa))
1061 case PT_ENTRY_NOT_PRESENT:
1063 case PT_ENTRY_LARGE_PAGE:
1065 addr_t large_page_pa = (addr_t)guest_pte_pa;
1066 addr_t large_page_va = 0;
1068 if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
1072 if ((ret = callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1073 return (ret == -1) ? -1 : PAGE_2MB;
1079 pte32pae_t * guest_pte = NULL;
1082 if (v3_gpa_to_hva(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
1083 PrintError("Could not get virtual Address of Guest PTE32PAE (PA=%p)\n",
1084 (void *)guest_pte_pa);
1088 if ((ret = callback(info, PAGE_PT32PAE, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data)) != 0) {
1089 return (ret == -1) ? -1 : PAGE_PT32PAE;
1092 if (pte32pae_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1097 if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
1101 if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1102 return (ret == -1) ? -1 : PAGE_4KB;
1110 PrintError("Invalid page type for PD32PAE\n");
1114 // should never get here
1115 PrintError("End of drill function (guest 32pae)... Should never have gotten here...\n");
1119 int v3_drill_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1120 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1121 void * private_data) {
1122 addr_t guest_pml4_pa = CR3_TO_PML4E64_PA(guest_cr3);
1123 pml4e64_t * guest_pmle = 0;
1124 addr_t guest_pdpe_pa = 0;
1127 if (v3_gpa_to_hva(info, guest_pml4_pa, (addr_t*)&guest_pmle) == -1) {
1128 PrintError("Could not get virtual address of Guest PML4E64 (PA=%p)\n",
1129 (void *)guest_pml4_pa);
1133 if ((ret = callback(info, PAGE_PML464, vaddr, (addr_t)guest_pmle, guest_pml4_pa, private_data)) != 0) {
1134 return (ret == -1) ? -1 : PAGE_PML464;
1137 switch (pml4e64_lookup(guest_pmle, vaddr, &guest_pdpe_pa)) {
1138 case PT_ENTRY_NOT_PRESENT:
1142 pdpe64_t * guest_pdp = NULL;
1143 addr_t guest_pde_pa = 0;
1145 if (v3_gpa_to_hva(info, guest_pdpe_pa, (addr_t *)&guest_pdp) == -1) {
1146 PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
1147 (void *)guest_pdpe_pa);
1151 if ((ret = callback(info, PAGE_PDP64, vaddr, (addr_t)guest_pdp, guest_pdpe_pa, private_data)) != 0) {
1152 return (ret == -1) ? -1 : PAGE_PDP64;
1155 switch (pdpe64_lookup(guest_pdp, vaddr, &guest_pde_pa)) {
1156 case PT_ENTRY_NOT_PRESENT:
1158 case PT_ENTRY_LARGE_PAGE:
1160 addr_t large_page_pa = (addr_t)guest_pde_pa;
1161 addr_t large_page_va = 0;
1163 if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
1167 if ((ret = callback(info, PAGE_1GB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1168 return (ret == -1) ? -1 : PAGE_1GB;
1170 PrintError("1 Gigabyte Pages not supported\n");
1175 pde64_t * guest_pde = NULL;
1176 addr_t guest_pte_pa = 0;
1178 if (v3_gpa_to_hva(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1179 PrintError("Could not get virtual address of guest PDE64 (PA=%p)\n",
1180 (void *)guest_pde_pa);
1184 if ((ret = callback(info, PAGE_PD64, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1185 return (ret == -1) ? -1 : PAGE_PD64;
1188 switch (pde64_lookup(guest_pde, vaddr, &guest_pte_pa)) {
1189 case PT_ENTRY_NOT_PRESENT:
1191 case PT_ENTRY_LARGE_PAGE:
1193 addr_t large_page_pa = (addr_t)guest_pte_pa;
1194 addr_t large_page_va = 0;
1196 if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
1200 if ((ret = callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1201 return (ret == -1) ? -1 : PAGE_2MB;
1207 pte64_t * guest_pte = NULL;
1210 if (v3_gpa_to_hva(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
1211 PrintError("Could not get virtual address of guest PTE64 (PA=%p)\n",
1212 (void *)guest_pte_pa);
1216 if ((ret = callback(info, PAGE_PT64, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data)) != 0) {
1217 return (ret == -1) ? -1 : PAGE_PT64;
1220 if (pte64_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1225 if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
1229 if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1230 return (ret == -1) ? -1 : PAGE_4KB;
1244 // should never get here
1245 PrintError("End of drill function (guest 64)... Should never have gotten here...\n");
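/*
 * Illustrative sketch (hypothetical, not in the original source): using the
 * full-table walkers below to count how many 4KB and 4MB data pages a guest's
 * 32 bit page tables currently map.
 */
struct example_page_count {
    int num_4kb;
    int num_4mb;
};

static int example_count_pages_cb(struct guest_info * info, page_type_t type, addr_t vaddr,
				  addr_t page_ptr, addr_t page_pa, void * private_data) {
    struct example_page_count * count = (struct example_page_count *)private_data;

    if (type == PAGE_4KB) {
	count->num_4kb++;
    } else if (type == PAGE_4MB) {
	count->num_4mb++;
    }

    return 0; // keep walking
}

static int example_count_guest_pages_32(struct guest_info * info, v3_reg_t guest_cr3) {
    struct example_page_count count = {0, 0};

    if (v3_walk_guest_pt_32(info, guest_cr3, example_count_pages_cb, &count) != 0) {
	PrintError("Walk of guest page tables failed\n");
	return -1;
    }

    PrintDebug("Guest maps %d 4KB pages and %d 4MB pages\n", count.num_4kb, count.num_4mb);
    return 0;
}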
1252 int v3_walk_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3,
1253 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1254 void * private_data) {
1255 addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
1256 pde32_t * guest_pde = NULL;
1262 PrintError("Callback was not specified\n");
1266 if (v3_gpa_to_hva(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1267 PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
1268 (void *)guest_pde_pa);
1272 if ((ret = callback(info, PAGE_PD32, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1276 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
1277 if (guest_pde[i].present) {
1278 if (guest_pde[i].large_page) {
1279 pde32_4MB_t * large_pde = (pde32_4MB_t *)&(guest_pde[i]);
1280 addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
1281 addr_t large_page_va = 0;
1283 if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
1284 PrintDebug("Could not get virtual address of Guest 4MB Page (PA=%p)\n",
1285 (void *)large_page_pa);
1286 // We'll let it through for data pages because they may be unmapped or hooked
1290 if ((ret = callback(info, PAGE_4MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1294 vaddr += PAGE_SIZE_4MB;
1296 addr_t pte_pa = BASE_TO_PAGE_ADDR(guest_pde[i].pt_base_addr);
1297 pte32_t * tmp_pte = NULL;
1299 if (v3_gpa_to_hva(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1300 PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
1305 if ((ret = callback(info, PAGE_PT32, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
1309 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
1310 if (tmp_pte[j].present) {
1311 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);
1314 if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
1315 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1317 // We'll let it through for data pages because they may be unmapped or hooked
1321 if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1326 vaddr += PAGE_SIZE_4KB;
1330 vaddr += PAGE_SIZE_4MB;
1337 int v3_walk_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3,
1338 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1339 void * private_data) {
1340 addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
1341 pdpe32pae_t * guest_pdpe = NULL;
1347 PrintError("Callback was not specified\n");
1351 if (v3_gpa_to_hva(info, guest_pdpe_pa, (addr_t *)&guest_pdpe) == -1) {
1352 PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
1353 (void *)guest_pdpe_pa);
1358 if ((ret = callback(info, PAGE_PDP32PAE, vaddr, (addr_t)guest_pdpe, guest_pdpe_pa, private_data)) != 0) {
1362 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
1363 if (guest_pdpe[i].present) {
1364 addr_t pde_pa = BASE_TO_PAGE_ADDR(guest_pdpe[i].pd_base_addr);
1365 pde32pae_t * tmp_pde = NULL;
1367 if (v3_gpa_to_hva(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
1368 PrintError("Could not get virtual address of Guest PDE32PAE (PA=%p)\n",
1373 if ((ret = callback(info, PAGE_PD32PAE, vaddr, (addr_t)tmp_pde, pde_pa, private_data)) != 0) {
1377 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
1378 if (tmp_pde[j].present) {
1379 if (tmp_pde[j].large_page) {
1380 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)&(tmp_pde[j]);
1381 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1382 addr_t large_page_va = 0;
1384 if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
1385 PrintDebug("Could not get virtual address of Guest 2MB Page (PA=%p)\n",
1386 (void *)large_page_pa);
1387 // We'll let it through for data pages because they may be unmapped or hooked
1391 if ((ret = callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1395 vaddr += PAGE_SIZE_2MB;
1397 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
1398 pte32pae_t * tmp_pte = NULL;
1400 if (v3_gpa_to_hva(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1401 PrintError("Could not get virtual address of Guest PTE32PAE (PA=%p)\n",
1406 if ((ret = callback(info, PAGE_PT32PAE, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
1410 for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
1411 if (tmp_pte[k].present) {
1412 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);
1415 if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
1416 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1418 // We'll let it through for data pages because they may be unmapped or hooked
1422 if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1427 vaddr += PAGE_SIZE_4KB;
1431 vaddr += PAGE_SIZE_2MB;
1435 vaddr += PAGE_SIZE_2MB * MAX_PDE32PAE_ENTRIES;
1444 int v3_walk_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3,
1445 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1446 void * private_data) {
1447 addr_t guest_pml_pa = CR3_TO_PML4E64_PA(guest_cr3);
1448 pml4e64_t * guest_pml = NULL;
1454 PrintError("Callback was not specified\n");
1458 if (v3_gpa_to_hva(info, guest_pml_pa, (addr_t *)&guest_pml) == -1) {
1459 PrintError("Could not get virtual address of Guest PML464 (PA=%p)\n",
1465 if ((ret = callback(info, PAGE_PML464, vaddr, (addr_t)guest_pml, guest_pml_pa, private_data)) != 0) {
1469 for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
1470 if (guest_pml[i].present) {
1471 addr_t pdpe_pa = BASE_TO_PAGE_ADDR(guest_pml[i].pdp_base_addr);
1472 pdpe64_t * tmp_pdpe = NULL;
1475 if (v3_gpa_to_hva(info, pdpe_pa, (addr_t *)&tmp_pdpe) == -1) {
1476 PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
1481 if ((ret = callback(info, PAGE_PDP64, vaddr, (addr_t)tmp_pdpe, pdpe_pa, private_data)) != 0) {
1485 for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
1486 if (tmp_pdpe[j].present) {
1487 if (tmp_pdpe[j].large_page) {
1488 pdpe64_1GB_t * large_pdpe = (pdpe64_1GB_t *)&(tmp_pdpe[j]);
1489 addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdpe->page_base_addr);
1490 addr_t large_page_va = 0;
1492 if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
1493 PrintDebug("Could not get virtual address of Guest 1GB page (PA=%p)\n",
1494 (void *)large_page_pa);
1495 // We'll let it through for data pages because they may be unmapped or hooked
1499 if ((ret = callback(info, PAGE_1GB, vaddr, (addr_t)large_page_va, large_page_pa, private_data)) != 0) {
1503 vaddr += PAGE_SIZE_1GB;
1505 addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
1506 pde64_t * tmp_pde = NULL;
1508 if (v3_gpa_to_hva(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
1509 PrintError("Could not get virtual address of Guest PDE64 (PA=%p)\n",
1514 if ((ret = callback(info, PAGE_PD64, vaddr, (addr_t)tmp_pde, pde_pa, private_data)) != 0) {
1518 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
1519 if (tmp_pde[k].present) {
1520 if (tmp_pde[k].large_page) {
1521 pde64_2MB_t * large_pde = (pde64_2MB_t *)&(tmp_pde[k]);
1522 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1523 addr_t large_page_va = 0;
1525 if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
1526 PrintDebug("Could not get virtual address of Guest 2MB page (PA=%p)\n",
1527 (void *)large_page_pa);
1528 // We'll let it through for data pages because they may be unmapped or hooked
1532 if ((ret = callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1536 vaddr += PAGE_SIZE_2MB;
1538 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
1539 pte64_t * tmp_pte = NULL;
1541 if (v3_gpa_to_hva(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1542 PrintError("Could not get virtual address of Guest PTE64 (PA=%p)\n",
1547 if ((ret = callback(info, PAGE_PT64, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
1551 for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
1552 if (tmp_pte[m].present) {
1553 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);
1556 if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
1557 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1559 // We'll let it through for data pages because they may be unmapped or hooked
1563 if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1568 vaddr += PAGE_SIZE_4KB;
1572 vaddr += PAGE_SIZE_2MB;
1577 vaddr += PAGE_SIZE_1GB;
1581 vaddr += ((ullong_t)PAGE_SIZE_1GB * (ullong_t)MAX_PDPE64_ENTRIES);
1587 int v3_walk_host_pt_32(struct guest_info * info, v3_reg_t host_cr3,
1588 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1589 void * private_data) {
1590 pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
1591 addr_t pde_pa = CR3_TO_PDE32_PA(host_cr3);
1597 PrintError("Call back was not specified\n");
1601 if ((ret = callback(info, PAGE_PD32, vaddr, (addr_t)host_pde, pde_pa, private_data)) != 0) {
1605 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
1606 if (host_pde[i].present) {
1607 if (host_pde[i].large_page) {
1608 pde32_4MB_t * large_pde = (pde32_4MB_t *)&(host_pde[i]);
1609 addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
1611 if ((ret = callback(info, PAGE_4MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data)) != 0) {
1615 vaddr += PAGE_SIZE_4MB;
1617 addr_t pte_pa = BASE_TO_PAGE_ADDR(host_pde[i].pt_base_addr);
1618 pte32_t * tmp_pte = (pte32_t *)V3_VAddr((void *)pte_pa);
1620 if ((ret = callback(info, PAGE_PT32, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
1624 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
1625 if (tmp_pte[j].present) {
1626 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);
1627 if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data)) != 0) {
1632 vaddr += PAGE_SIZE_4KB;
1636 vaddr += PAGE_SIZE_4MB;
1646 int v3_walk_host_pt_32pae(struct guest_info * info, v3_reg_t host_cr3,
1647 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1648 void * private_data) {
1649 pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
1650 addr_t pdpe_pa = CR3_TO_PDPE32PAE_PA(host_cr3);
1656 PrintError("Callback was not specified\n");
1660 if ((ret = callback(info, PAGE_PDP32PAE, vaddr, (addr_t)host_pdpe, pdpe_pa, private_data)) != 0) {
1664 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
1665 if (host_pdpe[i].present) {
1666 addr_t pde_pa = BASE_TO_PAGE_ADDR(host_pdpe[i].pd_base_addr);
1667 pde32pae_t * tmp_pde = (pde32pae_t *)V3_VAddr((void *)pde_pa);
1669 if ((ret = callback(info, PAGE_PD32PAE, vaddr, (addr_t)tmp_pde, pde_pa, private_data)) != 0) {
1673 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
1674 if (tmp_pde[j].present) {
1676 if (tmp_pde[j].large_page) {
1677 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)&(tmp_pde[j]);
1678 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1680 if ((ret = callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data)) != 0) {
1684 vaddr += PAGE_SIZE_2MB;
1686 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
1687 pte32pae_t * tmp_pte = (pte32pae_t *)V3_VAddr((void *)pte_pa);
1689 if ((ret = callback(info, PAGE_PT32PAE, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
1693 for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
1694 if (tmp_pte[k].present) {
1695 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);
1696 if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data)) != 0) {
1701 vaddr += PAGE_SIZE_4KB;
1705 vaddr += PAGE_SIZE_2MB;
1709 vaddr += PAGE_SIZE_2MB * MAX_PDE32PAE_ENTRIES;
1716 int v3_walk_host_pt_64(struct guest_info * info, v3_reg_t host_cr3,
1717 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1718 void * private_data) {
1719 pml4e64_t * host_pml = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
1720 addr_t pml_pa = CR3_TO_PML4E64_PA(host_cr3);
1726 PrintError("Callback was not specified\n");
1730 if ((ret = callback(info, PAGE_PML464, vaddr, (addr_t)host_pml, pml_pa, private_data)) != 0) {
1734 for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
1735 if (host_pml[i].present) {
1736 addr_t pdpe_pa = BASE_TO_PAGE_ADDR(host_pml[i].pdp_base_addr);
1737 pdpe64_t * tmp_pdpe = (pdpe64_t *)V3_VAddr((void *)pdpe_pa);
1739 if ((ret = callback(info, PAGE_PDP64, vaddr, (addr_t)tmp_pdpe, pdpe_pa, private_data)) != 0) {
1743 for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
1744 if (tmp_pdpe[j].present) {
1745 if (tmp_pdpe[j].large_page) {
1746 pdpe64_1GB_t * large_pdp = (pdpe64_1GB_t *)&(tmp_pdpe[j]);
1747 addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdp->page_base_addr);
1749 if ((ret = callback(info, PAGE_1GB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data)) != 0) {
1753 vaddr += PAGE_SIZE_1GB;
1755 addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
1756 pde64_t * tmp_pde = (pde64_t *)V3_VAddr((void *)pde_pa);
1758 if ((ret = callback(info, PAGE_PD64, vaddr, (addr_t)tmp_pde, pde_pa, private_data)) != 0) {
1762 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
1763 if (tmp_pde[k].present) {
1764 if (tmp_pde[k].large_page) {
1765 pde64_2MB_t * large_pde = (pde64_2MB_t *)&(tmp_pde[k]);
1766 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1768 if ((ret = callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data)) != 0) {
1772 vaddr += PAGE_SIZE_2MB;
1774 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
1775 pte64_t * tmp_pte = (pte64_t *)V3_VAddr((void *)pte_pa);
1777 if ((ret = callback(info, PAGE_PT64, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {
1781 for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
1782 if (tmp_pte[m].present) {
1783 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);
1784 if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data)) != 0) {
1788 vaddr += PAGE_SIZE_4KB;
1792 vaddr += PAGE_SIZE_2MB;
1797 vaddr += PAGE_SIZE_1GB;
1801 vaddr += (ullong_t)PAGE_SIZE_1GB * (ullong_t)MAX_PDPE64_ENTRIES;
1809 static const uchar_t PAGE_4KB_STR[] = "4KB_PAGE";
1810 static const uchar_t PAGE_2MB_STR[] = "2MB_PAGE";
1811 static const uchar_t PAGE_4MB_STR[] = "4MB_PAGE";
1812 static const uchar_t PAGE_1GB_STR[] = "1GB_PAGE";
1813 static const uchar_t PAGE_PT32_STR[] = "32 Bit PT";
1814 static const uchar_t PAGE_PD32_STR[] = "32 Bit PD";
1815 static const uchar_t PAGE_PDP32PAE_STR[] = "32 Bit PAE PDP";
1816 static const uchar_t PAGE_PD32PAE_STR[] = "32 Bit PAE PD";
1817 static const uchar_t PAGE_PT32PAE_STR[] = "32 Bit PAE PT";
1818 static const uchar_t PAGE_PML464_STR[] = "64 Bit PML4";
1819 static const uchar_t PAGE_PDP64_STR[] = "64 Bit PDP";
1820 static const uchar_t PAGE_PD64_STR[] = "64 Bit PD";
1821 static const uchar_t PAGE_PT64_STR[] = "64 Bit PT";
1824 const uchar_t * v3_page_type_to_str(page_type_t type) {
1827 return PAGE_4KB_STR;
1829 return PAGE_2MB_STR;
1831 return PAGE_4MB_STR;
1833 return PAGE_1GB_STR;
1835 return PAGE_PT32_STR;
1837 return PAGE_PD32_STR;
1839 return PAGE_PDP32PAE_STR;
1841 return PAGE_PD32PAE_STR;
1843 return PAGE_PT32PAE_STR;
1845 return PAGE_PML464_STR;
1847 return PAGE_PDP64_STR;
1849 return PAGE_PD64_STR;
1851 return PAGE_PT64_STR;