2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm_paging.h>
22 #include <palacios/vmm.h>
24 #include <palacios/vm_guest_mem.h>
28 #define USE_VMM_PAGING_DEBUG
29 // All of the debug functions defined in vmm_paging.h are implemented in this file
30 #include "vmm_paging_debug.h"
31 #undef USE_VMM_PAGING_DEBUG
/* Tear down a 32-bit (non-PAE) page-table hierarchy: for each PDE slot,
 * recover the page table's address from the entry and free it, then free
 * the page directory page itself.
 * NOTE(review): the visible lines do not show a present/large_page guard
 * before computing 'pte' -- confirm the elided lines skip empty/4MB entries
 * before freeing. */
34 void delete_page_tables_32(pde32_t * pde) {
41   for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
43     // We double cast, first to an addr_t to handle 64 bit issues, then to the pointer
44     PrintDebug("PTE base addr %x \n", pde[i].pt_base_addr);
45     pte32_t * pte = (pte32_t *)((addr_t)(uint_t)(pde[i].pt_base_addr << PAGE_POWER));
47     PrintDebug("Deleting PTE %d (%p)\n", i, pte);
52   PrintDebug("Deleting PDE (%p)\n", pde);
53   V3_FreePage(V3_PAddr(pde));
/* Tear down a 32-bit PAE page-table hierarchy rooted at 'pdpe'.
 * Currently a stub: logs an error and frees nothing. */
56 void delete_page_tables_32PAE(pdpe32pae_t * pdpe) {
57   PrintError("Unimplemented function\n");
/* Tear down a 64-bit (long mode) page-table hierarchy rooted at 'pml4'.
 * Currently a stub: logs an error and frees nothing. */
60 void delete_page_tables_64(pml4e64_t * pml4) {
61   PrintError("Unimplemented function\n");
/* Walk a GUEST 32-bit (non-PAE) page table rooted at guest_cr3 and translate
 * virtual address 'vaddr' to a guest-physical address in '*paddr'.
 * Each guest-physical table address is converted to a host-virtual pointer
 * via guest_pa_to_host_va() before being dereferenced.
 * Returns 0 on success, -1 on failure (elided lines carry the returns). */
65 int v3_translate_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
66   addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
67   pde32_t * guest_pde = 0;
68   addr_t guest_pte_pa = 0;
70   if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t*)&guest_pde) == -1) {
71     PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
72 	       (void *)guest_pde_pa);
76   switch (pde32_lookup(guest_pde, vaddr, &guest_pte_pa)) {
77     case PT_ENTRY_NOT_PRESENT:
80     case PT_ENTRY_LARGE_PAGE:
       // 4MB page: pde32_lookup already produced the full byte address
81       *paddr = guest_pte_pa;
85 	pte32_t * guest_pte = NULL;
87 	if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t*)&guest_pte) == -1) {
88 	  PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
89 		     (void *)guest_pte_pa);
93 	if (pte32_lookup(guest_pte, vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
101   // should never get here
/* Walk a GUEST 32-bit PAE page table (PDPE -> PDE -> PTE) rooted at guest_cr3
 * and translate 'vaddr' to a guest-physical address in '*paddr'.
 * Every intermediate table is mapped into host-virtual space with
 * guest_pa_to_host_va() before use. Returns 0 on success, -1 on failure. */
106 int v3_translate_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
107   addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
108   pdpe32pae_t * guest_pdpe = 0;
109   addr_t guest_pde_pa = 0;
111   if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t*)&guest_pdpe) == -1) {
112     PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
113 	       (void *)guest_pdpe_pa);
117   switch (pdpe32pae_lookup(guest_pdpe, vaddr, &guest_pde_pa))
119     case PT_ENTRY_NOT_PRESENT:
124 	pde32pae_t * guest_pde = NULL;
125 	addr_t guest_pte_pa = 0;
127 	if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
128 	  PrintError("Could not get virtual Address of Guest PDE32PAE (PA=%p)\n",
129 		     (void *)guest_pde_pa);
133 	switch (pde32pae_lookup(guest_pde, vaddr, &guest_pte_pa))
135 	  case PT_ENTRY_NOT_PRESENT:
138 	  case PT_ENTRY_LARGE_PAGE:
            // 2MB page: lookup already produced the full byte address
139 	    *paddr = guest_pte_pa;
143 	      pte32pae_t * guest_pte = NULL;
145 	      if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
146 		PrintError("Could not get virtual Address of Guest PTE32PAE (PA=%p)\n",
147 			   (void *)guest_pte_pa);
151 	      if (pte32pae_lookup(guest_pte, vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
163   // should never get here
/* Walk a GUEST 64-bit page table (PML4 -> PDP -> PD -> PT) rooted at
 * guest_cr3 and translate 'vaddr' to a guest-physical address in '*paddr'.
 * 1GB pages at the PDP level are rejected as unsupported.
 * Returns 0 on success, -1 on failure. */
167 int v3_translate_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
168   addr_t guest_pml4_pa = CR3_TO_PML4E64_PA(guest_cr3);
169   pml4e64_t * guest_pmle = 0;
170   addr_t guest_pdpe_pa = 0;
172   if (guest_pa_to_host_va(info, guest_pml4_pa, (addr_t*)&guest_pmle) == -1) {
173     PrintError("Could not get virtual address of Guest PML4E64 (PA=%p)\n",
174 	       (void *)guest_pml4_pa);
178   switch (pml4e64_lookup(guest_pmle, vaddr, &guest_pdpe_pa)) {
179     case PT_ENTRY_NOT_PRESENT:
184 	pdpe64_t * guest_pdp = NULL;
185 	addr_t guest_pde_pa = 0;
187 	if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t *)&guest_pdp) == -1) {
188 	  PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
189 		     (void *)guest_pdpe_pa);
193 	switch (pdpe64_lookup(guest_pdp, vaddr, &guest_pde_pa)) {
194 	  case PT_ENTRY_NOT_PRESENT:
197 	  case PT_ENTRY_LARGE_PAGE:
            // 1GB pages are not handled by this walker
199 	    PrintError("1 Gigabyte Pages not supported\n");
203 	      pde64_t * guest_pde = NULL;
204 	      addr_t guest_pte_pa = 0;
206 	      if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
207 		PrintError("Could not get virtual address of guest PDE64 (PA=%p)\n",
208 			   (void *)guest_pde_pa);
212 	      switch (pde64_lookup(guest_pde, vaddr, &guest_pte_pa)) {
213 		case PT_ENTRY_NOT_PRESENT:
216 		case PT_ENTRY_LARGE_PAGE:
                  // 2MB page: lookup produced the full byte address
217 		  *paddr = guest_pte_pa;
221 		    pte64_t * guest_pte = NULL;
223 		    if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
224 		      PrintError("Could not get virtual address of guest PTE64 (PA=%p)\n",
225 			 	 (void *)guest_pte_pa);
229 		    if (pte64_lookup(guest_pte, vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
243   // should never get here
/* Walk a HOST 32-bit page table rooted at host_cr3 and translate 'vaddr'
 * to a host-physical address in '*paddr'. Host tables are already mapped,
 * so intermediate physical pointers are converted with V3_VAddr(). */
249 int v3_translate_host_pt_32(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
250   pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
251   pte32_t * host_pte = 0;
253   switch (pde32_lookup(host_pde, vaddr, (addr_t *)&host_pte)) {
254     case PT_ENTRY_NOT_PRESENT:
257     case PT_ENTRY_LARGE_PAGE:
        // 4MB page: host_pte already holds the translated byte address
258       *paddr = (addr_t)host_pte;
261       if (pte32_lookup(V3_VAddr(host_pte), vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
267   // should never get here
/* Walk a HOST 32-bit PAE page table rooted at host_cr3 and translate
 * 'vaddr' to a host-physical address in '*paddr'. */
272 int v3_translate_host_pt_32pae(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
273   pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
274   pde32pae_t * host_pde = NULL;
275   pte32pae_t * host_pte = NULL;
277   switch (pdpe32pae_lookup(host_pdpe, vaddr, (addr_t *)&host_pde)) {
278     case PT_ENTRY_NOT_PRESENT:
282       switch (pde32pae_lookup(V3_VAddr(host_pde), vaddr, (addr_t *)&host_pte)) {
283 	case PT_ENTRY_NOT_PRESENT:
286 	case PT_ENTRY_LARGE_PAGE:
          // 2MB page: host_pte already holds the translated byte address
287 	  *paddr = (addr_t)host_pte;
290 	  if (pte32pae_lookup(V3_VAddr(host_pte), vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
299   // should never get here
/* Walk a HOST 64-bit page table rooted at host_cr3 and translate 'vaddr'
 * to a host-physical address in '*paddr'. 1GB pages are unsupported. */
304 int v3_translate_host_pt_64(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
305   pml4e64_t * host_pmle = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
306   pdpe64_t * host_pdpe = NULL;
307   pde64_t * host_pde = NULL;
308   pte64_t * host_pte = NULL;
310   switch(pml4e64_lookup(host_pmle, vaddr, (addr_t *)&host_pdpe)) {
311     case PT_ENTRY_NOT_PRESENT:
315       switch(pdpe64_lookup(V3_VAddr(host_pdpe), vaddr, (addr_t *)&host_pde)) {
316 	case PT_ENTRY_NOT_PRESENT:
319 	case PT_ENTRY_LARGE_PAGE:
          // 1GB pages are not handled by this walker
321 	  PrintError("1 Gigabyte Pages not supported\n");
324 	  switch (pde64_lookup(V3_VAddr(host_pde), vaddr, (addr_t *)&host_pte)) {
325 	    case PT_ENTRY_NOT_PRESENT:
328 	    case PT_ENTRY_LARGE_PAGE:
              // 2MB page: host_pte already holds the translated byte address
329 	      *paddr = (addr_t)host_pte;
332 	      if (pte64_lookup(V3_VAddr(host_pte), vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
342   // should never get here
351 * PAGE TABLE LOOKUP FUNCTIONS
354 * The value of entry is a return type:
355 * Page not present: *entry = 0
356 * Large Page: *entry = translated physical address (byte granularity)
357 * PTE entry: *entry is the address of the PTE Page
362 * 32 bit Page Table lookup functions
/* Look up the 32-bit PDE covering 'addr'.
 * Not present          -> PT_ENTRY_NOT_PRESENT
 * 4MB large page       -> *entry = full translated byte address, PT_ENTRY_LARGE_PAGE
 * Normal entry         -> *entry = physical address of the PTE page, PT_ENTRY_PAGE */
366 pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry) {
367   pde32_t * pde_entry = &(pd[PDE32_INDEX(addr)]);
369   if (!pde_entry->present) {
371     return PT_ENTRY_NOT_PRESENT;
372   } else if (pde_entry->large_page) {
373     pde32_4MB_t * large_pde = (pde32_4MB_t *)pde_entry;
375     *entry = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
376     *entry += PAGE_OFFSET_4MB(addr);
378     return PT_ENTRY_LARGE_PAGE;
380     *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
381     return PT_ENTRY_PAGE;
387 /* Takes a virtual addr (addr) and returns the physical addr (entry) as defined in the page table
 * Returns PT_ENTRY_NOT_PRESENT if the PTE is invalid, otherwise writes the
 * full byte-granular physical address and returns PT_ENTRY_PAGE. */
389 pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry) {
390   pte32_t * pte_entry = &(pt[PTE32_INDEX(addr)]);
392   if (!pte_entry->present) {
394     //    PrintDebug("Lookup at non present page (index=%d)\n", PTE32_INDEX(addr));
395     return PT_ENTRY_NOT_PRESENT;
397     *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr) + PAGE_OFFSET(addr);
398     return PT_ENTRY_PAGE;
407 * 32 bit PAE Page Table lookup functions
/* Look up the PAE PDPE covering 'addr'; on success *entry receives the
 * physical address of the page-directory page (PT_ENTRY_PAGE). */
410 pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry) {
411   pdpe32pae_t * pdpe_entry = &(pdp[PDPE32PAE_INDEX(addr)]);
413   if (!pdpe_entry->present) {
415     return PT_ENTRY_NOT_PRESENT;
417     *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
418     return PT_ENTRY_PAGE;
/* Look up the PAE PDE covering 'addr'.
 * 2MB large page -> *entry = full translated byte address (PT_ENTRY_LARGE_PAGE);
 * otherwise *entry = physical address of the PTE page (PT_ENTRY_PAGE). */
422 pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry) {
423   pde32pae_t * pde_entry = &(pd[PDE32PAE_INDEX(addr)]);
425   if (!pde_entry->present) {
427     return PT_ENTRY_NOT_PRESENT;
428   } else if (pde_entry->large_page) {
429     pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)pde_entry;
431     *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
432     *entry += PAGE_OFFSET_2MB(addr);
434     return PT_ENTRY_LARGE_PAGE;
436     *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
437     return PT_ENTRY_PAGE;
/* Look up the PAE PTE covering 'addr'; on success *entry receives the
 * full byte-granular physical address (PT_ENTRY_PAGE). */
441 pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry) {
442   pte32pae_t * pte_entry = &(pt[PTE32PAE_INDEX(addr)]);
444   if (!pte_entry->present) {
446     return PT_ENTRY_NOT_PRESENT;
448     *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr) + PAGE_OFFSET(addr);
449     return PT_ENTRY_PAGE;
457 * 64 bit Page Table lookup functions
/* Look up the PML4 entry covering 'addr'; on success *entry receives the
 * physical address of the PDP page (PT_ENTRY_PAGE). */
460 pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry) {
461   pml4e64_t * pml_entry = &(pml[PML4E64_INDEX(addr)]);
463   if (!pml_entry->present) {
465     return PT_ENTRY_NOT_PRESENT;
467     *entry = BASE_TO_PAGE_ADDR(pml_entry->pdp_base_addr);
468     return PT_ENTRY_PAGE;
/* Look up the 64-bit PDPE covering 'addr'. 1GB large pages are rejected;
 * otherwise *entry receives the physical address of the PD page. */
472 pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry) {
473   pdpe64_t * pdpe_entry = &(pdp[PDPE64_INDEX(addr)]);
475   if (!pdpe_entry->present) {
477     return PT_ENTRY_NOT_PRESENT;
478   } else if (pdpe_entry->large_page) {
    // 1GB pages are not handled anywhere in this file
479     PrintError("1 Gigabyte pages not supported\n");
483     *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
484     return PT_ENTRY_PAGE;
/* Look up the 64-bit PDE covering 'addr'.
 * 2MB large page -> *entry = full translated byte address (PT_ENTRY_LARGE_PAGE);
 * otherwise *entry = physical address of the PTE page (PT_ENTRY_PAGE). */
488 pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry) {
489   pde64_t * pde_entry = &(pd[PDE64_INDEX(addr)]);
491   if (!pde_entry->present) {
493     return PT_ENTRY_NOT_PRESENT;
494   } else if (pde_entry->large_page) {
495     pde64_2MB_t * large_pde = (pde64_2MB_t *)pde_entry;
497     *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
498     *entry += PAGE_OFFSET_2MB(addr);
500     return PT_ENTRY_LARGE_PAGE;
502     *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
503     return PT_ENTRY_PAGE;
/* Look up the 64-bit PTE covering 'addr'; on success *entry receives the
 * full byte-granular physical address (PT_ENTRY_PAGE). */
507 pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry) {
508   pte64_t * pte_entry = &(pt[PTE64_INDEX(addr)]);
510   if (!pte_entry->present) {
512     return PT_ENTRY_NOT_PRESENT;
514     *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr) + PAGE_OFFSET(addr);
515     return PT_ENTRY_PAGE;
528 * Page Table Access Checks
/* Check whether 'access_type' (read/write/user) would be permitted at
 * 'vaddr' by the HOST 32-bit page tables rooted at host_cr3. The verdict
 * is written to *access_status; each level short-circuits on failure. */
537 int v3_check_host_pt_32(v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
538   pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
539   pte32_t * host_pte = 0;
543   // Check accessibility of PDE
544   *access_status = v3_can_access_pde32(host_pde, vaddr, access_type);
546   if (*access_status != PT_ACCESS_OK) {
552   switch (pde32_lookup(host_pde, vaddr, (addr_t *)&host_pte)) {
      // Large pages have no PTE level; the PDE verdict stands
553     case PT_ENTRY_LARGE_PAGE:
556       *access_status = v3_can_access_pte32(V3_VAddr(host_pte), vaddr, access_type);
558       if (*access_status != PT_ACCESS_OK) {
567   // should never get here
/* Check whether 'access_type' would be permitted at 'vaddr' by the HOST
 * 32-bit PAE page tables rooted at host_cr3 (PDPE, then PDE, then PTE). */
571 int v3_check_host_pt_32pae(v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
572   pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
573   pde32pae_t * host_pde = NULL;
574   pte32pae_t * host_pte = NULL;
577   *access_status = v3_can_access_pdpe32pae(host_pdpe, vaddr, access_type);
579   if (*access_status != PT_ACCESS_OK) {
585   switch (pdpe32pae_lookup(host_pdpe, vaddr, (addr_t *)&host_pde)) {
587       *access_status = v3_can_access_pde32pae(V3_VAddr(host_pde), vaddr, access_type);
589       if (*access_status != PT_ACCESS_OK) {
595       switch (pde32pae_lookup(V3_VAddr(host_pde), vaddr, (addr_t *)&host_pte)) {
        // Large pages have no PTE level; the PDE verdict stands
596 	case PT_ENTRY_LARGE_PAGE:
599 	  *access_status = v3_can_access_pte32pae(V3_VAddr(host_pte), vaddr, access_type);
601 	  if (*access_status != PT_ACCESS_OK) {
613   // should never get here
/* Check whether 'access_type' would be permitted at 'vaddr' by the HOST
 * 64-bit page tables rooted at host_cr3 (PML4 -> PDPE -> PDE -> PTE). */
619 int v3_check_host_pt_64(v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
620   pml4e64_t * host_pmle = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
621   pdpe64_t * host_pdpe = NULL;
622   pde64_t * host_pde = NULL;
623   pte64_t * host_pte = NULL;
627   *access_status = v3_can_access_pml4e64(host_pmle, vaddr, access_type);
629   if (*access_status != PT_ACCESS_OK) {
635   switch(pml4e64_lookup(host_pmle, vaddr, (addr_t *)&host_pdpe)) {
637       *access_status = v3_can_access_pdpe64(V3_VAddr(host_pdpe), vaddr, access_type);
639       if (*access_status != PT_ACCESS_OK) {
645       switch(pdpe64_lookup(V3_VAddr(host_pdpe), vaddr, (addr_t *)&host_pde)) {
        // 1GB page: no PDE/PTE levels to descend into
646 	case PT_ENTRY_LARGE_PAGE:
649 	  *access_status = v3_can_access_pde64(V3_VAddr(host_pde), vaddr, access_type);
651 	  if (*access_status != PT_ACCESS_OK) {
657 	  switch (pde64_lookup(V3_VAddr(host_pde), vaddr, (addr_t *)&host_pte)) {
            // 2MB page: no PTE level; the PDE verdict stands
658 	    case PT_ENTRY_LARGE_PAGE:
661 	      *access_status = v3_can_access_pte64(V3_VAddr(host_pte), vaddr, access_type);
663 	      if (*access_status != PT_ACCESS_OK) {
678   // should never get here
/* Check whether 'access_type' would be permitted at 'vaddr' by the GUEST
 * 32-bit page tables rooted at guest_cr3. Guest-physical table addresses
 * are mapped via guest_pa_to_host_va() before the permission checks.
 * Verdict goes to *access_status; returns -1 if a table can't be mapped. */
686 int v3_check_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
687 			 pf_error_t access_type, pt_access_status_t * access_status) {
688   addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
689   pde32_t * guest_pde = NULL;
690   addr_t guest_pte_pa = 0;
693   if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t*)&guest_pde) == -1) {
694     PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
695 	       (void *)guest_pde_pa);
700   // Check accessibility of PDE
701   *access_status = v3_can_access_pde32(guest_pde, vaddr, access_type);
703   if (*access_status != PT_ACCESS_OK) {
709   switch (pde32_lookup(guest_pde, vaddr, &guest_pte_pa)) {
      // Large pages have no PTE level; the PDE verdict stands
710     case PT_ENTRY_LARGE_PAGE:
714 	pte32_t * guest_pte = NULL;
716 	if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t*)&guest_pte) == -1) {
717 	  PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
718 		     (void *)guest_pte_pa);
722 	*access_status = v3_can_access_pte32(guest_pte, vaddr, access_type);
724 	if (*access_status != PT_ACCESS_OK) {
733   // should never get here
/* Check whether 'access_type' would be permitted at 'vaddr' by the GUEST
 * 32-bit PAE page tables rooted at guest_cr3 (PDPE -> PDE -> PTE). */
741 int v3_check_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
742 			    pf_error_t access_type, pt_access_status_t * access_status) {
743   addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
744   pdpe32pae_t * guest_pdpe = NULL;
745   addr_t guest_pde_pa = 0;
748   if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t*)&guest_pdpe) == -1) {
749     PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
750 	       (void *)guest_pdpe_pa);
754   *access_status = v3_can_access_pdpe32pae(guest_pdpe, vaddr, access_type);
756   if (*access_status != PT_ACCESS_OK) {
762   switch (pdpe32pae_lookup(guest_pdpe, vaddr, &guest_pde_pa)) {
765 	pde32pae_t * guest_pde = NULL;
766 	addr_t guest_pte_pa = 0;
768 	if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
769 	  PrintError("Could not get virtual Address of Guest PDE32PAE (PA=%p)\n",
770 		     (void *)guest_pde_pa);
774 	*access_status = v3_can_access_pde32pae(guest_pde, vaddr, access_type);
776 	if (*access_status != PT_ACCESS_OK) {
782 	switch (pde32pae_lookup(guest_pde, vaddr, &guest_pte_pa)) {
          // Large pages have no PTE level; the PDE verdict stands
783 	  case PT_ENTRY_LARGE_PAGE:
787 	      pte32pae_t * guest_pte = NULL;
789 	      if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
790 		PrintError("Could not get virtual Address of Guest PTE32PAE (PA=%p)\n",
791 			   (void *)guest_pte_pa);
795 	      *access_status = v3_can_access_pte32pae(guest_pte, vaddr, access_type);
797 	      if (*access_status != PT_ACCESS_OK) {
811   // should never get here
// NOTE(review): file-scope variable; shadowed by the local 'guest_pte'
// declared inside v3_check_guest_pt_64 below. Looks like dead state left
// over from a refactor -- confirm no other translation unit references it
// before removing.
816 pte64_t * guest_pte = NULL;
/* Check whether 'access_type' would be permitted at 'vaddr' by the GUEST
 * 64-bit page tables rooted at guest_cr3 (PML4 -> PDPE -> PDE -> PTE).
 * Verdict goes to *access_status; returns -1 if a table can't be mapped. */
818 int v3_check_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
819 			 pf_error_t access_type, pt_access_status_t * access_status) {
820   addr_t guest_pml4_pa = CR3_TO_PML4E64_PA(guest_cr3);
821   pml4e64_t * guest_pmle = NULL;
822   addr_t guest_pdpe_pa = 0;
825   if (guest_pa_to_host_va(info, guest_pml4_pa, (addr_t*)&guest_pmle) == -1) {
826     PrintError("Could not get virtual address of Guest PML4E64 (PA=%p)\n",
827 	       (void *)guest_pml4_pa);
831   *access_status = v3_can_access_pml4e64(guest_pmle, vaddr, access_type);
833   if (*access_status != PT_ACCESS_OK) {
839   switch(pml4e64_lookup(guest_pmle, vaddr, &guest_pdpe_pa)) {
842 	pdpe64_t * guest_pdp = NULL;
843 	addr_t guest_pde_pa = 0;
845 	if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t *)&guest_pdp) == -1) {
846 	  PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
847 		     (void *)guest_pdpe_pa);
851 	*access_status = v3_can_access_pdpe64(guest_pdp, vaddr, access_type);
853 	if (*access_status != PT_ACCESS_OK) {
859 	switch(pdpe64_lookup(guest_pdp, vaddr, &guest_pde_pa)) {
          // 1GB page: no PDE/PTE levels to descend into
860 	  case PT_ENTRY_LARGE_PAGE:
864 	      pde64_t * guest_pde = NULL;
865 	      addr_t guest_pte_pa = 0;
867 	      if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
868 		PrintError("Could not get virtual address of guest PDE64 (PA=%p)\n",
869 			   (void *)guest_pde_pa);
873 	      *access_status = v3_can_access_pde64(guest_pde, vaddr, access_type);
875 	      if (*access_status != PT_ACCESS_OK) {
881 	      switch (pde64_lookup(guest_pde, vaddr, &guest_pte_pa)) {
                // 2MB page: no PTE level; the PDE verdict stands
882 		case PT_ENTRY_LARGE_PAGE:
                  // NOTE: this local shadows the file-scope 'guest_pte' above
886 		    pte64_t * guest_pte = NULL;
888 		    if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
889 		      PrintError("Could not get virtual address of guest PTE64 (PA=%p)\n",
890 				 (void *)guest_pte_pa);
894 		    *access_status = v3_can_access_pte64(guest_pte, vaddr, access_type);
896 		    if (*access_status != PT_ACCESS_OK) {
914   // should never get here
/* Generic permission check on a single page-table entry (any level, any
 * paging mode -- gen_pt_t overlays the common present/writable/user bits).
 * Returns the first violated condition: not-present, write-protect, or
 * user/supervisor. (Elided tail presumably returns PT_ACCESS_OK.) */
922 static pt_access_status_t can_access_pt_entry(gen_pt_t * pt, pf_error_t access_type) {
923   if (pt->present == 0) {
924     return PT_ACCESS_NOT_PRESENT;
925   } else if ((pt->writable == 0) && (access_type.write == 1)) {
926     return PT_ACCESS_WRITE_ERROR;
927   } else if ((pt->user_page == 0) && (access_type.user == 1)) {
929     return PT_ACCESS_USER_ERROR;
938 * 32 bit access checks
/* Permission check on the 32-bit PDE covering 'addr'. */
940 pt_access_status_t inline v3_can_access_pde32(pde32_t * pde, addr_t addr, pf_error_t access_type) {
941   gen_pt_t * entry = (gen_pt_t *)&pde[PDE32_INDEX(addr)];
942   return can_access_pt_entry(entry, access_type);
/* Permission check on the 32-bit PTE covering 'addr'. */
945 pt_access_status_t inline v3_can_access_pte32(pte32_t * pte, addr_t addr, pf_error_t access_type) {
946   gen_pt_t * entry = (gen_pt_t *)&pte[PTE32_INDEX(addr)];
947   return can_access_pt_entry(entry, access_type);
952 * 32 bit PAE access checks
/* Permission check on the PAE PDPE covering 'addr'. */
954 pt_access_status_t inline v3_can_access_pdpe32pae(pdpe32pae_t * pdpe, addr_t addr, pf_error_t access_type) {
955   gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE32PAE_INDEX(addr)];
956   return can_access_pt_entry(entry, access_type);
/* Permission check on the PAE PDE covering 'addr'. */
959 pt_access_status_t inline v3_can_access_pde32pae(pde32pae_t * pde, addr_t addr, pf_error_t access_type) {
960   gen_pt_t * entry = (gen_pt_t *)&pde[PDE32PAE_INDEX(addr)];
961   return can_access_pt_entry(entry, access_type);
/* Permission check on the PAE PTE covering 'addr'. */
964 pt_access_status_t inline v3_can_access_pte32pae(pte32pae_t * pte, addr_t addr, pf_error_t access_type) {
965   gen_pt_t * entry = (gen_pt_t *)&pte[PTE32PAE_INDEX(addr)];
966   return can_access_pt_entry(entry, access_type);
970 * 64 Bit access checks
/* Permission check on the PML4 entry covering 'addr'. */
972 pt_access_status_t inline v3_can_access_pml4e64(pml4e64_t * pmle, addr_t addr, pf_error_t access_type) {
973   gen_pt_t * entry = (gen_pt_t *)&pmle[PML4E64_INDEX(addr)];
974   return can_access_pt_entry(entry, access_type);
/* Permission check on the 64-bit PDPE covering 'addr'. */
977 pt_access_status_t inline v3_can_access_pdpe64(pdpe64_t * pdpe, addr_t addr, pf_error_t access_type) {
978   gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE64_INDEX(addr)];
979   return can_access_pt_entry(entry, access_type);
982 pt_access_status_t inline v3_can_access_pde64(pde64_t * pde, addr_t addr, pf_error_t access_type) {
983 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32_INDEX(addr)];
984 return can_access_pt_entry(entry, access_type);
/* Permission check on the 64-bit PTE covering 'addr'. */
987 pt_access_status_t inline v3_can_access_pte64(pte64_t * pte, addr_t addr, pf_error_t access_type) {
988   gen_pt_t * entry = (gen_pt_t *)&pte[PTE64_INDEX(addr)];
989   return can_access_pt_entry(entry, access_type);
1001 /* We generate a page table to correspond to a given memory layout
1002 * pulling pages from the mem_list when necessary
1003 * If there are any gaps in the layout, we add them as unmapped pages
/* Build a 32-bit passthrough (identity-style) page table for the guest's
 * shadow memory map: every 4KB guest page is mapped to its host frame via
 * guest_pa_to_host_pa(). Pages in unmapped/hooked/unallocated/device/
 * remote/swapped regions are left not-present; fully-empty page tables are
 * freed and their PDE marked not-present. Returns the new PDE page (VA).
 * NOTE(review): V3_AllocPages() results are not visibly checked for NULL. */
1005 pde32_t * create_passthrough_pts_32(struct guest_info * guest_info) {
1006   addr_t current_page_addr = 0;
1008   struct shadow_map * map = &(guest_info->mem_map);
1010   pde32_t * pde = V3_VAddr(V3_AllocPages(1));
1012   for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
1013     int pte_present = 0;
1014     pte32_t * pte = V3_VAddr(V3_AllocPages(1));
1017     for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
1018       struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
        // Region types that must NOT get a direct mapping
1021 	  (region->host_type == HOST_REGION_HOOK) ||
1022 	  (region->host_type == HOST_REGION_UNALLOCATED) ||
1023 	  (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
1024 	  (region->host_type == HOST_REGION_REMOTE) ||
1025 	  (region->host_type == HOST_REGION_SWAPPED)) {
        // Not-present PTE: all bits cleared
1027 	pte[j].writable = 0;
1028 	pte[j].user_page = 0;
1029 	pte[j].write_through = 0;
1030 	pte[j].cache_disable = 0;
1031 	pte[j].accessed = 0;
1033 	pte[j].pte_attr = 0;
1034 	pte[j].global_page = 0;
1035 	pte[j].vmm_info = 0;
1036 	pte[j].page_base_addr = 0;
        // Mapped region: RW, user-accessible PTE pointing at the host frame
1040 	pte[j].writable = 1;
1041 	pte[j].user_page = 1;
1042 	pte[j].write_through = 0;
1043 	pte[j].cache_disable = 0;
1044 	pte[j].accessed = 0;
1046 	pte[j].pte_attr = 0;
1047 	pte[j].global_page = 0;
1048 	pte[j].vmm_info = 0;
1050 	if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
        // >> 12 is the frame number (equivalent to PAGE_BASE_ADDR)
1056 	pte[j].page_base_addr = host_addr >> 12;
1061       current_page_addr += PAGE_SIZE;
      // Whole page table was empty: release it, PDE stays not-present
1064     if (pte_present == 0) {
1065       V3_FreePage(V3_PAddr(pte));
1068       pde[i].writable = 0;
1069       pde[i].user_page = 0;
1070       pde[i].write_through = 0;
1071       pde[i].cache_disable = 0;
1072       pde[i].accessed = 0;
1073       pde[i].reserved = 0;
1074       pde[i].large_page = 0;
1075       pde[i].global_page = 0;
1076       pde[i].vmm_info = 0;
1077       pde[i].pt_base_addr = 0;
      // At least one PTE present: wire the PDE to the new page table
1080       pde[i].writable = 1;
1081       pde[i].user_page = 1;
1082       pde[i].write_through = 0;
1083       pde[i].cache_disable = 0;
1084       pde[i].accessed = 0;
1085       pde[i].reserved = 0;
1086       pde[i].large_page = 0;
1087       pde[i].global_page = 0;
1088       pde[i].vmm_info = 0;
1089       pde[i].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
1098 /* We generate a page table to correspond to a given memory layout
1099 * pulling pages from the mem_list when necessary
1100 * If there are any gaps in the layout, we add them as unmapped pages
/* Build a 32-bit PAE passthrough page table for the guest's shadow memory
 * map (PDPE -> PDE -> PTE). Same policy as the non-PAE builder: unmapped/
 * hooked/device/remote/swapped regions get not-present entries, empty
 * tables are freed and the parent entry cleared. Returns the PDPE page. */
1102 pdpe32pae_t * create_passthrough_pts_32PAE(struct guest_info * guest_info) {
1103   addr_t current_page_addr = 0;
1105   struct shadow_map * map = &(guest_info->mem_map);
1107   pdpe32pae_t * pdpe = V3_VAddr(V3_AllocPages(1));
1108   memset(pdpe, 0, PAGE_SIZE);
1110   for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
1111     int pde_present = 0;
1112     pde32pae_t * pde = V3_VAddr(V3_AllocPages(1));
1114     for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
1117       int pte_present = 0;
1118       pte32pae_t * pte = V3_VAddr(V3_AllocPages(1));
1121       for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
1122 	struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
        // Region types that must NOT get a direct mapping
1125 	    (region->host_type == HOST_REGION_HOOK) ||
1126 	    (region->host_type == HOST_REGION_UNALLOCATED) ||
1127 	    (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
1128 	    (region->host_type == HOST_REGION_REMOTE) ||
1129 	    (region->host_type == HOST_REGION_SWAPPED)) {
        // Not-present PTE: all bits cleared
1131 	  pte[k].writable = 0;
1132 	  pte[k].user_page = 0;
1133 	  pte[k].write_through = 0;
1134 	  pte[k].cache_disable = 0;
1135 	  pte[k].accessed = 0;
1137 	  pte[k].pte_attr = 0;
1138 	  pte[k].global_page = 0;
1139 	  pte[k].vmm_info = 0;
1140 	  pte[k].page_base_addr = 0;
        // Mapped region: RW, user-accessible PTE pointing at the host frame
1145 	  pte[k].writable = 1;
1146 	  pte[k].user_page = 1;
1147 	  pte[k].write_through = 0;
1148 	  pte[k].cache_disable = 0;
1149 	  pte[k].accessed = 0;
1151 	  pte[k].pte_attr = 0;
1152 	  pte[k].global_page = 0;
1153 	  pte[k].vmm_info = 0;
1155 	  if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
          // >> 12 is the frame number (equivalent to PAGE_BASE_ADDR)
1161 	  pte[k].page_base_addr = host_addr >> 12;
1167 	current_page_addr += PAGE_SIZE;
      // Whole page table empty: release it, PDE stays not-present
1170       if (pte_present == 0) {
1171 	V3_FreePage(V3_PAddr(pte));
1174 	pde[j].writable = 0;
1175 	pde[j].user_page = 0;
1176 	pde[j].write_through = 0;
1177 	pde[j].cache_disable = 0;
1178 	pde[j].accessed = 0;
1180 	pde[j].large_page = 0;
1181 	pde[j].global_page = 0;
1182 	pde[j].vmm_info = 0;
1183 	pde[j].pt_base_addr = 0;
      // At least one PTE present: wire the PDE to the page table
1187 	pde[j].writable = 1;
1188 	pde[j].user_page = 1;
1189 	pde[j].write_through = 0;
1190 	pde[j].cache_disable = 0;
1191 	pde[j].accessed = 0;
1193 	pde[j].large_page = 0;
1194 	pde[j].global_page = 0;
1195 	pde[j].vmm_info = 0;
1196 	pde[j].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
    // Whole page directory empty: release it, PDPE stays not-present
1204     if (pde_present == 0) {
1205       V3_FreePage(V3_PAddr(pde));
1207       pdpe[i].present = 0;
1209       pdpe[i].write_through = 0;
1210       pdpe[i].cache_disable = 0;
1211       pdpe[i].accessed = 0;
1214       pdpe[i].vmm_info = 0;
1215       pdpe[i].pd_base_addr = 0;
      // At least one PDE present: wire the PDPE to the page directory
1218       pdpe[i].present = 1;
1220       pdpe[i].write_through = 0;
1221       pdpe[i].cache_disable = 0;
1222       pdpe[i].accessed = 0;
1225       pdpe[i].vmm_info = 0;
1226       pdpe[i].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
/* Build a 64-bit passthrough page table (PML4 -> PDPE -> PDE -> PTE) for
 * the guest's shadow memory map.
 * NOTE(review): coverage is deliberately truncated -- only 1 PML4 entry
 * (i < 1) and 20 PDP entries (j < 20) are populated, i.e. at most 20GB of
 * guest physical address space. Confirm this hard-coded cap is intended. */
1241 pml4e64_t * create_passthrough_pts_64(struct guest_info * info) {
1242   addr_t current_page_addr = 0;
1244   struct shadow_map * map = &(info->mem_map);
1246   pml4e64_t * pml = V3_VAddr(V3_AllocPages(1));
1248   for (i = 0; i < 1; i++) {
1249     int pdpe_present = 0;
1250     pdpe64_t * pdpe = V3_VAddr(V3_AllocPages(1));
1252     for (j = 0; j < 20; j++) {
1253       int pde_present = 0;
1254       pde64_t * pde = V3_VAddr(V3_AllocPages(1));
1256       for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
1257 	int pte_present = 0;
1258 	pte64_t * pte = V3_VAddr(V3_AllocPages(1));
1261 	for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
1262 	  struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
          // Region types that must NOT get a direct mapping
1267 	      (region->host_type == HOST_REGION_HOOK) ||
1268 	      (region->host_type == HOST_REGION_UNALLOCATED) ||
1269 	      (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
1270 	      (region->host_type == HOST_REGION_REMOTE) ||
1271 	      (region->host_type == HOST_REGION_SWAPPED)) {
          // Not-present PTE: all bits cleared
1273 	    pte[m].writable = 0;
1274 	    pte[m].user_page = 0;
1275 	    pte[m].write_through = 0;
1276 	    pte[m].cache_disable = 0;
1277 	    pte[m].accessed = 0;
1279 	    pte[m].pte_attr = 0;
1280 	    pte[m].global_page = 0;
1281 	    pte[m].vmm_info = 0;
1282 	    pte[m].page_base_addr = 0;
          // Mapped region: RW, user-accessible PTE pointing at the host frame
1286 	    pte[m].writable = 1;
1287 	    pte[m].user_page = 1;
1288 	    pte[m].write_through = 0;
1289 	    pte[m].cache_disable = 0;
1290 	    pte[m].accessed = 0;
1292 	    pte[m].pte_attr = 0;
1293 	    pte[m].global_page = 0;
1294 	    pte[m].vmm_info = 0;
1296 	    if (guest_pa_to_host_pa(info, current_page_addr, &host_addr) == -1) {
1302 	    pte[m].page_base_addr = PAGE_BASE_ADDR(host_addr);
1304 	    //PrintPTE64(current_page_addr, &(pte[m]));
1312 	  current_page_addr += PAGE_SIZE;
        // Whole page table empty: release it, PDE stays not-present
1315 	if (pte_present == 0) {
1316 	  V3_FreePage(V3_PAddr(pte));
1319 	  pde[k].writable = 0;
1320 	  pde[k].user_page = 0;
1321 	  pde[k].write_through = 0;
1322 	  pde[k].cache_disable = 0;
1323 	  pde[k].accessed = 0;
1325 	  pde[k].large_page = 0;
1326 	  //pde[k].global_page = 0;
1327 	  pde[k].vmm_info = 0;
1328 	  pde[k].pt_base_addr = 0;
        // At least one PTE present: wire the PDE to the page table
1331 	  pde[k].writable = 1;
1332 	  pde[k].user_page = 1;
1333 	  pde[k].write_through = 0;
1334 	  pde[k].cache_disable = 0;
1335 	  pde[k].accessed = 0;
1337 	  pde[k].large_page = 0;
1338 	  //pde[k].global_page = 0;
1339 	  pde[k].vmm_info = 0;
1340 	  pde[k].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
      // Whole page directory empty: release it, PDPE stays not-present
1346       if (pde_present == 0) {
1347 	V3_FreePage(V3_PAddr(pde));
1349 	pdpe[j].present = 0;
1350 	pdpe[j].writable = 0;
1351 	pdpe[j].user_page = 0;
1352 	pdpe[j].write_through = 0;
1353 	pdpe[j].cache_disable = 0;
1354 	pdpe[j].accessed = 0;
1356 	pdpe[j].large_page = 0;
1357 	//pdpe[j].global_page = 0;
1358 	pdpe[j].vmm_info = 0;
1359 	pdpe[j].pd_base_addr = 0;
      // At least one PDE present: wire the PDPE to the page directory
1361 	pdpe[j].present = 1;
1362 	pdpe[j].writable = 1;
1363 	pdpe[j].user_page = 1;
1364 	pdpe[j].write_through = 0;
1365 	pdpe[j].cache_disable = 0;
1366 	pdpe[j].accessed = 0;
1368 	pdpe[j].large_page = 0;
1369 	//pdpe[j].global_page = 0;
1370 	pdpe[j].vmm_info = 0;
1371 	pdpe[j].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
1379     PrintDebug("PML index=%d\n", i);
    // Whole PDP empty: release it, PML4 entry stays not-present
1381     if (pdpe_present == 0) {
1382       V3_FreePage(V3_PAddr(pdpe));
1385       pml[i].writable = 0;
1386       pml[i].user_page = 0;
1387       pml[i].write_through = 0;
1388       pml[i].cache_disable = 0;
1389       pml[i].accessed = 0;
1390       pml[i].reserved = 0;
1391       //pml[i].large_page = 0;
1392       //pml[i].global_page = 0;
1393       pml[i].vmm_info = 0;
1394       pml[i].pdp_base_addr = 0;
      // At least one PDPE present: wire the PML4 entry to the PDP page
1397       pml[i].writable = 1;
1398       pml[i].user_page = 1;
1399       pml[i].write_through = 0;
1400       pml[i].cache_disable = 0;
1401       pml[i].accessed = 0;
1402       pml[i].reserved = 0;
1403       //pml[i].large_page = 0;
1404       //pml[i].global_page = 0;
1405       pml[i].vmm_info = 0;
1406       pml[i].pdp_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pdpe));
/* Walk the entire GUEST 32-bit page table rooted at guest_cr3, invoking
 * 'callback' once per table page and once per present data page
 * (PAGE_PD32 / PAGE_PT32 / PAGE_4MB / PAGE_4KB), passing both the
 * host-virtual pointer and the guest-physical address of each.
 * NOTE(review): callback return values are ignored -- the walk cannot be
 * aborted early. */
1414 int v3_walk_guest_pt_32(struct guest_info * info,  v3_reg_t guest_cr3,
1415 			int (*callback)(int level, addr_t page_ptr, addr_t page_pa, void * private_data),
1416 			void * private_data) {
1417   addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
1418   pde32_t * guest_pde = NULL;
1422     PrintError("Call back was not specified\n");
1426   if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1427     PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
1428 	       (void *)guest_pde_pa);
1432   callback(PAGE_PD32, (addr_t)guest_pde, guest_pde_pa, private_data);
1434   for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
1435     if (guest_pde[i].present) {
1436       if (guest_pde[i].large_page) {
1437 	pde32_4MB_t * large_pde = (pde32_4MB_t *)&(guest_pde[i]);
1438 	addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
1439 	addr_t large_page_va = 0;
1441 	if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1442 	  PrintError("Could not get virtual address of Guest 4MB Page (PA=%p)\n",
1443 		     (void *)large_page_pa);
1447 	callback(PAGE_4MB, large_page_va, large_page_pa, private_data);
1449 	addr_t pte_pa = BASE_TO_PAGE_ADDR(guest_pde[i].pt_base_addr);
1450 	pte32_t * tmp_pte = NULL;
1452 	if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1453 	  PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
1458 	callback(PAGE_PT32, (addr_t)tmp_pte, pte_pa, private_data);
1460 	for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
1461 	  if (tmp_pte[j].present) {
1462 	    addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);
1465 	    if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1466 	      PrintError("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1471 	    callback(PAGE_4KB, page_va, page_pa, private_data);
/* Walk the entire GUEST 32-bit PAE page table rooted at guest_cr3,
 * invoking 'callback' for each table page (PAGE_PDP32PAE / PAGE_PD32PAE /
 * PAGE_PT32PAE) and each present data page (PAGE_2MB / PAGE_4KB).
 * NOTE(review): callback return values are ignored, as in the 32-bit walk. */
1481 int v3_walk_guest_pt_32pae(struct guest_info * info,  v3_reg_t guest_cr3,
1482 			   int (*callback)(int level, addr_t page_ptr, addr_t page_pa, void * private_data),
1483 			   void * private_data) {
1484   addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
1485   pdpe32pae_t * guest_pdpe = NULL;
1489     PrintError("Call back was not specified\n");
1493   if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t *)&guest_pdpe) == -1) {
1494     PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
1495 	       (void *)guest_pdpe_pa);
1501   callback(PAGE_PDP32PAE, (addr_t)guest_pdpe, guest_pdpe_pa, private_data);
1503   for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
1504     if (guest_pdpe[i].present) {
1505       addr_t pde_pa = BASE_TO_PAGE_ADDR(guest_pdpe[i].pd_base_addr);
1506       pde32pae_t * tmp_pde = NULL;
1508       if (guest_pa_to_host_va(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
1509 	PrintError("Could not get virtual address of Guest PDE32PAE (PA=%p)\n",
1514       callback(PAGE_PD32PAE, (addr_t)tmp_pde, pde_pa, private_data);
1516       for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
1517 	if (tmp_pde[j].present) {
1518 	  if (tmp_pde[j].large_page) {
1519 	    pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)&(tmp_pde[j]);
1520 	    addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1521 	    addr_t large_page_va = 0;
1523 	    if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1524 	      PrintError("Could not get virtual address of Guest 2MB Page (PA=%p)\n",
1525 			 (void *)large_page_pa);
1529 	    callback(PAGE_2MB, large_page_va, large_page_pa, private_data);
1531 	    addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
1532 	    pte32pae_t * tmp_pte = NULL;
1534 	    if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1535 	      PrintError("Could not get virtual address of Guest PTE32PAE (PA=%p)\n",
1540 	    callback(PAGE_PT32PAE, (addr_t)tmp_pte, pte_pa, private_data);
1542 	    for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
1543 	      if (tmp_pte[k].present) {
1544 		addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);
1547 		if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1548 		  PrintError("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1553 		callback(PAGE_4KB, page_va, page_pa, private_data);
1567 int v3_walk_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3,
1568 int (*callback)(int level, addr_t page_ptr, addr_t page_pa, void * private_data),
1569 void * private_data) {
1570 addr_t guest_pml_pa = CR3_TO_PML4E64_PA(guest_cr3);
1571 pml4e64_t * guest_pml = NULL;
1575 PrintError("Call back was not specified\n");
1579 if (guest_pa_to_host_va(info, guest_pml_pa, (addr_t *)&guest_pml) == -1) {
1580 PrintError("Could not get virtual address of Guest PML464 (PA=%p)\n",
1586 callback(PAGE_PML464, (addr_t)guest_pml, guest_pml_pa, private_data);
1588 for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
1589 if (guest_pml[i].present) {
1590 addr_t pdpe_pa = BASE_TO_PAGE_ADDR(guest_pml[i].pdp_base_addr);
1591 pdpe64_t * tmp_pdpe = NULL;
1594 if (guest_pa_to_host_va(info, pdpe_pa, (addr_t *)&tmp_pdpe) == -1) {
1595 PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
1600 callback(PAGE_PDP64, (addr_t)tmp_pdpe, pdpe_pa, private_data);
1602 for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
1603 if (tmp_pdpe[j].present) {
1604 if (tmp_pdpe[j].large_page) {
1605 pdpe64_1GB_t * large_pdpe = (pdpe64_1GB_t *)&(tmp_pdpe[j]);
1606 addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdpe->page_base_addr);
1607 addr_t large_page_va = 0;
1609 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1610 PrintError("Could not get virtual address of Guest 1GB page (PA=%p)\n",
1611 (void *)large_page_pa);
1615 callback(PAGE_1GB, (addr_t)large_page_va, large_page_pa, private_data);
1618 addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
1619 pde64_t * tmp_pde = NULL;
1621 if (guest_pa_to_host_va(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
1622 PrintError("Could not get virtual address of Guest PDE64 (PA=%p)\n",
1627 callback(PAGE_PD64, (addr_t)tmp_pde, pde_pa, private_data);
1629 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
1630 if (tmp_pde[k].present) {
1631 if (tmp_pde[k].large_page) {
1632 pde64_2MB_t * large_pde = (pde64_2MB_t *)&(tmp_pde[k]);
1633 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1634 addr_t large_page_va = 0;
1636 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1637 PrintError("Could not get virtual address of Guest 2MB page (PA=%p)\n",
1638 (void *)large_page_pa);
1642 callback(PAGE_2MB, large_page_va, large_page_pa, private_data);
1644 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
1645 pte64_t * tmp_pte = NULL;
1647 if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1648 PrintError("Could not get virtual address of Guest PTE64 (PA=%p)\n",
1653 callback(PAGE_PT64, (addr_t)tmp_pte, pte_pa, private_data);
1655 for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
1656 if (tmp_pte[m].present) {
1657 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);
1660 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1661 PrintError("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1666 callback(PAGE_4KB, page_va, page_pa, private_data);
1680 int v3_walk_host_pt_32(v3_reg_t host_cr3,
1681 int (*callback)(int level, addr_t page_ptr, addr_t page_pa, void * private_data),
1682 void * private_data) {
1683 pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
1684 addr_t pde_pa = CR3_TO_PDE32_PA(host_cr3);
1688 PrintError("Call back was not specified\n");
1692 callback(PAGE_PD32, (addr_t)host_pde, pde_pa, private_data);
1694 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
1695 if (host_pde[i].present) {
1696 if (host_pde[i].large_page) {
1697 pde32_4MB_t * large_pde = (pde32_4MB_t *)&(host_pde[i]);
1698 addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
1700 callback(PAGE_4MB, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
1702 addr_t pte_pa = BASE_TO_PAGE_ADDR(host_pde[i].pt_base_addr);
1703 pte32_t * tmp_pte = (pte32_t *)V3_VAddr((void *)pte_pa);
1705 callback(PAGE_PT32, (addr_t)tmp_pte, pte_pa, private_data);
1707 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
1708 if (tmp_pte[j].present) {
1709 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);
1710 callback(PAGE_4KB, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data);
1723 int v3_walk_host_pt_32pae(v3_reg_t host_cr3,
1724 void (*callback)(page_type_t type, addr_t page_ptr, addr_t page_pa, void * private_data),
1725 void * private_data) {
1726 pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
1727 addr_t pdpe_pa = CR3_TO_PDPE32PAE_PA(host_cr3);
1731 PrintError("Callback was not specified\n");
1735 callback(PAGE_PDP32PAE, (addr_t)host_pdpe, pdpe_pa, private_data);
1737 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
1738 if (host_pdpe[i].present) {
1739 addr_t pde_pa = BASE_TO_PAGE_ADDR(host_pdpe[i].pd_base_addr);
1740 pde32pae_t * tmp_pde = (pde32pae_t *)V3_VAddr((void *)pde_pa);
1742 callback(PAGE_PD32PAE, (addr_t)tmp_pde, pde_pa, private_data);
1744 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
1745 if (tmp_pde[j].present) {
1747 if (tmp_pde[j].large_page) {
1748 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)&(tmp_pde[j]);
1749 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1751 callback(PAGE_2MB, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
1753 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
1754 pte32pae_t * tmp_pte = (pte32pae_t *)V3_VAddr((void *)pte_pa);
1756 callback(PAGE_PT32PAE, (addr_t)tmp_pte, pte_pa, private_data);
1758 for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
1759 if (tmp_pte[k].present) {
1760 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);
1761 callback(PAGE_4KB, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data);
1773 int v3_walk_host_pt_64(v3_reg_t host_cr3,
1774 void (*callback)(page_type_t type, addr_t page_ptr, addr_t page_pa, void * private_data),
1775 void * private_data) {
1776 pml4e64_t * host_pml = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
1777 addr_t pml_pa = CR3_TO_PML4E64_PA(host_cr3);
1781 PrintError("Callback was not specified\n");
1785 callback(PAGE_PML464, (addr_t)host_pml, pml_pa, private_data);
1787 for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
1788 if (host_pml[i].present) {
1789 addr_t pdpe_pa = BASE_TO_PAGE_ADDR(host_pml[i].pdp_base_addr);
1790 pdpe64_t * tmp_pdpe = (pdpe64_t *)V3_VAddr((void *)pdpe_pa);
1792 callback(PAGE_PDP64, (addr_t)tmp_pdpe, pdpe_pa, private_data);
1794 for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
1795 if (tmp_pdpe[j].present) {
1796 if (tmp_pdpe[j].large_page) {
1797 pdpe64_1GB_t * large_pdp = (pdpe64_1GB_t *)&(tmp_pdpe[j]);
1798 addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdp->page_base_addr);
1800 callback(PAGE_1GB, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
1802 addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
1803 pde64_t * tmp_pde = (pde64_t *)V3_VAddr((void *)pde_pa);
1805 callback(PAGE_PD64, (addr_t)tmp_pde, pde_pa, private_data);
1807 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
1808 if (tmp_pde[k].present) {
1809 if (tmp_pde[k].large_page) {
1810 pde64_2MB_t * large_pde = (pde64_2MB_t *)&(tmp_pde[k]);
1811 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1813 callback(PAGE_2MB, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
1815 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
1816 pte64_t * tmp_pte = (pte64_t *)V3_VAddr((void *)pte_pa);
1818 callback(PAGE_PT64, (addr_t)tmp_pte, pte_pa, private_data);
1820 for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
1821 if (tmp_pte[m].present) {
1822 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);
1823 callback(PAGE_4KB, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data);