2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm_paging.h>
22 #include <palacios/vmm.h>
24 #include <palacios/vm_guest_mem.h>
28 #define USE_VMM_PAGING_DEBUG
29 // All of the debug functions defined in vmm_paging.h are implemented in this file
30 #include "vmm_paging_debug.h"
31 #undef USE_VMM_PAGING_DEBUG
34 void delete_page_tables_32(pde32_t * pde) {
// Free a 32 bit page directory and every PTE page it references.
// NOTE(review): this span is a sampled extract -- loop-variable
// declarations and closing braces fall between the numbered lines.
41 for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
43 // We double cast, first to an addr_t to handle 64 bit issues, then to the pointer
44 PrintDebug("PTE base addr %x \n", pde[i].pt_base_addr);
45 pte32_t * pte = (pte32_t *)((addr_t)(uint_t)(pde[i].pt_base_addr << PAGE_POWER));
47 PrintDebug("Deleting PTE %d (%p)\n", i, pte);
// Each PTE page is released inside the loop; the directory page
// itself is released last, below.
52 PrintDebug("Deleting PDE (%p)\n", pde);
53 V3_FreePage(V3_PAddr(pde));
56 void delete_page_tables_32PAE(pdpe32pae_t * pdpe) {
// Stub: teardown of 32 bit PAE page tables is not implemented yet.
57 PrintError("Unimplemented function\n");
60 void delete_page_tables_64(pml4e64_t * pml4) {
// Stub: teardown of 64 bit (long mode) page tables is not implemented yet.
61 PrintError("Unimplemented function\n");
65 int v3_translate_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
// Walk a guest's 32 bit page tables (PDE -> PTE) to translate vaddr
// into a guest physical address, stored in *paddr.
// NOTE(review): sampled extract -- return statements and some braces
// between the numbered lines are not visible here.
66 addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
67 pde32_t * guest_pde = 0;
68 addr_t guest_pte_pa = 0;
// The guest page directory lives in guest physical memory; it must be
// mapped into the host address space before it can be dereferenced.
70 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t*)&guest_pde) == -1) {
71 PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
72 (void *)guest_pde_pa);
76 switch (pde32_lookup(guest_pde, vaddr, &guest_pte_pa)) {
77 case PT_ENTRY_NOT_PRESENT:
// A 4MB large page terminates the walk: guest_pte_pa already holds
// the fully translated address.
80 case PT_ENTRY_LARGE_PAGE:
81 *paddr = guest_pte_pa;
85 pte32_t * guest_pte = NULL;
87 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t*)&guest_pte) == -1) {
88 PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
89 (void *)guest_pte_pa);
93 if (pte32_lookup(guest_pte, vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
101 // should never get here
106 int v3_translate_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
// Walk a guest's 32 bit PAE page tables (PDPE -> PDE -> PTE) to
// translate vaddr into a guest physical address stored in *paddr.
// NOTE(review): sampled extract -- returns/braces between the
// numbered lines are not visible here.
107 addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
108 pdpe32pae_t * guest_pdpe = 0;
109 addr_t guest_pde_pa = 0;
// Map each guest-physical paging structure into the host before use.
111 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t*)&guest_pdpe) == -1) {
112 PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
113 (void *)guest_pdpe_pa);
117 switch (pdpe32pae_lookup(guest_pdpe, vaddr, &guest_pde_pa))
119 case PT_ENTRY_NOT_PRESENT:
124 pde32pae_t * guest_pde = NULL;
125 addr_t guest_pte_pa = 0;
127 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
128 PrintError("Could not get virtual Address of Guest PDE32PAE (PA=%p)\n",
129 (void *)guest_pde_pa);
133 switch (pde32pae_lookup(guest_pde, vaddr, &guest_pte_pa))
135 case PT_ENTRY_NOT_PRESENT:
// A 2MB large page terminates the walk with the final address.
138 case PT_ENTRY_LARGE_PAGE:
139 *paddr = guest_pte_pa;
143 pte32pae_t * guest_pte = NULL;
145 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
146 PrintError("Could not get virtual Address of Guest PTE32PAE (PA=%p)\n",
147 (void *)guest_pte_pa);
151 if (pte32pae_lookup(guest_pte, vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
163 // should never get here
167 int v3_translate_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
// Walk a guest's 4-level long mode page tables
// (PML4 -> PDPE -> PDE -> PTE) to translate vaddr into a guest
// physical address stored in *paddr. 1GB pages are rejected.
// NOTE(review): sampled extract -- returns/braces between the
// numbered lines are not visible here.
168 addr_t guest_pml4_pa = CR3_TO_PML4E64_PA(guest_cr3);
169 pml4e64_t * guest_pmle = 0;
170 addr_t guest_pdpe_pa = 0;
172 if (guest_pa_to_host_va(info, guest_pml4_pa, (addr_t*)&guest_pmle) == -1) {
173 PrintError("Could not get virtual address of Guest PML4E64 (PA=%p)\n",
174 (void *)guest_pml4_pa);
178 switch (pml4e64_lookup(guest_pmle, vaddr, &guest_pdpe_pa)) {
179 case PT_ENTRY_NOT_PRESENT:
184 pdpe64_t * guest_pdp = NULL;
185 addr_t guest_pde_pa = 0;
187 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t *)&guest_pdp) == -1) {
188 PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
189 (void *)guest_pdpe_pa);
193 switch (pdpe64_lookup(guest_pdp, vaddr, &guest_pde_pa)) {
194 case PT_ENTRY_NOT_PRESENT:
// 1GB mappings at the PDPE level are unsupported by this VMM.
197 case PT_ENTRY_LARGE_PAGE:
199 PrintError("1 Gigabyte Pages not supported\n");
203 pde64_t * guest_pde = NULL;
204 addr_t guest_pte_pa = 0;
206 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
207 PrintError("Could not get virtual address of guest PDE64 (PA=%p)\n",
208 (void *)guest_pde_pa);
212 switch (pde64_lookup(guest_pde, vaddr, &guest_pte_pa)) {
213 case PT_ENTRY_NOT_PRESENT:
// A 2MB large page terminates the walk with the final address.
216 case PT_ENTRY_LARGE_PAGE:
217 *paddr = guest_pte_pa;
221 pte64_t * guest_pte = NULL;
223 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
224 PrintError("Could not get virtual address of guest PTE64 (PA=%p)\n",
225 (void *)guest_pte_pa);
229 if (pte64_lookup(guest_pte, vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
243 // should never get here
249 int v3_translate_host_pt_32(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
// Walk the host's 32 bit page tables to translate vaddr into *paddr.
// Host paging structures are reachable directly via CR3_TO_*_VA /
// V3_VAddr; no guest_pa_to_host_va mapping step is needed.
// NOTE(review): sampled extract -- returns/braces between the
// numbered lines are not visible here.
250 pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
251 pte32_t * host_pte = 0;
253 switch (pde32_lookup(host_pde, vaddr, (addr_t *)&host_pte)) {
254 case PT_ENTRY_NOT_PRESENT:
// Large page: host_pte actually carries the translated address.
257 case PT_ENTRY_LARGE_PAGE:
258 *paddr = (addr_t)host_pte;
261 if (pte32_lookup(V3_VAddr(host_pte), vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
267 // should never get here
272 int v3_translate_host_pt_32pae(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
// Walk the host's 32 bit PAE page tables (PDPE -> PDE -> PTE) to
// translate vaddr into *paddr.
// NOTE(review): sampled extract -- returns/braces between the
// numbered lines are not visible here.
273 pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
274 pde32pae_t * host_pde = NULL;
275 pte32pae_t * host_pte = NULL;
277 switch (pdpe32pae_lookup(host_pdpe, vaddr, (addr_t *)&host_pde)) {
278 case PT_ENTRY_NOT_PRESENT:
282 switch (pde32pae_lookup(V3_VAddr(host_pde), vaddr, (addr_t *)&host_pte)) {
283 case PT_ENTRY_NOT_PRESENT:
// 2MB page: host_pte carries the fully translated address.
286 case PT_ENTRY_LARGE_PAGE:
287 *paddr = (addr_t)host_pte;
290 if (pte32pae_lookup(V3_VAddr(host_pte), vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
299 // should never get here
304 int v3_translate_host_pt_64(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
// Walk the host's 4-level long mode page tables to translate vaddr
// into *paddr. 1GB pages at the PDPE level are rejected.
// NOTE(review): sampled extract -- returns/braces between the
// numbered lines are not visible here.
305 pml4e64_t * host_pmle = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
306 pdpe64_t * host_pdpe = NULL;
307 pde64_t * host_pde = NULL;
308 pte64_t * host_pte = NULL;
310 switch(pml4e64_lookup(host_pmle, vaddr, (addr_t *)&host_pdpe)) {
311 case PT_ENTRY_NOT_PRESENT:
315 switch(pdpe64_lookup(V3_VAddr(host_pdpe), vaddr, (addr_t *)&host_pde)) {
316 case PT_ENTRY_NOT_PRESENT:
319 case PT_ENTRY_LARGE_PAGE:
321 PrintError("1 Gigabyte Pages not supported\n");
324 switch (pde64_lookup(V3_VAddr(host_pde), vaddr, (addr_t *)&host_pte)) {
325 case PT_ENTRY_NOT_PRESENT:
// 2MB page: host_pte carries the fully translated address.
328 case PT_ENTRY_LARGE_PAGE:
329 *paddr = (addr_t)host_pte;
332 if (pte64_lookup(V3_VAddr(host_pte), vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
342 // should never get here
351 * PAGE TABLE LOOKUP FUNCTIONS
354 * The value of entry is a return type:
355 * Page not present: *entry = 0
356 * Large Page: *entry = translated physical address (byte granularity)
357 * PTE entry: *entry is the address of the PTE Page
362 * 32 bit Page Table lookup functions
366 pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry) {
367 pde32_t * pde_entry = &(pd[PDE32_INDEX(addr)]);
369 if (!pde_entry->present) {
371 return PT_ENTRY_NOT_PRESENT;
372 } else if (pde_entry->large_page) {
373 pde32_4MB_t * large_pde = (pde32_4MB_t *)pde_entry;
375 *entry = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
376 *entry += PAGE_OFFSET_4MB(addr);
378 return PT_ENTRY_LARGE_PAGE;
380 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
381 return PT_ENTRY_PAGE;
387 /* Takes a virtual addr (addr) and returns the physical addr (entry) as defined in the page table
389 pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry) {
390 pte32_t * pte_entry = &(pt[PTE32_INDEX(addr)]);
392 if (!pte_entry->present) {
394 // PrintDebug("Lookup at non present page (index=%d)\n", PTE32_INDEX(addr));
395 return PT_ENTRY_NOT_PRESENT;
397 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr) + PAGE_OFFSET(addr);
398 return PT_ENTRY_PAGE;
407 * 32 bit PAE Page Table lookup functions
410 pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry) {
411 pdpe32pae_t * pdpe_entry = &(pdp[PDPE32PAE_INDEX(addr)]);
413 if (!pdpe_entry->present) {
415 return PT_ENTRY_NOT_PRESENT;
417 *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
418 return PT_ENTRY_PAGE;
422 pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry) {
423 pde32pae_t * pde_entry = &(pd[PDE32PAE_INDEX(addr)]);
425 if (!pde_entry->present) {
427 return PT_ENTRY_NOT_PRESENT;
428 } else if (pde_entry->large_page) {
429 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)pde_entry;
431 *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
432 *entry += PAGE_OFFSET_2MB(addr);
434 return PT_ENTRY_LARGE_PAGE;
436 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
437 return PT_ENTRY_PAGE;
441 pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry) {
442 pte32pae_t * pte_entry = &(pt[PTE32PAE_INDEX(addr)]);
444 if (!pte_entry->present) {
446 return PT_ENTRY_NOT_PRESENT;
448 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr) + PAGE_OFFSET(addr);
449 return PT_ENTRY_PAGE;
457 * 64 bit Page Table lookup functions
460 pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry) {
461 pml4e64_t * pml_entry = &(pml[PML4E64_INDEX(addr)]);
463 if (!pml_entry->present) {
465 return PT_ENTRY_NOT_PRESENT;
467 *entry = BASE_TO_PAGE_ADDR(pml_entry->pdp_base_addr);
468 return PT_ENTRY_PAGE;
472 pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry) {
// Look up the long mode PDPE covering addr; on success *entry holds
// the physical base of the next-level page directory.
// 1GB large pages are not supported by this VMM.
473 pdpe64_t * pdpe_entry = &(pdp[PDPE64_INDEX(addr)]);
475 if (!pdpe_entry->present) {
477 return PT_ENTRY_NOT_PRESENT;
478 } else if (pdpe_entry->large_page) {
479 PrintError("1 Gigabyte pages not supported\n");
// NOTE(review): the error-path return for the 1GB branch falls
// between the visible numbered lines -- confirm it does not fall
// through into the PT_ENTRY_PAGE path below.
483 *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
484 return PT_ENTRY_PAGE;
488 pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry) {
489 pde64_t * pde_entry = &(pd[PDE64_INDEX(addr)]);
491 if (!pde_entry->present) {
493 return PT_ENTRY_NOT_PRESENT;
494 } else if (pde_entry->large_page) {
495 pde64_2MB_t * large_pde = (pde64_2MB_t *)pde_entry;
497 *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
498 *entry += PAGE_OFFSET_2MB(addr);
500 return PT_ENTRY_LARGE_PAGE;
502 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
503 return PT_ENTRY_PAGE;
507 pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry) {
508 pte64_t * pte_entry = &(pt[PTE64_INDEX(addr)]);
510 if (!pte_entry->present) {
512 return PT_ENTRY_NOT_PRESENT;
514 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr) + PAGE_OFFSET(addr);
515 return PT_ENTRY_PAGE;
528 * Page Table Access Checks
537 int v3_check_host_pt_32(v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
// Walk the host's 32 bit page tables checking whether access_type is
// permitted at each level; the first failing level's status is left
// in *access_status.
// NOTE(review): sampled extract -- returns/braces between the
// numbered lines are not visible here.
538 pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
539 pte32_t * host_pte = 0;
543 // Check accessibility of PDE
544 *access_status = v3_can_access_pde32(host_pde, vaddr, access_type);
546 if (*access_status != PT_ACCESS_OK) {
552 switch (pde32_lookup(host_pde, vaddr, (addr_t *)&host_pte)) {
553 case PT_ENTRY_LARGE_PAGE:
// For regular pages, descend and check the PTE as well.
556 *access_status = v3_can_access_pte32(V3_VAddr(host_pte), vaddr, access_type);
558 if (*access_status != PT_ACCESS_OK) {
567 // should never get here
571 int v3_check_host_pt_32pae(v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
// Walk the host's 32 bit PAE page tables (PDPE -> PDE -> PTE)
// checking whether access_type is permitted at each level.
// NOTE(review): sampled extract -- returns/braces between the
// numbered lines are not visible here.
572 pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
573 pde32pae_t * host_pde = NULL;
574 pte32pae_t * host_pte = NULL;
577 *access_status = v3_can_access_pdpe32pae(host_pdpe, vaddr, access_type);
579 if (*access_status != PT_ACCESS_OK) {
585 switch (pdpe32pae_lookup(host_pdpe, vaddr, (addr_t *)&host_pde)) {
587 *access_status = v3_can_access_pde32pae(V3_VAddr(host_pde), vaddr, access_type);
589 if (*access_status != PT_ACCESS_OK) {
595 switch (pde32pae_lookup(V3_VAddr(host_pde), vaddr, (addr_t *)&host_pte)) {
596 case PT_ENTRY_LARGE_PAGE:
599 *access_status = v3_can_access_pte32pae(V3_VAddr(host_pte), vaddr, access_type);
601 if (*access_status != PT_ACCESS_OK) {
613 // should never get here
619 int v3_check_host_pt_64(v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
// Walk the host's 4-level long mode page tables checking whether
// access_type is permitted at each level; the first failing level's
// status is left in *access_status.
// NOTE(review): sampled extract -- returns/braces between the
// numbered lines are not visible here.
620 pml4e64_t * host_pmle = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
621 pdpe64_t * host_pdpe = NULL;
622 pde64_t * host_pde = NULL;
623 pte64_t * host_pte = NULL;
627 *access_status = v3_can_access_pml4e64(host_pmle, vaddr, access_type);
629 if (*access_status != PT_ACCESS_OK) {
635 switch(pml4e64_lookup(host_pmle, vaddr, (addr_t *)&host_pdpe)) {
637 *access_status = v3_can_access_pdpe64(V3_VAddr(host_pdpe), vaddr, access_type);
639 if (*access_status != PT_ACCESS_OK) {
645 switch(pdpe64_lookup(V3_VAddr(host_pdpe), vaddr, (addr_t *)&host_pde)) {
646 case PT_ENTRY_LARGE_PAGE:
649 *access_status = v3_can_access_pde64(V3_VAddr(host_pde), vaddr, access_type);
651 if (*access_status != PT_ACCESS_OK) {
657 switch (pde64_lookup(V3_VAddr(host_pde), vaddr, (addr_t *)&host_pte)) {
658 case PT_ENTRY_LARGE_PAGE:
661 *access_status = v3_can_access_pte64(V3_VAddr(host_pte), vaddr, access_type);
663 if (*access_status != PT_ACCESS_OK) {
678 // should never get here
686 int v3_check_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
687 pf_error_t access_type, pt_access_status_t * access_status) {
// Walk a guest's 32 bit page tables checking whether access_type is
// permitted; each guest-physical paging structure is mapped into the
// host via guest_pa_to_host_va before being examined.
// NOTE(review): sampled extract -- returns/braces between the
// numbered lines are not visible here.
688 addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
689 pde32_t * guest_pde = NULL;
690 addr_t guest_pte_pa = 0;
693 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t*)&guest_pde) == -1) {
694 PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
695 (void *)guest_pde_pa);
700 // Check accessibility of PDE
701 *access_status = v3_can_access_pde32(guest_pde, vaddr, access_type);
703 if (*access_status != PT_ACCESS_OK) {
709 switch (pde32_lookup(guest_pde, vaddr, &guest_pte_pa)) {
710 case PT_ENTRY_LARGE_PAGE:
714 pte32_t * guest_pte = NULL;
716 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t*)&guest_pte) == -1) {
717 PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
718 (void *)guest_pte_pa);
722 *access_status = v3_can_access_pte32(guest_pte, vaddr, access_type);
724 if (*access_status != PT_ACCESS_OK) {
733 // should never get here
741 int v3_check_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
742 pf_error_t access_type, pt_access_status_t * access_status) {
// Walk a guest's 32 bit PAE page tables (PDPE -> PDE -> PTE) checking
// whether access_type is permitted at each level.
// NOTE(review): sampled extract -- returns/braces between the
// numbered lines are not visible here.
743 addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
744 pdpe32pae_t * guest_pdpe = NULL;
745 addr_t guest_pde_pa = 0;
748 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t*)&guest_pdpe) == -1) {
749 PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
750 (void *)guest_pdpe_pa);
754 *access_status = v3_can_access_pdpe32pae(guest_pdpe, vaddr, access_type);
756 if (*access_status != PT_ACCESS_OK) {
762 switch (pdpe32pae_lookup(guest_pdpe, vaddr, &guest_pde_pa)) {
765 pde32pae_t * guest_pde = NULL;
766 addr_t guest_pte_pa = 0;
768 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
769 PrintError("Could not get virtual Address of Guest PDE32PAE (PA=%p)\n",
770 (void *)guest_pde_pa);
774 *access_status = v3_can_access_pde32pae(guest_pde, vaddr, access_type);
776 if (*access_status != PT_ACCESS_OK) {
782 switch (pde32pae_lookup(guest_pde, vaddr, &guest_pte_pa)) {
783 case PT_ENTRY_LARGE_PAGE:
787 pte32pae_t * guest_pte = NULL;
789 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
790 PrintError("Could not get virtual Address of Guest PTE32PAE (PA=%p)\n",
791 (void *)guest_pte_pa);
795 *access_status = v3_can_access_pte32pae(guest_pte, vaddr, access_type);
797 if (*access_status != PT_ACCESS_OK) {
811 // should never get here
// NOTE(review): file-scope `guest_pte` here looks accidental -- it is
// shadowed by the local declaration inside the function below and is
// never obviously used; confirm whether it can be removed.
816 pte64_t * guest_pte = NULL;
818 int v3_check_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
819 pf_error_t access_type, pt_access_status_t * access_status) {
// Walk a guest's 4-level long mode page tables checking whether
// access_type is permitted at each level.
// NOTE(review): sampled extract -- returns/braces between the
// numbered lines are not visible here.
820 addr_t guest_pml4_pa = CR3_TO_PML4E64_PA(guest_cr3);
821 pml4e64_t * guest_pmle = NULL;
822 addr_t guest_pdpe_pa = 0;
825 if (guest_pa_to_host_va(info, guest_pml4_pa, (addr_t*)&guest_pmle) == -1) {
826 PrintError("Could not get virtual address of Guest PML4E64 (PA=%p)\n",
827 (void *)guest_pml4_pa);
831 *access_status = v3_can_access_pml4e64(guest_pmle, vaddr, access_type);
833 if (*access_status != PT_ACCESS_OK) {
839 switch(pml4e64_lookup(guest_pmle, vaddr, &guest_pdpe_pa)) {
842 pdpe64_t * guest_pdp = NULL;
843 addr_t guest_pde_pa = 0;
845 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t *)&guest_pdp) == -1) {
846 PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
847 (void *)guest_pdpe_pa);
851 *access_status = v3_can_access_pdpe64(guest_pdp, vaddr, access_type);
853 if (*access_status != PT_ACCESS_OK) {
859 switch(pdpe64_lookup(guest_pdp, vaddr, &guest_pde_pa)) {
860 case PT_ENTRY_LARGE_PAGE:
864 pde64_t * guest_pde = NULL;
865 addr_t guest_pte_pa = 0;
867 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
868 PrintError("Could not get virtual address of guest PDE64 (PA=%p)\n",
869 (void *)guest_pde_pa);
873 *access_status = v3_can_access_pde64(guest_pde, vaddr, access_type);
875 if (*access_status != PT_ACCESS_OK) {
881 switch (pde64_lookup(guest_pde, vaddr, &guest_pte_pa)) {
882 case PT_ENTRY_LARGE_PAGE:
886 pte64_t * guest_pte = NULL;
888 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
889 PrintError("Could not get virtual address of guest PTE64 (PA=%p)\n",
890 (void *)guest_pte_pa);
894 *access_status = v3_can_access_pte64(guest_pte, vaddr, access_type);
896 if (*access_status != PT_ACCESS_OK) {
914 // should never get here
922 static pt_access_status_t can_access_pt_entry(gen_pt_t * pt, pf_error_t access_type) {
// Generic permission check for a single paging-structure entry at any
// level: presence first, then write permission against a write fault,
// then supervisor-only against a user-mode fault.
923 if (pt->present == 0) {
924 return PT_ACCESS_NOT_PRESENT;
925 } else if ((pt->writable == 0) && (access_type.write == 1)) {
926 return PT_ACCESS_WRITE_ERROR;
927 } else if ((pt->user_page == 0) && (access_type.user == 1)) {
929 return PT_ACCESS_USER_ERROR;
// Falls through to the PT_ACCESS_OK return (not visible in this extract).
938 * 32 bit access checks
940 pt_access_status_t inline v3_can_access_pde32(pde32_t * pde, addr_t addr, pf_error_t access_type) {
941 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32_INDEX(addr)];
942 return can_access_pt_entry(entry, access_type);
945 pt_access_status_t inline v3_can_access_pte32(pte32_t * pte, addr_t addr, pf_error_t access_type) {
946 gen_pt_t * entry = (gen_pt_t *)&pte[PTE32_INDEX(addr)];
947 return can_access_pt_entry(entry, access_type);
952 * 32 bit PAE access checks
954 pt_access_status_t inline v3_can_access_pdpe32pae(pdpe32pae_t * pdpe, addr_t addr, pf_error_t access_type) {
955 gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE32PAE_INDEX(addr)];
956 return can_access_pt_entry(entry, access_type);
959 pt_access_status_t inline v3_can_access_pde32pae(pde32pae_t * pde, addr_t addr, pf_error_t access_type) {
960 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32PAE_INDEX(addr)];
961 return can_access_pt_entry(entry, access_type);
964 pt_access_status_t inline v3_can_access_pte32pae(pte32pae_t * pte, addr_t addr, pf_error_t access_type) {
965 gen_pt_t * entry = (gen_pt_t *)&pte[PTE32PAE_INDEX(addr)];
966 return can_access_pt_entry(entry, access_type);
970 * 64 Bit access checks
972 pt_access_status_t inline v3_can_access_pml4e64(pml4e64_t * pmle, addr_t addr, pf_error_t access_type) {
973 gen_pt_t * entry = (gen_pt_t *)&pmle[PML4E64_INDEX(addr)];
974 return can_access_pt_entry(entry, access_type);
977 pt_access_status_t inline v3_can_access_pdpe64(pdpe64_t * pdpe, addr_t addr, pf_error_t access_type) {
978 gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE64_INDEX(addr)];
979 return can_access_pt_entry(entry, access_type);
982 pt_access_status_t inline v3_can_access_pde64(pde64_t * pde, addr_t addr, pf_error_t access_type) {
983 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32_INDEX(addr)];
984 return can_access_pt_entry(entry, access_type);
987 pt_access_status_t inline v3_can_access_pte64(pte64_t * pte, addr_t addr, pf_error_t access_type) {
988 gen_pt_t * entry = (gen_pt_t *)&pte[PTE64_INDEX(addr)];
989 return can_access_pt_entry(entry, access_type);
997 /* We generate a page table to correspond to a given memory layout
998 * pulling pages from the mem_list when necessary
999 * If there are any gaps in the layout, we add them as unmapped pages
1001 pde32_t * create_passthrough_pts_32(struct guest_info * guest_info) {
// Build 32 bit identity-style (passthrough) page tables for the
// guest's shadow memory map: every guest page that maps to allocated
// host memory gets a present, writable, user PTE; special region
// types (hooked, unallocated, device, remote, swapped) get a cleared,
// non-present PTE. PTE pages that end up entirely empty are freed and
// their PDE is marked not present.
// NOTE(review): sampled extract -- if-conditions, else branches, and
// closing braces fall between the numbered lines.
1002 addr_t current_page_addr = 0;
1004 struct shadow_map * map = &(guest_info->mem_map);
1006 pde32_t * pde = V3_VAddr(V3_AllocPages(1));
1008 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
1009 int pte_present = 0;
1010 pte32_t * pte = V3_VAddr(V3_AllocPages(1));
1013 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
1014 struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
// Region types that must not be mapped through directly:
1017 (region->host_type == HOST_REGION_HOOK) ||
1018 (region->host_type == HOST_REGION_UNALLOCATED) ||
1019 (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
1020 (region->host_type == HOST_REGION_REMOTE) ||
1021 (region->host_type == HOST_REGION_SWAPPED)) {
1023 pte[j].writable = 0;
1024 pte[j].user_page = 0;
1025 pte[j].write_through = 0;
1026 pte[j].cache_disable = 0;
1027 pte[j].accessed = 0;
1029 pte[j].pte_attr = 0;
1030 pte[j].global_page = 0;
1031 pte[j].vmm_info = 0;
1032 pte[j].page_base_addr = 0;
// Mappable region: present, writable, user-accessible PTE pointing
// at the corresponding host physical page.
1036 pte[j].writable = 1;
1037 pte[j].user_page = 1;
1038 pte[j].write_through = 0;
1039 pte[j].cache_disable = 0;
1040 pte[j].accessed = 0;
1042 pte[j].pte_attr = 0;
1043 pte[j].global_page = 0;
1044 pte[j].vmm_info = 0;
1046 if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
// NOTE(review): `>> 12` is the 4KB page shift; the 64 bit variant
// uses PAGE_BASE_ADDR() for the same conversion -- consider unifying.
1052 pte[j].page_base_addr = host_addr >> 12;
1057 current_page_addr += PAGE_SIZE;
// Drop PTE pages with no present entries and mark the PDE empty.
1060 if (pte_present == 0) {
1061 V3_FreePage(V3_PAddr(pte));
1064 pde[i].writable = 0;
1065 pde[i].user_page = 0;
1066 pde[i].write_through = 0;
1067 pde[i].cache_disable = 0;
1068 pde[i].accessed = 0;
1069 pde[i].reserved = 0;
1070 pde[i].large_page = 0;
1071 pde[i].global_page = 0;
1072 pde[i].vmm_info = 0;
1073 pde[i].pt_base_addr = 0;
1076 pde[i].writable = 1;
1077 pde[i].user_page = 1;
1078 pde[i].write_through = 0;
1079 pde[i].cache_disable = 0;
1080 pde[i].accessed = 0;
1081 pde[i].reserved = 0;
1082 pde[i].large_page = 0;
1083 pde[i].global_page = 0;
1084 pde[i].vmm_info = 0;
1085 pde[i].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
1094 /* We generate a page table to correspond to a given memory layout
1095 * pulling pages from the mem_list when necessary
1096 * If there are any gaps in the layout, we add them as unmapped pages
1098 pdpe32pae_t * create_passthrough_pts_32PAE(struct guest_info * guest_info) {
// Build 32 bit PAE passthrough page tables (PDPE -> PDE -> PTE) for
// the guest's shadow memory map, mirroring the 32 bit version above.
// Empty PTE pages and PDE pages are freed and their parent entries
// cleared.
// NOTE(review): sampled extract -- if-conditions, else branches, and
// closing braces fall between the numbered lines.
1099 addr_t current_page_addr = 0;
1101 struct shadow_map * map = &(guest_info->mem_map);
1103 pdpe32pae_t * pdpe = V3_VAddr(V3_AllocPages(1));
// The PDPT is zeroed up front; PDE/PTE pages are filled field by field.
1104 memset(pdpe, 0, PAGE_SIZE);
1106 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
1107 int pde_present = 0;
1108 pde32pae_t * pde = V3_VAddr(V3_AllocPages(1));
1110 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
1113 int pte_present = 0;
1114 pte32pae_t * pte = V3_VAddr(V3_AllocPages(1));
1117 for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
1118 struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
// Region types that must not be mapped through directly:
1121 (region->host_type == HOST_REGION_HOOK) ||
1122 (region->host_type == HOST_REGION_UNALLOCATED) ||
1123 (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
1124 (region->host_type == HOST_REGION_REMOTE) ||
1125 (region->host_type == HOST_REGION_SWAPPED)) {
1127 pte[k].writable = 0;
1128 pte[k].user_page = 0;
1129 pte[k].write_through = 0;
1130 pte[k].cache_disable = 0;
1131 pte[k].accessed = 0;
1133 pte[k].pte_attr = 0;
1134 pte[k].global_page = 0;
1135 pte[k].vmm_info = 0;
1136 pte[k].page_base_addr = 0;
// Mappable region: present, writable, user-accessible PTE.
1141 pte[k].writable = 1;
1142 pte[k].user_page = 1;
1143 pte[k].write_through = 0;
1144 pte[k].cache_disable = 0;
1145 pte[k].accessed = 0;
1147 pte[k].pte_attr = 0;
1148 pte[k].global_page = 0;
1149 pte[k].vmm_info = 0;
1151 if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
// NOTE(review): `>> 12` is the 4KB page shift; elsewhere
// PAGE_BASE_ADDR() is used for the same conversion.
1157 pte[k].page_base_addr = host_addr >> 12;
1163 current_page_addr += PAGE_SIZE;
// Drop PTE pages with no present entries; clear the parent PDE.
1166 if (pte_present == 0) {
1167 V3_FreePage(V3_PAddr(pte));
1170 pde[j].writable = 0;
1171 pde[j].user_page = 0;
1172 pde[j].write_through = 0;
1173 pde[j].cache_disable = 0;
1174 pde[j].accessed = 0;
1176 pde[j].large_page = 0;
1177 pde[j].global_page = 0;
1178 pde[j].vmm_info = 0;
1179 pde[j].pt_base_addr = 0;
1183 pde[j].writable = 1;
1184 pde[j].user_page = 1;
1185 pde[j].write_through = 0;
1186 pde[j].cache_disable = 0;
1187 pde[j].accessed = 0;
1189 pde[j].large_page = 0;
1190 pde[j].global_page = 0;
1191 pde[j].vmm_info = 0;
1192 pde[j].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
// Drop PDE pages with no present entries; clear the parent PDPE.
1200 if (pde_present == 0) {
1201 V3_FreePage(V3_PAddr(pde));
1203 pdpe[i].present = 0;
1205 pdpe[i].write_through = 0;
1206 pdpe[i].cache_disable = 0;
1207 pdpe[i].accessed = 0;
1210 pdpe[i].vmm_info = 0;
1211 pdpe[i].pd_base_addr = 0;
1214 pdpe[i].present = 1;
1216 pdpe[i].write_through = 0;
1217 pdpe[i].cache_disable = 0;
1218 pdpe[i].accessed = 0;
1221 pdpe[i].vmm_info = 0;
1222 pdpe[i].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
1237 pml4e64_t * create_passthrough_pts_64(struct guest_info * info) {
1238 addr_t current_page_addr = 0;
1240 struct shadow_map * map = &(info->mem_map);
1242 pml4e64_t * pml = V3_VAddr(V3_AllocPages(1));
1244 for (i = 0; i < 1; i++) {
1245 int pdpe_present = 0;
1246 pdpe64_t * pdpe = V3_VAddr(V3_AllocPages(1));
1248 for (j = 0; j < 20; j++) {
1249 int pde_present = 0;
1250 pde64_t * pde = V3_VAddr(V3_AllocPages(1));
1252 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
1253 int pte_present = 0;
1254 pte64_t * pte = V3_VAddr(V3_AllocPages(1));
1257 for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
1258 struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
1263 (region->host_type == HOST_REGION_HOOK) ||
1264 (region->host_type == HOST_REGION_UNALLOCATED) ||
1265 (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
1266 (region->host_type == HOST_REGION_REMOTE) ||
1267 (region->host_type == HOST_REGION_SWAPPED)) {
1269 pte[m].writable = 0;
1270 pte[m].user_page = 0;
1271 pte[m].write_through = 0;
1272 pte[m].cache_disable = 0;
1273 pte[m].accessed = 0;
1275 pte[m].pte_attr = 0;
1276 pte[m].global_page = 0;
1277 pte[m].vmm_info = 0;
1278 pte[m].page_base_addr = 0;
1282 pte[m].writable = 1;
1283 pte[m].user_page = 1;
1284 pte[m].write_through = 0;
1285 pte[m].cache_disable = 0;
1286 pte[m].accessed = 0;
1288 pte[m].pte_attr = 0;
1289 pte[m].global_page = 0;
1290 pte[m].vmm_info = 0;
1292 if (guest_pa_to_host_pa(info, current_page_addr, &host_addr) == -1) {
1298 pte[m].page_base_addr = PAGE_BASE_ADDR(host_addr);
1300 //PrintPTE64(current_page_addr, &(pte[m]));
1308 current_page_addr += PAGE_SIZE;
1311 if (pte_present == 0) {
1312 V3_FreePage(V3_PAddr(pte));
1315 pde[k].writable = 0;
1316 pde[k].user_page = 0;
1317 pde[k].write_through = 0;
1318 pde[k].cache_disable = 0;
1319 pde[k].accessed = 0;
1321 pde[k].large_page = 0;
1322 //pde[k].global_page = 0;
1323 pde[k].vmm_info = 0;
1324 pde[k].pt_base_addr = 0;
1327 pde[k].writable = 1;
1328 pde[k].user_page = 1;
1329 pde[k].write_through = 0;
1330 pde[k].cache_disable = 0;
1331 pde[k].accessed = 0;
1333 pde[k].large_page = 0;
1334 //pde[k].global_page = 0;
1335 pde[k].vmm_info = 0;
1336 pde[k].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
1342 if (pde_present == 0) {
1343 V3_FreePage(V3_PAddr(pde));
1345 pdpe[j].present = 0;
1346 pdpe[j].writable = 0;
1347 pdpe[j].user_page = 0;
1348 pdpe[j].write_through = 0;
1349 pdpe[j].cache_disable = 0;
1350 pdpe[j].accessed = 0;
1352 pdpe[j].large_page = 0;
1353 //pdpe[j].global_page = 0;
1354 pdpe[j].vmm_info = 0;
1355 pdpe[j].pd_base_addr = 0;
1357 pdpe[j].present = 1;
1358 pdpe[j].writable = 1;
1359 pdpe[j].user_page = 1;
1360 pdpe[j].write_through = 0;
1361 pdpe[j].cache_disable = 0;
1362 pdpe[j].accessed = 0;
1364 pdpe[j].large_page = 0;
1365 //pdpe[j].global_page = 0;
1366 pdpe[j].vmm_info = 0;
1367 pdpe[j].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
1375 PrintDebug("PML index=%d\n", i);
1377 if (pdpe_present == 0) {
1378 V3_FreePage(V3_PAddr(pdpe));
1381 pml[i].writable = 0;
1382 pml[i].user_page = 0;
1383 pml[i].write_through = 0;
1384 pml[i].cache_disable = 0;
1385 pml[i].accessed = 0;
1386 pml[i].reserved = 0;
1387 //pml[i].large_page = 0;
1388 //pml[i].global_page = 0;
1389 pml[i].vmm_info = 0;
1390 pml[i].pdp_base_addr = 0;
1393 pml[i].writable = 1;
1394 pml[i].user_page = 1;
1395 pml[i].write_through = 0;
1396 pml[i].cache_disable = 0;
1397 pml[i].accessed = 0;
1398 pml[i].reserved = 0;
1399 //pml[i].large_page = 0;
1400 //pml[i].global_page = 0;
1401 pml[i].vmm_info = 0;
1402 pml[i].pdp_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pdpe));