2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at http://www.v3vee.org
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm_paging.h>
22 #include <palacios/vmm.h>
24 #include <palacios/vm_guest_mem.h>
28 #define USE_VMM_PAGING_DEBUG
29 // All of the debug functions defined in vmm_paging.h are implemented in this file
30 #include "vmm_paging_debug.h"
31 #undef USE_VMM_PAGING_DEBUG
34 void delete_page_tables_32(pde32_t * pde) {
41 for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
43 // We double cast, first to an addr_t to handle 64 bit issues, then to the pointer
44 PrintDebug("PTE base addr %x \n", pde[i].pt_base_addr);
45 pte32_t * pte = (pte32_t *)((addr_t)(uint_t)(pde[i].pt_base_addr << PAGE_POWER));
47 PrintDebug("Deleting PTE %d (%p)\n", i, pte);
52 PrintDebug("Deleting PDE (%p)\n", pde);
53 V3_FreePage(V3_PAddr(pde));
56 void delete_page_tables_32PAE(pdpe32pae_t * pdpe) {
57 PrintError("Unimplemented function\n");
60 void delete_page_tables_64(pml4e64_t * pml4) {
61 PrintError("Unimplemented function\n");
65 int v3_translate_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
66 addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
67 pde32_t * guest_pde = 0;
68 addr_t guest_pte_pa = 0;
70 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t*)&guest_pde) == -1) {
71 PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
72 (void *)guest_pde_pa);
76 switch (pde32_lookup(guest_pde, vaddr, &guest_pte_pa)) {
77 case PT_ENTRY_NOT_PRESENT:
80 case PT_ENTRY_LARGE_PAGE:
81 *paddr = guest_pte_pa;
85 pte32_t * guest_pte = NULL;
87 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t*)&guest_pte) == -1) {
88 PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
89 (void *)guest_pte_pa);
93 if (pte32_lookup(guest_pte, vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
105 int v3_translate_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
106 addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
107 pdpe32pae_t * guest_pdpe = 0;
108 addr_t guest_pde_pa = 0;
110 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t*)&guest_pdpe) == -1) {
111 PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
112 (void *)guest_pdpe_pa);
116 switch (pdpe32pae_lookup(guest_pdpe, vaddr, &guest_pde_pa))
118 case PT_ENTRY_NOT_PRESENT:
123 pde32pae_t * guest_pde = NULL;
124 addr_t guest_pte_pa = 0;
126 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
127 PrintError("Could not get virtual Address of Guest PDE32PAE (PA=%p)\n",
128 (void *)guest_pde_pa);
132 switch (pde32pae_lookup(guest_pde, vaddr, &guest_pte_pa))
134 case PT_ENTRY_NOT_PRESENT:
137 case PT_ENTRY_LARGE_PAGE:
138 *paddr = guest_pte_pa;
142 pte32pae_t * guest_pte = NULL;
144 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
145 PrintError("Could not get virtual Address of Guest PTE32PAE (PA=%p)\n",
146 (void *)guest_pte_pa);
150 if (pte32pae_lookup(guest_pte, vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
165 int v3_translate_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
166 addr_t guest_pml4_pa = CR3_TO_PML4E64_PA(guest_cr3);
167 pml4e64_t * guest_pmle = 0;
168 addr_t guest_pdpe_pa = 0;
170 if (guest_pa_to_host_va(info, guest_pml4_pa, (addr_t*)&guest_pmle) == -1) {
171 PrintError("Could not get virtual address of Guest PML4E64 (PA=%p)\n",
172 (void *)guest_pml4_pa);
176 switch (pml4e64_lookup(guest_pmle, vaddr, &guest_pdpe_pa)) {
177 case PT_ENTRY_NOT_PRESENT:
182 pdpe64_t * guest_pdp = NULL;
183 addr_t guest_pde_pa = 0;
185 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t *)&guest_pdp) == -1) {
186 PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
187 (void *)guest_pdpe_pa);
191 switch (pdpe64_lookup(guest_pdp, vaddr, &guest_pde_pa)) {
192 case PT_ENTRY_NOT_PRESENT:
195 case PT_ENTRY_LARGE_PAGE:
197 PrintError("1 Gigabyte Pages not supported\n");
201 pde64_t * guest_pde = NULL;
202 addr_t guest_pte_pa = 0;
204 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
205 PrintError("Could not get virtual address of guest PDE64 (PA=%p)\n",
206 (void *)guest_pde_pa);
210 switch (pde64_lookup(guest_pde, vaddr, &guest_pte_pa)) {
211 case PT_ENTRY_NOT_PRESENT:
214 case PT_ENTRY_LARGE_PAGE:
215 *paddr = guest_pte_pa;
219 pte64_t * guest_pte = NULL;
221 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
222 PrintError("Could not get virtual address of guest PTE64 (PA=%p)\n",
223 (void *)guest_pte_pa);
227 if (pte64_lookup(guest_pte, vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
245 int v3_translate_host_pt_32(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
246 pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
247 pte32_t * host_pte = 0;
249 switch (pde32_lookup(host_pde, vaddr, (addr_t *)&host_pte)) {
250 case PT_ENTRY_NOT_PRESENT:
253 case PT_ENTRY_LARGE_PAGE:
254 *paddr = (addr_t)host_pte;
257 if (pte32_lookup(host_pte, vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
263 // should never get here
268 int v3_translate_host_pt_32pae(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
269 pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
270 pde32pae_t * host_pde = NULL;
271 pte32pae_t * host_pte = NULL;
273 switch (pdpe32pae_lookup(host_pdpe, vaddr, (addr_t *)&host_pde)) {
274 case PT_ENTRY_NOT_PRESENT:
278 switch (pde32pae_lookup(host_pde, vaddr, (addr_t *)&host_pte)) {
279 case PT_ENTRY_NOT_PRESENT:
282 case PT_ENTRY_LARGE_PAGE:
283 *paddr = (addr_t)host_pte;
286 if (pte32pae_lookup(host_pte, vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
295 // should never get here
300 int v3_translate_host_pt_64(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
301 pml4e64_t * host_pmle = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
302 pdpe64_t * host_pdpe = NULL;
303 pde64_t * host_pde = NULL;
304 pte64_t * host_pte = NULL;
306 switch(pml4e64_lookup(host_pmle, vaddr, (addr_t *)&host_pdpe)) {
307 case PT_ENTRY_NOT_PRESENT:
311 switch(pdpe64_lookup(host_pdpe, vaddr, (addr_t *)&host_pde)) {
312 case PT_ENTRY_NOT_PRESENT:
315 case PT_ENTRY_LARGE_PAGE:
317 PrintError("1 Gigabyte Pages not supported\n");
320 switch (pde64_lookup(host_pde, vaddr, (addr_t *)&host_pte)) {
321 case PT_ENTRY_NOT_PRESENT:
324 case PT_ENTRY_LARGE_PAGE:
326 if (pte64_lookup(host_pte, vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
336 // should never get here
345 * PAGE TABLE LOOKUP FUNCTIONS
348 * The value of entry is a return type:
349 * Page not present: *entry = 0
350 * Large Page: *entry = translated physical address (byte granularity)
351 * PTE entry: *entry is the address of the PTE Page
356 * 32 bit Page Table lookup functions
360 pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry) {
361 pde32_t * pde_entry = &(pd[PDE32_INDEX(addr)]);
363 if (!pde_entry->present) {
365 return PT_ENTRY_NOT_PRESENT;
366 } else if (pde_entry->large_page) {
367 pde32_4MB_t * large_pde = (pde32_4MB_t *)pde_entry;
369 *entry = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
370 *entry += PAGE_OFFSET_4MB(addr);
372 return PT_ENTRY_LARGE_PAGE;
374 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
375 return PT_ENTRY_PAGE;
381 /* Takes a virtual addr (addr) and returns the physical addr (entry) as defined in the page table
383 pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry) {
384 pte32_t * pte_entry = &(pt[PTE32_INDEX(addr)]);
386 if (!pte_entry->present) {
388 // PrintDebug("Lookup at non present page (index=%d)\n", PTE32_INDEX(addr));
389 return PT_ENTRY_NOT_PRESENT;
391 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr) + PAGE_OFFSET(addr);
392 return PT_ENTRY_PAGE;
401 * 32 bit PAE Page Table lookup functions
404 pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry) {
405 pdpe32pae_t * pdpe_entry = &(pdp[PDPE32PAE_INDEX(addr)]);
407 if (!pdpe_entry->present) {
409 return PT_ENTRY_NOT_PRESENT;
411 *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
412 return PT_ENTRY_PAGE;
416 pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry) {
417 pde32pae_t * pde_entry = &(pd[PDE32PAE_INDEX(addr)]);
419 if (!pde_entry->present) {
421 return PT_ENTRY_NOT_PRESENT;
422 } else if (pde_entry->large_page) {
423 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)pde_entry;
425 *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
426 *entry += PAGE_OFFSET_2MB(addr);
428 return PT_ENTRY_LARGE_PAGE;
430 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
431 return PT_ENTRY_PAGE;
435 pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry) {
436 pte32pae_t * pte_entry = &(pt[PTE32PAE_INDEX(addr)]);
438 if (!pte_entry->present) {
440 return PT_ENTRY_NOT_PRESENT;
442 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr) + PAGE_OFFSET(addr);
443 return PT_ENTRY_PAGE;
451 * 64 bit Page Table lookup functions
454 pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry) {
455 pml4e64_t * pml_entry = &(pml[PML4E64_INDEX(addr)]);
457 if (!pml_entry->present) {
459 return PT_ENTRY_NOT_PRESENT;
461 *entry = BASE_TO_PAGE_ADDR(pml_entry->pdp_base_addr);
462 return PT_ENTRY_PAGE;
466 pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry) {
467 pdpe64_t * pdpe_entry = &(pdp[PDPE64_INDEX(addr)]);
469 if (!pdpe_entry->present) {
471 return PT_ENTRY_NOT_PRESENT;
472 } else if (pdpe_entry->large_page) {
473 PrintError("1 Gigabyte pages not supported\n");
477 *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
478 return PT_ENTRY_PAGE;
482 pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry) {
483 pde64_t * pde_entry = &(pd[PDE64_INDEX(addr)]);
485 if (!pde_entry->present) {
487 return PT_ENTRY_NOT_PRESENT;
488 } else if (pde_entry->large_page) {
489 pde64_2MB_t * large_pde = (pde64_2MB_t *)pde_entry;
491 *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
492 *entry += PAGE_OFFSET_2MB(addr);
494 return PT_ENTRY_LARGE_PAGE;
496 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
497 return PT_ENTRY_PAGE;
501 pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry) {
502 pte64_t * pte_entry = &(pt[PTE64_INDEX(addr)]);
504 if (!pte_entry->present) {
506 return PT_ENTRY_NOT_PRESENT;
508 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr) + PAGE_OFFSET(addr);
509 return PT_ENTRY_PAGE;
522 static pt_access_status_t can_access_pt(gen_pt_t * pt, pf_error_t access_type) {
523 if (pt->present == 0) {
524 return PT_ACCESS_NOT_PRESENT;
525 } else if ((pt->writable == 0) && (access_type.write == 1)) {
526 return PT_ACCESS_WRITE_ERROR;
527 } else if ((pt->user_page == 0) && (access_type.user == 1)) {
529 return PT_ACCESS_USER_ERROR;
538 * 32 bit access checks
540 pt_access_status_t inline v3_can_access_pde32(pde32_t * pde, addr_t addr, pf_error_t access_type) {
541 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32_INDEX(addr)];
542 return can_access_pt(entry, access_type);
545 pt_access_status_t inline v3_can_access_pte32(pte32_t * pte, addr_t addr, pf_error_t access_type) {
546 gen_pt_t * entry = (gen_pt_t *)&pte[PTE32_INDEX(addr)];
547 return can_access_pt(entry, access_type);
552 * 32 bit PAE access checks
554 pt_access_status_t inline v3_can_access_pdpe32pae(pdpe32pae_t * pdpe, addr_t addr, pf_error_t access_type) {
555 gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE32PAE_INDEX(addr)];
556 return can_access_pt(entry, access_type);
559 pt_access_status_t inline v3_can_access_pde32pae(pde32pae_t * pde, addr_t addr, pf_error_t access_type) {
560 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32PAE_INDEX(addr)];
561 return can_access_pt(entry, access_type);
564 pt_access_status_t inline v3_can_access_pte32pae(pte32pae_t * pte, addr_t addr, pf_error_t access_type) {
565 gen_pt_t * entry = (gen_pt_t *)&pte[PTE32PAE_INDEX(addr)];
566 return can_access_pt(entry, access_type);
570 * 64 Bit access checks
572 pt_access_status_t inline v3_can_access_pml4e64(pml4e64_t * pmle, addr_t addr, pf_error_t access_type) {
573 gen_pt_t * entry = (gen_pt_t *)&pmle[PML4E64_INDEX(addr)];
574 return can_access_pt(entry, access_type);
577 pt_access_status_t inline v3_can_access_pdpe64(pdpe64_t * pdpe, addr_t addr, pf_error_t access_type) {
578 gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE64_INDEX(addr)];
579 return can_access_pt(entry, access_type);
582 pt_access_status_t inline v3_can_access_pde64(pde64_t * pde, addr_t addr, pf_error_t access_type) {
583 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32_INDEX(addr)];
584 return can_access_pt(entry, access_type);
587 pt_access_status_t inline v3_can_access_pte64(pte64_t * pte, addr_t addr, pf_error_t access_type) {
588 gen_pt_t * entry = (gen_pt_t *)&pte[PTE64_INDEX(addr)];
589 return can_access_pt(entry, access_type);
597 /* We generate a page table to correspond to a given memory layout
598 * pulling pages from the mem_list when necessary
599 * If there are any gaps in the layout, we add them as unmapped pages
601 pde32_t * create_passthrough_pts_32(struct guest_info * guest_info) {
602 addr_t current_page_addr = 0;
604 struct shadow_map * map = &(guest_info->mem_map);
606 pde32_t * pde = V3_VAddr(V3_AllocPages(1));
608 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
610 pte32_t * pte = V3_VAddr(V3_AllocPages(1));
613 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
614 struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
617 (region->host_type == HOST_REGION_HOOK) ||
618 (region->host_type == HOST_REGION_UNALLOCATED) ||
619 (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
620 (region->host_type == HOST_REGION_REMOTE) ||
621 (region->host_type == HOST_REGION_SWAPPED)) {
624 pte[j].user_page = 0;
625 pte[j].write_through = 0;
626 pte[j].cache_disable = 0;
630 pte[j].global_page = 0;
632 pte[j].page_base_addr = 0;
637 pte[j].user_page = 1;
638 pte[j].write_through = 0;
639 pte[j].cache_disable = 0;
643 pte[j].global_page = 0;
646 if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
652 pte[j].page_base_addr = host_addr >> 12;
657 current_page_addr += PAGE_SIZE;
660 if (pte_present == 0) {
661 V3_FreePage(V3_PAddr(pte));
665 pde[i].user_page = 0;
666 pde[i].write_through = 0;
667 pde[i].cache_disable = 0;
670 pde[i].large_page = 0;
671 pde[i].global_page = 0;
673 pde[i].pt_base_addr = 0;
677 pde[i].user_page = 1;
678 pde[i].write_through = 0;
679 pde[i].cache_disable = 0;
682 pde[i].large_page = 0;
683 pde[i].global_page = 0;
685 pde[i].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
694 /* We generate a page table to correspond to a given memory layout
695 * pulling pages from the mem_list when necessary
696 * If there are any gaps in the layout, we add them as unmapped pages
698 pdpe32pae_t * create_passthrough_pts_32PAE(struct guest_info * guest_info) {
699 addr_t current_page_addr = 0;
701 struct shadow_map * map = &(guest_info->mem_map);
703 pdpe32pae_t * pdpe = V3_VAddr(V3_AllocPages(1));
704 memset(pdpe, 0, PAGE_SIZE);
706 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
708 pde32pae_t * pde = V3_VAddr(V3_AllocPages(1));
710 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
714 pte32pae_t * pte = V3_VAddr(V3_AllocPages(1));
717 for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
718 struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
721 (region->host_type == HOST_REGION_HOOK) ||
722 (region->host_type == HOST_REGION_UNALLOCATED) ||
723 (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
724 (region->host_type == HOST_REGION_REMOTE) ||
725 (region->host_type == HOST_REGION_SWAPPED)) {
728 pte[k].user_page = 0;
729 pte[k].write_through = 0;
730 pte[k].cache_disable = 0;
734 pte[k].global_page = 0;
736 pte[k].page_base_addr = 0;
742 pte[k].user_page = 1;
743 pte[k].write_through = 0;
744 pte[k].cache_disable = 0;
748 pte[k].global_page = 0;
751 if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
757 pte[k].page_base_addr = host_addr >> 12;
763 current_page_addr += PAGE_SIZE;
766 if (pte_present == 0) {
767 V3_FreePage(V3_PAddr(pte));
771 pde[j].user_page = 0;
772 pde[j].write_through = 0;
773 pde[j].cache_disable = 0;
776 pde[j].large_page = 0;
777 pde[j].global_page = 0;
779 pde[j].pt_base_addr = 0;
784 pde[j].user_page = 1;
785 pde[j].write_through = 0;
786 pde[j].cache_disable = 0;
789 pde[j].large_page = 0;
790 pde[j].global_page = 0;
792 pde[j].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
800 if (pde_present == 0) {
801 V3_FreePage(V3_PAddr(pde));
805 pdpe[i].write_through = 0;
806 pdpe[i].cache_disable = 0;
807 pdpe[i].accessed = 0;
810 pdpe[i].vmm_info = 0;
811 pdpe[i].pd_base_addr = 0;
816 pdpe[i].write_through = 0;
817 pdpe[i].cache_disable = 0;
818 pdpe[i].accessed = 0;
821 pdpe[i].vmm_info = 0;
822 pdpe[i].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
837 pml4e64_t * create_passthrough_pts_64(struct guest_info * info) {
838 addr_t current_page_addr = 0;
840 struct shadow_map * map = &(info->mem_map);
842 pml4e64_t * pml = V3_VAddr(V3_AllocPages(1));
844 for (i = 0; i < 1; i++) {
845 int pdpe_present = 0;
846 pdpe64_t * pdpe = V3_VAddr(V3_AllocPages(1));
848 for (j = 0; j < 20; j++) {
850 pde64_t * pde = V3_VAddr(V3_AllocPages(1));
852 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
854 pte64_t * pte = V3_VAddr(V3_AllocPages(1));
857 for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
858 struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
863 (region->host_type == HOST_REGION_HOOK) ||
864 (region->host_type == HOST_REGION_UNALLOCATED) ||
865 (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
866 (region->host_type == HOST_REGION_REMOTE) ||
867 (region->host_type == HOST_REGION_SWAPPED)) {
870 pte[m].user_page = 0;
871 pte[m].write_through = 0;
872 pte[m].cache_disable = 0;
876 pte[m].global_page = 0;
878 pte[m].page_base_addr = 0;
883 pte[m].user_page = 1;
884 pte[m].write_through = 0;
885 pte[m].cache_disable = 0;
889 pte[m].global_page = 0;
892 if (guest_pa_to_host_pa(info, current_page_addr, &host_addr) == -1) {
898 pte[m].page_base_addr = PAGE_BASE_ADDR(host_addr);
900 //PrintPTE64(current_page_addr, &(pte[m]));
908 current_page_addr += PAGE_SIZE;
911 if (pte_present == 0) {
912 V3_FreePage(V3_PAddr(pte));
916 pde[k].user_page = 0;
917 pde[k].write_through = 0;
918 pde[k].cache_disable = 0;
921 pde[k].large_page = 0;
922 //pde[k].global_page = 0;
924 pde[k].pt_base_addr = 0;
928 pde[k].user_page = 1;
929 pde[k].write_through = 0;
930 pde[k].cache_disable = 0;
933 pde[k].large_page = 0;
934 //pde[k].global_page = 0;
936 pde[k].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
942 if (pde_present == 0) {
943 V3_FreePage(V3_PAddr(pde));
946 pdpe[j].writable = 0;
947 pdpe[j].user_page = 0;
948 pdpe[j].write_through = 0;
949 pdpe[j].cache_disable = 0;
950 pdpe[j].accessed = 0;
952 pdpe[j].large_page = 0;
953 //pdpe[j].global_page = 0;
954 pdpe[j].vmm_info = 0;
955 pdpe[j].pd_base_addr = 0;
958 pdpe[j].writable = 1;
959 pdpe[j].user_page = 1;
960 pdpe[j].write_through = 0;
961 pdpe[j].cache_disable = 0;
962 pdpe[j].accessed = 0;
964 pdpe[j].large_page = 0;
965 //pdpe[j].global_page = 0;
966 pdpe[j].vmm_info = 0;
967 pdpe[j].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
975 PrintDebug("PML index=%d\n", i);
977 if (pdpe_present == 0) {
978 V3_FreePage(V3_PAddr(pdpe));
982 pml[i].user_page = 0;
983 pml[i].write_through = 0;
984 pml[i].cache_disable = 0;
987 //pml[i].large_page = 0;
988 //pml[i].global_page = 0;
990 pml[i].pdp_base_addr = 0;
994 pml[i].user_page = 1;
995 pml[i].write_through = 0;
996 pml[i].cache_disable = 0;
999 //pml[i].large_page = 0;
1000 //pml[i].global_page = 0;
1001 pml[i].vmm_info = 0;
1002 pml[i].pdp_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pdpe));