/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
20 #include <palacios/vmm_paging.h>
22 #include <palacios/vmm.h>
24 #include <palacios/vm_guest_mem.h>
28 #define USE_VMM_PAGING_DEBUG
29 // All of the debug functions defined in vmm_paging.h are implemented in this file
30 #include "vmm_paging_debug.h"
31 #undef USE_VMM_PAGING_DEBUG
34 void delete_page_tables_32(pde32_t * pde) {
41 for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
43 // We double cast, first to an addr_t to handle 64 bit issues, then to the pointer
44 PrintDebug("PTE base addr %x \n", pde[i].pt_base_addr);
45 pte32_t * pte = (pte32_t *)((addr_t)(uint_t)(pde[i].pt_base_addr << PAGE_POWER));
47 PrintDebug("Deleting PTE %d (%p)\n", i, pte);
52 PrintDebug("Deleting PDE (%p)\n", pde);
53 V3_FreePage(V3_PAddr(pde));
56 void delete_page_tables_32PAE(pdpe32pae_t * pdpe) {
57 PrintError("Unimplemented function\n");
60 void delete_page_tables_64(pml4e64_t * pml4) {
61 PrintError("Unimplemented function\n");
65 int v3_translate_guest_pt_32(struct guest_info * info, addr_t guest_cr3, addr_t vaddr, addr_t * paddr) {
66 addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
67 pde32_t * guest_pde = 0;
68 addr_t guest_pte_pa = 0;
70 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t*)&guest_pde) == -1) {
71 PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
72 (void *)guest_pde_pa);
76 switch (pde32_lookup(guest_pde, vaddr, &guest_pte_pa)) {
77 case PT_ENTRY_NOT_PRESENT:
80 case PT_ENTRY_LARGE_PAGE:
81 *paddr = guest_pte_pa;
85 pte32_t * guest_pte = NULL;
87 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t*)&guest_pte) == -1) {
88 PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
89 (void *)guest_pte_pa);
93 if (pte32_lookup(guest_pte, vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
105 int v3_translate_guest_pt_32pae(struct guest_info * info, addr_t guest_cr3, addr_t vaddr, addr_t * paddr) {
106 addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
107 pdpe32pae_t * guest_pdpe = 0;
108 addr_t guest_pde_pa = 0;
110 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t*)&guest_pdpe) == -1) {
111 PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
112 (void *)guest_pdpe_pa);
116 switch (pdpe32pae_lookup(guest_pdpe, vaddr, &guest_pde_pa))
118 case PT_ENTRY_NOT_PRESENT:
123 pde32pae_t * guest_pde = NULL;
124 addr_t guest_pte_pa = 0;
126 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
127 PrintError("Could not get virtual Address of Guest PDE32PAE (PA=%p)\n",
128 (void *)guest_pde_pa);
132 switch (pde32pae_lookup(guest_pde, vaddr, &guest_pte_pa))
134 case PT_ENTRY_NOT_PRESENT:
137 case PT_ENTRY_LARGE_PAGE:
138 *paddr = guest_pte_pa;
142 pte32pae_t * guest_pte = NULL;
144 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
145 PrintError("Could not get virtual Address of Guest PTE32PAE (PA=%p)\n",
146 (void *)guest_pte_pa);
150 if (pte32pae_lookup(guest_pte, vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
165 int v3_translate_guest_pt_64(struct guest_info * info, addr_t guest_cr3, addr_t vaddr, addr_t * paddr) {
166 addr_t guest_pml4_pa = CR3_TO_PML4E64_PA(guest_cr3);
167 pml4e64_t * guest_pmle = 0;
168 addr_t guest_pdpe_pa = 0;
170 if (guest_pa_to_host_va(info, guest_pml4_pa, (addr_t*)&guest_pmle) == -1) {
171 PrintError("Could not get virtual address of Guest PML4E64 (PA=%p)\n",
172 (void *)guest_pml4_pa);
176 switch (pml4e64_lookup(guest_pmle, vaddr, &guest_pdpe_pa)) {
177 case PT_ENTRY_NOT_PRESENT:
182 pdpe64_t * guest_pdp = NULL;
183 addr_t guest_pde_pa = 0;
185 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t *)&guest_pdp) == -1) {
186 PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
187 (void *)guest_pdpe_pa);
191 switch (pdpe64_lookup(guest_pdp, vaddr, &guest_pde_pa)) {
192 case PT_ENTRY_NOT_PRESENT:
195 case PT_ENTRY_LARGE_PAGE:
197 PrintError("1 Gigabyte Pages not supported\n");
201 pde64_t * guest_pde = NULL;
202 addr_t guest_pte_pa = 0;
204 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
205 PrintError("Could not get virtual address of guest PDE64 (PA=%p)\n",
206 (void *)guest_pde_pa);
210 switch (pde64_lookup(guest_pde, vaddr, &guest_pte_pa)) {
211 case PT_ENTRY_NOT_PRESENT:
214 case PT_ENTRY_LARGE_PAGE:
215 *paddr = guest_pte_pa;
219 pte64_t * guest_pte = NULL;
221 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
222 PrintError("Could not get virtual address of guest PTE64 (PA=%p)\n",
223 (void *)guest_pte_pa);
227 if (pte64_lookup(guest_pte, vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
245 int v3_translate_host_pt_32(addr_t host_cr3, addr_t vaddr, addr_t * paddr) {
246 pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
247 pte32_t * host_pte = 0;
249 switch (pde32_lookup(host_pde, vaddr, (addr_t *)&host_pte)) {
250 case PT_ENTRY_NOT_PRESENT:
253 case PT_ENTRY_LARGE_PAGE:
254 *paddr = (addr_t)host_pte;
257 if (pte32_lookup(host_pte, vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
263 // should never get here
268 int v3_translate_host_pt_32pae(addr_t host_cr3, addr_t vaddr, addr_t * paddr) {
269 pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
270 pde32pae_t * host_pde = NULL;
271 pte32pae_t * host_pte = NULL;
273 switch (pdpe32pae_lookup(host_pdpe, vaddr, (addr_t *)&host_pde)) {
274 case PT_ENTRY_NOT_PRESENT:
278 switch (pde32pae_lookup(host_pde, vaddr, (addr_t *)&host_pte)) {
279 case PT_ENTRY_NOT_PRESENT:
282 case PT_ENTRY_LARGE_PAGE:
283 *paddr = (addr_t)host_pte;
286 if (pte32pae_lookup(host_pte, vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
295 // should never get here
300 int v3_translate_host_pt_64(addr_t host_cr3, addr_t vaddr, addr_t * paddr) {
301 pml4e64_t * host_pmle = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
302 pdpe64_t * host_pdpe = NULL;
303 pde64_t * host_pde = NULL;
304 pte64_t * host_pte = NULL;
306 switch(pml4e64_lookup(host_pmle, vaddr, (addr_t *)&host_pdpe)) {
307 case PT_ENTRY_NOT_PRESENT:
311 switch(pdpe64_lookup(host_pdpe, vaddr, (addr_t *)&host_pde)) {
312 case PT_ENTRY_NOT_PRESENT:
315 case PT_ENTRY_LARGE_PAGE:
317 PrintError("1 Gigabyte Pages not supported\n");
320 switch (pde64_lookup(host_pde, vaddr, (addr_t *)&host_pte)) {
321 case PT_ENTRY_NOT_PRESENT:
324 case PT_ENTRY_LARGE_PAGE:
326 if (pte64_lookup(host_pte, vaddr, paddr) == PT_ENTRY_NOT_PRESENT) {
336 // should never get here
/*
 * PAGE TABLE LOOKUP FUNCTIONS
 *
 * The value of entry is a return type:
 *   Page not present: *entry = 0
 *   Large Page:       *entry = translated physical address (byte granularity)
 *   PTE entry:        *entry is the address of the PTE Page
 */
/* 32 bit Page Table lookup functions */
360 pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry) {
361 pde32_t * pde_entry = &(pd[PDE32_INDEX(addr)]);
363 if (!pde_entry->present) {
365 return PT_ENTRY_NOT_PRESENT;
366 } else if (pde_entry->large_page) {
367 pde32_4MB_t * large_pde = (pde32_4MB_t *)pde_entry;
369 *entry = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
370 *entry += PAGE_OFFSET_4MB(addr);
372 return PT_ENTRY_LARGE_PAGE;
374 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
375 return PT_ENTRY_PAGE;
381 /* Takes a virtual addr (addr) and returns the physical addr (entry) as defined in the page table
383 pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry) {
384 pte32_t * pte_entry = &(pt[PTE32_INDEX(addr)]);
386 if (!pte_entry->present) {
388 // PrintDebug("Lookup at non present page (index=%d)\n", PTE32_INDEX(addr));
389 return PT_ENTRY_NOT_PRESENT;
391 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr) + PAGE_OFFSET(addr);
392 return PT_ENTRY_PAGE;
/* 32 bit PAE Page Table lookup functions */
404 pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry) {
405 pdpe32pae_t * pdpe_entry = &(pdp[PDPE32PAE_INDEX(addr)]);
407 if (!pdpe_entry->present) {
409 return PT_ENTRY_NOT_PRESENT;
411 *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
412 return PT_ENTRY_PAGE;
416 pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry) {
417 pde32pae_t * pde_entry = &(pd[PDE32PAE_INDEX(addr)]);
419 if (!pde_entry->present) {
421 return PT_ENTRY_NOT_PRESENT;
422 } else if (pde_entry->large_page) {
423 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)pde_entry;
425 *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
426 *entry += PAGE_OFFSET_2MB(addr);
428 return PT_ENTRY_LARGE_PAGE;
430 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
431 return PT_ENTRY_PAGE;
435 pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry) {
436 pte32pae_t * pte_entry = &(pt[PTE32PAE_INDEX(addr)]);
438 if (!pte_entry->present) {
440 return PT_ENTRY_NOT_PRESENT;
442 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr) + PAGE_OFFSET(addr);
443 return PT_ENTRY_PAGE;
/* 64 bit Page Table lookup functions */
454 pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry) {
455 pml4e64_t * pml_entry = &(pml[PML4E64_INDEX(addr)]);
457 if (!pml_entry->present) {
459 return PT_ENTRY_NOT_PRESENT;
461 *entry = BASE_TO_PAGE_ADDR(pml_entry->pdp_base_addr);
462 return PT_ENTRY_PAGE;
466 pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry) {
467 pdpe64_t * pdpe_entry = &(pdp[PDPE64_INDEX(addr)]);
469 if (!pdpe_entry->present) {
471 return PT_ENTRY_NOT_PRESENT;
472 } else if (pdpe_entry->large_page) {
473 PrintError("1 Gigabyte pages not supported\n");
477 *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
478 return PT_ENTRY_PAGE;
482 pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry) {
483 pde64_t * pde_entry = &(pd[PDE64_INDEX(addr)]);
485 if (!pde_entry->present) {
487 return PT_ENTRY_NOT_PRESENT;
488 } else if (pde_entry->large_page) {
489 pde64_2MB_t * large_pde = (pde64_2MB_t *)pde_entry;
491 *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
492 *entry += PAGE_OFFSET_2MB(addr);
494 return PT_ENTRY_LARGE_PAGE;
496 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
497 return PT_ENTRY_PAGE;
501 pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry) {
502 pte64_t * pte_entry = &(pt[PTE64_INDEX(addr)]);
504 if (!pte_entry->present) {
506 return PT_ENTRY_NOT_PRESENT;
508 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr) + PAGE_OFFSET(addr);
509 return PT_ENTRY_PAGE;
529 pt_access_status_t can_access_pde32(pde32_t * pde, addr_t addr, pf_error_t access_type) {
530 pde32_t * entry = &pde[PDE32_INDEX(addr)];
532 if (entry->present == 0) {
533 return PT_ACCESS_NOT_PRESENT;
534 } else if ((entry->writable == 0) && (access_type.write == 1)) {
535 return PT_ACCESS_WRITE_ERROR;
536 } else if ((entry->user_page == 0) && (access_type.user == 1)) {
538 return PT_ACCESS_USER_ERROR;
545 pt_access_status_t can_access_pte32(pte32_t * pte, addr_t addr, pf_error_t access_type) {
546 pte32_t * entry = &pte[PTE32_INDEX(addr)];
548 if (entry->present == 0) {
549 return PT_ACCESS_NOT_PRESENT;
550 } else if ((entry->writable == 0) && (access_type.write == 1)) {
551 return PT_ACCESS_WRITE_ERROR;
552 } else if ((entry->user_page == 0) && (access_type.user == 1)) {
554 return PT_ACCESS_USER_ERROR;
563 /* We generate a page table to correspond to a given memory layout
564 * pulling pages from the mem_list when necessary
565 * If there are any gaps in the layout, we add them as unmapped pages
567 pde32_t * create_passthrough_pts_32(struct guest_info * guest_info) {
568 addr_t current_page_addr = 0;
570 struct shadow_map * map = &(guest_info->mem_map);
572 pde32_t * pde = V3_VAddr(V3_AllocPages(1));
574 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
576 pte32_t * pte = V3_VAddr(V3_AllocPages(1));
579 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
580 struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
583 (region->host_type == HOST_REGION_HOOK) ||
584 (region->host_type == HOST_REGION_UNALLOCATED) ||
585 (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
586 (region->host_type == HOST_REGION_REMOTE) ||
587 (region->host_type == HOST_REGION_SWAPPED)) {
590 pte[j].user_page = 0;
591 pte[j].write_through = 0;
592 pte[j].cache_disable = 0;
596 pte[j].global_page = 0;
598 pte[j].page_base_addr = 0;
603 pte[j].user_page = 1;
604 pte[j].write_through = 0;
605 pte[j].cache_disable = 0;
609 pte[j].global_page = 0;
612 if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
618 pte[j].page_base_addr = host_addr >> 12;
623 current_page_addr += PAGE_SIZE;
626 if (pte_present == 0) {
627 V3_FreePage(V3_PAddr(pte));
631 pde[i].user_page = 0;
632 pde[i].write_through = 0;
633 pde[i].cache_disable = 0;
636 pde[i].large_page = 0;
637 pde[i].global_page = 0;
639 pde[i].pt_base_addr = 0;
643 pde[i].user_page = 1;
644 pde[i].write_through = 0;
645 pde[i].cache_disable = 0;
648 pde[i].large_page = 0;
649 pde[i].global_page = 0;
651 pde[i].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
660 /* We generate a page table to correspond to a given memory layout
661 * pulling pages from the mem_list when necessary
662 * If there are any gaps in the layout, we add them as unmapped pages
664 pdpe32pae_t * create_passthrough_pts_32PAE(struct guest_info * guest_info) {
665 addr_t current_page_addr = 0;
667 struct shadow_map * map = &(guest_info->mem_map);
669 pdpe32pae_t * pdpe = V3_VAddr(V3_AllocPages(1));
670 memset(pdpe, 0, PAGE_SIZE);
672 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
674 pde32pae_t * pde = V3_VAddr(V3_AllocPages(1));
676 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
680 pte32pae_t * pte = V3_VAddr(V3_AllocPages(1));
683 for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
684 struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
687 (region->host_type == HOST_REGION_HOOK) ||
688 (region->host_type == HOST_REGION_UNALLOCATED) ||
689 (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
690 (region->host_type == HOST_REGION_REMOTE) ||
691 (region->host_type == HOST_REGION_SWAPPED)) {
694 pte[k].user_page = 0;
695 pte[k].write_through = 0;
696 pte[k].cache_disable = 0;
700 pte[k].global_page = 0;
702 pte[k].page_base_addr = 0;
708 pte[k].user_page = 1;
709 pte[k].write_through = 0;
710 pte[k].cache_disable = 0;
714 pte[k].global_page = 0;
717 if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
723 pte[k].page_base_addr = host_addr >> 12;
729 current_page_addr += PAGE_SIZE;
732 if (pte_present == 0) {
733 V3_FreePage(V3_PAddr(pte));
737 pde[j].user_page = 0;
738 pde[j].write_through = 0;
739 pde[j].cache_disable = 0;
742 pde[j].large_page = 0;
743 pde[j].global_page = 0;
745 pde[j].pt_base_addr = 0;
750 pde[j].user_page = 1;
751 pde[j].write_through = 0;
752 pde[j].cache_disable = 0;
755 pde[j].large_page = 0;
756 pde[j].global_page = 0;
758 pde[j].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
766 if (pde_present == 0) {
767 V3_FreePage(V3_PAddr(pde));
771 pdpe[i].write_through = 0;
772 pdpe[i].cache_disable = 0;
773 pdpe[i].accessed = 0;
776 pdpe[i].vmm_info = 0;
777 pdpe[i].pd_base_addr = 0;
782 pdpe[i].write_through = 0;
783 pdpe[i].cache_disable = 0;
784 pdpe[i].accessed = 0;
787 pdpe[i].vmm_info = 0;
788 pdpe[i].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
803 pml4e64_t * create_passthrough_pts_64(struct guest_info * info) {
804 addr_t current_page_addr = 0;
806 struct shadow_map * map = &(info->mem_map);
808 pml4e64_t * pml = V3_VAddr(V3_AllocPages(1));
810 for (i = 0; i < 1; i++) {
811 int pdpe_present = 0;
812 pdpe64_t * pdpe = V3_VAddr(V3_AllocPages(1));
814 for (j = 0; j < 20; j++) {
816 pde64_t * pde = V3_VAddr(V3_AllocPages(1));
818 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
820 pte64_t * pte = V3_VAddr(V3_AllocPages(1));
823 for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
824 struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
829 (region->host_type == HOST_REGION_HOOK) ||
830 (region->host_type == HOST_REGION_UNALLOCATED) ||
831 (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
832 (region->host_type == HOST_REGION_REMOTE) ||
833 (region->host_type == HOST_REGION_SWAPPED)) {
836 pte[m].user_page = 0;
837 pte[m].write_through = 0;
838 pte[m].cache_disable = 0;
842 pte[m].global_page = 0;
844 pte[m].page_base_addr = 0;
849 pte[m].user_page = 1;
850 pte[m].write_through = 0;
851 pte[m].cache_disable = 0;
855 pte[m].global_page = 0;
858 if (guest_pa_to_host_pa(info, current_page_addr, &host_addr) == -1) {
864 pte[m].page_base_addr = PAGE_BASE_ADDR(host_addr);
866 //PrintPTE64(current_page_addr, &(pte[m]));
874 current_page_addr += PAGE_SIZE;
877 if (pte_present == 0) {
878 V3_FreePage(V3_PAddr(pte));
882 pde[k].user_page = 0;
883 pde[k].write_through = 0;
884 pde[k].cache_disable = 0;
887 pde[k].large_page = 0;
888 //pde[k].global_page = 0;
890 pde[k].pt_base_addr = 0;
894 pde[k].user_page = 1;
895 pde[k].write_through = 0;
896 pde[k].cache_disable = 0;
899 pde[k].large_page = 0;
900 //pde[k].global_page = 0;
902 pde[k].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
908 if (pde_present == 0) {
909 V3_FreePage(V3_PAddr(pde));
912 pdpe[j].writable = 0;
913 pdpe[j].user_page = 0;
914 pdpe[j].write_through = 0;
915 pdpe[j].cache_disable = 0;
916 pdpe[j].accessed = 0;
918 pdpe[j].large_page = 0;
919 //pdpe[j].global_page = 0;
920 pdpe[j].vmm_info = 0;
921 pdpe[j].pd_base_addr = 0;
924 pdpe[j].writable = 1;
925 pdpe[j].user_page = 1;
926 pdpe[j].write_through = 0;
927 pdpe[j].cache_disable = 0;
928 pdpe[j].accessed = 0;
930 pdpe[j].large_page = 0;
931 //pdpe[j].global_page = 0;
932 pdpe[j].vmm_info = 0;
933 pdpe[j].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
941 PrintDebug("PML index=%d\n", i);
943 if (pdpe_present == 0) {
944 V3_FreePage(V3_PAddr(pdpe));
948 pml[i].user_page = 0;
949 pml[i].write_through = 0;
950 pml[i].cache_disable = 0;
953 //pml[i].large_page = 0;
954 //pml[i].global_page = 0;
956 pml[i].pdp_base_addr = 0;
960 pml[i].user_page = 1;
961 pml[i].write_through = 0;
962 pml[i].cache_disable = 0;
965 //pml[i].large_page = 0;
966 //pml[i].global_page = 0;
968 pml[i].pdp_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pdpe));