-/* Northwestern University */
-/* (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> */
+/*
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National
+ * Science Foundation and the Department of Energy.
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico. You can find out more at
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
+ * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
+ * All rights reserved.
+ *
+ * Author: Jack Lange <jarusl@cs.northwestern.edu>
+ *
+ * This is free software. You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
#include <palacios/vmm_paging.h>
-void delete_page_tables_pde32(pde32_t * pde) {
- int i;//, j;
+
+
+
+/* Free a 32-bit (non-PAE) page table hierarchy: every present page-table
+ * page referenced from the PDE, then the PDE page itself.
+ * Safe to call with pde == NULL. */
+void delete_page_tables_32(pde32_t * pde) {
+ int i;
 if (pde == NULL) {
 return;
 for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
 if (pde[i].present) {
-      pte32_t * pte = (pte32_t *)(pde[i].pt_base_addr << PAGE_POWER);
+      // We double cast, first to an addr_t to handle 64 bit issues, then to the pointer
+      PrintDebug("PTE base addr %x \n", pde[i].pt_base_addr);
+      pte32_t * pte = (pte32_t *)((addr_t)(uint_t)(pde[i].pt_base_addr << PAGE_POWER));
-      /*
-      for (j = 0; (j < MAX_PTE32_ENTRIES); j++) {
-	if ((pte[j].present)) {
-	  os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
-	}
-      }
-      */
-      //PrintDebug("Deleting PTE %d (%x)\n", i, pte);
+      PrintDebug("Deleting PTE %d (%p)\n", i, pte);
+      // NOTE(review): pte is freed without a V3_PAddr() conversion while the
+      // pde free below uses one -- confirm which address space V3_FreePage expects.
       V3_FreePage(pte);
     }
   }
-  // PrintDebug("Deleting PDE (%x)\n", pde);
-  V3_FreePage(pde);
+  PrintDebug("Deleting PDE (%p)\n", pde);
+  V3_FreePage(V3_PAddr(pde));
+}
+
+// Teardown for 32-bit PAE tables is not implemented yet; logs an error only.
+void delete_page_tables_32PAE(pdpe32pae_t * pdpe) {
+  PrintError("Unimplemented function\n");
+}
+
+// Teardown for 64-bit (long mode) tables is not implemented yet; logs an error only.
+void delete_page_tables_64(pml4e64_t * pml4) {
+  PrintError("Unimplemented function\n");
+}
* pulling pages from the mem_list when necessary
* If there are any gaps in the layout, we add them as unmapped pages
*/
-pde32_t * create_passthrough_pde32_pts(struct guest_info * guest_info) {
-  ullong_t current_page_addr = 0;
+// Build a 32-bit (non-PAE) passthrough page table from the guest's shadow
+// memory map; page tables with no mapped entries are freed and their PDE
+// left non-present.
+pde32_t * create_passthrough_pts_32(struct guest_info * guest_info) {
+  addr_t current_page_addr = 0;
   int i, j;
   struct shadow_map * map = &(guest_info->mem_map);
-  pde32_t * pde = V3_AllocPages(1);
+  // V3_AllocPages returns a physical page; convert to a host-virtual pointer before use
+  pde32_t * pde = V3_VAddr(V3_AllocPages(1));
   for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
     int pte_present = 0;
-    pte32_t * pte = V3_AllocPages(1);
+    pte32_t * pte = V3_VAddr(V3_AllocPages(1));
     for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
     }
     if (pte_present == 0) {
-      V3_FreePage(pte);
+      V3_FreePage(V3_PAddr(pte));
       pde[i].present = 0;
       pde[i].writable = 0;
       pde[i].large_page = 0;
       pde[i].global_page = 0;
       pde[i].vmm_info = 0;
-      pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(pte);
+      // page tables must be referenced by host *physical* frame number
+      pde[i].pt_base_addr = PAGE_ALIGNED_ADDR((addr_t)V3_PAddr(pte));
     }
   }
 }
+/* We generate a page table to correspond to a given memory layout
+ * pulling pages from the mem_list when necessary
+ * If there are any gaps in the layout, we add them as unmapped pages
+ */
+// Build a 32-bit PAE passthrough hierarchy (PDPE -> PDE -> PTE) that
+// identity-maps the guest's shadow memory map. Regions that are hooked,
+// unallocated, device-mapped, remote, or swapped stay non-present.
+// Returns NULL if a guest physical address cannot be translated.
+pdpe32pae_t * create_passthrough_pts_32PAE(struct guest_info * guest_info) {
+  addr_t current_page_addr = 0;
+  int i, j, k;
+  struct shadow_map * map = &(guest_info->mem_map);
+
+  pdpe32pae_t * pdpe = V3_VAddr(V3_AllocPages(1));
+  memset(pdpe, 0, PAGE_SIZE);
+
+  for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
+    int pde_present = 0;
+    pde32pae_t * pde = V3_VAddr(V3_AllocPages(1));
+
+    for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
+
+
+      int pte_present = 0;
+      pte32pae_t * pte = V3_VAddr(V3_AllocPages(1));
+
+
+      for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
+	struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
+
+	// Only directly-backed host memory gets a present mapping
+	if (!region ||
+	    (region->host_type == HOST_REGION_HOOK) ||
+	    (region->host_type == HOST_REGION_UNALLOCATED) ||
+	    (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
+	    (region->host_type == HOST_REGION_REMOTE) ||
+	    (region->host_type == HOST_REGION_SWAPPED)) {
+	  pte[k].present = 0;
+	  pte[k].writable = 0;
+	  pte[k].user_page = 0;
+	  pte[k].write_through = 0;
+	  pte[k].cache_disable = 0;
+	  pte[k].accessed = 0;
+	  pte[k].dirty = 0;
+	  pte[k].pte_attr = 0;
+	  pte[k].global_page = 0;
+	  pte[k].vmm_info = 0;
+	  pte[k].page_base_addr = 0;
+	  pte[k].rsvd = 0;
+	} else {
+	  addr_t host_addr;
+	  pte[k].present = 1;
+	  pte[k].writable = 1;
+	  pte[k].user_page = 1;
+	  pte[k].write_through = 0;
+	  pte[k].cache_disable = 0;
+	  pte[k].accessed = 0;
+	  pte[k].dirty = 0;
+	  pte[k].pte_attr = 0;
+	  pte[k].global_page = 0;
+	  pte[k].vmm_info = 0;
+
+	  if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
+	    // BIG ERROR
+	    // PANIC
+	    // NOTE(review): partially built tables leak here -- no cleanup on failure
+	    return NULL;
+	  }
+
+	  // NOTE(review): hard-coded 12 -- presumably PAGE_POWER; confirm and use the named constant
+	  pte[k].page_base_addr = host_addr >> 12;
+	  pte[k].rsvd = 0;
+
+	  pte_present = 1;
+	}
+
+	current_page_addr += PAGE_SIZE;
+      }
+
+      if (pte_present == 0) {
+	// Nothing mapped in this page table: free it and leave the PDE absent
+	V3_FreePage(V3_PAddr(pte));
+
+	pde[j].present = 0;
+	pde[j].writable = 0;
+	pde[j].user_page = 0;
+	pde[j].write_through = 0;
+	pde[j].cache_disable = 0;
+	pde[j].accessed = 0;
+	pde[j].avail = 0;
+	pde[j].large_page = 0;
+	pde[j].global_page = 0;
+	pde[j].vmm_info = 0;
+	pde[j].pt_base_addr = 0;
+	pde[j].rsvd = 0;
+      } else {
+	pde[j].present = 1;
+	pde[j].writable = 1;
+	pde[j].user_page = 1;
+	pde[j].write_through = 0;
+	pde[j].cache_disable = 0;
+	pde[j].accessed = 0;
+	pde[j].avail = 0;
+	pde[j].large_page = 0;
+	pde[j].global_page = 0;
+	pde[j].vmm_info = 0;
+	// entries reference the page table by host physical frame number
+	pde[j].pt_base_addr = PAGE_ALIGNED_ADDR((addr_t)V3_PAddr(pte));
+	pde[j].rsvd = 0;
+
+	pde_present = 1;
+      }
+
+    }
+
+    if (pde_present == 0) {
+      // No mapped PDE in this directory: free it and leave the PDPE absent
+      V3_FreePage(V3_PAddr(pde));
+
+      pdpe[i].present = 0;
+      pdpe[i].rsvd = 0;
+      pdpe[i].write_through = 0;
+      pdpe[i].cache_disable = 0;
+      pdpe[i].accessed = 0;
+      pdpe[i].avail = 0;
+      pdpe[i].rsvd2 = 0;
+      pdpe[i].vmm_info = 0;
+      pdpe[i].pd_base_addr = 0;
+      pdpe[i].rsvd3 = 0;
+    } else {
+      pdpe[i].present = 1;
+      pdpe[i].rsvd = 0;
+      pdpe[i].write_through = 0;
+      pdpe[i].cache_disable = 0;
+      pdpe[i].accessed = 0;
+      pdpe[i].avail = 0;
+      pdpe[i].rsvd2 = 0;
+      pdpe[i].vmm_info = 0;
+      pdpe[i].pd_base_addr = PAGE_ALIGNED_ADDR((addr_t)V3_PAddr(pde));
+      pdpe[i].rsvd3 = 0;
+    }
+
+  }
+
+
+  return pdpe;
+}
+
+
+
+
+
+
+// Build a 64-bit (long mode) passthrough hierarchy (PML4 -> PDPE -> PDE -> PTE)
+// identity-mapping the guest's shadow memory map. Returns NULL if a guest
+// physical address cannot be translated.
+// NOTE(review): only 1 PML4 entry and 20 PDPE entries are populated, capping
+// the mapped guest physical space at 20GB -- confirm this limit is intended.
+pml4e64_t * create_passthrough_pts_64(struct guest_info * info) {
+  addr_t current_page_addr = 0;
+  int i, j, k, m;
+  struct shadow_map * map = &(info->mem_map);
+
+  pml4e64_t * pml = V3_VAddr(V3_AllocPages(1));
+
+  for (i = 0; i < 1; i++) {
+    int pdpe_present = 0;
+    pdpe64_t * pdpe = V3_VAddr(V3_AllocPages(1));
+
+    for (j = 0; j < 20; j++) {
+      int pde_present = 0;
+      pde64_t * pde = V3_VAddr(V3_AllocPages(1));
+
+      for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
+	int pte_present = 0;
+	pte64_t * pte = V3_VAddr(V3_AllocPages(1));
+
+
+	for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
+	  struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
+
+
+
+	  // Only directly-backed host memory gets a present mapping
+	  if (!region ||
+	      (region->host_type == HOST_REGION_HOOK) ||
+	      (region->host_type == HOST_REGION_UNALLOCATED) ||
+	      (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
+	      (region->host_type == HOST_REGION_REMOTE) ||
+	      (region->host_type == HOST_REGION_SWAPPED)) {
+	    pte[m].present = 0;
+	    pte[m].writable = 0;
+	    pte[m].user_page = 0;
+	    pte[m].write_through = 0;
+	    pte[m].cache_disable = 0;
+	    pte[m].accessed = 0;
+	    pte[m].dirty = 0;
+	    pte[m].pte_attr = 0;
+	    pte[m].global_page = 0;
+	    pte[m].vmm_info = 0;
+	    pte[m].page_base_addr = 0;
+	  } else {
+	    addr_t host_addr;
+	    pte[m].present = 1;
+	    pte[m].writable = 1;
+	    pte[m].user_page = 1;
+	    pte[m].write_through = 0;
+	    pte[m].cache_disable = 0;
+	    pte[m].accessed = 0;
+	    pte[m].dirty = 0;
+	    pte[m].pte_attr = 0;
+	    pte[m].global_page = 0;
+	    pte[m].vmm_info = 0;
+
+	    if (guest_pa_to_host_pa(info, current_page_addr, &host_addr) == -1) {
+	      // BIG ERROR
+	      // PANIC
+	      // NOTE(review): partially built tables leak here -- no cleanup on failure
+	      return NULL;
+	    }
+
+	    pte[m].page_base_addr = PTE64_BASE_ADDR(host_addr);
+
+	    //PrintPTE64(current_page_addr, &(pte[m]));
+
+	    pte_present = 1;
+	  }
+
+
+
+
+	  current_page_addr += PAGE_SIZE;
+	}
+
+	if (pte_present == 0) {
+	  // Nothing mapped in this page table: free it and leave the PDE absent
+	  V3_FreePage(V3_PAddr(pte));
+
+	  pde[k].present = 0;
+	  pde[k].writable = 0;
+	  pde[k].user_page = 0;
+	  pde[k].write_through = 0;
+	  pde[k].cache_disable = 0;
+	  pde[k].accessed = 0;
+	  pde[k].reserved = 0;
+	  pde[k].large_page = 0;
+	  //pde[k].global_page = 0;
+	  pde[k].vmm_info = 0;
+	  pde[k].pt_base_addr = 0;
+	} else {
+	  pde[k].present = 1;
+	  pde[k].writable = 1;
+	  pde[k].user_page = 1;
+	  pde[k].write_through = 0;
+	  pde[k].cache_disable = 0;
+	  pde[k].accessed = 0;
+	  pde[k].reserved = 0;
+	  pde[k].large_page = 0;
+	  //pde[k].global_page = 0;
+	  pde[k].vmm_info = 0;
+	  // entries reference the page table by host physical frame number
+	  pde[k].pt_base_addr = PAGE_ALIGNED_ADDR((addr_t)V3_PAddr(pte));
+
+	  pde_present = 1;
+	}
+      }
+
+      if (pde_present == 0) {
+	V3_FreePage(V3_PAddr(pde));
+
+	pdpe[j].present = 0;
+	pdpe[j].writable = 0;
+	pdpe[j].user_page = 0;
+	pdpe[j].write_through = 0;
+	pdpe[j].cache_disable = 0;
+	pdpe[j].accessed = 0;
+	pdpe[j].reserved = 0;
+	pdpe[j].large_page = 0;
+	//pdpe[j].global_page = 0;
+	pdpe[j].vmm_info = 0;
+	pdpe[j].pd_base_addr = 0;
+      } else {
+	pdpe[j].present = 1;
+	pdpe[j].writable = 1;
+	pdpe[j].user_page = 1;
+	pdpe[j].write_through = 0;
+	pdpe[j].cache_disable = 0;
+	pdpe[j].accessed = 0;
+	pdpe[j].reserved = 0;
+	pdpe[j].large_page = 0;
+	//pdpe[j].global_page = 0;
+	pdpe[j].vmm_info = 0;
+	pdpe[j].pd_base_addr = PAGE_ALIGNED_ADDR((addr_t)V3_PAddr(pde));
+
+
+	pdpe_present = 1;
+      }
+
+    }
+
+    PrintDebug("PML index=%d\n", i);
+
+    if (pdpe_present == 0) {
+      V3_FreePage(V3_PAddr(pdpe));
+
+      pml[i].present = 0;
+      pml[i].writable = 0;
+      pml[i].user_page = 0;
+      pml[i].write_through = 0;
+      pml[i].cache_disable = 0;
+      pml[i].accessed = 0;
+      pml[i].reserved = 0;
+      //pml[i].large_page = 0;
+      //pml[i].global_page = 0;
+      pml[i].vmm_info = 0;
+      pml[i].pdp_base_addr = 0;
+    } else {
+      pml[i].present = 1;
+      pml[i].writable = 1;
+      pml[i].user_page = 1;
+      pml[i].write_through = 0;
+      pml[i].cache_disable = 0;
+      pml[i].accessed = 0;
+      pml[i].reserved = 0;
+      //pml[i].large_page = 0;
+      //pml[i].global_page = 0;
+      pml[i].vmm_info = 0;
+      pml[i].pdp_base_addr = PAGE_ALIGNED_ADDR((addr_t)V3_PAddr(pdpe));
+    }
+  }
+
+  return pml;
+}
+
+
+
+
+// Dump one 32-bit PDE: the virtual address it covers, the page table it
+// points to, and its flag bits. Casts satisfy the %p format on 64-bit hosts.
void PrintPDE32(addr_t virtual_address, pde32_t * pde)
{
  PrintDebug("PDE %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, reserved=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
-	     virtual_address,
-	     (void *) (pde->pt_base_addr << PAGE_POWER),
+	     (void *)virtual_address,
+	     (void *)(addr_t) (pde->pt_base_addr << PAGE_POWER),
	     pde->present,
	     pde->writable,
	     pde->user_page,
	     pde->global_page,
	     pde->vmm_info);
}
+
void PrintPTE32(addr_t virtual_address, pte32_t * pte)
{
PrintDebug("PTE %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, dirty=%x, pteAttribute=%x, globalPage=%x, vmm_info=%x\n",
- virtual_address,
- (void*)(pte->page_base_addr << PAGE_POWER),
+ (void *)virtual_address,
+ (void*)(addr_t)(pte->page_base_addr << PAGE_POWER),
pte->present,
pte->writable,
pte->user_page,
+
+
+
+
+
+
+
void PrintPD32(pde32_t * pde)
{
int i;
+
+
+// Walk a 32-bit page directory and dump every present PDE plus its page table.
void PrintDebugPageTables(pde32_t * pde)
{
  int i;
  for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
    if (pde[i].present) {
      PrintPDE32((addr_t)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), &(pde[i]));
-      PrintPT32((addr_t)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), (pte32_t *)(pde[i].pt_base_addr << PAGE_POWER));
+      // pt_base_addr is a host physical frame; convert to host-virtual before dereferencing
+      PrintPT32((addr_t)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), (pte32_t *)V3_VAddr((void *)(addr_t)(pde[i].pt_base_addr << PAGE_POWER)));
    }
  }
}
+
+
+
+
+
+
+
+// Dump one 32-bit PAE PDPE: covered virtual address, page directory address,
+// and flag bits.
+void PrintPDPE32PAE(addr_t virtual_address, pdpe32pae_t * pdpe)
+{
+  PrintDebug("PDPE %p -> %p : present=%x, wt=%x, cd=%x, accessed=%x, kernelInfo=%x\n",
+	     (void *)virtual_address,
+	     (void *)(addr_t) (pdpe->pd_base_addr << PAGE_POWER),
+	     pdpe->present,
+	     pdpe->write_through,
+	     pdpe->cache_disable,
+	     pdpe->accessed,
+	     pdpe->vmm_info);
+}
+
+// Dump one 32-bit PAE PDE: covered virtual address, page table address,
+// and flag bits.
+void PrintPDE32PAE(addr_t virtual_address, pde32pae_t * pde)
+{
+  PrintDebug("PDE %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
+	     (void *)virtual_address,
+	     (void *)(addr_t) (pde->pt_base_addr << PAGE_POWER),
+	     pde->present,
+	     pde->writable,
+	     pde->user_page,
+	     pde->write_through,
+	     pde->cache_disable,
+	     pde->accessed,
+	     pde->large_page,
+	     pde->global_page,
+	     pde->vmm_info);
+}
+
+
+// Dump one 32-bit PAE PTE: covered virtual address, mapped page address,
+// and flag bits.
+void PrintPTE32PAE(addr_t virtual_address, pte32pae_t * pte)
+{
+  PrintDebug("PTE %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, dirty=%x, pteAttribute=%x, globalPage=%x, vmm_info=%x\n",
+	     (void *)virtual_address,
+	     (void*)(addr_t)(pte->page_base_addr << PAGE_POWER),
+	     pte->present,
+	     pte->writable,
+	     pte->user_page,
+	     pte->write_through,
+	     pte->cache_disable,
+	     pte->accessed,
+	     pte->dirty,
+	     pte->pte_attr,
+	     pte->global_page,
+	     pte->vmm_info);
+}
+
+
+
+
+
+
+// Walk a full 32-bit PAE hierarchy (PDPE -> PDE -> PTE) and dump every
+// present entry, tracking the guest virtual address each entry covers.
+void PrintDebugPageTables32PAE(pdpe32pae_t * pdpe)
+{
+  int i, j, k;
+  pde32pae_t * pde;
+  pte32pae_t * pte;
+  addr_t virtual_addr = 0;
+
+  PrintDebug("Dumping the pages starting with the pde page at %p\n", pdpe);
+
+  for (i = 0; (i < MAX_PDPE32PAE_ENTRIES); i++) {
+
+    if (pdpe[i].present) {
+      // entries hold physical frame numbers; convert to host-virtual to walk them
+      pde = (pde32pae_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(pdpe[i].pd_base_addr));
+
+      PrintPDPE32PAE(virtual_addr, &(pdpe[i]));
+
+      for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
+
+	if (pde[j].present) {
+	  pte = (pte32pae_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(pde[j].pt_base_addr));
+
+	  PrintPDE32PAE(virtual_addr, &(pde[j]));
+
+	  for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
+	    if (pte[k].present) {
+	      PrintPTE32PAE(virtual_addr, &(pte[k]));
+	    }
+
+	    virtual_addr += PAGE_SIZE;
+	  }
+	} else {
+	  // skip the address range a whole absent page table would have covered
+	  virtual_addr += PAGE_SIZE * MAX_PTE32PAE_ENTRIES;
+	}
+      }
+    } else {
+      virtual_addr += PAGE_SIZE * MAX_PDE32PAE_ENTRIES * MAX_PTE32PAE_ENTRIES;
+    }
+  }
+}
+
+
+
+// Dump one 64-bit PML4 entry: covered virtual address, PDP page address,
+// and flag bits.
+void PrintPML4e64(addr_t virtual_address, pml4e64_t * pml)
+{
+  PrintDebug("PML4e64 %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, reserved=%x, kernelInfo=%x\n",
+	     (void *)virtual_address,
+	     (void *)(addr_t) (BASE_TO_PAGE_ADDR(pml->pdp_base_addr)),
+	     pml->present,
+	     pml->writable,
+	     pml->user_page,
+	     pml->write_through,
+	     pml->cache_disable,
+	     pml->accessed,
+	     pml->reserved,
+	     pml->vmm_info);
+}
+
+// Dump one 64-bit PDPE: covered virtual address, page directory address,
+// and flag bits. globalPage is printed as a literal 0 (no such field here).
+void PrintPDPE64(addr_t virtual_address, pdpe64_t * pdpe)
+{
+  PrintDebug("PDPE64 %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, reserved=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
+	     (void *)virtual_address,
+	     (void *)(addr_t) (BASE_TO_PAGE_ADDR(pdpe->pd_base_addr)),
+	     pdpe->present,
+	     pdpe->writable,
+	     pdpe->user_page,
+	     pdpe->write_through,
+	     pdpe->cache_disable,
+	     pdpe->accessed,
+	     pdpe->reserved,
+	     pdpe->large_page,
+	     0,//pdpe->global_page,
+	     pdpe->vmm_info);
+}
+
+
+
+// Dump one 64-bit PDE: covered virtual address, page table address,
+// and flag bits. globalPage is printed as a literal 0 (no such field here).
+void PrintPDE64(addr_t virtual_address, pde64_t * pde)
+{
+  PrintDebug("PDE64 %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, reserved=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
+	     (void *)virtual_address,
+	     (void *)(addr_t) (BASE_TO_PAGE_ADDR(pde->pt_base_addr)),
+	     pde->present,
+	     pde->writable,
+	     pde->user_page,
+	     pde->write_through,
+	     pde->cache_disable,
+	     pde->accessed,
+	     pde->reserved,
+	     pde->large_page,
+	     0,//pde->global_page,
+	     pde->vmm_info);
+}
+
+
+// Dump one 64-bit PTE: covered virtual address, mapped page address,
+// and flag bits.
+void PrintPTE64(addr_t virtual_address, pte64_t * pte)
+{
+  PrintDebug("PTE64 %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, dirty=%x, pteAttribute=%x, globalPage=%x, vmm_info=%x\n",
+	     (void *)virtual_address,
+	     (void*)(addr_t)(BASE_TO_PAGE_ADDR(pte->page_base_addr)),
+	     pte->present,
+	     pte->writable,
+	     pte->user_page,
+	     pte->write_through,
+	     pte->cache_disable,
+	     pte->accessed,
+	     pte->dirty,
+	     pte->pte_attr,
+	     pte->global_page,
+	     pte->vmm_info);
+}
+
+
+
+
+
+// Trace the 64-bit translation of a single virtual address, dumping each
+// level's entry (PML4 -> PDPE -> PDE -> PTE) for as far as the walk is present.
+void PrintPageTree_64(addr_t virtual_addr, pml4e64_t * pml) {
+  uint_t pml4_index = PML4E64_INDEX(virtual_addr);
+  uint_t pdpe_index = PDPE64_INDEX(virtual_addr);
+  uint_t pde_index = PDE64_INDEX(virtual_addr);
+  uint_t pte_index = PTE64_INDEX(virtual_addr);
+
+  PrintPML4e64(virtual_addr, &(pml[pml4_index]));
+  if (pml[pml4_index].present) {
+    // entries hold physical frame numbers; convert to host-virtual to descend
+    pdpe64_t * pdpe = (pdpe64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(pml[pml4_index].pdp_base_addr));
+    PrintPDPE64(virtual_addr, &(pdpe[pdpe_index]));
+
+    if (pdpe[pdpe_index].present) {
+      pde64_t * pde = (pde64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(pdpe[pdpe_index].pd_base_addr));
+      PrintPDE64(virtual_addr, &(pde[pde_index]));
+
+      if (pde[pde_index].present) {
+	pte64_t * pte = (pte64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(pde[pde_index].pt_base_addr));
+	PrintPTE64(virtual_addr, &(pte[pte_index]));
+      }
+
+    }
+
+  }
+
+}
+
+
+
+
+// Dispatch a page-walk dump for the given CPU mode. Only the long-mode
+// variants are implemented; other modes log an error.
+void PrintPageTree(v3_vm_cpu_mode_t cpu_mode, addr_t virtual_addr, addr_t cr3) {
+  switch (cpu_mode) {
+  case LONG:
+  case LONG_32_COMPAT:
+  case LONG_16_COMPAT:
+    PrintPageTree_64(virtual_addr, CR3_TO_PML4E64_VA(cr3));
+    break;
+  default:
+    PrintError("Unsupported CPU MODE %d\n", cpu_mode);
+    break;
+  }
+}