From: Chunxiao Diao, Daniel Zuo, Yuanbo Fan
Date: Mon, 26 May 2014 20:58:49 +0000 (-0500)
Subject: Convert shadow paging to use 32 PAE (Direct Paging)
X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?p=palacios.git;a=commitdiff_plain;h=774bac9fbb03ef8bf7c2ca2c79a8b87c9bc4c526

Convert shadow paging to use 32 PAE (Direct Paging)

This changes shadow paging to use, at minimum, 32PAE shadow page tables.
This makes it possible to place a guest using shadow paging anywhere in
host physical memory, instead of only within the first 4 GB.

This patch converts the direct pager to use 32PAE. The direct pager is
active when the guest has no installed page tables. All patches with
this subject line must be applied together for this to work.
---

diff --git a/palacios/src/palacios/vmm_direct_paging.c b/palacios/src/palacios/vmm_direct_paging.c
index 5f5be4c..6bf987e 100644
--- a/palacios/src/palacios/vmm_direct_paging.c
+++ b/palacios/src/palacios/vmm_direct_paging.c
@@ -23,6 +23,7 @@
 #include
 #include
 #include
+#include

 #ifndef V3_CONFIG_DEBUG_NESTED_PAGING
@@ -35,9 +36,9 @@ static addr_t create_generic_pt_page(struct guest_info *core) {
     void * page = 0;
     void *temp;

-    temp = V3_AllocPagesExtended(1, PAGE_SIZE_4KB, -1,
-                                 core->shdw_pg_mode==SHADOW_PAGING ? V3_ALLOC_PAGES_CONSTRAINT_4GB : 0);
-    if (!temp) {
+    temp = V3_AllocPagesExtended(1, PAGE_SIZE_4KB, -1, 0); // no constraints
+
+    if (!temp) {
 	PrintError(VM_NONE, VCORE_NONE,"Cannot allocate page\n");
 	return 0;
     }
@@ -66,8 +67,8 @@ int v3_free_passthrough_pts(struct guest_info * core) {
     switch(mode) {
 	case REAL:
 	case PROTECTED:
-	    delete_page_tables_32((pde32_t *)V3_VAddr((void *)(core->direct_map_pt)));
-	    break;
+	    // Intentional fallthrough here
+	    // There are *only* PAE tables
 	case PROTECTED_PAE:
 	case LONG:
 	case LONG_32_COMPAT:
@@ -100,9 +101,13 @@ int v3_activate_passthrough_pt(struct guest_info * info) {
     // For now... But we need to change this....
     // As soon as shadow paging becomes active the passthrough tables are hosed
     // So this will cause chaos if it is called at that time

-    info->ctrl_regs.cr3 = *(addr_t*)&(info->direct_map_pt);
-    //PrintError(info->vm_info, info, "Activate Passthrough Page tables not implemented\n");
+    struct cr3_32_PAE * shadow_cr3 = (struct cr3_32_PAE *) &(info->ctrl_regs.cr3);
+    struct cr4_32 * shadow_cr4 = (struct cr4_32 *) &(info->ctrl_regs.cr4);
+    addr_t shadow_pt_addr = *(addr_t*)&(info->direct_map_pt);
+    // Passthrough PTs will only be PAE page tables.
+    shadow_cr3->pdpt_base_addr = shadow_pt_addr >> 5;
+    shadow_cr4->pae = 1;
+    PrintDebug(info->vm_info, info, "Activated Passthrough Page tables\n");

     return 0;
 }
@@ -113,8 +118,8 @@ int v3_handle_passthrough_pagefault(struct guest_info * info, addr_t fault_addr,
     switch(mode) {
 	case REAL:
 	case PROTECTED:
-	    return handle_passthrough_pagefault_32(info, fault_addr, error_code);
-
+	    // Note intentional fallthrough here
+	    // There are only PAE page tables now
 	case PROTECTED_PAE:
 	case LONG:
 	case LONG_32_COMPAT:
@@ -161,8 +166,8 @@ int v3_invalidate_passthrough_addr(struct guest_info * info, addr_t inv_addr) {
     switch(mode) {
 	case REAL:
 	case PROTECTED:
-	    return invalidate_addr_32(info, inv_addr);
-
+	    // Intentional fallthrough - there
+	    // are only PAE page tables now
 	case PROTECTED_PAE:
 	case LONG:
 	case LONG_32_COMPAT:
@@ -184,8 +189,8 @@ int v3_invalidate_passthrough_addr_range(struct guest_info * info,
     switch(mode) {
 	case REAL:
 	case PROTECTED:
-	    return invalidate_addr_32_range(info, inv_addr_start, inv_addr_end);
-
+	    // Intentional fallthrough
+	    // There are only PAE PTs now
 	case PROTECTED_PAE:
 	case LONG:
 	case LONG_32_COMPAT:
diff --git a/palacios/src/palacios/vmm_direct_paging_32.h b/palacios/src/palacios/vmm_direct_paging_32.h
index 9d3a5c9..7b49d58 100644
--- a/palacios/src/palacios/vmm_direct_paging_32.h
+++ b/palacios/src/palacios/vmm_direct_paging_32.h
@@ -26,6 +26,7 @@
 #include
 #include
 #include
+#include

 static inline int handle_passthrough_pagefault_32(struct guest_info * info,
diff --git a/palacios/src/palacios/vmm_direct_paging_32pae.h b/palacios/src/palacios/vmm_direct_paging_32pae.h
index 9d07d39..7414a02 100644
--- a/palacios/src/palacios/vmm_direct_paging_32pae.h
+++ b/palacios/src/palacios/vmm_direct_paging_32pae.h
@@ -39,6 +39,7 @@ static inline int handle_passthrough_pagefault_32pae(struct guest_info * info,
     int pdpe_index = PDPE32PAE_INDEX(fault_addr);
     int pde_index = PDE32PAE_INDEX(fault_addr);
     int pte_index = PTE32PAE_INDEX(fault_addr);
+

     struct v3_mem_region * region = v3_get_mem_region(info->vm_info, info->vcpu_id, fault_addr);
@@ -48,29 +49,32 @@ static inline int handle_passthrough_pagefault_32pae(struct guest_info * info,
 	return -1;
     }

+    PrintDebug(info->vm_info, info, "Direct Paging 32PAE page fault handler: fault address=%p\n", (void *)fault_addr);
+
     // Lookup the correct PDPE address based on the PAGING MODE
     if (info->shdw_pg_mode == SHADOW_PAGING) {
 	pdpe = CR3_TO_PDPE32PAE_VA(info->ctrl_regs.cr3);
     } else {
 	pdpe = CR3_TO_PDPE32PAE_VA(info->direct_map_pt);
     }
-
+
+    PrintDebug(info->vm_info, info, "Top-level PDPE address=%p\n", (void *)pdpe);
     // Fix up the PDPE entry
     if (pdpe[pdpe_index].present == 0) {
 	pde = (pde32pae_t *)create_generic_pt_page(info);
-
+	PrintDebug(info->vm_info, info, "Creating a new pd page=%p\n", (void *)pde);
 	pdpe[pdpe_index].present = 1;
 	// Set default PDPE Flags...
 	pdpe[pdpe_index].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
     } else {
 	pde = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pdpe[pdpe_index].pd_base_addr));
     }
-
+    PrintDebug(info->vm_info, info, "PD base address=%p\n", (void *)pde);
     // Fix up the PDE entry
     if (pde[pde_index].present == 0) {
 	pte = (pte32pae_t *)create_generic_pt_page(info);
-
+	PrintDebug(info->vm_info, info, "Creating a new pt page=%p\n", (void *)pte);
 	pde[pde_index].present = 1;
 	pde[pde_index].writable = 1;
 	pde[pde_index].user_page = 1;
@@ -80,7 +84,7 @@ static inline int handle_passthrough_pagefault_32pae(struct guest_info * info,
 	pte = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pde[pde_index].pt_base_addr));
     }

-
+    PrintDebug(info->vm_info, info, "PT base address=%p\n", (void *)pte);
     // Fix up the PTE entry
     if (pte[pte_index].present == 0) {
 	pte[pte_index].user_page = 1;
@@ -102,12 +106,16 @@ static inline int handle_passthrough_pagefault_32pae(struct guest_info * info,
 	    }

 	    pte[pte_index].page_base_addr = PAGE_BASE_ADDR(host_addr);
+	    PrintDebug(info->vm_info, info, "PTE mapped to=%p\n", (void *)host_addr);
+	    PrintDebug(info->vm_info, info, "PTE is=%llx\n", *(uint64_t *)&(pte[pte_index]));
 	} else {
 	    return region->unhandled(info, fault_addr, fault_addr, region, error_code);
 	}
     } else {
 	return region->unhandled(info, fault_addr, fault_addr, region, error_code);
     }
+
+    PrintDebug(info->vm_info, info, "Handler ends with fault address=%p\n", (void *)fault_addr);

     return 0;
 }
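A note for readers following the 32PAE conversion: a PAE-mode CR3 holds the
physical base of the page directory pointer table (PDPT) in bits 31:5 (the
PDPT need only be 32-byte aligned), which is why v3_activate_passthrough_pt()
stores shadow_pt_addr >> 5 into pdpt_base_addr and sets CR4.PAE. The sketch
below is a standalone, user-space illustration of the allocate-on-fault,
three-level walk that handle_passthrough_pagefault_32pae() performs; it is
not Palacios code. The names alloc_table, map_page, PDPE_IDX, PDE_IDX, and
PTE_IDX are invented stand-ins for create_generic_pt_page() and the
PDPE32PAE_INDEX/PDE32PAE_INDEX/PTE32PAE_INDEX macros, and the entries here
hold virtual addresses so the toy runs unprivileged, whereas the real handler
stores physical addresses obtained via V3_PAddr()/PAGE_BASE_ADDR().

    /* Standalone sketch of a 32PAE allocate-on-fault walk (not Palacios code). */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096ULL
    #define PRESENT   (1ULL << 0)
    #define WRITABLE  (1ULL << 1)
    #define USER      (1ULL << 2)
    #define ADDR_MASK 0x000FFFFFFFFFF000ULL

    /* A 32-bit PAE virtual address splits 2/9/9/12 bits: */
    #define PDPE_IDX(a) (((a) >> 30) & 0x3)    /* cf. PDPE32PAE_INDEX */
    #define PDE_IDX(a)  (((a) >> 21) & 0x1FF)  /* cf. PDE32PAE_INDEX  */
    #define PTE_IDX(a)  (((a) >> 12) & 0x1FF)  /* cf. PTE32PAE_INDEX  */

    /* Stand-in for create_generic_pt_page(): one zeroed, page-aligned page. */
    static uint64_t *alloc_table(void) {
        uint64_t *t = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!t) { perror("aligned_alloc"); exit(1); }
        memset(t, 0, PAGE_SIZE);
        return t;
    }

    /* Walk PDPT -> PD -> PT, allocating any level that is not present,
     * then install the leaf PTE, in the same fix-up order as the handler. */
    static void map_page(uint64_t *pdpt, uint64_t gaddr, uint64_t haddr) {
        uint64_t *pd, *pt;

        if (!(pdpt[PDPE_IDX(gaddr)] & PRESENT)) {
            pd = alloc_table();
            /* PAE PDPTEs carry no writable/user bits, only present + base. */
            pdpt[PDPE_IDX(gaddr)] = (uint64_t)(uintptr_t)pd | PRESENT;
        } else {
            pd = (uint64_t *)(uintptr_t)(pdpt[PDPE_IDX(gaddr)] & ADDR_MASK);
        }

        if (!(pd[PDE_IDX(gaddr)] & PRESENT)) {
            pt = alloc_table();
            pd[PDE_IDX(gaddr)] = (uint64_t)(uintptr_t)pt | PRESENT | WRITABLE | USER;
        } else {
            pt = (uint64_t *)(uintptr_t)(pd[PDE_IDX(gaddr)] & ADDR_MASK);
        }

        pt[PTE_IDX(gaddr)] = (haddr & ADDR_MASK) | PRESENT | WRITABLE | USER;
    }

    int main(void) {
        /* The PDPT itself has only 4 entries (32 bytes); a full page is
         * used here for simplicity, as create_generic_pt_page() also does. */
        uint64_t *pdpt  = alloc_table();
        uint64_t gaddr  = 0xC0001000ULL;   /* arbitrary guest address */
        uint64_t haddr  = 0x12345000ULL;   /* arbitrary "host" frame  */

        map_page(pdpt, gaddr, haddr);

        /* Re-walk to show the leaf entry the fault handler would install. */
        uint64_t *pd = (uint64_t *)(uintptr_t)(pdpt[PDPE_IDX(gaddr)] & ADDR_MASK);
        uint64_t *pt = (uint64_t *)(uintptr_t)(pd[PDE_IDX(gaddr)] & ADDR_MASK);
        printf("PTE for 0x%llx is 0x%016llx\n",
               (unsigned long long)gaddr,
               (unsigned long long)pt[PTE_IDX(gaddr)]);
        return 0;
    }

Unlike this toy, the real handler also selects the PDPT from either
ctrl_regs.cr3 (shadow paging) or direct_map_pt (nested paging) via
CR3_TO_PDPE32PAE_VA(), and defers faults on unmapped regions to
region->unhandled().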