From: Patrick Bridges
Date: Wed, 11 Aug 2010 22:44:19 +0000 (-0600)
Subject: Added robust physical alignment check
X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=commitdiff_plain;h=6b3606622300e3f0d45f7b8388e4c625a644aa4f;p=palacios.git

Added robust physical alignment check
---

diff --git a/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_64.h b/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_64.h
index aff8034..8b30540 100644
--- a/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_64.h
+++ b/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_64.h
@@ -271,6 +271,43 @@ static int handle_pdpe_shadow_pagefault_64(struct guest_info * info, addr_t faul
     return 0;
 }
 
+// For an address on a page of size page_size, compute the actual alignment
+// of the physical page it maps to.
+int compute_physical_alignment(addr_t va, addr_t pa, uint32_t page_size)
+{
+    addr_t va_offset, pa_base;
+
+    switch (page_size) {
+        case PAGE_SIZE_1GB:
+            va_offset = PAGE_OFFSET_1GB(va);
+            break;
+        case PAGE_SIZE_4MB:
+            va_offset = PAGE_OFFSET_4MB(va);
+            break;
+        case PAGE_SIZE_2MB:
+            va_offset = PAGE_OFFSET_2MB(va);
+            break;
+        case PAGE_SIZE_4KB:
+            return 1;
+        default:
+            PrintError("Invalid page size in %s.\n", __FUNCTION__);
+            return 0;
+    }
+
+    pa_base = pa - va_offset;
+
+    if (PAGE_OFFSET_1GB(pa_base) == 0) {
+        return PAGE_SIZE_1GB;
+    } else if (PAGE_OFFSET_4MB(pa_base) == 0) {
+        return PAGE_SIZE_4MB;
+    } else if (PAGE_OFFSET_2MB(pa_base) == 0) {
+        return PAGE_SIZE_2MB;
+    } else if (PAGE_OFFSET_4KB(pa_base) == 0) {
+        return PAGE_SIZE_4KB;
+    } else {
+        PrintError("Incorrect alignment setup or calculation in %s.\n", __FUNCTION__);
+        return 0;
+    }
+}
 
 static int handle_pde_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                           pde64_t * shadow_pd, pde64_t * guest_pd) {
@@ -339,12 +376,12 @@ static int handle_pde_shadow_pagefault_64(struct guest_info * info, addr_t fault
     if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
         // Check if we can use large pages and the guest memory is properly aligned
         // to potentially use a large page
-        if (info->use_large_pages && guest_pde->large_page
-            && (info->vm_info->mem_align >= PAGE_SIZE_2MB)) {
+        if (info->use_large_pages && guest_pde->large_page) {
             // Check underlying physical memory map to see if a large page is viable
             addr_t guest_pa = BASE_TO_PAGE_ADDR_2MB(((pde64_2MB_t *)guest_pde)->page_base_addr);
-            uint32_t max_size = v3_get_max_page_size(info, guest_pa, PAGE_SIZE_2MB);
-            if (max_size >= PAGE_SIZE_2MB) {
+            if ((compute_physical_alignment(fault_addr, guest_pa, PAGE_SIZE_2MB) >= PAGE_SIZE_2MB)
+                && (v3_get_max_page_size(info, guest_pa, PAGE_SIZE_2MB) >= PAGE_SIZE_2MB)) {
+                // Should be able to use a large page.
                 if (handle_2MB_shadow_pagefault_pde_64(info, fault_addr, error_code, shadow_pde_access,
                                                        (pde64_2MB_t *)shadow_pde, (pde64_2MB_t *)guest_pde) == 0) {
                     return 0;
@@ -353,7 +390,7 @@ static int handle_pde_shadow_pagefault_64(struct guest_info * info, addr_t fault
                     return -1;
                 }
             } else {
-                PrintDebug("Underlying physical memory map doesn't allow use of a large page.\n");
+                PrintDebug("Alignment or underlying physical memory map doesn't allow use of a large page.\n");
             }
             // Fallthrough to handle the region with small pages
         }
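
The new check recovers the base of the physical page by subtracting the virtual address's offset within its page from the physical address it maps to, then tests how well that base is aligned; a 2MB shadow large page is only viable when the base is itself at least 2MB-aligned. This replaces the previous coarse requirement that the whole guest be allocated with mem_align >= PAGE_SIZE_2MB with a per-mapping test. Below is a minimal standalone sketch of that computation, not part of the patch; the PAGE_OFFSET_2MB definition is an assumption standing in for Palacios's macro (mask off all but the low-order offset bits).

#include <stdint.h>
#include <stdio.h>

/* Assumed stand-in for Palacios's PAGE_OFFSET_2MB macro. */
#define PAGE_OFFSET_2MB(x) ((uint64_t)(x) & ((1ULL << 21) - 1))

int main(void) {
    uint64_t va = 0x40212345ULL;        /* 0x12345 bytes into its 2MB page */

    /* Physical page base = pa minus the offset va has within its page. */
    uint64_t bad_pa  = 0x10312345ULL;   /* base 0x10300000: only 1MB-aligned */
    uint64_t good_pa = 0x10412345ULL;   /* base 0x10400000: 2MB-aligned */

    uint64_t bad_base  = bad_pa  - PAGE_OFFSET_2MB(va);
    uint64_t good_base = good_pa - PAGE_OFFSET_2MB(va);

    /* Prints 0 then 1: only the second mapping could use a 2MB large page. */
    printf("%d\n", PAGE_OFFSET_2MB(bad_base)  == 0);
    printf("%d\n", PAGE_OFFSET_2MB(good_base) == 0);
    return 0;
}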