diff --git a/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_64.h b/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_64.h
index aff8034..4f31b7f 100644
--- a/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_64.h
+++ b/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_64.h
@@ -271,7 +271,6 @@ static int handle_pdpe_shadow_pagefault_64(struct guest_info * info, addr_t faul
     return 0;
 }
 
-
 static int handle_pde_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                           pde64_t * shadow_pd, pde64_t * guest_pd) {
     pt_access_status_t guest_pde_access;
@@ -339,21 +338,19 @@ static int handle_pde_shadow_pagefault_64(struct guest_info * info, addr_t fault
     if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
         // Check if we can use large pages and the guest memory is properly aligned
         // to potentially use a large page
-        if (info->use_large_pages && guest_pde->large_page
-            && (info->vm_info->mem_align >= PAGE_SIZE_2MB)) {
-            // Check underlying physical memory map to see if a large page is viable
+
+        if ((info->use_large_pages == 1) && (guest_pde->large_page == 1)) {
             addr_t guest_pa = BASE_TO_PAGE_ADDR_2MB(((pde64_2MB_t *)guest_pde)->page_base_addr);
-            uint32_t max_size = v3_get_max_page_size(info, guest_pa, PAGE_SIZE_2MB);
-            if (max_size >= PAGE_SIZE_2MB) {
-                if (handle_2MB_shadow_pagefault_pde_64(info, fault_addr, error_code, shadow_pde_access,
-                                                       (pde64_2MB_t *)shadow_pde, (pde64_2MB_t *)guest_pde) == 0) {
-                    return 0;
-                } else {
-                    PrintError("Error handling large pagefault with large page\n");
-                    return -1;
+            uint32_t page_size = v3_get_max_page_size(info, guest_pa, LONG);
+
+            if (page_size == PAGE_SIZE_2MB) {
+                if (handle_2MB_shadow_pagefault_pde_64(info, fault_addr, error_code, shadow_pde_access,
+                                                       (pde64_2MB_t *)shadow_pde, (pde64_2MB_t *)guest_pde) == -1) {
+                    PrintError("Error handling large pagefault with large page\n");
+                    return -1;
                 }
-            } else {
-                PrintDebug("Underlying physical memory map doesn't allow use of a large page.\n");
+
+                return 0;
             }
             // Fallthrough to handle the region with small pages
         }
@@ -366,7 +363,6 @@ static int handle_pde_shadow_pagefault_64(struct guest_info * info, addr_t fault
         shadow_pde->present = 1;
         shadow_pde->user_page = guest_pde->user_page;
 
-
         if (guest_pde->large_page == 0) {
             shadow_pde->writable = guest_pde->writable;
         } else {
@@ -430,7 +426,7 @@ static int handle_pte_shadow_pagefault_64(struct guest_info * info, addr_t fault
 
     PrintDebug("Handling PTE fault\n");
 
-    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_pa);
+    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_pa);
 
 
 
@@ -562,7 +558,7 @@ static int handle_2MB_shadow_pagefault_pde_64(struct guest_info * info,
     PrintDebug("Handling 2MB fault with large page (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
     PrintDebug("LargeShadowPDE=%p, LargeGuestPDE=%p\n", large_shadow_pde, large_guest_pde);
 
-    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_fault_pa);
+    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_fault_pa);
 
     if (shdw_reg == NULL) {
         // Inject a machine check in the guest
@@ -644,7 +640,7 @@ static int handle_2MB_shadow_pagefault_pte_64(struct guest_info * info,
     PrintDebug("Handling 2MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
     PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);
 
-    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_fault_pa);
+    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_fault_pa);
 
     if (shdw_reg == NULL) {