X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fmmu%2Fvmm_shdw_pg_tlb_32.h;h=b3f4e432b2ce71e632da422bc706f824788dc457;hb=fdb16aa5e99a19a91264ae84beaf77d3036548d5;hp=83bfb10b6644d4dfc9e0c9c0791e48387f07b74f;hpb=ada3ad58ddc6747ea1062d6e9509a978389924c9;p=palacios.git

diff --git a/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_32.h b/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_32.h
index 83bfb10..b3f4e43 100644
--- a/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_32.h
+++ b/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_32.h
@@ -133,25 +133,21 @@ static inline int handle_shadow_pagefault_32(struct guest_info * info, addr_t fa
 
     if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
 
-        if (info->use_large_pages && guest_pde->large_page) {
+        if ((info->use_large_pages == 1) && (guest_pde->large_page == 1)) {
             // Check underlying physical memory map to see if a large page is viable
-            addr_t guest_pa = BASE_TO_PAGE_ADDR_4MB(((pde32_4MB_t *)guest_pde)->page_base_addr);
-            addr_t host_pa;
-            if (v3_get_max_page_size(info, guest_pa, PAGE_SIZE_4MB) < PAGE_SIZE_4MB) {
-                PrintDebug("Underlying physical memory map doesn't allow use of a large page.\n");
-                // Fallthrough to small pages
-            } else if ((v3_gpa_to_hpa(info, guest_pa, &host_pa) != 0)
-                       || (v3_compute_page_alignment(host_pa) < PAGE_SIZE_4MB)) {
-                PrintDebug("Host memory alignment doesn't allow use of a large page.\n");
-                // Fallthrough to small pages
-            } else if (handle_4MB_shadow_pagefault_pde_32(info, fault_addr, error_code, shadow_pde_access,
-                                                          (pde32_4MB_t *)shadow_pde, (pde32_4MB_t *)guest_pde) == 0) {
+            addr_t guest_pa = BASE_TO_PAGE_ADDR_4MB(((pde32_4MB_t *)guest_pde)->page_base_addr);
+            uint32_t page_size = v3_get_max_page_size(info, guest_pa, PROTECTED);
+
+            if (page_size == PAGE_SIZE_4MB) {
+                PrintDebug("using large page for fault_addr %p (gpa=%p)\n", (void *)fault_addr, (void *)guest_pa);
+                if (handle_4MB_shadow_pagefault_pde_32(info, fault_addr, error_code, shadow_pde_access,
+                                                       (pde32_4MB_t *)shadow_pde, (pde32_4MB_t *)guest_pde) == -1) {
+                    PrintError("Error handling large pagefault with large page\n");
+                    return -1;
+                }
+
                 return 0;
-            } else {
-                PrintError("Error handling large pagefault with large page\n");
-                return -1;
-            }
-            // Fallthrough to handle the region with small pages
+            }
         }
 
         struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
@@ -176,7 +172,6 @@ static inline int handle_shadow_pagefault_32(struct guest_info * info, addr_t fa
             }
         }
 
-        // VMM Specific options
         shadow_pde->write_through = guest_pde->write_through;
         shadow_pde->cache_disable = guest_pde->cache_disable;
 
@@ -185,14 +180,12 @@ static inline int handle_shadow_pagefault_32(struct guest_info * info, addr_t fa
 
         guest_pde->accessed = 1;
 
-
         shadow_pde->pt_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
     } else {
         shadow_pt = (pte32_t *)V3_VAddr((void *)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr));
     }
 
-
-
+
     if (guest_pde->large_page == 0) {
         if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t*)&guest_pt) == -1) {
             // Machine check the guest
@@ -486,7 +479,8 @@ static int handle_4MB_shadow_pagefault_pde_32(struct guest_info * info,
             return -1;
         }
 
-        PrintDebug("\tMapping shadow page (%p)\n", (void *)BASE_TO_PAGE_ADDR(shadow_pte->page_base_addr));
+        PrintDebug("shadow PA = %p\n", (void *)shadow_pa);
+
         large_guest_pde->vmm_info = V3_LARGE_PG; /* For invalidations */
         large_shadow_pde->page_base_addr = PAGE_BASE_ADDR_4MB(shadow_pa);
 
@@ -494,6 +488,8 @@
     large_shadow_pde->present = 1;
     large_shadow_pde->user_page = 1;
 
+    PrintDebug("\tMapping shadow page (%p)\n", (void *)BASE_TO_PAGE_ADDR_4MB(large_shadow_pde->page_base_addr));
+
     if (shdw_reg->flags.write == 0) {
         large_shadow_pde->writable = 0;
     } else {
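
The substance of this commit is in the first hunk: the old code open-coded three separate viability checks (memory-map support, gpa-to-hpa translation, host alignment) before attempting a 4MB shadow mapping, while the new code folds all of that into a single v3_get_max_page_size() query, which now takes the guest's paging mode (PROTECTED) rather than a requested page size. Below is a minimal, runnable C sketch of the resulting control flow; every name in it (query_max_page_size, map_4mb_shadow_page, map_with_small_pages) is a hypothetical stand-in for the Palacios calls shown in the diff, not the actual API.

    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t addr_t;
    #define PAGE_SIZE_4KB 0x1000U
    #define PAGE_SIZE_4MB 0x400000U

    /* Hypothetical stand-in for v3_get_max_page_size(info, gpa, PROTECTED):
     * pretend only 4MB-aligned gpas have full large-page backing on the host. */
    static uint32_t query_max_page_size(addr_t guest_pa) {
        return (guest_pa % PAGE_SIZE_4MB == 0) ? PAGE_SIZE_4MB : PAGE_SIZE_4KB;
    }

    /* Hypothetical stand-ins for the two mapping paths
     * (handle_4MB_shadow_pagefault_pde_32 and the small-page fallthrough). */
    static int map_4mb_shadow_page(addr_t fault_addr) {
        printf("using large page for fault_addr %p\n", (void *)fault_addr);
        return 0;
    }
    static int map_with_small_pages(addr_t fault_addr) {
        printf("using 4KB pages for fault_addr %p\n", (void *)fault_addr);
        return 0;
    }

    /* Control flow after the patch: one viability check up front. If the host
     * can back a full 4MB page at this gpa, take the large-page path, and treat
     * a failure there as a hard error; otherwise fall through and shadow the
     * guest's 4MB page with 4KB pages. */
    static int handle_large_guest_pde(addr_t fault_addr, addr_t guest_pa) {
        if (query_max_page_size(guest_pa) == PAGE_SIZE_4MB) {
            if (map_4mb_shadow_page(fault_addr) == -1) {
                return -1;
            }
            return 0;
        }
        return map_with_small_pages(fault_addr);
    }

    int main(void) {
        handle_large_guest_pde(0x08048123, 0x00800000); /* 4MB-aligned gpa -> large page */
        handle_large_guest_pde(0x08048123, 0x00801000); /* unaligned gpa -> small pages */
        return 0;
    }

The design point of the hunk, as far as the diff shows, is that "the host cannot back a 4MB page here" becomes the only fallthrough-to-small-pages case, decided by one query that internalizes the translation and alignment checks the caller used to do itself; a failure inside the large-page handler remains a hard error, as before.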