X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fmmu%2Fvmm_shdw_pg_tlb_32.h;h=b3f4e432b2ce71e632da422bc706f824788dc457;hb=a0d3be5212e7a5053ba213ce7bd26c7124cf01e3;hp=30acc7a6b4a7ab5b6b63e194c8d6636dc0c9c105;hpb=c26a28a0070ca2aeedf58294d274be78d0991b05;p=palacios.git

diff --git a/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_32.h b/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_32.h
index 30acc7a..b3f4e43 100644
--- a/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_32.h
+++ b/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_32.h
@@ -46,8 +46,11 @@ static inline int activate_shadow_pt_32(struct guest_info * core) {
  * *
  * *
  * */
-static int handle_4MB_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
-                                          pte32_t * shadow_pt, pde32_4MB_t * large_guest_pde);
+static int handle_4MB_shadow_pagefault_pde_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
+                                              pt_access_status_t shadow_pde_access, pde32_4MB_t * large_shadow_pde,
+                                              pde32_4MB_t * large_guest_pde);
+static int handle_4MB_shadow_pagefault_pte_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
+                                              pte32_t * shadow_pt, pde32_4MB_t * large_guest_pde);
 
 static int handle_pte_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                           pte32_t * shadow_pt, pte32_t * guest_pt);
@@ -65,7 +68,7 @@ static inline int handle_shadow_pagefault_32(struct guest_info * info, addr_t fa
     PrintDebug("Shadow page fault handler: %p\n", (void*) fault_addr );
     PrintDebug("Handling PDE32 Fault\n");
 
-    if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
+    if (v3_gpa_to_hva(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
         PrintError("Invalid Guest PDE Address: 0x%p\n", (void *)guest_cr3);
         return -1;
     }
@@ -129,6 +132,24 @@ static inline int handle_shadow_pagefault_32(struct guest_info * info, addr_t fa
 
     // Get the next shadow page level, allocate if not present
     if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
+
+        if ((info->use_large_pages == 1) && (guest_pde->large_page == 1)) {
+            // Check underlying physical memory map to see if a large page is viable
+            addr_t guest_pa = BASE_TO_PAGE_ADDR_4MB(((pde32_4MB_t *)guest_pde)->page_base_addr);
+            uint32_t page_size = v3_get_max_page_size(info, guest_pa, PROTECTED);
+
+            if (page_size == PAGE_SIZE_4MB) {
+                PrintDebug("using large page for fault_addr %p (gpa=%p)\n", (void *)fault_addr, (void *)guest_pa);
+                if (handle_4MB_shadow_pagefault_pde_32(info, fault_addr, error_code, shadow_pde_access,
+                                                       (pde32_4MB_t *)shadow_pde, (pde32_4MB_t *)guest_pde) == -1) {
+                    PrintError("Error handling large pagefault with large page\n");
+                    return -1;
+                }
+
+                return 0;
+            }
+        }
+
         struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
         shadow_pt = (pte32_t *)V3_VAddr((void *)shdw_page->page_pa);
 
@@ -151,7 +172,6 @@ static inline int handle_shadow_pagefault_32(struct guest_info * info, addr_t fa
             }
         }
 
-
         // VMM Specific options
         shadow_pde->write_through = guest_pde->write_through;
         shadow_pde->cache_disable = guest_pde->cache_disable;
@@ -160,16 +180,14 @@ static inline int handle_shadow_pagefault_32(struct guest_info * info, addr_t fa
 
         guest_pde->accessed = 1;
 
-
         shadow_pde->pt_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
     } else {
         shadow_pt = (pte32_t *)V3_VAddr((void *)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr));
     }
 
-
-
+
     if (guest_pde->large_page == 0) {
-        if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t*)&guest_pt) == -1) {
+        if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t*)&guest_pt) == -1) {
             // Machine check the guest
             PrintDebug("Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
             v3_raise_exception(info, MC_EXCEPTION);
@@ -181,7 +199,7 @@ static inline int handle_shadow_pagefault_32(struct guest_info * info, addr_t fa
             return -1;
         }
     } else {
-        if (handle_4MB_shadow_pagefault_32(info, fault_addr, error_code, shadow_pt, (pde32_4MB_t *)guest_pde) == -1) {
+        if (handle_4MB_shadow_pagefault_pte_32(info, fault_addr, error_code, shadow_pt, (pde32_4MB_t *)guest_pde) == -1) {
             PrintError("Error handling large pagefault\n");
             return -1;
         }
@@ -200,7 +218,7 @@ static int handle_pte_shadow_pagefault_32(struct guest_info * info, addr_t fault
     pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
     addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);
 
-    struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info->vm_info, info->cpu_id, guest_pa);
+    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_pa);
 
     if (shdw_reg == NULL) {
         // Inject a machine check in the guest
@@ -247,8 +265,13 @@ static int handle_pte_shadow_pagefault_32(struct guest_info * info, addr_t fault
         PrintDebug("guest_pa =%p\n", (void *)guest_pa);
 
         if ((shdw_reg->flags.alloced == 1) && (shdw_reg->flags.read == 1)) {
-            addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, info->cpu_id, guest_pa);
-
+            addr_t shadow_pa = 0;
+
+            if (v3_gpa_to_hpa(info, guest_pa, &shadow_pa) == -1) {
+                PrintError("could not translate page fault address (%p)\n", (void *)guest_pa);
+                return -1;
+            }
+
             shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
 
             PrintDebug("\tMapping shadow page (%p)\n", (void *)BASE_TO_PAGE_ADDR(shadow_pte->page_base_addr));
@@ -278,10 +301,10 @@ static int handle_pte_shadow_pagefault_32(struct guest_info * info, addr_t fault
                 shadow_pte->writable = 0;
             }
 
-        } else if (shdw_reg->flags.hook == 1) {
-            // Page fault handled by hook functions
-
-            if (v3_handle_mem_hook(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
+        } else {
+            // Page fault on unhandled memory region
+
+            if (shdw_reg->unhandled(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                 PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                 return -1;
             }
@@ -289,14 +312,14 @@ static int handle_pte_shadow_pagefault_32(struct guest_info * info, addr_t fault
     } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
         guest_pte->dirty = 1;
 
-        if (shdw_reg->flags.hook == 1) {
-            if (v3_handle_mem_hook(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
+        if (shdw_reg->flags.write == 1) {
+            PrintDebug("Shadow PTE Write Error\n");
+            shadow_pte->writable = guest_pte->writable;
+        } else {
+            if (shdw_reg->unhandled(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                 PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                 return -1;
             }
-        } else if (shdw_reg->flags.write == 1) {
-            PrintDebug("Shadow PTE Write Error\n");
-            shadow_pte->writable = guest_pte->writable;
         }
 
 
@@ -317,9 +340,8 @@ static int handle_pte_shadow_pagefault_32(struct guest_info * info, addr_t fault
     return 0;
 }
 
-
-
-static int handle_4MB_shadow_pagefault_32(struct guest_info * info,
+// Handle a 4MB page fault with small pages in the PTE
+static int handle_4MB_shadow_pagefault_pte_32(struct guest_info * info,
                                           addr_t fault_addr, pf_error_t error_code,
                                           pte32_t * shadow_pt, pde32_4MB_t * large_guest_pde)
 {
@@ -331,7 +353,7 @@ static int handle_4MB_shadow_pagefault_32(struct guest_info * info,
     PrintDebug("Handling 4MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
     PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);
 
-    struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info->vm_info, info->cpu_id, guest_fault_pa);
+    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_fault_pa);
 
 
     if (shdw_reg == NULL) {
@@ -354,7 +376,13 @@ static int handle_4MB_shadow_pagefault_32(struct guest_info * info,
 
     if ((shdw_reg->flags.alloced == 1) && (shdw_reg->flags.read == 1)) {
-        addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, info->cpu_id, guest_fault_pa);
+        addr_t shadow_pa = 0;
+
+
+        if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) {
+            PrintError("could not translate page fault address (%p)\n", (void *)guest_fault_pa);
+            return -1;
+        }
 
         shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
 
@@ -382,16 +410,16 @@ static int handle_4MB_shadow_pagefault_32(struct guest_info * info,
             shadow_pte->writable = 1;
         }
 
-    } else if (shdw_reg->flags.hook == 1) {
-        if (v3_handle_mem_hook(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
+    } else {
+        if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
             PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
             return -1;
         }
     }
 
     } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
-        if (shdw_reg->flags.hook == 1) {
-            if (v3_handle_mem_hook(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
+        if (shdw_reg->flags.write == 0) {
+            if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                 PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                 return -1;
             }
@@ -403,20 +431,101 @@ static int handle_4MB_shadow_pagefault_32(struct guest_info * info,
         return -1;
     }
 
-    PrintDebug("Returning from large page fault handler\n");
+    PrintDebug("Returning from large page->small page fault handler\n");
     return 0;
 }
 
+// Handle a 4MB page fault with a 4MB page in the PDE
+static int handle_4MB_shadow_pagefault_pde_32(struct guest_info * info,
+                                              addr_t fault_addr, pf_error_t error_code,
+                                              pt_access_status_t shadow_pde_access,
+                                              pde32_4MB_t * large_shadow_pde, pde32_4MB_t * large_guest_pde)
+{
+    addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_4MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_4MB(fault_addr);
+
+    PrintDebug("Handling 4MB fault with large page (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
+    PrintDebug("LargeShadowPDE=%p, LargeGuestPDE=%p\n", large_shadow_pde, large_guest_pde);
+
+    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_fault_pa);
+
+    if (shdw_reg == NULL) {
+        // Inject a machine check in the guest
+        PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
+        v3_raise_exception(info, MC_EXCEPTION);
+        return -1;
+    }
+
+    if (shadow_pde_access == PT_ACCESS_OK) {
+        // Inconsistent state...
+        // Guest Re-Entry will flush tables and everything should now work
+        PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
+        return 0;
+    }
+
+    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
+        // Get the guest physical address of the fault
+
+        if ((shdw_reg->flags.alloced == 1) &&
+            (shdw_reg->flags.read == 1)) {
+            addr_t shadow_pa = 0;
+
+            if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) {
+                PrintError("could not translate page fault address (%p)\n", (void *)guest_fault_pa);
+                return -1;
+            }
+
+            PrintDebug("shadow PA = %p\n", (void *)shadow_pa);
+
+            large_guest_pde->vmm_info = V3_LARGE_PG; /* For invalidations */
+            large_shadow_pde->page_base_addr = PAGE_BASE_ADDR_4MB(shadow_pa);
+            large_shadow_pde->large_page = 1;
+            large_shadow_pde->present = 1;
+            large_shadow_pde->user_page = 1;
+
+            PrintDebug("\tMapping shadow page (%p)\n", (void *)BASE_TO_PAGE_ADDR_4MB(large_shadow_pde->page_base_addr));
+
+            if (shdw_reg->flags.write == 0) {
+                large_shadow_pde->writable = 0;
+            } else {
+                large_shadow_pde->writable = 1;
+            }
+
+            // set according to VMM policy
+            large_shadow_pde->write_through = large_guest_pde->write_through;
+            large_shadow_pde->cache_disable = large_guest_pde->cache_disable;
+            large_shadow_pde->global_page = large_guest_pde->global_page;
+
+        } else {
+            if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
+                PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
+                return -1;
+            }
+        }
+    } else if (shadow_pde_access == PT_ACCESS_WRITE_ERROR) {
+        if (shdw_reg->flags.write == 0) {
+            if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
+                PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
+                return -1;
+            }
+        }
+    } else {
+        PrintError("Error in large page fault handler...\n");
+        PrintError("This case should have been handled at the top level handler\n");
+        return -1;
+    }
+
+    PrintDebug("Returning from large page->large page fault handler\n");
+    return 0;
+}
+
 /* If we start to optimize we should look up the guest pages in the cache... */
 static inline int handle_shadow_invlpg_32(struct guest_info * info, addr_t vaddr) {
@@ -427,7 +536,7 @@ static inline int handle_shadow_invlpg_32(struct guest_info * info, addr_t vaddr
     pde32_t * guest_pd = NULL;
     pde32_t * guest_pde;
 
-    if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
+    if (v3_gpa_to_hva(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
         PrintError("Invalid Guest PDE Address: 0x%p\n", (void *)guest_cr3);
         return -1;
     }
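
The address computation in the new PDE-level handler reduces to shift-and-mask arithmetic on the 4MB frame: the guest PDE's page_base_addr supplies the top bits of the physical frame, and the low 22 bits of the faulting virtual address supply the offset within it. Below is a minimal standalone sketch of that computation; the two macro definitions are local stand-ins for Palacios' BASE_TO_PAGE_ADDR_4MB and PAGE_OFFSET_4MB (presumably provided by its paging headers), reproduced here only so the example compiles on its own.

#include <stdio.h>
#include <stdint.h>

typedef uintptr_t addr_t;

/* Local stand-ins for the Palacios macros used in the patch (assumption). */
#define PAGE_OFFSET_4MB(x)       ((addr_t)(x) & 0x3fffff)   /* low 22 bits */
#define BASE_TO_PAGE_ADDR_4MB(x) ((addr_t)(x) << 22)        /* PDE base field -> frame PA */

int main(void) {
    addr_t fault_addr = 0x08248123;  /* faulting guest virtual address (example value) */
    addr_t pde_base   = 0x20;        /* page_base_addr field of a 4MB guest PDE (example value) */

    /* Mirrors the handler's:
     *   guest_fault_pa = BASE_TO_PAGE_ADDR_4MB(large_guest_pde->page_base_addr)
     *                    + PAGE_OFFSET_4MB(fault_addr);
     */
    addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_4MB(pde_base) + PAGE_OFFSET_4MB(fault_addr);

    printf("guest_fault_pa = 0x%lx\n", (unsigned long)guest_fault_pa);  /* 0x8248123 */
    return 0;
}

With these inputs the frame base is 0x20 << 22 = 0x8000000 and the offset is 0x248123, so the handler would look up guest physical address 0x8248123 in the VM's memory map.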
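The PTE- and PDE-level handlers in this patch also share one dispatch pattern: PT_ACCESS_OK means stale shadow state that a guest re-entry will flush, PT_ACCESS_NOT_PRESENT maps the page when the region is allocated and readable (via v3_gpa_to_hpa), and PT_ACCESS_WRITE_ERROR either propagates writability or defers to the region's unhandled() callback. A minimal sketch of that control flow follows, with mock flags standing in for struct v3_mem_region and a stub standing in for the unhandled() callback (both assumptions, kept only so the sketch runs standalone).

#include <stdio.h>

typedef enum { PT_ACCESS_OK, PT_ACCESS_NOT_PRESENT, PT_ACCESS_WRITE_ERROR } pt_access_status_t;

/* Mock of the permission flags a v3_mem_region carries (names only). */
struct region_flags { unsigned alloced : 1; unsigned read : 1; unsigned write : 1; };

/* Stand-in for shdw_reg->unhandled(...). */
static int region_unhandled(void) {
    printf("deferring to the region's unhandled() callback\n");
    return 0;
}

/* Mirrors the dispatch shared by the PTE- and PDE-level handlers. */
static int dispatch_fault(pt_access_status_t access, struct region_flags f) {
    if (access == PT_ACCESS_OK) {
        /* Inconsistent shadow state; guest re-entry flushes the TLB. */
        return 0;
    } else if (access == PT_ACCESS_NOT_PRESENT) {
        if (f.alloced && f.read) {
            /* Here the real code translates gpa -> hpa and fills the shadow entry. */
            printf("map page, writable=%d\n", (int)f.write);
            return 0;
        }
        return region_unhandled();   /* unbacked or unreadable region */
    } else if (access == PT_ACCESS_WRITE_ERROR) {
        if (f.write) {
            printf("propagate the guest's writable bit\n");
            return 0;
        }
        return region_unhandled();   /* write to a read-only region */
    }
    /* Anything else should have been handled by the top-level handler. */
    return -1;
}

int main(void) {
    struct region_flags ram = { 1, 1, 1 };
    return dispatch_fault(PT_ACCESS_NOT_PRESENT, ram);
}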