X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm_direct_paging_64.h;h=6afb9fb88f7ecad95383935f314f8fb6e11c2867;hb=a48fed88738b625fea8a606f5b327d32db91f009;hp=9a6eeed9da8c1d8a4e5ca90f5b8f9fe5e7d97ced;hpb=4f65d7ae7fd62f2a52735c7b69fe1da65b9fed9f;p=palacios.git

diff --git a/palacios/src/palacios/vmm_direct_paging_64.h b/palacios/src/palacios/vmm_direct_paging_64.h
index 9a6eeed..6afb9fb 100644
--- a/palacios/src/palacios/vmm_direct_paging_64.h
+++ b/palacios/src/palacios/vmm_direct_paging_64.h
@@ -27,126 +27,283 @@
 #include 
 #include 
 
-#ifndef DEBUG_NESTED_PAGING
-#undef PrintDebug
-#define PrintDebug(fmt, args...)
-#endif
+/* This always builds 4-level page tables, but large pages are allowed */
+// Reference: AMD64 Architecture Programmer's Manual Vol. 2, Ch. 5, "Page Translation and Protection"
 
-static inline int handle_passthrough_pagefault_64(struct guest_info * info,
-                                                  addr_t fault_addr,
-                                                  pf_error_t error_code) {
-    pml4e64_t * pml = NULL;
-    pdpe64_t * pdpe = NULL;
-    pde64_t * pde = NULL;
-    pte64_t * pte = NULL;
-    addr_t host_addr = 0;
+static inline int handle_passthrough_pagefault_64(struct guest_info * core, addr_t fault_addr, pf_error_t error_code,
+                                                  addr_t *actual_start, addr_t *actual_end) {
+    pml4e64_t * pml = NULL;
+    pdpe64_t * pdpe = NULL;
+    pde64_t * pde = NULL;
+    pde64_2MB_t * pde2mb = NULL;
+    pte64_t * pte = NULL;
+    addr_t host_addr = 0;
 
-    int pml_index = PML4E64_INDEX(fault_addr);
+    int pml_index = PML4E64_INDEX(fault_addr);
     int pdpe_index = PDPE64_INDEX(fault_addr);
-    int pde_index = PDE64_INDEX(fault_addr);
-    int pte_index = PTE64_INDEX(fault_addr);
+    int pde_index = PDE64_INDEX(fault_addr);
+    int pte_index = PTE64_INDEX(fault_addr);
 
+    struct v3_mem_region * region = v3_get_mem_region(core->vm_info, core->vcpu_id, fault_addr);
+    int page_size = PAGE_SIZE_4KB;
 
-
-
-    struct v3_shadow_region * region = v3_get_shadow_region(info, fault_addr);
-
-    if ((region == NULL) ||
-        (region->host_type == SHDW_REGION_INVALID)) {
-        PrintError("Invalid region in passthrough page fault 64, addr=%p\n",
-                   (void *)fault_addr);
+    if (region == NULL) {
+        PrintError(core->vm_info, core, "%s: invalid region, addr=%p\n", __FUNCTION__, (void *)fault_addr);
         return -1;
     }
 
-    host_addr = v3_get_shadow_addr(region, fault_addr);
-    //
+    /* Check if:
+     * 1. the guest is configured to use large pages and
+     * 2. the memory regions can be referenced by a large page
+     */
+    if ((core->use_large_pages == 1) || (core->use_giant_pages == 1)) {
+        page_size = v3_get_max_page_size(core, fault_addr, LONG);
+    }
+
+    PrintDebug(core->vm_info, core, "Using page size of %dKB\n", page_size / 1024);
+
     // Lookup the correct PML address based on the PAGING MODE
-    if (info->shdw_pg_mode == SHADOW_PAGING) {
-        pml = CR3_TO_PML4E64_VA(info->ctrl_regs.cr3);
+    if (core->shdw_pg_mode == SHADOW_PAGING) {
+        pml = CR3_TO_PML4E64_VA(core->ctrl_regs.cr3);
     } else {
-        pml = CR3_TO_PML4E64_VA(info->direct_map_pt);
+        pml = CR3_TO_PML4E64_VA(core->direct_map_pt);
     }
 
     //Fix up the PML entry
     if (pml[pml_index].present == 0) {
-        pdpe = (pdpe64_t *)create_generic_pt_page();
+        pdpe = (pdpe64_t *)create_generic_pt_page(core);
 
         // Set default PML Flags...
         pml[pml_index].present = 1;
         pml[pml_index].writable = 1;
         pml[pml_index].user_page = 1;
-        pml[pml_index].pdp_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pdpe));
+        pml[pml_index].pdp_base_addr = PAGE_BASE_ADDR_4KB((addr_t)V3_PAddr(pdpe));
     } else {
-        pdpe = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pml[pml_index].pdp_base_addr));
+        pdpe = V3_VAddr((void*)BASE_TO_PAGE_ADDR_4KB(pml[pml_index].pdp_base_addr));
     }
 
     // Fix up the PDPE entry
     if (pdpe[pdpe_index].present == 0) {
-        pde = (pde64_t *)create_generic_pt_page();
+        pde = (pde64_t *)create_generic_pt_page(core);
 
         // Set default PDPE Flags...
         pdpe[pdpe_index].present = 1;
         pdpe[pdpe_index].writable = 1;
         pdpe[pdpe_index].user_page = 1;
-        pdpe[pdpe_index].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
+        pdpe[pdpe_index].pd_base_addr = PAGE_BASE_ADDR_4KB((addr_t)V3_PAddr(pde));
     } else {
-        pde = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pdpe[pdpe_index].pd_base_addr));
+        pde = V3_VAddr((void*)BASE_TO_PAGE_ADDR_4KB(pdpe[pdpe_index].pd_base_addr));
     }
 
+    // Fix up the 2MiB PDE and exit here
+    if (page_size == PAGE_SIZE_2MB) {
+        pde2mb = (pde64_2MB_t *)pde; // all but these two lines are the same for PTE
+        pde2mb[pde_index].large_page = 1;
+
+        *actual_start = BASE_TO_PAGE_ADDR_2MB(PAGE_BASE_ADDR_2MB(fault_addr));
+        *actual_end = BASE_TO_PAGE_ADDR_2MB(PAGE_BASE_ADDR_2MB(fault_addr)+1)-1;
+
+        if (pde2mb[pde_index].present == 0) {
+            pde2mb[pde_index].user_page = 1;
+
+            if ( (region->flags.alloced == 1) &&
+                 (region->flags.read == 1)) {
+                // Full access
+                pde2mb[pde_index].present = 1;
+
+                if (region->flags.write == 1) {
+                    pde2mb[pde_index].writable = 1;
+                } else {
+                    pde2mb[pde_index].writable = 0;
+                }
+
+                if (v3_gpa_to_hpa(core, fault_addr, &host_addr) == -1) {
+                    PrintError(core->vm_info, core, "Error: Could not translate fault addr (%p)\n", (void *)fault_addr);
+                    return -1;
+                }
+
+                pde2mb[pde_index].page_base_addr = PAGE_BASE_ADDR_2MB(host_addr);
+            } else {
+                return region->unhandled(core, fault_addr, fault_addr, region, error_code);
+            }
+        } else {
+            // We fix all permissions on the first pass,
+            // so we only get here if it's an unhandled exception
+
+            return region->unhandled(core, fault_addr, fault_addr, region, error_code);
+        }
+
+        // All done
+        return 0;
+    }
+
+    // Continue with the 4KiB page hierarchy
+
+    *actual_start = BASE_TO_PAGE_ADDR_4KB(PAGE_BASE_ADDR_4KB(fault_addr));
+    *actual_end = BASE_TO_PAGE_ADDR_4KB(PAGE_BASE_ADDR_4KB(fault_addr)+1)-1;
 
     // Fix up the PDE entry
     if (pde[pde_index].present == 0) {
-        pte = (pte64_t *)create_generic_pt_page();
+        pte = (pte64_t *)create_generic_pt_page(core);
 
         pde[pde_index].present = 1;
         pde[pde_index].writable = 1;
         pde[pde_index].user_page = 1;
-        pde[pde_index].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
+        pde[pde_index].pt_base_addr = PAGE_BASE_ADDR_4KB((addr_t)V3_PAddr(pte));
     } else {
-        pte = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pde[pde_index].pt_base_addr));
+        pte = V3_VAddr((void*)BASE_TO_PAGE_ADDR_4KB(pde[pde_index].pt_base_addr));
     }
 
-
     // Fix up the PTE entry
     if (pte[pte_index].present == 0) {
         pte[pte_index].user_page = 1;
 
-        if (region->host_type == SHDW_REGION_ALLOCATED) {
+        if ((region->flags.alloced == 1) &&
+            (region->flags.read == 1)) {
             // Full access
             pte[pte_index].present = 1;
-            pte[pte_index].writable = 1;
 
-            pte[pte_index].page_base_addr = PAGE_BASE_ADDR(host_addr);
-
-        } else if (region->host_type == SHDW_REGION_WRITE_HOOK) {
-            // Only trap writes
-            pte[pte_index].present = 1;
-            pte[pte_index].writable = 0;
+            if (region->flags.write == 1) {
+                pte[pte_index].writable = 1;
+            } else {
+                pte[pte_index].writable = 0;
+            }
 
-            pte[pte_index].page_base_addr = PAGE_BASE_ADDR(host_addr);
-
-        } else if (region->host_type == SHDW_REGION_FULL_HOOK) {
-            // trap all accesses
-            return v3_handle_mem_full_hook(info, fault_addr, fault_addr, region, error_code);
+            if (v3_gpa_to_hpa(core, fault_addr, &host_addr) == -1) {
+                PrintError(core->vm_info, core, "Error: Could not translate fault addr (%p)\n", (void *)fault_addr);
+                return -1;
+            }
 
+            pte[pte_index].page_base_addr = PAGE_BASE_ADDR_4KB(host_addr);
         } else {
-            PrintError("Unknown Region Type...\n");
-            return -1;
+            return region->unhandled(core, fault_addr, fault_addr, region, error_code);
         }
+    } else {
+        // We fix all permissions on the first pass,
+        // so we only get here if it's an unhandled exception
+
+        return region->unhandled(core, fault_addr, fault_addr, region, error_code);
     }
-
-    if ( (region->host_type == SHDW_REGION_WRITE_HOOK) &&
-         (error_code.write == 1) ) {
-        return v3_handle_mem_wr_hook(info, fault_addr, fault_addr, region, error_code);
+
+    return 0;
+}
+
+static inline int invalidate_addr_64_internal(struct guest_info * core, addr_t inv_addr,
+                                              addr_t *actual_start, uint64_t *actual_size) {
+    pml4e64_t * pml = NULL;
+    pdpe64_t * pdpe = NULL;
+    pde64_t * pde = NULL;
+    pte64_t * pte = NULL;
+
+
+    // TODO:
+    // Call INVLPGA
+
+    // clear the page table entry
+    int pml_index = PML4E64_INDEX(inv_addr);
+    int pdpe_index = PDPE64_INDEX(inv_addr);
+    int pde_index = PDE64_INDEX(inv_addr);
+    int pte_index = PTE64_INDEX(inv_addr);
+
+
+    // Lookup the correct PML address based on the PAGING MODE
+    if (core->shdw_pg_mode == SHADOW_PAGING) {
+        pml = CR3_TO_PML4E64_VA(core->ctrl_regs.cr3);
+    } else {
+        pml = CR3_TO_PML4E64_VA(core->direct_map_pt);
     }
 
+    if (pml[pml_index].present == 0) {
+        *actual_start = BASE_TO_PAGE_ADDR_512GB(PAGE_BASE_ADDR_512GB(inv_addr));
+        *actual_size = PAGE_SIZE_512GB;
+        return 0;
+    }
+
+    pdpe = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pml[pml_index].pdp_base_addr));
+
+    if (pdpe[pdpe_index].present == 0) {
+        *actual_start = BASE_TO_PAGE_ADDR_1GB(PAGE_BASE_ADDR_1GB(inv_addr));
+        *actual_size = PAGE_SIZE_1GB;
+        return 0;
+    } else if (pdpe[pdpe_index].large_page == 1) { // 1GiB
+        pdpe[pdpe_index].present = 0;
+        pdpe[pdpe_index].writable = 0;
+        pdpe[pdpe_index].user_page = 0;
+        *actual_start = BASE_TO_PAGE_ADDR_1GB(PAGE_BASE_ADDR_1GB(inv_addr));
+        *actual_size = PAGE_SIZE_1GB;
+        return 0;
+    }
+
+    pde = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pdpe[pdpe_index].pd_base_addr));
+
+    if (pde[pde_index].present == 0) {
+        *actual_start = BASE_TO_PAGE_ADDR_2MB(PAGE_BASE_ADDR_2MB(inv_addr));
+        *actual_size = PAGE_SIZE_2MB;
+        return 0;
+    } else if (pde[pde_index].large_page == 1) { // 2MiB
+        pde[pde_index].present = 0;
+        pde[pde_index].writable = 0;
+        pde[pde_index].user_page = 0;
+        *actual_start = BASE_TO_PAGE_ADDR_2MB(PAGE_BASE_ADDR_2MB(inv_addr));
+        *actual_size = PAGE_SIZE_2MB;
+        return 0;
+    }
+
+    pte = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pde[pde_index].pt_base_addr));
+
+    pte[pte_index].present = 0; // 4KiB
+    pte[pte_index].writable = 0;
+    pte[pte_index].user_page = 0;
+
+    *actual_start = BASE_TO_PAGE_ADDR_4KB(PAGE_BASE_ADDR_4KB(inv_addr));
+    *actual_size = PAGE_SIZE_4KB;
+    return 0;
 }
 
+static inline int invalidate_addr_64(struct guest_info * core, addr_t inv_addr,
+                                     addr_t *actual_start, addr_t *actual_end)
+{
+    uint64_t len;
+    int rc;
+
+    rc = invalidate_addr_64_internal(core, inv_addr, actual_start, &len);
+
+    *actual_end = *actual_start + len - 1;
+
+    return rc;
+}
+
+static inline int invalidate_addr_64_range(struct guest_info * core,
+                                           addr_t inv_addr_start, addr_t inv_addr_end,
+                                           addr_t *actual_start, addr_t *actual_end)
+{
+    addr_t next;
+    addr_t start;
+    uint64_t len;
+    int rc;
+
+    for (next = inv_addr_start; next <= inv_addr_end; ) {
+        rc = invalidate_addr_64_internal(core, next, &start, &len);
+        if (next == inv_addr_start) {
+            // first iteration, capture where we start invalidating
+            *actual_start = start;
+        }
+        if (rc) {
+            return rc;
+        }
+        next = start + len;
+        *actual_end = next;
+    }
+    // last iteration, actual_end is off by one
+    (*actual_end)--;
+    return 0;
+}
+
+
 #endif
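
Note: the fault handler in this patch splits the faulting guest-physical address into four 9-bit table indices (PML4E64_INDEX, PDPE64_INDEX, PDE64_INDEX, PTE64_INDEX) and walks or builds one table level per index, stopping at the PDE level when a 2MB large page is used. The standalone sketch below illustrates that decomposition. It is a minimal example assuming the standard AMD64 4-level layout (AMD64 APM Vol. 2, Ch. 5); the SKETCH_* macros are illustrative stand-ins, not the actual definitions from Palacios' paging headers.

    #include <stdint.h>
    #include <stdio.h>

    /* Each table level holds 512 entries, so each index is 9 bits wide. */
    #define SKETCH_PML4_INDEX(a)  (((uint64_t)(a) >> 39) & 0x1ffULL)  /* bits 47:39 */
    #define SKETCH_PDPE_INDEX(a)  (((uint64_t)(a) >> 30) & 0x1ffULL)  /* bits 38:30 */
    #define SKETCH_PDE_INDEX(a)   (((uint64_t)(a) >> 21) & 0x1ffULL)  /* bits 29:21 */
    #define SKETCH_PTE_INDEX(a)   (((uint64_t)(a) >> 12) & 0x1ffULL)  /* bits 20:12 */

    int main(void) {
        uint64_t fault_addr = 0x7fff12345678ULL; /* arbitrary example address */

        /* A 2MB mapping folds the PTE index bits (20:12) into the page
         * offset, which is why the 2MB path in the patch stops at the
         * PDE level and never touches a PTE. */
        printf("pml4=%llu pdpe=%llu pde=%llu pte=%llu\n",
               (unsigned long long)SKETCH_PML4_INDEX(fault_addr),
               (unsigned long long)SKETCH_PDPE_INDEX(fault_addr),
               (unsigned long long)SKETCH_PDE_INDEX(fault_addr),
               (unsigned long long)SKETCH_PTE_INDEX(fault_addr));
        return 0;
    }

The same decomposition explains the invalidation helpers: invalidate_addr_64_internal reports, via actual_start and actual_size, the naturally aligned region covered by the level at which it stopped (512GB, 1GB, 2MB, or 4KB), and invalidate_addr_64_range simply re-invokes it until the reported regions cover the requested range.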