X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm_direct_paging_64.h;h=217373ab037330bdc9080ede1d90da499d106b3a;hb=3a64514e2fbdb79da57b289fc94b336b78046ba2;hp=d248522c1a14b5af9cc5fe98f1f3ba327a4736f8;hpb=47c7637661a01c0c053b54acb418af21e9a39b31;p=palacios.git

diff --git a/palacios/src/palacios/vmm_direct_paging_64.h b/palacios/src/palacios/vmm_direct_paging_64.h
index d248522..217373a 100644
--- a/palacios/src/palacios/vmm_direct_paging_64.h
+++ b/palacios/src/palacios/vmm_direct_paging_64.h
@@ -29,13 +29,6 @@
 // Reference: AMD Software Developer Manual Vol.2 Ch.5 "Page Translation and Protection"
 
 
-static int get_page_size() {
-
-    // Need to fix this....
-    return PAGE_SIZE_4KB;
-}
-
-
 static inline int handle_passthrough_pagefault_64(struct guest_info * core, addr_t fault_addr, pf_error_t error_code) {
     pml4e64_t * pml = NULL;
     pdpe64_t * pdpe = NULL;
@@ -49,18 +42,24 @@ static inline int handle_passthrough_pagefault_64(struct guest_info * core, addr
     int pde_index = PDE64_INDEX(fault_addr);
     int pte_index = PTE64_INDEX(fault_addr);
 
-    struct v3_mem_region * region = v3_get_mem_region(core->vm_info, core->cpu_id, fault_addr);
+    struct v3_mem_region * region = v3_get_mem_region(core->vm_info, core->vcpu_id, fault_addr);
     int page_size = PAGE_SIZE_4KB;
 
+    if (region == NULL) {
+        PrintError("%s: invalid region, addr=%p\n", __FUNCTION__, (void *)fault_addr);
+        return -1;
+    }
 
     /* Check if:
      * 1. the guest is configured to use large pages and
      * 2. the memory regions can be referenced by a large page
      */
-    if ((core->use_large_pages == 1) ) {
-        page_size = get_page_size();
+    if ((core->use_large_pages == 1) || (core->use_giant_pages == 1)) {
+        page_size = v3_get_max_page_size(core, fault_addr, LONG);
     }
 
+    PrintDebug("Using page size of %dKB\n", page_size / 1024);
+
 
     // Lookup the correct PML address based on the PAGING MODE
     if (core->shdw_pg_mode == SHADOW_PAGING) {
@@ -97,7 +96,44 @@ static inline int handle_passthrough_pagefault_64(struct guest_info * core, addr
 
         pde = V3_VAddr((void*)BASE_TO_PAGE_ADDR_4KB(pdpe[pdpe_index].pd_base_addr));
     }
-
+    // Fix up the 2MiB PDE and exit here
+    if (page_size == PAGE_SIZE_2MB) {
+        pde2mb = (pde64_2MB_t *)pde; // all but these two lines are the same for PTE
+        pde2mb[pde_index].large_page = 1;
+
+        if (pde2mb[pde_index].present == 0) {
+            pde2mb[pde_index].user_page = 1;
+
+            if ( (region->flags.alloced == 1) &&
+                 (region->flags.read == 1)) {
+                // Full access
+                pde2mb[pde_index].present = 1;
+
+                if (region->flags.write == 1) {
+                    pde2mb[pde_index].writable = 1;
+                } else {
+                    pde2mb[pde_index].writable = 0;
+                }
+
+                if (v3_gpa_to_hpa(core, fault_addr, &host_addr) == -1) {
+                    PrintError("Error Could not translate fault addr (%p)\n", (void *)fault_addr);
+                    return -1;
+                }
+
+                pde2mb[pde_index].page_base_addr = PAGE_BASE_ADDR_2MB(host_addr);
+            } else {
+                return region->unhandled(core, fault_addr, fault_addr, region, error_code);
+            }
+        } else {
+            // We fix all permissions on the first pass,
+            // so we only get here if its an unhandled exception
+
+            return region->unhandled(core, fault_addr, fault_addr, region, error_code);
+        }
+
+        // All done
+        return 0;
+    }
 
     // Continue with the 4KiB page heirarchy
 
@@ -182,6 +218,8 @@ static inline int invalidate_addr_64(struct guest_info * core, addr_t inv_addr)
         return 0;
     } else if (pdpe[pdpe_index].large_page == 1) { // 1GiB
         pdpe[pdpe_index].present = 0;
+        pdpe[pdpe_index].writable = 0;
+        pdpe[pdpe_index].user_page = 0;
         return 0;
     }
 
@@ -191,12 +229,16 @@ static inline int invalidate_addr_64(struct guest_info * core, addr_t inv_addr)
         return 0;
     } else if (pde[pde_index].large_page == 1) { // 2MiB
         pde[pde_index].present = 0;
+        pde[pde_index].writable = 0;
+        pde[pde_index].user_page = 0;
         return 0;
     }
 
     pte = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pde[pde_index].pt_base_addr));
 
     pte[pte_index].present = 0; // 4KiB
+    pte[pte_index].writable = 0;
+    pte[pte_index].user_page = 0;
 
     return 0;
 }
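
For readers tracing the new 2MiB branch: on the first fault it marks the PDE as a large page, grants user access, copies the region's read/write permissions, translates the faulting guest-physical address to a host-physical one with v3_gpa_to_hpa(), and installs the 2MiB frame; a later fault on an already-present entry is treated as unhandled. The standalone sketch below mirrors that control flow under simplified, hypothetical types; fake_pde_2mb_t, fake_region_flags_t, fixup_2mb_pde, and FAKE_PAGE_BASE_ADDR_2MB are illustrative stand-ins, not Palacios definitions.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for Palacios' pde64_2MB_t and the v3_mem_region
 * permission flags; the real structures are hardware-defined bitfields. */
typedef struct {
    unsigned present    : 1;
    unsigned writable   : 1;
    unsigned user_page  : 1;
    unsigned large_page : 1;
    uint64_t page_base_addr;      /* 2MiB frame number */
} fake_pde_2mb_t;

typedef struct {
    int read, write, alloced;
} fake_region_flags_t;

/* Drop the low 21 offset bits to get the 2MiB frame number (assumption:
 * this mirrors what Palacios' PAGE_BASE_ADDR_2MB macro computes). */
#define FAKE_PAGE_BASE_ADDR_2MB(x) ((x) >> 21)

/* Mirrors the added 2MiB fault path. host_addr stands in for the result of
 * v3_gpa_to_hpa(), and returning -1 stands in for deferring to the
 * region->unhandled() callback. */
static int fixup_2mb_pde(fake_pde_2mb_t * pde, fake_region_flags_t flags,
                         uint64_t host_addr) {
    pde->large_page = 1;

    if (pde->present) {
        /* Permissions were fixed on the first fault, so a fault on a
         * present entry is an unhandled access. */
        return -1;
    }

    pde->user_page = 1;

    if (!(flags.alloced && flags.read)) {
        return -1;                /* defer to the region's fault handler */
    }

    pde->present  = 1;
    pde->writable = flags.write ? 1 : 0;
    pde->page_base_addr = FAKE_PAGE_BASE_ADDR_2MB(host_addr);
    return 0;
}

int main(void) {
    fake_pde_2mb_t pde = { 0 };
    fake_region_flags_t flags = { .read = 1, .write = 1, .alloced = 1 };

    /* Map a fault backed by host address 1GiB as a writable 2MiB page. */
    if (fixup_2mb_pde(&pde, flags, 0x40000000ULL) == 0) {
        printf("mapped 2MiB frame %#llx, writable=%d\n",
               (unsigned long long)pde.page_base_addr, (int)pde.writable);
    }
    return 0;
}

As the patch's own comment notes, the 4KiB PTE path differs only in the cast and the large_page bit. Likewise, invalidate_addr_64() now clears writable and user_page along with present at every level, presumably so stale permission bits do not linger in an invalidated entry.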