X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm_direct_paging_64.h;h=fc94d33cb92f7ae36bdb93c9903c622a5cacb55a;hb=b93aeabee44c82139a9afd065dfcaca8ac6688ad;hp=d79693bbb48f924365500e71cff1fab2ad856ad9;hpb=1fe82881720f7f9f64f789871f763aca93b47a7e;p=palacios.releases.git

diff --git a/palacios/src/palacios/vmm_direct_paging_64.h b/palacios/src/palacios/vmm_direct_paging_64.h
index d79693b..fc94d33 100644
--- a/palacios/src/palacios/vmm_direct_paging_64.h
+++ b/palacios/src/palacios/vmm_direct_paging_64.h
@@ -28,9 +28,10 @@
 #include 
 
+
 static inline int handle_passthrough_pagefault_64(struct guest_info * info,
-                                                  addr_t fault_addr,
-                                                  pf_error_t error_code) {
+                                                   addr_t fault_addr,
+                                                   pf_error_t error_code) {
     pml4e64_t * pml = NULL;
     pdpe64_t * pdpe = NULL;
     pde64_t * pde = NULL;
@@ -43,16 +44,18 @@ static inline int handle_passthrough_pagefault_64(struct guest_info * info,
     int pte_index = PTE64_INDEX(fault_addr);
 
-    struct v3_shadow_region * region = v3_get_shadow_region(info, fault_addr);
+
+
+    struct v3_shadow_region * region = v3_get_shadow_region(info->vm_info, info->cpu_id, fault_addr);
 
-    if ((region == NULL) ||
-        (region->host_type == SHDW_REGION_INVALID)) {
+    if (region == NULL) {
         PrintError("Invalid region in passthrough page fault 64, addr=%p\n",
                    (void *)fault_addr);
         return -1;
     }
 
-    host_addr = v3_get_shadow_addr(region, fault_addr);
+    host_addr = v3_get_shadow_addr(region, info->cpu_id, fault_addr);
+
     //
     // Lookup the correct PML address based on the PAGING MODE
     if (info->shdw_pg_mode == SHADOW_PAGING) {
@@ -65,8 +68,11 @@ static inline int handle_passthrough_pagefault_64(struct guest_info * info,
 
     if (pml[pml_index].present == 0) {
         pdpe = (pdpe64_t *)create_generic_pt_page();
 
-        pml[pml_index].present = 1; // Set default PML Flags...
+        pml[pml_index].present = 1;
+        pml[pml_index].writable = 1;
+        pml[pml_index].user_page = 1;
+
         pml[pml_index].pdp_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pdpe));
     } else {
         pdpe = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pml[pml_index].pdp_base_addr));
@@ -76,8 +82,11 @@ static inline int handle_passthrough_pagefault_64(struct guest_info * info,
 
     if (pdpe[pdpe_index].present == 0) {
         pde = (pde64_t *)create_generic_pt_page();
 
-        pdpe[pdpe_index].present = 1; // Set default PDPE Flags...
+        pdpe[pdpe_index].present = 1;
+        pdpe[pdpe_index].writable = 1;
+        pdpe[pdpe_index].user_page = 1;
+
         pdpe[pdpe_index].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
     } else {
         pde = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pdpe[pdpe_index].pd_base_addr));
@@ -102,37 +111,84 @@ static inline int handle_passthrough_pagefault_64(struct guest_info * info,
 
     if (pte[pte_index].present == 0) {
         pte[pte_index].user_page = 1;
 
-        if (region->host_type == SHDW_REGION_ALLOCATED) {
+        if ((region->flags.alloced == 1) &&
+            (region->flags.read == 1)) {
             // Full access
             pte[pte_index].present = 1;
-            pte[pte_index].writable = 1;
-            pte[pte_index].page_base_addr = PAGE_BASE_ADDR(host_addr);
-
-        } else if (region->host_type == SHDW_REGION_WRITE_HOOK) {
-            // Only trap writes
-            pte[pte_index].present = 1;
-            pte[pte_index].writable = 0;
+            if (region->flags.write == 1) {
+                pte[pte_index].writable = 1;
+            } else {
+                pte[pte_index].writable = 0;
+            }
 
             pte[pte_index].page_base_addr = PAGE_BASE_ADDR(host_addr);
-
-        } else if (region->host_type == SHDW_REGION_FULL_HOOK) {
-            // trap all accesses
-            return v3_handle_mem_full_hook(info, fault_addr, fault_addr, region, error_code);
-
         } else {
-            PrintError("Unknown Region Type...\n");
-            return -1;
+            return region->unhandled(info, fault_addr, fault_addr, region, error_code);
         }
+    } else {
+        // We fix all permissions on the first pass,
+        // so we only get here if its an unhandled exception
+
+        return region->unhandled(info, fault_addr, fault_addr, region, error_code);
     }
-
-    if ( (region->host_type == SHDW_REGION_WRITE_HOOK) &&
-         (error_code.write == 1) ) {
-        return v3_handle_mem_wr_hook(info, fault_addr, fault_addr, region, error_code);
-    }
-
-    return 0;
-}
+
+    return 0;
+}
+
+static inline int invalidate_addr_64(struct guest_info * info, addr_t inv_addr) {
+    pml4e64_t * pml = NULL;
+    pdpe64_t * pdpe = NULL;
+    pde64_t * pde = NULL;
+    pte64_t * pte = NULL;
+
+
+    // TODO:
+    // Call INVLPGA
+
+    // clear the page table entry
+    int pml_index = PML4E64_INDEX(inv_addr);
+    int pdpe_index = PDPE64_INDEX(inv_addr);
+    int pde_index = PDE64_INDEX(inv_addr);
+    int pte_index = PTE64_INDEX(inv_addr);
+
+
+    // Lookup the correct PDE address based on the PAGING MODE
+    if (info->shdw_pg_mode == SHADOW_PAGING) {
+        pml = CR3_TO_PML4E64_VA(info->ctrl_regs.cr3);
+    } else {
+        pml = CR3_TO_PML4E64_VA(info->direct_map_pt);
+    }
+
+    if (pml[pml_index].present == 0) {
+        return 0;
+    }
+
+    pdpe = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pml[pml_index].pdp_base_addr));
+
+    if (pdpe[pdpe_index].present == 0) {
+        return 0;
+    } else if (pdpe[pdpe_index].large_page == 1) {
+        pdpe[pdpe_index].present = 0;
+        return 0;
+    }
+
+    pde = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pdpe[pdpe_index].pd_base_addr));
+
+    if (pde[pde_index].present == 0) {
+        return 0;
+    } else if (pde[pde_index].large_page == 1) {
+        pde[pde_index].present = 0;
+        return 0;
+    }
+
+    pte = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pde[pde_index].pt_base_addr));
+
+    pte[pte_index].present = 0;
+
+    return 0;
+}
+
 #endif
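
Note: the substance of this patch is the switch from the old SHDW_REGION_* host types to per-region permission flags. On the first fault against a region the handler now populates the PTE directly from region->flags (read/write/alloced), and any access it cannot satisfy is routed to the region's unhandled() callback instead of the removed v3_handle_mem_wr_hook / v3_handle_mem_full_hook paths. The standalone sketch below models just that decision in isolation; the struct names and the helper map_region_to_pte are simplified stand-ins for Palacios' v3_shadow_region flags and 64-bit PTE bitfields, not code from the tree.

#include <stdio.h>
#include <stdint.h>

/* simplified stand-ins for v3_shadow_region flags and pte64_t bits */
struct region_flags { uint8_t read, write, alloced; };
struct pte_bits     { uint8_t present, writable, user_page; };

/* Returns 0 if the PTE can be populated, -1 if the fault must be
 * deferred to the region's unhandled() callback (e.g. an unbacked
 * or non-readable region). */
static int map_region_to_pte(struct region_flags f, struct pte_bits * pte) {
    pte->user_page = 1;

    if (f.alloced && f.read) {
        pte->present = 1;
        /* write-hooked regions stay read-only so stores still fault */
        pte->writable = f.write ? 1 : 0;
        return 0;
    }

    return -1; /* defer to region->unhandled() */
}

int main(void) {
    struct pte_bits pte = {0, 0, 0};
    struct region_flags full    = {1, 1, 1}; /* read, write, alloced */
    struct region_flags wr_hook = {1, 0, 1}; /* readable, write-hooked */

    map_region_to_pte(full, &pte);
    printf("full access: present=%d writable=%d\n", pte.present, pte.writable);

    map_region_to_pte(wr_hook, &pte);
    printf("write hook:  present=%d writable=%d\n", pte.present, pte.writable);

    return 0;
}

A write-hooked region (read = 1, write = 0) thus maps present but read-only: reads run at full speed while stores still fault and reach the hook, the same behavior the removed SHDW_REGION_WRITE_HOOK branch implemented explicitly.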
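The new invalidate_addr_64() clears exactly one mapping per call (a 4KB PTE, or a whole large-page entry if the walk hits one) and leaves the hardware TLB flush as a TODO. A caller that changes a region's backing or flags therefore has to invalidate every page the region covers so the next access refaults through handle_passthrough_pagefault_64(). A hypothetical sketch, assuming only the patch above; the helper name invalidate_region_64 and the loop are illustrative, not part of Palacios:

/* Hypothetical helper: drop every 4KB mapping covering [start, end).
 * Large pages are cleared wholesale by invalidate_addr_64(), so the
 * 4KB stride is merely redundant, not incorrect, across them. */
static int invalidate_region_64(struct guest_info * info,
                                addr_t start, addr_t end) {
    addr_t addr;

    for (addr = start & ~(addr_t)0xfff; addr < end; addr += 4096) {
        /* defensive check; the version above currently always returns 0 */
        if (invalidate_addr_64(info, addr) == -1) {
            return -1;
        }
    }

    return 0;
}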