#include <palacios/vm_guest_mem.h>
#include <palacios/vm_guest.h>
-#define DEBUG_NESTED_PAGING 1
-
-#ifndef DEBUG_NESTED_PAGING
-#undef PrintDebug
-#define PrintDebug(fmt, args...)
-#endif
static inline int handle_passthrough_pagefault_64(struct guest_info * info,
- addr_t fault_addr,
- pf_error_t error_code) {
+ addr_t fault_addr,
+ pf_error_t error_code) {
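+ // Map the faulting guest physical address into the passthrough (nested)
+ // page tables, honoring the permission flags of the backing memory region.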
pml4e64_t * pml = NULL;
pdpe64_t * pdpe = NULL;
pde64_t * pde = NULL;
pte64_t * pte = NULL;
addr_t host_addr = 0;

int pml_index = PML4E64_INDEX(fault_addr);
int pdpe_index = PDPE64_INDEX(fault_addr);
int pde_index = PDE64_INDEX(fault_addr);
int pte_index = PTE64_INDEX(fault_addr);
- struct v3_shadow_region * region = v3_get_shadow_region(info, fault_addr);
+ struct v3_shadow_region * region = v3_get_shadow_region(info->vm_info, info->cpu_id, fault_addr);
- if ((region == NULL) ||
- (region->host_type == SHDW_REGION_INVALID)) {
+ if (region == NULL) {
PrintError("Invalid region in passthrough page fault 64, addr=%p\n",
(void *)fault_addr);
return -1;
}
- host_addr = v3_get_shadow_addr(region, fault_addr);
+ host_addr = v3_get_shadow_addr(region, info->cpu_id, fault_addr);
//
// Lookup the correct PML address based on the PAGING MODE
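// A minimal sketch of that lookup, mirroring invalidate_addr_64() below
// (assumed: the walk that fills in pdpe, pde, and pte, allocating any
// missing levels along the way, is elided from this hunk):
if (info->shdw_pg_mode == SHADOW_PAGING) {
pml = CR3_TO_PML4E64_VA(info->ctrl_regs.cr3);
} else {
pml = CR3_TO_PML4E64_VA(info->direct_map_pt);
}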
if (pte[pte_index].present == 0) {
pte[pte_index].user_page = 1;
- if (region->host_type == SHDW_REGION_ALLOCATED) {
+ if ((region->flags.alloced == 1) &&
+ (region->flags.read == 1)) {
// Full access
pte[pte_index].present = 1;
- pte[pte_index].writable = 1;
- pte[pte_index].page_base_addr = PAGE_BASE_ADDR(host_addr);
-
- } else if (region->host_type == SHDW_REGION_WRITE_HOOK) {
- // Only trap writes
- pte[pte_index].present = 1;
- pte[pte_index].writable = 0;
+ if (region->flags.write == 1) {
+ pte[pte_index].writable = 1;
+ } else {
+ pte[pte_index].writable = 0;
+ }
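+ // A read-only region yields a present but non-writable PTE, so a guest
+ // write faults again and lands in the unhandled() callback below.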
pte[pte_index].page_base_addr = PAGE_BASE_ADDR(host_addr);
-
- } else if (region->host_type == SHDW_REGION_FULL_HOOK) {
- // trap all accesses
- return v3_handle_mem_full_hook(info, fault_addr, fault_addr, region, error_code);
-
} else {
- PrintError("Unknown Region Type...\n");
- return -1;
+ return region->unhandled(info, fault_addr, fault_addr, region, error_code);
}
+ } else {
+ // We fix all permissions on the first pass,
+ // so we only get here if it's an unhandled exception
+
+ return region->unhandled(info, fault_addr, fault_addr, region, error_code);
}
-
- if ( (region->host_type == SHDW_REGION_WRITE_HOOK) &&
- (error_code.write == 1) ) {
- return v3_handle_mem_wr_hook(info, fault_addr, fault_addr, region, error_code);
+
+ return 0;
+}
+
+static inline int invalidate_addr_64(struct guest_info * info, addr_t inv_addr) {
+ pml4e64_t * pml = NULL;
+ pdpe64_t * pdpe = NULL;
+ pde64_t * pde = NULL;
+ pte64_t * pte = NULL;
+
+
+ // TODO:
+ // Call INVLPGA
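+ // A hedged sketch of that TODO (an assumption, not part of this patch):
+ // on AMD SVM, INVLPGA invalidates the TLB entry for the virtual address
+ // in rAX under the ASID in ECX, e.g.
+ //   asm volatile ("invlpga" : : "a" (inv_addr), "c" (asid));
+ // where 'asid' is a hypothetical stand-in for this core's VMCB ASID.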
+
+ // clear the page table entry
+ int pml_index = PML4E64_INDEX(inv_addr);
+ int pdpe_index = PDPE64_INDEX(inv_addr);
+ int pde_index = PDE64_INDEX(inv_addr);
+ int pte_index = PTE64_INDEX(inv_addr);
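+ // Each *_INDEX macro extracts the 9-bit table index for its level from
+ // the canonical virtual address (bits 47:39, 38:30, 29:21, and 20:12).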
+
+
+ // Lookup the correct PML address based on the PAGING MODE
+ if (info->shdw_pg_mode == SHADOW_PAGING) {
+ pml = CR3_TO_PML4E64_VA(info->ctrl_regs.cr3);
+ } else {
+ pml = CR3_TO_PML4E64_VA(info->direct_map_pt);
}
+ if (pml[pml_index].present == 0) {
+ return 0;
+ }
+
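+ // BASE_TO_PAGE_ADDR turns the stored page-frame number back into a
+ // physical address; V3_VAddr maps it into the VMM's virtual address space.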
+ pdpe = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pml[pml_index].pdp_base_addr));
+
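+ // A non-present PDPE means there is nothing to clear; a large-page PDPE
+ // maps a full 1GB page with no lower-level table, so clearing its present
+ // bit drops the whole mapping.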
+ if (pdpe[pdpe_index].present == 0) {
+ return 0;
+ } else if (pdpe[pdpe_index].large_page == 1) {
+ pdpe[pdpe_index].present = 0;
+ return 0;
+ }
+
+ pde = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pdpe[pdpe_index].pd_base_addr));
+
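+ // Same pattern one level down: a large-page PDE maps a 2MB page directly.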
+ if (pde[pde_index].present == 0) {
+ return 0;
+ } else if (pde[pde_index].large_page == 1) {
+ pde[pde_index].present = 0;
+ return 0;
+ }
+
+ pte = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pde[pde_index].pt_base_addr));
+
+ pte[pte_index].present = 0;
+
return 0;
}
+
#endif