// NOTE(review): This span is an UNAPPLIED PATCH FRAGMENT, not plain C.
// Lines beginning with "- " / "+ " are unified-diff markers left in the file;
// the hunk migrates a 32-bit passthrough page-fault handler (presumably from
// the Palacios VMM, given v3_* naming — TODO confirm) from the old shadow-region
// API (v3_get_shadow_region / v3_get_shadow_addr, info->cpu_id) to the newer
// memory-region API (v3_get_mem_region / v3_gpa_to_hpa, info->vcpu_id).
// The diff should be applied or reverted before this file can compile.

// Split the faulting guest address into its 32-bit page-directory and
// page-table indices.
int pde_index = PDE32_INDEX(fault_addr);
int pte_index = PTE32_INDEX(fault_addr);
// Patch: look up the memory region covering the faulting address via the
// new v3_mem_region API (per-vcpu), replacing the old shadow-region lookup.
- struct v3_shadow_region * region = v3_get_shadow_region(info->vm_info, info->cpu_id, fault_addr);
+ struct v3_mem_region * region = v3_get_mem_region(info->vm_info, info->vcpu_id, fault_addr);
if (region == NULL) {
// BUG(review): this PrintError call is truncated — the trailing comma is
// followed by no argument and no closing ");". The fault_addr argument
// (and likely a "(void *)fault_addr" cast, as on the later PrintError)
// appears to have been lost when this hunk was cut. Must be restored
// before the file can compile.
PrintError("Invalid region in passthrough page fault 32, addr=%p\n",
return -1;
}
// Patch: the old direct region->host translation is removed here and
// replaced further down by a checked v3_gpa_to_hpa() call.
- host_addr = v3_get_shadow_addr(region, info->cpu_id, fault_addr);
-
// Lookup the correct PDE address based on the PAGING MODE
if (info->shdw_pg_mode == SHADOW_PAGING) {
pde = CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
} else {
// NOTE(review): clearing pte[].writable in the non-shadow branch of a
// "look up the PDE" conditional looks like hunks from two different
// parts of the function were spliced together — the original else
// branch presumably fetched pde via a nested/guest CR3 path. Verify
// against the upstream file.
pte[pte_index].writable = 0;
}
+
// Patch: new checked guest-physical -> host-physical translation; fails the
// fault (-1) instead of silently using an untranslated address.
+ if (v3_gpa_to_hpa(info, fault_addr, &host_addr) == -1) {
+ PrintError("Could not translate fault address (%p)\n", (void *)fault_addr);
+ return -1;
+ }
// Point the (shadow) PTE at the translated host frame.
pte[pte_index].page_base_addr = PAGE_BASE_ADDR(host_addr);
} else {
return 0;
// STRUCTURE(review): "} else {" immediately followed by "} else if" is not
// valid C — the governing if-chain (and at least one closing brace's owner)
// lies outside this excerpt or the hunks are out of order. Do not edit this
// region without the full upstream context.
} else if (pde[pde_index].large_page) {
// Invalidate a large-page mapping: clear present, and (patch) also clear
// writable and user bits so stale permissions cannot linger.
pde[pde_index].present = 0;
+ pde[pde_index].writable = 0;
+ pde[pde_index].user_page = 0;
return 0;
}
// 4KB-page case: locate the page table from the PDE and invalidate the entry,
// with the patch additionally clearing the writable/user permission bits.
pte = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pde[pde_index].pt_base_addr));
pte[pte_index].present = 0;
+ pte[pte_index].writable = 0;
+ pte[pte_index].user_page = 0;
return 0;
}