int handle_shadow_pagefault32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
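+ /*
+ * Walk the 32-bit guest page tables in response to a shadow page fault:
+ * validate the guest PDE (injecting a guest-visible #PF on a guest-level
+ * error), lazily build the shadow PDE for 4KB or 4MB pages, and hand
+ * PTE-level faults to handle_shadow_pte32_fault().
+ */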
- pde32_t * guest_pde = NULL;
- pde32_t * shadow_pde = (pde32_t *)CR3_TO_PDE32(info->shdw_pg_state.shadow_cr3);
+ pde32_t * guest_pd = NULL;
+ pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32(info->shdw_pg_state.shadow_cr3);
addr_t guest_cr3 = CR3_TO_PDE32(info->shdw_pg_state.guest_cr3);
pt_access_status_t guest_pde_access;
pt_access_status_t shadow_pde_access;
- pde32_t * guest_pde_entry = NULL;
- pde32_t * shadow_pde_entry = (pde32_t *)&(shadow_pde[PDE32_INDEX(fault_addr)]);
+ pde32_t * guest_pde = NULL;
+ pde32_t * shadow_pde = (pde32_t *)&(shadow_pd[PDE32_INDEX(fault_addr)]);
- if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pde) == -1) {
+ if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
PrintDebug("Invalid Guest PDE Address: 0x%x\n", guest_cr3);
return -1;
}
- guest_pde_entry = (pde32_t *)&(guest_pde[PDE32_INDEX(fault_addr)]);
+ guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(fault_addr)]);
// Check the guest page permissions
- guest_pde_access = can_access_pde32(guest_pde, fault_addr, error_code);
+ guest_pde_access = can_access_pde32(guest_pd, fault_addr, error_code);
if (guest_pde_access != PT_ACCESS_OK) {
// inject page fault to the guest (Guest PDE fault)
info->ctrl_regs.cr2 = fault_addr;
raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);
- PrintDebug("Injecting PDE pf to guest\n");
+ PrintDebug("Injecting PDE pf to guest: (guest access error=%d) (pf error code=%d)\n", guest_pde_access, error_code);
+ PrintDebug("Guest CR3=%x\n", guest_cr3);
+ PrintPD32(guest_pd);
return 0;
}
- // Check that the Guest PDE entry points to valid memory
- // else Machine Check the guest
-
- shadow_pde_access = can_access_pde32(shadow_pde, fault_addr, error_code);
+ shadow_pde_access = can_access_pde32(shadow_pd, fault_addr, error_code);
if (shadow_pde_access == PT_ENTRY_NOT_PRESENT) {
- pte32_t * shadow_pte = NULL;
- V3_AllocPages(shadow_pte, 1);
- memset(shadow_pte, 0, PAGE_SIZE);
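+ // First access through this PDE: populate the shadow entry,
+ // copying the guest-controlled bits from the guest PDE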
+ shadow_pde->present = 1;
+ shadow_pde->user_page = guest_pde->user_page;
+ shadow_pde->large_page = guest_pde->large_page;
- shadow_pde_entry->pt_base_addr = PD32_BASE_ADDR(shadow_pte);
-
-
- shadow_pde_entry->present = 1;
- shadow_pde_entry->user_page = guest_pde_entry->user_page;
-
// VMM Specific options
- shadow_pde_entry->write_through = 0;
- shadow_pde_entry->cache_disable = 0;
- shadow_pde_entry->global_page = 0;
+ shadow_pde->write_through = 0;
+ shadow_pde->cache_disable = 0;
+ shadow_pde->global_page = 0;
//
- guest_pde_entry->accessed = 1;
+ guest_pde->accessed = 1;
+
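+ // Small (4KB) pages get a private shadow page table;
+ // large (4MB) pages are mapped directly below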
+ if (guest_pde->large_page == 0) {
+ pte32_t * shadow_pt = NULL;
+
+ V3_AllocPages(shadow_pt, 1);
+ memset(shadow_pt, 0, PAGE_SIZE);
+
+ shadow_pde->pt_base_addr = PD32_BASE_ADDR(shadow_pt);
- if (guest_pde_entry->large_page == 0) {
- shadow_pde_entry->writable = guest_pde_entry->writable;
+ shadow_pde->writable = guest_pde->writable;
} else {
- /*
- * Check the Intel manual because we are ignoring Large Page issues here
- * Also be wary of hooked pages
+ struct shadow_region * mem_reg;
+ pde32_4MB_t * large_guest_pde = (pde32_4MB_t *)guest_pde;
+ pde32_4MB_t * large_shadow_pde = (pde32_4MB_t *)shadow_pde;
+ host_region_type_t host_page_type;
+ addr_t guest_start_addr = PDE32_4MB_T_ADDR(*large_guest_pde);
+ // addr_t guest_end_addr = guest_start_addr + PAGE_SIZE_4MB; // start address + 4MB
+
+
+ /* JRL: THIS COULD BE A PROBLEM....
+ * Currently we only support large pages if the region is mapped contiguously in shadow memory
+ * Let's hope this is the case...
*/
- PrintDebug("Large PAge!!!\n");
- return -1;
+ // Check that the Guest PDE entry points to valid memory
+ // else Machine Check the guest
+ host_page_type = get_shadow_addr_type(info, guest_start_addr);
+ if (host_page_type == HOST_REGION_INVALID) {
+ raise_exception(info, MC_EXCEPTION);
+ PrintDebug("Invalid guest address in large page (0x%x)\n", guest_start_addr);
+ return 0;
+ } else if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
+ addr_t host_start_addr = 0;
+ addr_t region_end_addr = 0;
+
+ // Check for a large enough region in host memory
+ mem_reg = get_shadow_region_by_addr(&(info->mem_map), guest_start_addr);
+ host_start_addr = mem_reg->host_addr + (guest_start_addr - mem_reg->guest_start);
+ region_end_addr = mem_reg->host_addr + (mem_reg->guest_end - mem_reg->guest_start);
+
+ // Make sure the host region extends at least a full 4MB page from this address
+ if (region_end_addr <= host_start_addr + PAGE_SIZE_4MB) {
+ PrintDebug("Large page over non-contiguous host memory... Not handled\n");
+ return -1;
+ }
+
+ //4b. Map the guest large page directly onto the contiguous host region
+ large_shadow_pde->page_base_addr = PD32_4MB_BASE_ADDR(host_start_addr);
+
+ //4f. Emulate the dirty bit: make the shadow page writable only once the guest page is dirty
+ if (large_guest_pde->dirty == 1) { // dirty
+ large_shadow_pde->writable = guest_pde->writable;
+ } else if (error_code.write == 1) { // not dirty, access is write
+ large_shadow_pde->writable = guest_pde->writable;
+ large_guest_pde->dirty = 1;
+ } else { // not dirty, access is read
+ large_shadow_pde->writable = 0;
+ }
+
+ } else {
+ // Handle hooked pages as well as other special pages
+ if (handle_special_page_fault(info, fault_addr, error_code) == -1) {
+ PrintDebug("Special Page Fault handler returned error for address: %x\n", fault_addr);
+ return -1;
+ }
+ }
}
- } else if (shadow_pde_access == PT_WRITE_ERROR) {
+ } else if ((shadow_pde_access == PT_WRITE_ERROR) &&
+ (guest_pde->large_page = 1) &&
+ (((pde32_4MB_t *)guest_pde)->dirty == 0)) {
//
// Page Directory Entry marked read-only
//
- PrintDebug("Shadow Paging Write Error\n");
- return -1;
+ ((pde32_4MB_t *)guest_pde)->dirty = 1;
+ shadow_pde->writable = guest_pde->writable;
+ return 0;
+
} else if (shadow_pde_access == PT_USER_ERROR) {
//
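+ // Page Directory Entry does not allow user access
+ //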
PrintDebug("Shadow Paging User access error\n");
return -1;
} else if (shadow_pde_access == PT_ACCESS_OK) {
- pte32_t * shadow_pte = (pte32_t *)PDE32_T_ADDR((*shadow_pde_entry));
- pte32_t * guest_pte = NULL;
+ pte32_t * shadow_pt = (pte32_t *)PDE32_T_ADDR((*shadow_pde));
+ pte32_t * guest_pt = NULL;
// Page Table Entry fault
- if (guest_pa_to_host_va(info, PDE32_T_ADDR((*guest_pde_entry)), (addr_t*)&guest_pte) == -1) {
- PrintDebug("Invalid Guest PTE Address: 0x%x\n", PDE32_T_ADDR((*guest_pde_entry)));
+ if (guest_pa_to_host_va(info, PDE32_T_ADDR((*guest_pde)), (addr_t*)&guest_pt) == -1) {
+ PrintDebug("Invalid Guest PTE Address: 0x%x\n", PDE32_T_ADDR((*guest_pde)));
// Machine check the guest
raise_exception(info, MC_EXCEPTION);
+ return 0;
}
- if (handle_shadow_pte32_fault(info, fault_addr, error_code, shadow_pte, guest_pte) == -1) {
+ if (handle_shadow_pte32_fault(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) {
PrintDebug("Error handling Page fault caused by PTE\n");
return -1;
}
- return -1;
+ return 0;
}
- //PrintDebugPageTables(shadow_pde);
+ //PrintDebugPageTables(shadow_pd);
PrintDebug("Returning end of PDE function\n");
return 0;
}
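+ /*
+ * Handle a shadow fault at the page-table (PTE) level: check the guest PTE,
+ * lazily map the shadow PTE onto host memory, and emulate the guest
+ * accessed/dirty bits so read-only shadow pages track guest dirty state.
+ */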
int handle_shadow_pte32_fault(struct guest_info * info,
addr_t fault_addr,
pf_error_t error_code,
- pte32_t * shadow_pte,
- pte32_t * guest_pte) {
+ pte32_t * shadow_pt,
+ pte32_t * guest_pt) {
pt_access_status_t guest_pte_access;
pt_access_status_t shadow_pte_access;
- pte32_t * guest_pte_entry = (pte32_t *)&(guest_pte[PTE32_INDEX(fault_addr)]);;
- pte32_t * shadow_pte_entry = (pte32_t *)&(shadow_pte[PTE32_INDEX(fault_addr)]);
+ pte32_t * guest_pte = (pte32_t *)&(guest_pt[PTE32_INDEX(fault_addr)]);;
+ pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
// Check the guest page permissions
- guest_pte_access = can_access_pte32(guest_pte, fault_addr, error_code);
+ guest_pte_access = can_access_pte32(guest_pt, fault_addr, error_code);
if (guest_pte_access != PT_ACCESS_OK) {
+ // Inject the page fault into the guest (Guest PTE fault), mirroring the PDE path above
+ info->ctrl_regs.cr2 = fault_addr;
+ raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);
+ return 0;
}
- shadow_pte_access = can_access_pte32(shadow_pte, fault_addr, error_code);
+ shadow_pte_access = can_access_pte32(shadow_pt, fault_addr, error_code);
if (shadow_pte_access == PT_ACCESS_OK) {
// Inconsistent state: the shadow PTE already permits this access, so just return and let the guest retry
return 0;
} else if (shadow_pte_access == PT_ENTRY_NOT_PRESENT) {
addr_t shadow_pa;
+ host_region_type_t host_page_type;
- addr_t guest_pa = PTE32_T_ADDR((*guest_pte_entry));
+ addr_t guest_pa = PTE32_T_ADDR((*guest_pte));
// Page Table Entry Not Present
+ host_page_type = get_shadow_addr_type(info, guest_pa);
+
+ if (host_page_type == HOST_REGION_INVALID) {
+ // Machine check the guest
+ PrintDebug("Invalid guest address (0x%x)\n", guest_pa);
+ raise_exception(info, MC_EXCEPTION);
+ return 0;
+ } else if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
shadow_pa = get_shadow_addr(info, guest_pa);
- shadow_pte_entry->page_base_addr = PT32_BASE_ADDR(shadow_pa);
+ shadow_pte->page_base_addr = PT32_BASE_ADDR(shadow_pa);
- shadow_pte_entry->present = guest_pte_entry->present;
- shadow_pte_entry->user_page = guest_pte_entry->user_page;
+ shadow_pte->present = guest_pte->present;
+ shadow_pte->user_page = guest_pte->user_page;
//set according to VMM policy
- shadow_pte_entry->write_through = 0;
- shadow_pte_entry->cache_disable = 0;
- shadow_pte_entry->global_page = 0;
+ shadow_pte->write_through = 0;
+ shadow_pte->cache_disable = 0;
+ shadow_pte->global_page = 0;
//
- guest_pte_entry->accessed = 1;
+ guest_pte->accessed = 1;
- if (guest_pte_entry->dirty == 1) {
- shadow_pte_entry->writable = guest_pte_entry->writable;
- } else if ((guest_pte_entry->dirty == 0) && (error_code.write == 1)) {
- shadow_pte_entry->writable = guest_pte_entry->writable;
- guest_pte_entry->dirty = 1;
- } else if ((guest_pte_entry->dirty = 0) && (error_code.write == 0)) {
- shadow_pte_entry->writable = 0;
+ if (guest_pte->dirty == 1) {
+ shadow_pte->writable = guest_pte->writable;
+ } else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
+ shadow_pte->writable = guest_pte->writable;
+ guest_pte->dirty = 1;
+ } else if ((guest_pte->dirty = 0) && (error_code.write == 0)) {
+ shadow_pte->writable = 0;
}
} else {
// Page fault handled by hook functions
+ if (handle_special_page_fault(info, fault_addr, error_code) == -1) {
+ PrintDebug("Special Page Fault handler returned error for address: %x\n", fault_addr);
+ return -1;
+ }
}
} else if ((shadow_pte_access == PT_WRITE_ERROR) &&
- (guest_pte_entry->dirty == 0)) {
- guest_pte_entry->dirty = 1;
- shadow_pte_entry->writable = guest_pte_entry->writable;
+ (guest_pte->dirty == 0)) {
+ guest_pte->dirty = 1;
+ shadow_pte->writable = guest_pte->writable;
PrintDebug("Shadow PTE Write Error\n");
if (addr_type == MEM_OPERAND) {
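+ // Drop the shadow mapping for the decoded memory operand (INVLPG-style
+ // invalidation): clearing the present bit forces a rebuild on the next fault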
pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32(info->shdw_pg_state.shadow_cr3);
- pde32_t * shadow_pde_entry = (pde32_t *)&shadow_pd[PDE32_INDEX(first_operand)];
+ pde32_t * shadow_pde = (pde32_t *)&shadow_pd[PDE32_INDEX(first_operand)];
//PrintDebug("PDE Index=%d\n", PDE32_INDEX(first_operand));
//PrintDebug("FirstOperand = %x\n", first_operand);
- if (shadow_pde_entry->large_page == 1) {
- shadow_pde_entry->present = 0;
+ if (shadow_pde->large_page == 1) {
+ shadow_pde->present = 0;
} else {
- if (shadow_pde_entry->present == 1) {
- pte32_t * shadow_pt = (pte32_t *)PDE32_T_ADDR((*shadow_pde_entry));
- pte32_t * shadow_pte_entry = (pte32_t *)&shadow_pt[PTE32_INDEX(first_operand)];
+ if (shadow_pde->present == 1) {
+ pte32_t * shadow_pt = (pte32_t *)PDE32_T_ADDR((*shadow_pde));
+ pte32_t * shadow_pte = (pte32_t *)&shadow_pt[PTE32_INDEX(first_operand)];
- shadow_pte_entry->present = 0;
+ shadow_pte->present = 0;
}
}