// We assume that shdw_pg_state.guest_cr3 is pointing to the page tables we want to activate
// We also assume that the CPU mode has not changed during this page table transition
-static int activate_shadow_pt_32(struct guest_info * info) {
+static inline int activate_shadow_pt_32(struct guest_info * info) {
struct cr3_32 * shadow_cr3 = (struct cr3_32 *)&(info->ctrl_regs.cr3);
struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3);
int cached = 0;
PrintError("CR3 Cache failed\n");
return -1;
} else if (cached == 0) {
- addr_t shadow_pt;
-
- PrintDebug("New CR3 is different - flushing shadow page table %p\n", shadow_cr3 );
- delete_page_tables_32(CR3_TO_PDE32_VA(*(uint_t*)shadow_cr3));
-
- shadow_pt = create_new_shadow_pt();
+ struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
+
+ shdw_page->cr3 = shdw_page->page_pa;
- shadow_cr3->pdt_base_addr = (addr_t)V3_PAddr((void *)(addr_t)PAGE_BASE_ADDR(shadow_pt));
- PrintDebug( "Created new shadow page table %p\n", (void *)(addr_t)shadow_cr3->pdt_base_addr );
+ shadow_cr3->pdt_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
+ PrintDebug( "Created new shadow page table %p\n", (void *)BASE_TO_PAGE_ADDR(shadow_cr3->pdt_base_addr));
} else {
PrintDebug("Reusing cached shadow Page table\n");
}
pte32_t * guest_pt);
-static int handle_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
+static inline int handle_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
pde32_t * guest_pd = NULL;
pde32_t * shadow_pd = CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
addr_t guest_cr3 = CR3_TO_PDE32_PA(info->shdw_pg_state.guest_cr3);
if (shadow_pde_access == PT_ACCESS_NOT_PRESENT)
{
- pte32_t * shadow_pt = (pte32_t *)create_new_shadow_pt();
+ struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
+ pte32_t * shadow_pt = (pte32_t *)V3_VAddr((void *)shdw_page->page_pa);
shadow_pde->present = 1;
shadow_pde->user_page = guest_pde->user_page;
// ?? What if the guest pde is dirty at this point?
((pde32_4MB_t *)guest_pde)->dirty = 0;
shadow_pde->writable = 0;
+
+ if (handle_large_pagefault_32(info, fault_addr, error_code, shadow_pt, (pde32_4MB_t *)guest_pde) == -1) {
+ PrintError("Error handling large pagefault\n");
+ return -1;
+ }
+
}
}
else if (shadow_pde_access == PT_ACCESS_OK)
}
}
else if ((shadow_pde_access == PT_ACCESS_WRITE_ERROR) &&
- (guest_pde->large_page == 1) &&
- (((pde32_4MB_t *)guest_pde)->dirty == 0))
+ (guest_pde->large_page == 1))
{
//
// Page Directory Entry marked read-only
PrintDebug("Manual Says to inject page fault into guest\n");
#ifdef DEBUG_SHADOW_PAGING
PrintDebug("Guest PDE: (access=%d)\n\t", guest_pde_access);
- PrintPTEntry(PAGE_PD32, fault_addr, guest_pde);
+ PrintPTEntry(info, PAGE_PD32, fault_addr, guest_pde);
PrintDebug("Shadow PDE: (access=%d)\n\t", shadow_pde_access);
- PrintPTEntry(PAGE_PD32, fault_addr, shadow_pde);
+ PrintPTEntry(info, PAGE_PD32, fault_addr, shadow_pde);
#endif
return 0;
pt_access_status_t shadow_pte_access = v3_can_access_pte32(shadow_pt, fault_addr, error_code);
pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_4MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_4MB(fault_addr);
+ struct shadow_page_state * state = &(info->shdw_pg_state);
struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_fault_pa);
if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
(shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
- struct shadow_page_state * state = &(info->shdw_pg_state);
addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, guest_fault_pa);
shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_fault_pa)) != NULL) {
// Check if the entry is a page table...
PrintDebug("Marking page as Guest Page Table (large page)\n");
- shadow_pte->vmm_info = PT32_GUEST_PT;
shadow_pte->writable = 0;
} else if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
shadow_pte->writable = 0;
PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
return -1;
}
- } else if (shadow_pte->vmm_info == PT32_GUEST_PT) {
+ }
+
+
+ if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_fault_pa)) != NULL) {
struct shadow_page_state * state = &(info->shdw_pg_state);
PrintDebug("Write operation on Guest PAge Table Page (large page)\n");
state->cached_cr3 = 0;
pte32_t * guest_pte = (pte32_t *)&(guest_pt[PTE32_INDEX(fault_addr)]);;
pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);
+ struct shadow_page_state * state = &(info->shdw_pg_state);
struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_pa);
#ifdef DEBUG_SHADOW_PAGING
PrintDebug("Guest PTE: (access=%d)\n\t", guest_pte_access);
- PrintPTEntry(PAGE_PT32, fault_addr, guest_pte);
+ PrintPTEntry(info, PAGE_PT32, fault_addr, guest_pte);
PrintDebug("Shadow PTE: (access=%d)\n\t", shadow_pte_access);
- PrintPTEntry(PAGE_PT32, fault_addr, shadow_pte);
+ PrintPTEntry(info, PAGE_PT32, fault_addr, shadow_pte);
#endif
/* Was the page fault caused by the Guest's page tables? */
inject_guest_pf(info, fault_addr, error_code);
return 0;
}
+
if (shadow_pte_access == PT_ACCESS_OK) {
if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
(shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
- struct shadow_page_state * state = &(info->shdw_pg_state);
addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, guest_pa);
shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
guest_pte->accessed = 1;
- if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_pa)) != NULL) {
- // Check if the entry is a page table...
- PrintDebug("Marking page as Guest Page Table %d\n", shadow_pte->writable);
- shadow_pte->vmm_info = PT32_GUEST_PT;
- }
-
- if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
- shadow_pte->writable = 0;
- } else if (guest_pte->dirty == 1) {
+ if (guest_pte->dirty == 1) {
shadow_pte->writable = guest_pte->writable;
} else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
shadow_pte->writable = guest_pte->writable;
guest_pte->dirty = 1;
-
- if (shadow_pte->vmm_info == PT32_GUEST_PT) {
- // Well that was quick...
- struct shadow_page_state * state = &(info->shdw_pg_state);
- PrintDebug("Immediate Write operation on Guest PAge Table Page\n");
+ } else if ((guest_pte->dirty == 0) && (error_code.write == 0)) {
+ shadow_pte->writable = 0;
+ }
+
+ // dirty flag has been set, check if its in the cache
+ if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_pa)) != NULL) {
+ if (error_code.write == 1) {
state->cached_cr3 = 0;
+ shadow_pte->writable = guest_pte->writable;
+ } else {
+ shadow_pte->writable = 0;
}
+ }
- } else if ((guest_pte->dirty == 0) && (error_code.write == 0)) { // was =
+ // Write hooks trump all, and are set Read Only
+ if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
shadow_pte->writable = 0;
}
shadow_pte->writable = guest_pte->writable;
}
- if (shadow_pte->vmm_info == PT32_GUEST_PT) {
+ if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_pa)) != NULL) {
struct shadow_page_state * state = &(info->shdw_pg_state);
PrintDebug("Write operation on Guest PAge Table Page\n");
state->cached_cr3 = 0;
/* If we start to optimize we should look up the guest pages in the cache... */
-static int handle_shadow_invlpg_32(struct guest_info * info, addr_t vaddr) {
+static inline int handle_shadow_invlpg_32(struct guest_info * info, addr_t vaddr) {
pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
pde32_t * shadow_pde = (pde32_t *)&shadow_pd[PDE32_INDEX(vaddr)];
shadow_pte->present = 0;
}
-
-
return 0;
}