// We assume that shdw_pg_state.guest_cr3 is pointing to the page tables we want to activate
// We also assume that the CPU mode has not changed during this page table transition
-static int activate_shadow_pt_32(struct guest_info * info) {
+static inline int activate_shadow_pt_32(struct guest_info * info) {
struct cr3_32 * shadow_cr3 = (struct cr3_32 *)&(info->ctrl_regs.cr3);
struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3);
int cached = 0;
return -1;
} else if (cached == 0) {
addr_t shadow_pt;
-
- PrintDebug("New CR3 is different - flushing shadow page table %p\n", shadow_cr3 );
- delete_page_tables_32(CR3_TO_PDE32_VA(*(uint_t*)shadow_cr3));
-
- shadow_pt = create_new_shadow_pt();
+
+ shadow_pt = create_new_shadow_pt(info);
shadow_cr3->pdt_base_addr = (addr_t)V3_PAddr((void *)(addr_t)PAGE_BASE_ADDR(shadow_pt));
- PrintDebug( "Created new shadow page table %p\n", (void *)(addr_t)shadow_cr3->pdt_base_addr );
+ PrintDebug( "Created new shadow page table %p\n", (void *)BASE_TO_PAGE_ADDR(shadow_cr3->pdt_base_addr));
} else {
PrintDebug("Reusing cached shadow Page table\n");
}
pte32_t * guest_pt);
-static int handle_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
+static inline int handle_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
pde32_t * guest_pd = NULL;
pde32_t * shadow_pd = CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
addr_t guest_cr3 = CR3_TO_PDE32_PA(info->shdw_pg_state.guest_cr3);
if (shadow_pde_access == PT_ACCESS_NOT_PRESENT)
{
- pte32_t * shadow_pt = (pte32_t *)create_new_shadow_pt();
+ pte32_t * shadow_pt = (pte32_t *)create_new_shadow_pt(info);
shadow_pde->present = 1;
shadow_pde->user_page = guest_pde->user_page;
// ?? What if guest pde is dirty a this point?
((pde32_4MB_t *)guest_pde)->dirty = 0;
shadow_pde->writable = 0;
+
+ if (handle_large_pagefault_32(info, fault_addr, error_code, shadow_pt, (pde32_4MB_t *)guest_pde) == -1) {
+ PrintError("Error handling large pagefault\n");
+ return -1;
+ }
+
}
}
else if (shadow_pde_access == PT_ACCESS_OK)
PrintDebug("Returning end of function\n");
return 0;
}
+
+
+
+/* If we start to optimize we should look up the guest pages in the cache... */
+/* Handle a guest INVLPG for a 32-bit guest: invalidate the shadow paging
+ * entry that maps vaddr so the next access refaults and resyncs with the
+ * guest page tables.
+ *
+ * Returns 0 on success, -1 if the guest CR3 cannot be translated to a
+ * host virtual address.
+ */
+static inline int handle_shadow_invlpg_32(struct guest_info * info, addr_t vaddr) {
+  /* The shadow PD is whatever is currently loaded in the virtualized CR3 */
+  pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
+  pde32_t * shadow_pde = (pde32_t *)&shadow_pd[PDE32_INDEX(vaddr)];
+
+  /* Guest CR3 is a guest-physical address; it must be translated before use */
+  addr_t guest_cr3 = CR3_TO_PDE32_PA(info->shdw_pg_state.guest_cr3);
+  pde32_t * guest_pd = NULL;
+  pde32_t * guest_pde;
+
+  if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
+    PrintError("Invalid Guest PDE Address: 0x%p\n", (void *)guest_cr3);
+    return -1;
+  }
+
+  guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(vaddr)]);
+
+  if (guest_pde->large_page == 1) {
+    /* Large (4MB) guest page: the whole shadow PDE covers it, so drop the PDE.
+     * NOTE(review): this clears the shadow PDE based on the *guest* PDE's
+     * large_page bit even if the shadow PDE is already not present — appears
+     * intentional (idempotent invalidation); confirm against callers. */
+    shadow_pde->present = 0;
+    PrintDebug("Invalidating Large Page\n");
+  } else if (shadow_pde->present == 1) {
+    /* 4KB page: walk down one level and knock out only the PTE for vaddr.
+     * pt_base_addr holds a physical frame number, hence the VA conversion. */
+    pte32_t * shadow_pt = (pte32_t *)(addr_t)BASE_TO_PAGE_ADDR_4KB(shadow_pde->pt_base_addr);
+    pte32_t * shadow_pte = (pte32_t *) V3_VAddr( (void*) &shadow_pt[PTE32_INDEX(vaddr)] );
+
+    PrintDebug("Setting not present\n");
+
+    shadow_pte->present = 0;
+  }
+  /* If the shadow PDE was not present there is nothing to invalidate */
+  return 0;
+}