switch (pt_type) {
case PAGE_PD32: {
pde32_t * pde = pt;
- pde[PDE32_INDEX(va)].writable = 1;
+ pde[PDE32_INDEX(va)].present = 0;
break;
}
case PAGE_4MB: {
pde32_4MB_t * pde = pt;
- pde[PDE32_INDEX(va)].writable = 1;
+ pde[PDE32_INDEX(va)].present = 0;
break;
}
case PAGE_PT32: {
pte32_t * pte = pt;
- pte[PTE32_INDEX(va)].writable = 1;
+ pte[PTE32_INDEX(va)].present = 0;
break;
}
case PAGE_PML464: {
pml4e64_t * pml = pt;
- pml[PML4E64_INDEX(va)].writable = 1;
+ pml[PML4E64_INDEX(va)].present = 0;
break;
}
case PAGE_PDP64: {
pdpe64_t * pdp = pt;
- pdp[PDPE64_INDEX(va)].writable = 1;
+ pdp[PDPE64_INDEX(va)].present = 0;
break;
}
case PAGE_PD64: {
pde64_t * pde = pt;
- pde[PDE64_INDEX(va)].writable = 1;
+ pde[PDE64_INDEX(va)].present = 0;
break;
}
case PAGE_PT64: {
pte64_t * pte = pt;
- pte[PTE64_INDEX(va)].writable = 1;
+ pte[PTE64_INDEX(va)].present = 0;
break;
}
default:
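The common effect of every arm in this switch is the same: the shadow entry is made not-present rather than writable, so the next access through that entry faults back into the shadow pager instead of being silently allowed. A minimal sketch of that pattern (the helper name is hypothetical; pte32_t, PTE32_INDEX, and addr_t are names taken from the patch):

    static void invalidate_shdw_entry32(pte32_t * pte, addr_t va) {
        /* clearing P forces a #PF on the next access, letting the
         * shadow-page cache re-validate the mapping */
        pte[PTE32_INDEX(va)].present = 0;
    }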
pg_data = (struct shdw_pg_data *)v3_htable_search(cache_state->page_htable, (addr_t)&tuple);
if (!pg_data) {
- PrintError("Invalid PTE reference...\n");
+ PrintError("Invalid PTE reference... Should Delete rmap entry\n");
continue;
}
struct cache_vm_state * cache_state) {
struct shdw_pg_data * pg_data = NULL;
+ PrintError("popping page from queue\n");
+
pg_data = list_tail_entry(&(cache_state->pg_queue), struct shdw_pg_data, pg_queue_node);
pg_data->hva = (void *)V3_VAddr((void *)pg_data->hpa);
} else if (cache_state->pgs_in_free_list) {
+
+ PrintError("pulling page from free list\n");
// pull from free list
pg_data = list_tail_entry(&(cache_state->free_list), struct shdw_pg_data, pg_queue_node);
PrintDebug("Activating 32 Bit cacheable page tables\n");
shdw_pg = find_shdw_pt(core->vm_info, gpa, PAGE_PD32);
- PrintError("shdw_pg returned as %p\n", shdw_pg);
+ PrintError("shdw_pg returned as %p for CR3:%p\n", shdw_pg, (void *)gpa);
if (shdw_pg == NULL) {
shdw_pg = create_shdw_pt(core->vm_info, gpa, PAGE_PD32);
+
+ // update current reverse map entries...
+ // We are now using this page in a PT, so:
+ // any existing writable mappings must be updated
+ update_rmap_entries(core->vm_info, gpa);
}
PrintDebug("shdw_pg now exists...\n");
return 0;
}
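The update_rmap_entries() call added here ties into the first hunk: once the frame at gpa is known to hold a guest page directory, the shadow entries recorded for that frame in the reverse map are invalidated, so later guest writes to the page table trap instead of going through stale writable mappings. A minimal sketch of the resulting activation flow, assuming this is the 32-bit activation path and using only names from the patch:

    shdw_pg = find_shdw_pt(core->vm_info, gpa, PAGE_PD32);

    if (shdw_pg == NULL) {
        /* first sighting of this CR3 target: build the shadow PD and
         * revoke any existing shadow mappings of the backing frame */
        shdw_pg = create_shdw_pt(core->vm_info, gpa, PAGE_PD32);
        update_rmap_entries(core->vm_info, gpa);
    }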
-
+
pte32_t * shadow_pt = NULL;
pte32_t * guest_pt = NULL;
/* Set up cache state */
addr_t gpa = BASE_TO_PAGE_ADDR_4KB(guest_pde->pt_base_addr);
-
+
struct shdw_pg_data * shdw_page = NULL;
page_type_t pt_type = PAGE_PT32;
if (shdw_page == NULL) {
shdw_page = create_shdw_pt(core->vm_info, gpa, pt_type);
- }
-
- // update current reverse map entries...
- // We are now using this page in a PT, so:
- // any existing writable mappings must be updated
- update_rmap_entries(core->vm_info, gpa);
+
+ if (pt_type == PAGE_PT32) {
+ // update current reverse map entries...
+ // We are now using this page in a PT, so:
+ // any existing writable mappings must be updated
+ update_rmap_entries(core->vm_info, gpa);
+ }
+ }
+
struct shdw_pg_data * parent_page = find_shdw_pt(core->vm_info, guest_cr3, PAGE_PD32);
}
if (pt_page != NULL) {
+ PrintError("Found PT page (small), marking RD-ONLY (va=%p), (gpa=%p)\n",
+ (void *)fault_addr, (void *)pg_gpa);
// This is a page table page...
shadow_pte->writable = 0;
shadow_pte->vmm_info = V3_CACHED_PG;
if (pt_page != NULL) {
// This is a page table page...
+ PrintError("Found PT page (large), marking RD-ONLY (va=%p), (gpa=%p)\n",
+ (void *)fault_addr, (void *)pg_gpa);
+
shadow_pte->writable = 0;
shadow_pte->vmm_info = V3_CACHED_PG;
}
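The V3_CACHED_PG tag and the cleared writable bit set above are presumably what a later write fault keys off of; a rough sketch of that check, under the assumption that the handler receives a pf_error_t-style error_code (neither the handler shape nor error_code appear in this patch):

    if ((error_code.write == 1) && (shadow_pte->vmm_info == V3_CACHED_PG)) {
        /* the guest is writing into one of its own page tables; the cached
         * shadow copy covering this frame must be refreshed or invalidated
         * before the write is allowed to proceed */
    }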
guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(vaddr)]);
+ // Should we back-propagate the invalidations, since the invalidated entries might themselves be cached?
+
if (guest_pde->large_page == 1) {
shadow_pde->present = 0;
- PrintError("\tInvalidating small page\n");
+ // PrintError("\tInvalidating small page\n");
shadow_pte->present = 0;