extern struct vmm_os_hooks * os_hooks;
/* Free a 32-bit page directory and every page table it references.
 * NOTE(review): the per-PTE loop that freed the mapped data pages was
 * deliberately commented out in the patch this code came from — only the
 * page-table pages and the directory page itself are returned to the host.
 * Presumably the data pages are owned/freed elsewhere; confirm with callers.
 */
void delete_page_tables_pde32(pde32_t * pde) {
  int i;

  if (pde == NULL) {
    return;
  }

  for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
    if (pde[i].present) {
      /* pt_base_addr is a page-frame number; shift yields the table's address */
      pte32_t * pte = (pte32_t *)(pde[i].pt_base_addr << PAGE_POWER);

      os_hooks->free_page(pte);
    }
  }

  os_hooks->free_page(pde);
}
+int pt32_lookup(pde32_t * pd, addr_t vaddr, addr_t * paddr) {
+ addr_t pde_entry;
+ pde32_entry_type_t pde_entry_type;
+
+ if (pd == 0) {
+ return -1;
+ }
+
+ pde_entry_type = pde32_lookup(pd, vaddr, &pde_entry);
+
+ if (pde_entry_type == PDE32_ENTRY_PTE32) {
+ return pte32_lookup((pte32_t *)pde_entry, vaddr, paddr);
+ } else if (pde_entry_type == PDE32_ENTRY_LARGE_PAGE) {
+ *paddr = pde_entry;
+ return 0;
+ }
+
+ return -1;
+}
+
/* We can't do a full lookup because we don't know what context the page tables are in...
* The entry addresses could be pointing to either guest physical memory or host physical memory
* Instead we just return the entry address, and a flag to show if it points to a pte or a large page...
*/
-pde32_entry_type_t pde32_lookup(pde32_t * pde, addr_t addr, addr_t * entry) {
- pde32_t * pde_entry = &(pde[PDE32_INDEX(addr)]);
+pde32_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry) {
+ pde32_t * pde_entry = &(pd[PDE32_INDEX(addr)]);
if (!pde_entry->present) {
*entry = 0;
return PDE32_ENTRY_NOT_PRESENT;
} else {
- *entry = PAGE_ADDR(pde_entry->pt_base_addr);
-
+
if (pde_entry->large_page) {
- *entry += PAGE_OFFSET(addr);
+ pde32_4MB_t * large_pde = (pde32_4MB_t *)pde_entry;
+
+ *entry = PDE32_4MB_T_ADDR(*large_pde);
+ *entry += PD32_4MB_PAGE_OFFSET(addr);
return PDE32_ENTRY_LARGE_PAGE;
} else {
+ *entry = PDE32_T_ADDR(*pde_entry);
return PDE32_ENTRY_PTE32;
}
}
/* Takes a virtual addr (addr) and returns the physical addr (entry) as defined in the page table
*/
-int pte32_lookup(pte32_t * pte, addr_t addr, addr_t * entry) {
- pte32_t * pte_entry = &(pte[PTE32_INDEX(addr)]);
+int pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry) {
+ pte32_t * pte_entry = &(pt[PTE32_INDEX(addr)]);
if (!pte_entry->present) {
*entry = 0;
+ PrintDebug("Lookup at non present page (index=%d)\n", PTE32_INDEX(addr));
return -1;
} else {
- *entry = PAGE_ADDR(pte_entry->page_base_addr);
- *entry += PAGE_OFFSET(addr);
+ *entry = PTE32_T_ADDR(*pte_entry) + PT32_PAGE_OFFSET(addr);
return 0;
}
} else if ((entry->writable == 0) && (access_type.write == 1)) {
return PT_WRITE_ERROR;
} else if ((entry->user_page == 0) && (access_type.user == 1)) {
- // Check CR0.WP
+ // Check CR0.WP?
return PT_USER_ERROR;
}
} else if ((entry->writable == 0) && (access_type.write == 1)) {
return PT_WRITE_ERROR;
} else if ((entry->user_page == 0) && (access_type.user == 1)) {
- // Check CR0.WP
+ // Check CR0.WP?
return PT_USER_ERROR;
}
int i, j;
struct shadow_map * map = &(guest_info->mem_map);
-
pde32_t * pde = os_hooks->allocate_pages(1);
for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
- shadow_region_t * region = get_shadow_region_by_addr(map, current_page_addr);
+ struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
if (!region ||
- (region->host_type == HOST_REGION_NOTHING) ||
+ (region->host_type == HOST_REGION_HOOK) ||
(region->host_type == HOST_REGION_UNALLOCATED) ||
(region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
(region->host_type == HOST_REGION_REMOTE) ||