X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm_paging.c;h=127ec45cf7fb05b44b31c0db2ce2a5600263fe09;hb=f9bb3db89469169bb5775dc031d89e570c6fed70;hp=516073a0b6cd5f2a2c4575e2af04efcbfad40054;hpb=9ed850393ba752a7c0bb83673b5ff98f9573f2da;p=palacios.git

diff --git a/palacios/src/palacios/vmm_paging.c b/palacios/src/palacios/vmm_paging.c
index 516073a..127ec45 100644
--- a/palacios/src/palacios/vmm_paging.c
+++ b/palacios/src/palacios/vmm_paging.c
@@ -1,3 +1,6 @@
+/* Northwestern University */
+/* (c) 2008, Jack Lange */
+
 #include 
 #include 
@@ -5,10 +8,10 @@
 #include 
 
-extern struct vmm_os_hooks * os_hooks;
+
 
 void delete_page_tables_pde32(pde32_t * pde) {
-  int i, j;
+  int i;//, j;
 
   if (pde == NULL) {
     return;
@@ -17,43 +20,69 @@ void delete_page_tables_pde32(pde32_t * pde) {
   for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
     if (pde[i].present) {
       pte32_t * pte = (pte32_t *)(pde[i].pt_base_addr << PAGE_POWER);
-      
-      for (j = 0; (j < MAX_PTE32_ENTRIES); j++) {
+
+      /*
+	for (j = 0; (j < MAX_PTE32_ENTRIES); j++) {
 	if ((pte[j].present)) {
-	  os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
+	os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
 	}
-      }
-      
-      os_hooks->free_page(pte);
+	}
+      */
+      //PrintDebug("Deleting PTE %d (%x)\n", i, pte);
+      V3_FreePage(pte);
     }
   }
 
-  os_hooks->free_page(pde);
+  // PrintDebug("Deleting PDE (%x)\n", pde);
+  V3_FreePage(pde);
 }
 
+int pt32_lookup(pde32_t * pd, addr_t vaddr, addr_t * paddr) {
+  addr_t pde_entry;
+  pde32_entry_type_t pde_entry_type;
+
+  if (pd == 0) {
+    return -1;
+  }
+
+  pde_entry_type = pde32_lookup(pd, vaddr, &pde_entry);
+
+  if (pde_entry_type == PDE32_ENTRY_PTE32) {
+    return pte32_lookup((pte32_t *)pde_entry, vaddr, paddr);
+  } else if (pde_entry_type == PDE32_ENTRY_LARGE_PAGE) {
+    *paddr = pde_entry;
+    return 0;
+  }
+
+  return -1;
+}
+
 /* We can't do a full lookup because we don't know what context the page tables are in...
  * The entry addresses could be pointing to either guest physical memory or host physical memory
  * Instead we just return the entry address, and a flag to show if it points to a pte or a large page...
 */
-pde32_entry_type_t pde32_lookup(pde32_t * pde, addr_t addr, addr_t * entry) {
-  pde32_t * pde_entry = &(pde[PDE32_INDEX(addr)]);
+pde32_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry) {
+  pde32_t * pde_entry = &(pd[PDE32_INDEX(addr)]);
 
   if (!pde_entry->present) {
     *entry = 0;
     return PDE32_ENTRY_NOT_PRESENT;
   } else {
-    *entry = PAGE_ADDR(pde_entry->pt_base_addr);
-    
+
     if (pde_entry->large_page) {
-      *entry += PAGE_OFFSET(addr);
+      pde32_4MB_t * large_pde = (pde32_4MB_t *)pde_entry;
+
+      *entry = PDE32_4MB_T_ADDR(*large_pde);
+      *entry += PD32_4MB_PAGE_OFFSET(addr);
       return PDE32_ENTRY_LARGE_PAGE;
     } else {
+      *entry = PDE32_T_ADDR(*pde_entry);
       return PDE32_ENTRY_PTE32;
     }
   }
@@ -64,15 +93,15 @@ pde32_entry_type_t pde32_lookup(pde32_t * pde, addr_t addr, addr_t * entry) {
 
 /* Takes a virtual addr (addr) and returns the physical addr (entry) as defined in the page table */
-int pte32_lookup(pte32_t * pte, addr_t addr, addr_t * entry) {
-  pte32_t * pte_entry = &(pte[PTE32_INDEX(addr)]);
+int pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry) {
+  pte32_t * pte_entry = &(pt[PTE32_INDEX(addr)]);
 
   if (!pte_entry->present) {
     *entry = 0;
+    PrintDebug("Lookup at non present page (index=%d)\n", PTE32_INDEX(addr));
     return -1;
   } else {
-    *entry = PAGE_ADDR(pte_entry->page_base_addr);
-    *entry += PAGE_OFFSET(addr);
+    *entry = PTE32_T_ADDR(*pte_entry) + PT32_PAGE_OFFSET(addr);
     return 0;
   }
 
@@ -89,7 +118,7 @@ pt_access_status_t can_access_pde32(pde32_t * pde, addr_t addr, pf_error_t acces
   } else if ((entry->writable == 0) && (access_type.write == 1)) {
     return PT_WRITE_ERROR;
   } else if ((entry->user_page == 0) && (access_type.user == 1)) {
-    // Check CR0.WP
+    // Check CR0.WP?
     return PT_USER_ERROR;
   }
 
@@ -105,7 +134,7 @@ pt_access_status_t can_access_pte32(pte32_t * pte, addr_t addr, pf_error_t acces
   } else if ((entry->writable == 0) && (access_type.write == 1)) {
     return PT_WRITE_ERROR;
   } else if ((entry->user_page == 0) && (access_type.user == 1)) {
-    // Check CR0.WP
+    // Check CR0.WP?
    return PT_USER_ERROR;
   }
 
@@ -124,19 +153,18 @@ pde32_t * create_passthrough_pde32_pts(struct guest_info * guest_info) {
   int i, j;
   struct shadow_map * map = &(guest_info->mem_map);
 
-
-  pde32_t * pde = os_hooks->allocate_pages(1);
+  pde32_t * pde = V3_AllocPages(1);
 
   for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
     int pte_present = 0;
-    pte32_t * pte = os_hooks->allocate_pages(1);
+    pte32_t * pte = V3_AllocPages(1);
 
     for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
-      shadow_region_t * region = get_shadow_region_by_addr(map, current_page_addr);
+      struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
 
       if (!region ||
-	  (region->host_type == HOST_REGION_NOTHING) ||
+	  (region->host_type == HOST_REGION_HOOK) ||
	  (region->host_type == HOST_REGION_UNALLOCATED) ||
	  (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
	  (region->host_type == HOST_REGION_REMOTE) ||
@@ -180,7 +208,7 @@ pde32_t * create_passthrough_pde32_pts(struct guest_info * guest_info) {
     }
 
     if (pte_present == 0) {
-      os_hooks->free_page(pte);
+      V3_FreePage(pte);
 
       pde[i].present = 0;
       pde[i].writable = 0;
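
Editor's note: the commit above introduces pt32_lookup(), which composes pde32_lookup() and pte32_lookup() into a single walk of a 32-bit page directory, handling both 4KB and 4MB (large page) mappings. Below is a minimal sketch, not part of the commit, of how a caller might use it. The wrapper name translate_vaddr(), the include path, and the debug message are illustrative assumptions; only pt32_lookup(), PrintDebug(), pde32_t, and addr_t come from the diff itself.

/* Sketch only: translate a 32-bit virtual address through a page directory
 * using the pt32_lookup() helper added in this commit. */
#include <palacios/vmm_paging.h>   /* assumed header; the diff's include names were stripped */

static int translate_vaddr(pde32_t * pd, addr_t vaddr, addr_t * paddr) {
  /* pt32_lookup() walks the PDE level and, for 4KB mappings, the PTE level.
   * It returns 0 on success and -1 if either level is not present. */
  if (pt32_lookup(pd, vaddr, paddr) == -1) {
    PrintDebug("No mapping for virtual address %x\n", (unsigned int)vaddr);
    return -1;
  }

  return 0;
}

As the comment in the diff notes, the lookup does not know whose page tables pd describes, so the address written to *paddr is a guest physical or host physical address depending on the context in which the tables were built; the caller is responsible for interpreting it accordingly.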