From: Jack Lange
Date: Wed, 20 Oct 2010 23:03:00 +0000 (-0500)
Subject: some fixes to caching, still segfaults at the init process
X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=commitdiff_plain;h=230a94162d544dbd0988f8addd8a6a42dafac572;hp=c8a303c8512c0dafcb8dd5f83e6682729165d547;p=palacios.releases.git

some fixes to caching, still segfaults at the init process
---

diff --git a/palacios/src/palacios/mmu/vmm_shdw_pg_cache.c b/palacios/src/palacios/mmu/vmm_shdw_pg_cache.c
index 3f1f0ca..5ca6a0b 100644
--- a/palacios/src/palacios/mmu/vmm_shdw_pg_cache.c
+++ b/palacios/src/palacios/mmu/vmm_shdw_pg_cache.c
@@ -100,37 +100,37 @@ static inline int evict_pt(void * pt, addr_t va, page_type_t pt_type) {
 
     switch (pt_type) {
 	case PAGE_PD32: {
 	    pde32_t * pde = pt;
-	    pde[PDE32_INDEX(va)].writable = 1;
+	    pde[PDE32_INDEX(va)].present = 0;
 	    break;
 	}
 	case PAGE_4MB: {
 	    pde32_4MB_t * pde = pt;
-	    pde[PDE32_INDEX(va)].writable = 1;
+	    pde[PDE32_INDEX(va)].present = 0;
 	    break;
 	}
 	case PAGE_PT32: {
 	    pte32_t * pte = pt;
-	    pte[PTE32_INDEX(va)].writable = 1;
+	    pte[PTE32_INDEX(va)].present = 0;
 	    break;
 	}
 	case PAGE_PML464: {
 	    pml4e64_t * pml = pt;
-	    pml[PML4E64_INDEX(va)].writable = 1;
+	    pml[PML4E64_INDEX(va)].present = 0;
 	    break;
 	}
 	case PAGE_PDP64: {
 	    pdpe64_t * pdp = pt;
-	    pdp[PDPE64_INDEX(va)].writable = 1;
+	    pdp[PDPE64_INDEX(va)].present = 0;
 	    break;
 	}
 	case PAGE_PD64: {
 	    pde64_t * pde = pt;
-	    pde[PDE64_INDEX(va)].writable = 1;
+	    pde[PDE64_INDEX(va)].present = 0;
 	    break;
 	}
 	case PAGE_PT64: {
 	    pte64_t * pte = pt;
-	    pte[PTE64_INDEX(va)].writable = 1;
+	    pte[PTE64_INDEX(va)].present = 0;
 	    break;
 	}
 	default:
@@ -261,7 +261,7 @@ static int update_rmap_entries(struct v3_vm_info * vm, addr_t gpa) {
 	pg_data = (struct shdw_pg_data *)v3_htable_search(cache_state->page_htable, (addr_t)&tuple);
 
 	if (!pg_data) {
-	    PrintError("Invalid PTE reference...\n");
+	    PrintError("Invalid PTE reference... Should Delete rmap entry\n");
 	    continue;
 	}
@@ -341,6 +341,8 @@ static struct shdw_pg_data * pop_queue_pg(struct v3_vm_info * vm,
 					  struct cache_vm_state * cache_state) {
     struct shdw_pg_data * pg_data = NULL;
 
+    PrintError("popping page from queue\n");
+
     pg_data = list_tail_entry(&(cache_state->pg_queue), struct shdw_pg_data, pg_queue_node);
@@ -371,6 +373,8 @@ static struct shdw_pg_data * create_shdw_pt(struct v3_vm_info * vm, addr_t gpa,
 	pg_data->hva = (void *)V3_VAddr((void *)pg_data->hpa);
 
     } else if (cache_state->pgs_in_free_list) {
+
+	PrintError("pulling page from free list\n");
 
 	// pull from free list
 	pg_data = list_tail_entry(&(cache_state->free_list), struct shdw_pg_data, pg_queue_node);
diff --git a/palacios/src/palacios/mmu/vmm_shdw_pg_cache_32.h b/palacios/src/palacios/mmu/vmm_shdw_pg_cache_32.h
index 7d48f6b..2aae928 100644
--- a/palacios/src/palacios/mmu/vmm_shdw_pg_cache_32.h
+++ b/palacios/src/palacios/mmu/vmm_shdw_pg_cache_32.h
@@ -27,10 +27,15 @@ static inline int activate_shadow_pt_32(struct guest_info * core) {
     PrintDebug("Activating 32 Bit cacheable page tables\n");
 
     shdw_pg = find_shdw_pt(core->vm_info, gpa, PAGE_PD32);
-    PrintError("shdw_pg returned as %p\n", shdw_pg);
+    PrintError("shdw_pg returned as %p for CR3:%p\n", shdw_pg, (void *)gpa);
 
     if (shdw_pg == NULL) {
 	shdw_pg = create_shdw_pt(core->vm_info, gpa, PAGE_PD32);
+
+	// update current reverse map entries...
+	// We are now using this page in a PT, so:
+	// any existing writable mappings must be updated
+	update_rmap_entries(core->vm_info, gpa);
     }
 
     PrintDebug("shdw_pg now exists...\n");
@@ -133,7 +138,7 @@ static inline int handle_shadow_pagefault_32(struct guest_info * core, addr_t fa
 	    return 0;
 	}
 
-	
+
 	pte32_t * shadow_pt = NULL;
 	pte32_t * guest_pt = NULL;
@@ -141,7 +146,7 @@ static inline int handle_shadow_pagefault_32(struct guest_info * core, addr_t fa
 
 	/* Set up cache state */
 	addr_t gpa = BASE_TO_PAGE_ADDR_4KB(guest_pde->pt_base_addr);
-	
+
 	struct shdw_pg_data * shdw_page = NULL;
 	page_type_t pt_type = PAGE_PT32;
@@ -154,12 +159,15 @@ static inline int handle_shadow_pagefault_32(struct guest_info * core, addr_t fa
 
 	if (shdw_page == NULL) {
 	    shdw_page = create_shdw_pt(core->vm_info, gpa, pt_type);
-	}
-
-	// update current reverse map entries...
-	// We are now using this page in a PT, so:
-	// any existing writable mappings must be updated
-	update_rmap_entries(core->vm_info, gpa);
+
+	    if (pt_type == PAGE_PT32) {
+		// update current reverse map entries...
+		// We are now using this page in a PT, so:
+		// any existing writable mappings must be updated
+		update_rmap_entries(core->vm_info, gpa);
+	    }
+	}
+
 
 	struct shdw_pg_data * parent_page = find_shdw_pt(core->vm_info, guest_cr3, PAGE_PD32);
@@ -369,6 +377,8 @@ static int handle_pte_shadow_pagefault_32(struct guest_info * core, addr_t fault
 	}
 
 	if (pt_page != NULL) {
+	    PrintError("Found PT page (small), marking RD-ONLY (va=%p), (gpa=%p)\n",
+		       (void *)fault_addr, (void *)pg_gpa);
 	    // This is a page table page...
 	    shadow_pte->writable = 0;
 	    shadow_pte->vmm_info = V3_CACHED_PG;
@@ -524,6 +534,9 @@ static int handle_4MB_shadow_pagefault_pte_32(struct guest_info * core,
 
 	if (pt_page != NULL) {
 	    // This is a page table page...
+	    PrintError("Found PT page (large), marking RD-ONLY (va=%p), (gpa=%p)\n",
+		       (void *)fault_addr, (void *)pg_gpa);
+
 	    shadow_pte->writable = 0;
 	    shadow_pte->vmm_info = V3_CACHED_PG;
 	}
@@ -594,6 +607,8 @@ static inline int handle_shadow_invlpg_32(struct guest_info * core, addr_t vaddr
 
     guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(vaddr)]);
 
+    // Should we back propagate the invalidations, because they might be cached...??
+
     if (guest_pde->large_page == 1) {
 	shadow_pde->present = 0;
@@ -604,7 +619,7 @@ static inline int handle_shadow_invlpg_32(struct guest_info * core, addr_t vaddr
 
 
-	PrintError("\tInvalidating small page\n");
+	// PrintError("\tInvalidating small page\n");
 
 	shadow_pte->present = 0;
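
A note on the update_rmap_entries() call sites above: once a guest frame starts
being shadowed as a page table, any shadow PTE that already maps that frame
writable would let the guest edit its own page tables without the VMM noticing.
Below is a minimal sketch of that downgrade pass, assuming a per-gpa reverse-map
list; rmap_entry, write_protect_pt_frame, and the bit constants are hypothetical
stand-ins for the cache's rmap hashtable walk, not the Palacios API.

#include <stddef.h>
#include <stdint.h>

/* One reverse-map record per shadow PTE that maps a given gpa. */
struct rmap_entry {
    uint64_t * shadow_pte;      /* shadow PTE mapping the frame */
    struct rmap_entry * next;   /* next mapping of the same gpa */
};

#define PTE_WRITABLE   (1ULL << 1)
#define PTE_VMM_CACHED (1ULL << 9)  /* an ignored/AVL bit, in the spirit of V3_CACHED_PG */

/* Downgrade every existing mapping of a frame that just became a page table. */
static void write_protect_pt_frame(struct rmap_entry * mappings) {
    struct rmap_entry * re = NULL;

    for (re = mappings; re != NULL; re = re->next) {
	*(re->shadow_pte) &= ~PTE_WRITABLE;   /* future guest writes now fault */
	*(re->shadow_pte) |= PTE_VMM_CACHED;  /* the fault handler can then see a
						 cached PT page and invalidate the
						 shadow copy before allowing the
						 write to proceed */
    }
}

This is also why handle_pte_shadow_pagefault_32() and the 4MB variant mark
pt_page hits read-only with vmm_info = V3_CACHED_PG: the two paths catch the
same aliasing problem from opposite directions.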
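Similarly, the evict_pt() change swaps .writable = 1 for .present = 0: evicting
a cached shadow table has to sever the parent-level reference outright, so the
next guest access through that entry faults back into the shadow pager instead
of following a stale pointer. A sketch of the idea for a single entry follows;
the helper name is hypothetical, and the invlpg is an assumption about what the
surrounding eviction path must do (the diff itself does not show a TLB flush).

#include <stdint.h>

#define PTE_PRESENT (1ULL << 0)

/* Unhook an evicted shadow table from its parent entry. Clearing the present
 * bit (rather than touching the writable bit) guarantees the next access
 * through this entry #PFs into the shadow pager, which can rebuild it or
 * satisfy it from the page cache. */
static inline void evict_parent_entry(uint64_t * parent_entry, void * va) {
    *parent_entry &= ~PTE_PRESENT;

    /* flush the stale translation for this va on the current context */
    __asm__ __volatile__ ("invlpg (%0)" :: "r" (va) : "memory");
}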