X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fmmu%2Fvmm_shdw_pg_cache_32.h;h=fab26fdb82f23f33630267a8eb4186dab8b639a7;hb=8340fc900a69b5e4093ae77e0984c12f157dca28;hp=7d48f6b157443ec4dcfb9bbdee461768c05cd237;hpb=c8a303c8512c0dafcb8dd5f83e6682729165d547;p=palacios.git

diff --git a/palacios/src/palacios/mmu/vmm_shdw_pg_cache_32.h b/palacios/src/palacios/mmu/vmm_shdw_pg_cache_32.h
index 7d48f6b..fab26fd 100644
--- a/palacios/src/palacios/mmu/vmm_shdw_pg_cache_32.h
+++ b/palacios/src/palacios/mmu/vmm_shdw_pg_cache_32.h
@@ -27,10 +27,15 @@ static inline int activate_shadow_pt_32(struct guest_info * core) {
     PrintDebug("Activating 32 Bit cacheable page tables\n");
 
     shdw_pg = find_shdw_pt(core->vm_info, gpa, PAGE_PD32);
-    PrintError("shdw_pg returned as %p\n", shdw_pg);
+    PrintError("shdw_pg returned as %p for CR3:%p\n", shdw_pg, (void *)gpa);
 
     if (shdw_pg == NULL) {
         shdw_pg = create_shdw_pt(core->vm_info, gpa, PAGE_PD32);
+
+        // update current reverse map entries...
+        // We are now using this page in a PT, so:
+        //   any existing writable mappings must be updated
+        update_rmap_entries(core->vm_info, gpa);
     }
 
     PrintDebug("shdw_pg now exists...\n");
@@ -133,7 +138,7 @@ static inline int handle_shadow_pagefault_32(struct guest_info * core, addr_t fa
         return 0;
     }
 
-    
+
     pte32_t * shadow_pt = NULL;
     pte32_t * guest_pt = NULL;
 
@@ -141,7 +146,7 @@ static inline int handle_shadow_pagefault_32(struct guest_info * core, addr_t fa
 
     /* Set up cache state */
     addr_t gpa = BASE_TO_PAGE_ADDR_4KB(guest_pde->pt_base_addr);
-    
+
     struct shdw_pg_data * shdw_page = NULL;
     page_type_t pt_type = PAGE_PT32;
 
@@ -154,12 +159,15 @@ static inline int handle_shadow_pagefault_32(struct guest_info * core, addr_t fa
 
     if (shdw_page == NULL) {
         shdw_page = create_shdw_pt(core->vm_info, gpa, pt_type);
-    }
-
-    // update current reverse map entries...
-    // We are now using this page in a PT, so:
-    //   any existing writable mappings must be updated
-    update_rmap_entries(core->vm_info, gpa);
+
+        if (pt_type == PAGE_PT32) {
+            // update current reverse map entries...
+            // We are now using this page in a PT, so:
+            //   any existing writable mappings must be updated
+            update_rmap_entries(core->vm_info, gpa);
+        }
+    }
+
 
     struct shdw_pg_data * parent_page = find_shdw_pt(core->vm_info, guest_cr3, PAGE_PD32);
 
@@ -269,7 +277,7 @@ static int handle_pte_shadow_pagefault_32(struct guest_info * core, addr_t fault
     pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
     addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);
 
-    struct v3_mem_region * shdw_reg = v3_get_mem_region(core->vm_info, core->cpu_id, guest_pa);
+    struct v3_mem_region * shdw_reg = v3_get_mem_region(core->vm_info, core->vcpu_id, guest_pa);
 
     if (shdw_reg == NULL) {
         // Inject a machine check in the guest
@@ -369,6 +377,8 @@ static int handle_pte_shadow_pagefault_32(struct guest_info * core, addr_t fault
             }
 
             if (pt_page != NULL) {
+                PrintError("Found PT page (small), marking RD-ONLY (va=%p), (gpa=%p)\n",
+                           (void *)fault_addr, (void *)pg_gpa);
                 // This is a page table page...
                 shadow_pte->writable = 0;
                 shadow_pte->vmm_info = V3_CACHED_PG;
@@ -449,7 +459,7 @@ static int handle_4MB_shadow_pagefault_pte_32(struct guest_info * core,
     PrintDebug("Handling 4MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
     PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);
 
-    struct v3_mem_region * shdw_reg = v3_get_mem_region(core->vm_info, core->cpu_id, guest_fault_pa);
+    struct v3_mem_region * shdw_reg = v3_get_mem_region(core->vm_info, core->vcpu_id, guest_fault_pa);
 
     if (shdw_reg == NULL) {
 
@@ -524,6 +534,9 @@ static int handle_4MB_shadow_pagefault_pte_32(struct guest_info * core,
 
             if (pt_page != NULL) {
                 // This is a page table page...
+                PrintError("Found PT page (large), marking RD-ONLY (va=%p), (gpa=%p)\n",
+                           (void *)fault_addr, (void *)pg_gpa);
+
                 shadow_pte->writable = 0;
                 shadow_pte->vmm_info = V3_CACHED_PG;
             }
@@ -594,6 +607,8 @@ static inline int handle_shadow_invlpg_32(struct guest_info * core, addr_t vaddr
 
     guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(vaddr)]);
 
+    // Should we back propagate the invalidations, because they might be cached...??
+
     if (guest_pde->large_page == 1) {
         shadow_pde->present = 0;
 
@@ -604,7 +619,7 @@ static inline int handle_shadow_invlpg_32(struct guest_info * core, addr_t vaddr
 
-        PrintError("\tInvalidating small page\n");
+        // PrintError("\tInvalidating small page\n");
 
         shadow_pte->present = 0;
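
Note: the functional core of this change is that update_rmap_entries() now runs only when a shadow page is actually created (and, in the page fault path, only for PAGE_PT32 pages), rather than unconditionally after every find_shdw_pt() lookup. A minimal sketch of that lookup/create pattern follows; it assumes the Palacios-internal helpers exactly as they appear in the hunks above (find_shdw_pt, create_shdw_pt, update_rmap_entries), and the wrapper name get_or_create_shdw_pt is hypothetical, used here only for illustration.

    /* Sketch only, not part of the patch: mirrors the post-patch flow in
     * handle_shadow_pagefault_32().  The wrapper name is hypothetical; the
     * helper calls and types are the ones used in the hunks above. */
    static struct shdw_pg_data * get_or_create_shdw_pt(struct guest_info * core,
                                                       addr_t gpa, page_type_t pt_type) {
        struct shdw_pg_data * pg = find_shdw_pt(core->vm_info, gpa, pt_type);

        if (pg == NULL) {
            pg = create_shdw_pt(core->vm_info, gpa, pt_type);

            if (pt_type == PAGE_PT32) {
                // The guest frame at gpa now backs a page table, so any
                // existing writable shadow mappings of it must be downgraded.
                update_rmap_entries(core->vm_info, gpa);
            }
        }

        return pg;
    }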