X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm_paging.c;h=1a7dc7b8204c21ae8300911589fc14d564d7485c;hb=bce4e326ef3e3e9f9a0bd2fd06254c0d8edb8437;hp=6f1bc14f66b92f779b113502d74993a7f4c2b60b;hpb=e5d7715c14a23e72d742d402d4e4cdf97ffab697;p=palacios-OLD.git

diff --git a/palacios/src/palacios/vmm_paging.c b/palacios/src/palacios/vmm_paging.c
index 6f1bc14..1a7dc7b 100644
--- a/palacios/src/palacios/vmm_paging.c
+++ b/palacios/src/palacios/vmm_paging.c
@@ -46,6 +46,11 @@ static pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry);


+#ifndef DEBUG_SHADOW_PAGING
+#undef PrintDebug
+#define PrintDebug(fmt, args...)
+#endif
+

 void delete_page_tables_32(pde32_t * pde) {
@@ -414,11 +419,8 @@ int v3_check_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t va
 /*
  * PAGE TABLE LOOKUP FUNCTIONS
  *
- *
  * The value of entry is a return type:
  *    Page not present: *entry = 0
- *    Large Page: *entry = translated physical address (byte granularity)
- *    PTE entry: *entry is the address of the PTE Page
  */

 /**
@@ -667,7 +669,6 @@ pt_access_status_t inline v3_can_access_pte64(pte64_t * pte, addr_t addr, pf_err
 pde32_t * create_passthrough_pts_32(struct guest_info * guest_info) {
   addr_t current_page_addr = 0;
   int i, j;
-  struct shadow_map * map = &(guest_info->mem_map);

   pde32_t * pde = V3_VAddr(V3_AllocPages(1));

@@ -677,14 +678,10 @@ pde32_t * create_passthrough_pts_32(struct guest_info * guest_info) {

     for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
-      struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
+      struct v3_shadow_region * region = v3_get_shadow_region(guest_info, current_page_addr);

       if (!region ||
-          (region->host_type == HOST_REGION_HOOK) ||
-          (region->host_type == HOST_REGION_UNALLOCATED) ||
-          (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
-          (region->host_type == HOST_REGION_REMOTE) ||
-          (region->host_type == HOST_REGION_SWAPPED)) {
+          (region->host_type == SHDW_REGION_FULL_HOOK)) {
         pte[j].present = 0;
         pte[j].writable = 0;
         pte[j].user_page = 0;
@@ -699,7 +696,14 @@ pde32_t * create_passthrough_pts_32(struct guest_info * guest_info) {
       } else {
         addr_t host_addr;
         pte[j].present = 1;
-        pte[j].writable = 1;
+
+        if (region->host_type == SHDW_REGION_WRITE_HOOK) {
+          pte[j].writable = 0;
+          PrintDebug("Marking Write hook host_addr %p as RO\n", (void *)current_page_addr);
+        } else {
+          pte[j].writable = 1;
+        }
+
         pte[j].user_page = 1;
         pte[j].write_through = 0;
         pte[j].cache_disable = 0;
@@ -764,7 +768,6 @@ pde32_t * create_passthrough_pts_32(struct guest_info * guest_info) {
 pdpe32pae_t * create_passthrough_pts_32PAE(struct guest_info * guest_info) {
   addr_t current_page_addr = 0;
   int i, j, k;
-  struct shadow_map * map = &(guest_info->mem_map);

   pdpe32pae_t * pdpe = V3_VAddr(V3_AllocPages(1));
   memset(pdpe, 0, PAGE_SIZE);
@@ -781,14 +784,10 @@ pdpe32pae_t * create_passthrough_pts_32PAE(struct guest_info * guest_info) {

       for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
-        struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
+        struct v3_shadow_region * region = v3_get_shadow_region(guest_info, current_page_addr);

         if (!region ||
-            (region->host_type == HOST_REGION_HOOK) ||
-            (region->host_type == HOST_REGION_UNALLOCATED) ||
-            (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
-            (region->host_type == HOST_REGION_REMOTE) ||
-            (region->host_type == HOST_REGION_SWAPPED)) {
+            (region->host_type == SHDW_REGION_FULL_HOOK)) {
           pte[k].present = 0;
           pte[k].writable = 0;
           pte[k].user_page = 0;
@@ -804,7 +803,13 @@ pdpe32pae_t * create_passthrough_pts_32PAE(struct guest_info * guest_info) {
         } else {
           addr_t host_addr;
           pte[k].present = 1;
-          pte[k].writable = 1;
+
+          if (region->host_type == SHDW_REGION_WRITE_HOOK) {
+            pte[k].writable = 0;
+          } else {
+            pte[k].writable = 1;
+          }
+
           pte[k].user_page = 1;
           pte[k].write_through = 0;
           pte[k].cache_disable = 0;
@@ -903,7 +908,6 @@ pdpe32pae_t * create_passthrough_pts_32PAE(struct guest_info * guest_info) {
 pml4e64_t * create_passthrough_pts_64(struct guest_info * info) {
   addr_t current_page_addr = 0;
   int i, j, k, m;
-  struct shadow_map * map = &(info->mem_map);

   pml4e64_t * pml = V3_VAddr(V3_AllocPages(1));

@@ -921,16 +925,12 @@ pml4e64_t * create_passthrough_pts_64(struct guest_info * info) {

         for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
-          struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
+          struct v3_shadow_region * region = v3_get_shadow_region(info, current_page_addr);

           if (!region ||
-              (region->host_type == HOST_REGION_HOOK) ||
-              (region->host_type == HOST_REGION_UNALLOCATED) ||
-              (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
-              (region->host_type == HOST_REGION_REMOTE) ||
-              (region->host_type == HOST_REGION_SWAPPED)) {
+              (region->host_type == SHDW_REGION_FULL_HOOK)) {
            pte[m].present = 0;
            pte[m].writable = 0;
            pte[m].user_page = 0;
@@ -945,7 +945,13 @@ pml4e64_t * create_passthrough_pts_64(struct guest_info * info) {
          } else {
            addr_t host_addr;
            pte[m].present = 1;
-           pte[m].writable = 1;
+
+           if (region->host_type == SHDW_REGION_WRITE_HOOK) {
+             pte[m].writable = 0;
+           } else {
+             pte[m].writable = 1;
+           }
+
            pte[m].user_page = 1;
            pte[m].write_through = 0;
            pte[m].cache_disable = 0;
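
For readers following the change: all three passthrough page-table builders now apply the same per-entry policy, sketched roughly below for the 32-bit case only. This is an illustrative rendering, not code from the patch; the helper name setup_passthrough_pte32 is hypothetical and the host-address lookup is deliberately omitted, while v3_get_shadow_region(), SHDW_REGION_FULL_HOOK, and SHDW_REGION_WRITE_HOOK are the identifiers introduced by the diff.

/*
 * Sketch only (not part of the patch): the per-PTE decision each passthrough
 * builder now makes.  A missing region or a SHDW_REGION_FULL_HOOK region is
 * left not-present, so any guest access faults and can be forwarded to the
 * hook handler; a SHDW_REGION_WRITE_HOOK region is mapped read-only so only
 * writes fault; everything else is mapped read-write.
 *
 * Assumed headers: palacios/vmm_paging.h and palacios/vmm_mem.h.
 * The real code does this inline in the j/k/m loops rather than in a helper.
 */
static void setup_passthrough_pte32(struct guest_info * info,
                                    pte32_t * pte, addr_t guest_addr) {
    struct v3_shadow_region * region = v3_get_shadow_region(info, guest_addr);

    if (!region || (region->host_type == SHDW_REGION_FULL_HOOK)) {
        pte->present = 0;     /* every access faults into the hook path */
        pte->writable = 0;
        pte->user_page = 0;
        return;
    }

    pte->present = 1;
    /* Write-hooked regions stay readable but trap on writes. */
    pte->writable = (region->host_type == SHDW_REGION_WRITE_HOOK) ? 0 : 1;
    pte->user_page = 1;
    pte->write_through = 0;
    pte->cache_disable = 0;

    /* The real builders also translate guest_addr to a host physical address
     * and store it in the PTE; that lookup is omitted here because its exact
     * API is not shown in this diff. */
}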