-/*
- For now we'll do something a little more lightweight
-int cache_page_tables32(struct guest_info * info, addr_t pde) {
- struct shadow_page_state * state = &(info->shdw_pg_state);
- addr_t pde_host_addr;
- pde32_t * tmp_pde;
- struct hashtable * pte_cache = NULL;
- int i = 0;
-
-
- pte_cache = (struct hashtable *)find_cr3_in_cache(state->cr3_cache, pde);
- if (pte_cache != NULL) {
- PrintError("CR3 already present in cache\n");
- state->current_ptes = pte_cache;
- return 1;
- } else {
- PrintError("Creating new CR3 cache entry\n");
- pte_cache = create_hashtable(0, &pte_hash_fn, &pte_equals);
- state->current_ptes = pte_cache;
- add_cr3_to_cache(state->cr3_cache, pde, pte_cache);
- }
-
- if (guest_pa_to_host_va(info, pde, &pde_host_addr) == -1) {
- PrintError("Could not lookup host address of guest PDE\n");
- return -1;
- }
-
- tmp_pde = (pde32_t *)pde_host_addr;
-
- add_pte_map(pte_cache, pde, pde_host_addr);
-
-
- for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
- if ((tmp_pde[i].present) && (tmp_pde[i].large_page == 0)) {
- addr_t pte_host_addr;
-
- if (guest_pa_to_host_va(info, (addr_t)(PDE32_T_ADDR(tmp_pde[i])), &pte_host_addr) == -1) {
- PrintError("Could not lookup host address of guest PDE\n");
- return -1;
- }
-
- add_pte_map(pte_cache, (addr_t)(PDE32_T_ADDR(tmp_pde[i])), pte_host_addr);
- }
- }
-
-
- return 0;
-}
-*/
-
int v3_cache_page_tables(struct guest_info * info, addr_t cr3) {
switch(v3_get_cpu_mode(info)) {
return -1;
}
-static void activate_shadow_pt_64_cb(page_type_t type, addr_t page_ptr, addr_t page_pa, void * private_data) {
- PrintDebug("CB: Page: %p, Type: %s\n", (void *)page_pa, v3_page_type_to_str(type));
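+// Debug callback for the 64-bit shadow page table walk (comment added for clarity;
+// the role is inferred from the function's name and body): it is handed each page the
+// walk reports and only logs the guest virtual address, the guest physical address it
+// maps to, and the host-accessible pointer to the backing page.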
+static void activate_shadow_pt_64_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
+ PrintDebug("CB: Page: %p->%p (host_ptr=%p), Type: %s\n", (void *)vaddr, (void *)page_pa, (void *)page_ptr, v3_page_type_to_str(type));
}
}
-/*
-
-
-static int create_pd32_nonaligned_4MB_page(struct guest_info * info, pte32_t * pt, addr_t guest_addr, pde32_4MB_t * large_shadow_pde) {
- uint_t i = 0;
- pte32_t * pte_cursor;
- addr_t guest_pa = 0;
-
- for (i = 0; i < 1024; i++) {
- guest_pa = guest_addr + (PAGE_SIZE * i);
- host_region_type_t host_page_type = get_shadow_addr_type(info, guest_pa);
-
- pte_cursor = &(pt[i]);
-
- if (host_page_type == HOST_REGION_INVALID) {
- // Currently we don't support this, but in theory we could
- PrintError("Invalid Host Memory Type\n");
- return -1;
- } else if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
- addr_t shadow_pa = get_shadow_addr(info, guest_pa);
-
-
- pte_cursor->page_base_addr = PT32_BASE_ADDR(shadow_pa);
- pte_cursor->present = 1;
- pte_cursor->writable = large_shadow_pde->writable;
- pte_cursor->user_page = large_shadow_pde->user_page;
- pte_cursor->write_through = 0;
- pte_cursor->cache_disable = 0;
- pte_cursor->global_page = 0;
-
- } else {
- PrintError("Unsupported Host Memory Type\n");
- return -1;
- }
- }
- return 0;
-}
-
-
-static int handle_large_pagefault32(struct guest_info * info,
- pde32_t * guest_pde, pde32_t * shadow_pde,
- addr_t fault_addr, pf_error_t error_code ) {
- struct shadow_region * mem_reg;
- pde32_4MB_t * large_guest_pde = (pde32_4MB_t *)guest_pde;
- pde32_4MB_t * large_shadow_pde = (pde32_4MB_t *)shadow_pde;
- host_region_type_t host_page_type;
- addr_t guest_start_addr = PDE32_4MB_T_ADDR(*large_guest_pde);
- // addr_t guest_end_addr = guest_start_addr + PAGE_SIZE_4MB; // start address + 4MB
-
-
- // Check that the Guest PDE entry points to valid memory
- // else Machine Check the guest
- PrintDebug("Large Page: Page Base Addr=%x\n", guest_start_addr);
-
- host_page_type = get_shadow_addr_type(info, guest_start_addr);
-
- if (host_page_type == HOST_REGION_INVALID) {
- PrintError("Invalid guest address in large page (0x%x)\n", guest_start_addr);
- v3_raise_exception(info, MC_EXCEPTION);
- return -1;
- }
-
- // else...
-
- if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
-
- addr_t host_start_addr = 0;
- addr_t region_end_addr = 0;
-
- // Check for a large enough region in host memory
- mem_reg = get_shadow_region_by_addr(&(info->mem_map), guest_start_addr);
- PrintDebug("Host region: host_addr=%x (guest_start=%x, end=%x)\n",
- mem_reg->host_addr, mem_reg->guest_start, mem_reg->guest_end);
- host_start_addr = mem_reg->host_addr + (guest_start_addr - mem_reg->guest_start);
- region_end_addr = mem_reg->host_addr + (mem_reg->guest_end - mem_reg->guest_start);
-
- PrintDebug("Host Start Addr=%x; Region End Addr=%x\n", host_start_addr, region_end_addr);
-
-
- //4f
- if (large_guest_pde->dirty == 1) { // dirty
- large_shadow_pde->writable = guest_pde->writable;
- } else if (error_code.write == 1) { // not dirty, access is write
- large_shadow_pde->writable = guest_pde->writable;
- large_guest_pde->dirty = 1;
- } else { // not dirty, access is read
- large_shadow_pde->writable = 0;
- }
-
-
- // Check if the region is at least an additional 4MB
-
-
- //4b.
- if ((PD32_4MB_PAGE_OFFSET(host_start_addr) == 0) &&
- (region_end_addr >= host_start_addr + PAGE_SIZE_4MB)) { // if 4MB boundary
- large_shadow_pde->page_base_addr = PD32_4MB_BASE_ADDR(host_start_addr);
- } else { // else generate 4k pages
- pte32_t * shadow_pt = NULL;
- PrintDebug("Handling non aligned large page\n");
-
- shadow_pde->large_page = 0;
-
- shadow_pt = create_new_shadow_pt32();
-
- if (create_pd32_nonaligned_4MB_page(info, shadow_pt, guest_start_addr, large_shadow_pde) == -1) {
- PrintError("Non Aligned Large Page Error\n");
- V3_Free(shadow_pt);
- return -1;
- }
-
-
-#ifdef DEBUG_SHADOW_PAGING
- PrintDebug("non-aligned Shadow PT\n");
- PrintPT32(PT32_PAGE_ADDR(fault_addr), shadow_pt);
-#endif
- shadow_pde->pt_base_addr = PD32_BASE_ADDR(shadow_pt);
- }
-
- } else {
- // Handle hooked pages as well as other special pages
- if (handle_special_page_fault(info, fault_addr, guest_start_addr, error_code) == -1) {
- PrintError("Special Page Fault handler returned error for address: %x\n", fault_addr);
- return -1;
- }
- }
-
- return 0;
-}
-*/