/* Gets the base address needed for a Page Table entry */
/* Deprecated: superseded by the generic macros below */
-#define PD32_BASE_ADDR(x) (((uint_t)x) >> 12)
-#define PT32_BASE_ADDR(x) (((uint_t)x) >> 12)
-#define PD32_4MB_BASE_ADDR(x) (((uint_t)x) >> 22)
-
-#define PML4E64_BASE_ADDR(x) (((ullong_t)x) >> 12)
-#define PDPE64_BASE_ADDR(x) (((ullong_t)x) >> 12)
-#define PDE64_BASE_ADDR(x) (((ullong_t)x) >> 12)
-#define PTE64_BASE_ADDR(x) (((ullong_t)x) >> 12)
-/* Accessor functions for the page table structures */
-#define PDE32_T_ADDR(x) (((x).pt_base_addr) << 12)
-#define PTE32_T_ADDR(x) (((x).page_base_addr) << 12)
-#define PDE32_4MB_T_ADDR(x) (((x).page_base_addr) << 22)
-
+/*
+ #define PD32_BASE_ADDR(x) (((uint_t)x) >> 12)
+ #define PT32_BASE_ADDR(x) (((uint_t)x) >> 12)
+ #define PD32_4MB_BASE_ADDR(x) (((uint_t)x) >> 22)
+
+ #define PML4E64_BASE_ADDR(x) (((ullong_t)x) >> 12)
+ #define PDPE64_BASE_ADDR(x) (((ullong_t)x) >> 12)
+ #define PDE64_BASE_ADDR(x) (((ullong_t)x) >> 12)
+ #define PTE64_BASE_ADDR(x) (((ullong_t)x) >> 12)
+
+ // Accessor functions for the page table structures
+ #define PDE32_T_ADDR(x) (((x).pt_base_addr) << 12)
+ #define PTE32_T_ADDR(x) (((x).page_base_addr) << 12)
+ #define PDE32_4MB_T_ADDR(x) (((x).page_base_addr) << 22)
+*/
/* Replace The above with these... */
-#define PAGE_BASE_ADDR(x) (((uint_t)x) >> 12)
-#define LARGE_PAGE_BASE_ADDR(x) (((uint_t)x) >> 22)
-#define BASE_TO_PAGE_ADDR(x) (((uint_t)x) << 12)
-#define LARGE_BASE_TO_PAGE_ADDR(x) (((uint_t)x) << 22)
/* Convert between full addresses and page-table base-address fields.
 * PAGE_BASE_ADDR* drop the page-offset bits; BASE_TO_PAGE_ADDR* restore them.
 * The argument is fully parenthesized so the macros stay safe for
 * expression arguments: without the inner parens, BASE_TO_PAGE_ADDR(a + b)
 * would expand to (((addr_t)a + b) << 12), casting only `a`.
 */
#define PAGE_BASE_ADDR(x) ((x) >> 12)
#define PAGE_BASE_ADDR_2MB(x) ((x) >> 21)
#define PAGE_BASE_ADDR_4MB(x) ((x) >> 22)
#define BASE_TO_PAGE_ADDR(x) (((addr_t)(x)) << 12)
#define BASE_TO_PAGE_ADDR_2MB(x) (((addr_t)(x)) << 21)
#define BASE_TO_PAGE_ADDR_4MB(x) (((addr_t)(x)) << 22)
+/* *** */
+/* Deprecated */
+/*
+ #define PT32_PAGE_OFFSET(x) (((uint_t)x) & 0xfff)
+ #define PD32_4MB_PAGE_OFFSET(x) (((uint_t)x) & 0x003fffff)
+
+ #define PT32_PAGE_ADDR(x) (((uint_t)x) & 0xfffff000)
+ #define PD32_4MB_PAGE_ADDR(x) (((uint_t)x) & 0xffc00000)
+
+ #define PT32_PAGE_POWER 12
+ #define PAGE_ALIGNED_ADDR(x) (((uint_t) (x)) >> 12)
+ //#define PAGE_ADDR(x) (PAGE_ALIGNED_ADDR(x) << 12)
+ #define PAGE_POWER 12
+ #define PAGE_SIZE 4096
+*/
+/* use these instead */
/* Offset of an address within a 4KB, 2MB, or 4MB page */
#define PAGE_OFFSET(x) ((x) & 0xfff)
#define PAGE_OFFSET_2MB(x) ((x) & 0x1fffff)
#define PAGE_OFFSET_4MB(x) ((x) & 0x3fffff)
-#define PT32_PAGE_ADDR(x) (((uint_t)x) & 0xfffff000)
-#define PT32_PAGE_OFFSET(x) (((uint_t)x) & 0xfff)
-#define PT32_PAGE_POWER 12
/* log2 of each supported page size.
 * 2MB pages use 21 offset bits and 4MB pages use 22 (matching the
 * PAGE_BASE_ADDR_2MB/_4MB shift amounts above); the previous revision
 * had the two values swapped, which broke PAGE_ADDR_2MB/PAGE_ADDR_4MB.
 */
#define PAGE_POWER 12
#define PAGE_POWER_2MB 21
#define PAGE_POWER_4MB 22
-#define PD32_4MB_PAGE_ADDR(x) (((uint_t)x) & 0xffc00000)
-#define PD32_4MB_PAGE_OFFSET(x) (((uint_t)x) & 0x003fffff)
-#define PAGE_SIZE_4MB (4096 * 1024)
+// We shift instead of mask because we don't know the address size
+#define PAGE_ADDR(x) (((x) >> PAGE_POWER) << PAGE_POWER)
+#define PAGE_ADDR_2MB(x) (((x) >> PAGE_POWER_2MB) << PAGE_POWER_2MB)
+#define PAGE_ADDR_4MB(x) (((x) >> PAGE_POWER_4MB) << PAGE_POWER_4MB)
-/* The following should be phased out */
-#define PAGE_OFFSET(x) ((((uint_t)x) & 0xfff))
-#define PAGE_ALIGNED_ADDR(x) (((uint_t) (x)) >> 12)
-#define PAGE_ADDR(x) (PAGE_ALIGNED_ADDR(x) << 12)
-#define PAGE_POWER 12
/* Page sizes in bytes for 4KB, 2MB, and 4MB pages */
#define PAGE_SIZE 4096
#define PAGE_SIZE_2MB (4096 * 512)
#define PAGE_SIZE_4MB (4096 * 1024)
+
+
+/* *** */
+
+
uchar_t instr[15];
int ret;
struct emulated_page * data_page = V3_Malloc(sizeof(struct emulated_page));
- addr_t data_addr_offset = PT32_PAGE_OFFSET(read_gva);
+ addr_t data_addr_offset = PAGE_OFFSET(read_gva);
pte32_t saved_pte;
PrintDebug("Emulating Read\n");
*/
data_page->page_addr = get_new_page();
- data_page->va = PT32_PAGE_ADDR(read_gva);
+ data_page->va = PAGE_ADDR(read_gva);
data_page->pte.present = 1;
data_page->pte.writable = 0;
data_page->pte.user_page = 1;
- data_page->pte.page_base_addr = PT32_BASE_ADDR((addr_t)V3_PAddr((void *)(addr_t)(data_page->page_addr)));
+ data_page->pte.page_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr((void *)(addr_t)(data_page->page_addr)));
// Read the data directly onto the emulated page
if (saved_pte.present == 1) {
struct saved_page * saved_data_page = V3_Malloc(sizeof(struct saved_page));
saved_data_page->pte = saved_pte;
- saved_data_page->va = PT32_PAGE_ADDR(read_gva);
+ saved_data_page->va = PAGE_ADDR(read_gva);
list_add(&(saved_data_page->page_list), &(info->emulator.saved_pages));
info->emulator.num_saved_pages++;
int ret;
struct write_region * write_op = V3_Malloc(sizeof(struct write_region ));
struct emulated_page * data_page = V3_Malloc(sizeof(struct emulated_page));
- addr_t data_addr_offset = PT32_PAGE_OFFSET(write_gva);
+ addr_t data_addr_offset = PAGE_OFFSET(write_gva);
pte32_t saved_pte;
int i;
*/
data_page->page_addr = get_new_page();
- data_page->va = PT32_PAGE_ADDR(write_gva);
+ data_page->va = PAGE_ADDR(write_gva);
data_page->pte.present = 1;
data_page->pte.writable = 1;
data_page->pte.user_page = 1;
- data_page->pte.page_base_addr = PT32_BASE_ADDR((addr_t)V3_PAddr((void *)(addr_t)(data_page->page_addr)));
+ data_page->pte.page_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr((void *)(addr_t)(data_page->page_addr)));
if (saved_pte.present == 1) {
struct saved_page * saved_data_page = V3_Malloc(sizeof(struct saved_page));
saved_data_page->pte = saved_pte;
- saved_data_page->va = PT32_PAGE_ADDR(write_gva);
+ saved_data_page->va = PAGE_ADDR(write_gva);
list_add(&(saved_data_page->page_list), &(info->emulator.saved_pages));
info->emulator.num_saved_pages++;
/* Translate a guest virtual address to a guest physical address by
 * walking the guest's 32-bit page tables rooted at guest_cr3.
 * Because the guest page tables live in guest physical memory, each
 * level must be mapped into the host (guest_pa_to_host_va) before it
 * can be read.
 * Returns 0 on success with *paddr set; -1 on any lookup failure.
 */
int translate_guest_pt_32(struct guest_info * info, addr_t guest_cr3, addr_t vaddr, addr_t * paddr) {
    addr_t guest_pde_pa = CR3_TO_PDE32_PA((void *)guest_cr3);
    pde32_t * guest_pde = 0;
    addr_t guest_pte_pa = 0;

    /* Map the guest's page directory into the host address space */
    if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t*)&guest_pde) == -1) {
	PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
		   (void *)guest_pde_pa);
	return -1;
    }

    switch (pde32_lookup(guest_pde, vaddr, &guest_pte_pa)) {
	case PDE32_ENTRY_NOT_PRESENT:
	    *paddr = 0;
	    return -1;
	case PDE32_ENTRY_LARGE_PAGE:
	    /* Large page: pde32_lookup already produced the full
	     * byte-granular translated address */
	    *paddr = guest_pte_pa;
	    return 0;
	case PDE32_ENTRY_PTE32:
	    {
		pte32_t * guest_pte;

		/* The PTE page is also guest-physical; map it before walking */
		if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t*)&guest_pte) == -1) {
		    PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
			       (void *)guest_pte_pa);
		    return -1;
		}

		if (pte32_lookup(guest_pte, vaddr, paddr) == -1) {
		    return -1;
		}
	    }
    }

    return 0;
}
/* Translate a host virtual address to a host physical address by
 * walking the host's 32-bit page tables rooted at host_cr3.  The host
 * tables are directly addressable, so no intermediate mapping is needed.
 * Returns 0 on success with *paddr set; -1 if the mapping is not present.
 */
int translate_host_pt_32(addr_t host_cr3, addr_t vaddr, addr_t * paddr) {
    pde32_t * pde = (pde32_t *)CR3_TO_PDE32_VA((void *)host_cr3);
    pte32_t * pte = 0;

    switch (pde32_lookup(pde, vaddr, (addr_t *)&pte)) {
	case PDE32_ENTRY_NOT_PRESENT:
	    /* No page directory entry: translation fails */
	    *paddr = 0;
	    return -1;
	case PDE32_ENTRY_LARGE_PAGE:
	    /* pde32_lookup returned the full byte-granular address */
	    *paddr = (addr_t)pte;
	    return 0;
	case PDE32_ENTRY_PTE32:
	    /* Second-level walk; pte32_lookup fills in *paddr on success */
	    if (pte32_lookup(pte, vaddr, paddr) == -1) {
		return -1;
	    }
    }

    return 0;
}
+
+
+int translate_host_pt_32pae(addr_t host_cr3, addr_t vaddr, addr_t * paddr) {
+ pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA((void *)host_cr3);
+ pte32_t * host_pte = 0;
+
+ switch (pde32_lookup(host_pde, vaddr, (addr_t *)&host_pte)) {
+ case PDE32_ENTRY_NOT_PRESENT:
+ *paddr = 0;
+ return -1;
+ case PDE32_ENTRY_LARGE_PAGE:
+ *paddr = (addr_t)host_pte;
+ return 0;
+ case PDE32_ENTRY_PTE32:
+ if (pte32_lookup(host_pte, vaddr, paddr) == -1) {
+ return -1;
+ }
+ }
+
+ return -1;
+}
+
+
+int translate_host_pt_64(addr_t host_cr3, addr_t vaddr, addr_t * paddr) {
+ pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA((void *)host_cr3);
+ pte32_t * host_pte = 0;
+
+ switch (pde32_lookup(host_pde, vaddr, (addr_t *)&host_pte)) {
+ case PDE32_ENTRY_NOT_PRESENT:
+ *paddr = 0;
+ return -1;
+ case PDE32_ENTRY_LARGE_PAGE:
+ *paddr = (addr_t)host_pte;
+ return 0;
+ case PDE32_ENTRY_PTE32:
+ if (pte32_lookup(host_pte, vaddr, paddr) == -1) {
+ return -1;
+ }
+ }
return -1;
}
* The entry addresses could be pointing to either guest physical memory or host physical memory
* Instead we just return the entry address, and a flag to show if it points to a pte or a large page...
*/
+/* The meaning of *entry depends on the return value:
+ *   PDE32_ENTRY_NOT_PRESENT: *entry = 0
+ *   PDE32_ENTRY_LARGE_PAGE:  *entry = translated physical address (byte granularity)
+ *   PDE32_ENTRY_PTE32:       *entry = physical address of the PTE page
+ */
pde32_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry) {
pde32_t * pde_entry = &(pd[PDE32_INDEX(addr)]);
if (pde_entry->large_page) {
pde32_4MB_t * large_pde = (pde32_4MB_t *)pde_entry;
- *entry = PDE32_4MB_T_ADDR(*large_pde);
- *entry += PD32_4MB_PAGE_OFFSET(addr);
+ *entry = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
+ *entry += PAGE_OFFSET_4MB(addr);
return PDE32_ENTRY_LARGE_PAGE;
} else {
- *entry = PDE32_T_ADDR(*pde_entry);
+ *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
return PDE32_ENTRY_PTE32;
}
}
if (!pte_entry->present) {
*entry = 0;
- PrintDebug("Lookup at non present page (index=%d)\n", PTE32_INDEX(addr));
+ // PrintDebug("Lookup at non present page (index=%d)\n", PTE32_INDEX(addr));
+ return -1;
+ } else {
+ *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr) + PAGE_OFFSET(addr);
+ return 0;
+ }
+
+ return -1;
+}
+
+
+int pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry) {
+ pdpe32pae_t * pdpe_entry = &(pdp[PDPE32PAE_INDEX(addr)]);
+
+ if (!pdpe_entry->present) {
+ *entry = 0;
return -1;
} else {
- *entry = PTE32_T_ADDR(*pte_entry) + PT32_PAGE_OFFSET(addr);
+ *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr) + PAGE_OFFSET(addr);
return 0;
}
pde[i].large_page = 0;
pde[i].global_page = 0;
pde[i].vmm_info = 0;
- pde[i].pt_base_addr = PAGE_ALIGNED_ADDR((addr_t)V3_PAddr(pte));
+ pde[i].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
}
}
pde[j].large_page = 0;
pde[j].global_page = 0;
pde[j].vmm_info = 0;
- pde[j].pt_base_addr = PAGE_ALIGNED_ADDR((addr_t)V3_PAddr(pte));
+ pde[j].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
pde[j].rsvd = 0;
pde_present = 1;
pdpe[i].avail = 0;
pdpe[i].rsvd2 = 0;
pdpe[i].vmm_info = 0;
- pdpe[i].pd_base_addr = PAGE_ALIGNED_ADDR((addr_t)V3_PAddr(pde));
+ pdpe[i].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
pdpe[i].rsvd3 = 0;
}
return NULL;
}
- pte[m].page_base_addr = PTE64_BASE_ADDR(host_addr);
+ pte[m].page_base_addr = PAGE_BASE_ADDR(host_addr);
//PrintPTE64(current_page_addr, &(pte[m]));
pde[k].large_page = 0;
//pde[k].global_page = 0;
pde[k].vmm_info = 0;
- pde[k].pt_base_addr = PAGE_ALIGNED_ADDR((addr_t)V3_PAddr(pte));
+ pde[k].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
pde_present = 1;
}
pdpe[j].large_page = 0;
//pdpe[j].global_page = 0;
pdpe[j].vmm_info = 0;
- pdpe[j].pd_base_addr = PAGE_ALIGNED_ADDR((addr_t)V3_PAddr(pde));
+ pdpe[j].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
pdpe_present = 1;
//pml[i].large_page = 0;
//pml[i].global_page = 0;
pml[i].vmm_info = 0;
- pml[i].pdp_base_addr = PAGE_ALIGNED_ADDR((addr_t)V3_PAddr(pdpe));
+ pml[i].pdp_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pdpe));
}
}
if ((tmp_pde[i].present) && (tmp_pde[i].large_page == 0)) {
addr_t pte_host_addr;
- if (guest_pa_to_host_va(info, (addr_t)(PDE32_T_ADDR(tmp_pde[i])), &pte_host_addr) == -1) {
+ if (guest_pa_to_host_va(info, (addr_t)(BASE_TO_PAGE_ADDR(tmp_pde[i].pt_base_addr)), &pte_host_addr) == -1) {
PrintError("Could not lookup host address of guest PDE\n");
return -1;
}
- add_pte_map(pte_cache, (addr_t)(PDE32_T_ADDR(tmp_pde[i])), pte_host_addr);
+ add_pte_map(pte_cache, (addr_t)(BASE_TO_PAGE_ADDR(tmp_pde[i].pt_base_addr)), pte_host_addr);
}
}
pde32_t * shadow_pde = (pde32_t *)&(shadow_pd[PDE32_INDEX(location)]);
if (shadow_pde->large_page == 0) {
- pte32_t * shadow_pt = (pte32_t *)(addr_t)PDE32_T_ADDR((*shadow_pde));
+ pte32_t * shadow_pt = (pte32_t *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr);
pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(location)]);
//if (shadow_pte->present == 1) {
shadow_pt = v3_create_new_shadow_pt();
- shadow_cr3->pdt_base_addr = (addr_t)V3_PAddr((void *)(addr_t)PD32_BASE_ADDR(shadow_pt));
+ shadow_cr3->pdt_base_addr = (addr_t)V3_PAddr((void *)(addr_t)PAGE_BASE_ADDR(shadow_pt));
PrintDebug( "Created new shadow page table %p\n", (void *)(addr_t)shadow_cr3->pdt_base_addr );
} else {
PrintDebug("Reusing cached shadow Page table\n");
guest_pde->accessed = 1;
- shadow_pde->pt_base_addr = PD32_BASE_ADDR((addr_t)V3_PAddr(shadow_pt));
+ shadow_pde->pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(shadow_pt));
if (guest_pde->large_page == 0) {
shadow_pde->writable = guest_pde->writable;
//
// PTE fault
//
- pte32_t * shadow_pt = (pte32_t *)V3_VAddr( (void*)(addr_t) PDE32_T_ADDR(*shadow_pde) );
+ pte32_t * shadow_pt = (pte32_t *)V3_VAddr( (void*)(addr_t) BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr) );
if (guest_pde->large_page == 0) {
pte32_t * guest_pt = NULL;
- if (guest_pa_to_host_va(info, PDE32_T_ADDR((*guest_pde)), (addr_t*)&guest_pt) == -1) {
+ if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t*)&guest_pt) == -1) {
// Machine check the guest
- PrintDebug("Invalid Guest PTE Address: 0x%x\n", PDE32_T_ADDR((*guest_pde)));
+ PrintDebug("Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
v3_raise_exception(info, MC_EXCEPTION);
return 0;
}
if (shadow_pte_access == PT_ENTRY_NOT_PRESENT) {
// Get the guest physical address of the fault
- addr_t guest_fault_pa = PDE32_4MB_T_ADDR(*large_guest_pde) + PD32_4MB_PAGE_OFFSET(fault_addr);
+ addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_4MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_4MB(fault_addr);
host_region_type_t host_page_type = get_shadow_addr_type(info, guest_fault_pa);
struct shadow_page_state * state = &(info->shdw_pg_state);
addr_t shadow_pa = get_shadow_addr(info, guest_fault_pa);
- shadow_pte->page_base_addr = PT32_BASE_ADDR(shadow_pa);
+ shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
shadow_pte->present = 1;
*/
shadow_pte->user_page = 1;
- if (find_pte_map(state->cached_ptes, PT32_PAGE_ADDR(guest_fault_pa)) != NULL) {
+ if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_fault_pa)) != NULL) {
// Check if the entry is a page table...
PrintDebug("Marking page as Guest Page Table (large page)\n");
shadow_pte->vmm_info = PT32_GUEST_PT;
if (shadow_pte_access == PT_ENTRY_NOT_PRESENT) {
- addr_t guest_pa = PTE32_T_ADDR((*guest_pte)) + PT32_PAGE_OFFSET(fault_addr);
+ addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);
// Page Table Entry Not Present
PrintDebug("guest_pa =%p\n", (void *)guest_pa);
struct shadow_page_state * state = &(info->shdw_pg_state);
addr_t shadow_pa = get_shadow_addr(info, guest_pa);
- shadow_pte->page_base_addr = PT32_BASE_ADDR(shadow_pa);
+ shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
shadow_pte->present = guest_pte->present;
shadow_pte->user_page = guest_pte->user_page;
guest_pte->accessed = 1;
- if (find_pte_map(state->cached_ptes, PT32_PAGE_ADDR(guest_pa)) != NULL) {
+ if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_pa)) != NULL) {
// Check if the entry is a page table...
PrintDebug("Marking page as Guest Page Table %d\n", shadow_pte->writable);
shadow_pte->vmm_info = PT32_GUEST_PT;
PrintDebug("Invalidating Large Page\n");
} else
if (shadow_pde->present == 1) {
- pte32_t * shadow_pt = (pte32_t *)(addr_t)PDE32_T_ADDR((*shadow_pde));
+ pte32_t * shadow_pt = (pte32_t *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr);
pte32_t * shadow_pte = (pte32_t *) V3_VAddr( (void*) &shadow_pt[PTE32_INDEX(first_operand)] );
#ifdef DEBUG_SHADOW_PAGING