#define MAX_PTE64_ENTRIES 512

/* Page / paging-structure granularities reported by the page table walkers
 * (v3_walk_host_pt_* / v3_walk_guest_pt_*) to their callbacks.
 */
typedef enum {PAGE_4KB, PAGE_2MB, PAGE_4MB, PAGE_1GB,
	      PAGE_PT32, PAGE_PD32,
	      PAGE_PDP32PAE, PAGE_PD32PAE, PAGE_PT32PAE,
	      PAGE_PML464, PAGE_PDP64, PAGE_PD64, PAGE_PT64} page_type_t;

/* Converts an address into a page table index */
#define PDE32_INDEX(x)  ((((uint_t)(x)) >> 22) & 0x3ff)

/* Strip the page offset: address -> page frame number at each page size */
#define PAGE_BASE_ADDR(x) ((x) >> 12)
#define PAGE_BASE_ADDR_2MB(x) ((x) >> 21)
#define PAGE_BASE_ADDR_4MB(x) ((x) >> 22)
#define PAGE_BASE_ADDR_1GB(x) ((x) >> 30)

/* Inverse: page frame number -> address of the page base */
#define BASE_TO_PAGE_ADDR(x) (((addr_t)(x)) << 12)
#define BASE_TO_PAGE_ADDR_2MB(x) (((addr_t)(x)) << 21)
#define BASE_TO_PAGE_ADDR_4MB(x) (((addr_t)(x)) << 22)
#define BASE_TO_PAGE_ADDR_1GB(x) (((addr_t)(x)) << 30)
/* *** */
/* Deprecated */
pf_error_t access_type, pt_access_status_t * access_status);
+
+int v3_walk_host_pt_32(v3_reg_t host_cr3,
+ int (*callback)(int level, addr_t page_va, addr_t page_pa, void private_data),
+ void * private_data);
+
+int v3_walk_host_pt_32pae(v3_reg_t host_cr3,
+ void (*callback)(page_type_t type, addr_t page_va, addr_t page_pa, void * private_data),
+ void * private_data);
+
+int v3_walk_host_pt_64(v3_reg_t host_cr3,
+ void (*callback)(page_type_t type, addr_t page_va, addr_t page_pa, void * private_data),
+ void * private_data);
+
struct guest_info;
pde32_t * create_passthrough_pts_32(struct guest_info * guest_info);
* External visibility not needed
*/
addr_t v3_create_new_shadow_pt();
-int v3_cache_page_tables32(struct guest_info * info, addr_t pde);
int v3_replace_shdw_page32(struct guest_info * info, addr_t location, pte32_t * new_page, pte32_t * old_page);
/* *** */
+
+
+
+
/* We generate a page table to correspond to a given memory layout
* pulling pages from the mem_list when necessary
* If there are any gaps in the layout, we add them as unmapped pages
}
+int v3_walk_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3,
+ int (*callback)(int level, addr_t page_va, addr_t page_pa, void private_data),
+ void * private_data) {
+
+
+}
+
+
+int v3_walk_host_pt_32(v3_reg_t host_cr3,
+ int (*callback)(int level, addr_t page_va, addr_t page_pa, void private_data),
+ void * private_data) {
+ pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
+ addr_t pde_pa = CR3_TO_PDE32_PA(host_cr3);
+ int i, j;
+
+ if (!callback) {
+ PrintError("Call back was not specified\n");
+ return -1;
+ }
+
+ callback(PAGE_PD32, host_pde, pde_pa, private_data);
+
+ for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
+ if (host_pde[i].present) {
+ if (host_pde[i].lage_page) {
+ pde32_4MB_t * large_pde = (pde32_4MB_t *)&(host_pde[i]);
+ addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
+
+ callback(PAGE_4MB, V3_VAddr(large_page_pa), large_page_pa, private_data);
+ } else {
+ addr_t pte_pa = BASE_TO_PAGE_ADDR(host_pde[i].pt_base_addr);
+ pte32_t * tmp_pte = (pte32_t *)V3_VAddr(pte_pa);
+
+ callback(PAGE_PT32, tmp_pte, pte_pa, private_data);
+
+ for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
+ if (tmp_pte[j].present) {
+ addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);
+ callback(PAGE_4KB, V3_VAddr(page_pa), page_pa, private_data);
+ }
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+
+
+
+
+int v3_walk_host_pt_32pae(v3_reg_t host_cr3,
+ void (*callback)(page_type_t type, addr_t page_va, addr_t page_pa, void * private_data),
+ void * private_data) {
+ pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
+ addr_t pdpe_pa = CR3_TO_PDPE32PAE_PA(host_cr3);
+ int i, j, k;
+
+ if (!callback) {
+ PrintError("Callback was not specified\n");
+ return -1;
+ }
+
+ callback(PAGE_PDP32PAE, host_pdpe, pdpe_pa, private_data);
+
+ for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
+ if (host_pdpe[i].present) {
+ addr_t pde_pa = BASE_TO_PAGE_ADDR(host_pdpe[i].pd_base_addr);
+ pde32pae_t * tmp_pde = (pde32pae_t *)V3_VAddr(pde_pa);
+
+ callback(PAGE_PD32PAE, tmp_pde, pde_pa, private_data);
+
+ for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
+ if (tmp_pde[j].present) {
+
+ if (tmp_pde[j].large_page) {
+ pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)&(tmp_pde[j]);
+ addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
+
+ callback(PAGE_2MB, V3_VAddr(lage_page_pa), lage_page_pa, private_data);
+ } else {
+ addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
+ pte32pae_t * tmp_pte = (pte32pae_t *)V3_VAddr(pte_pa);
+
+ callback(PAGE_PT32PAE, tmp_pte, pte_pa, private_data);
+
+ for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
+ if (tmp_pte[k].present) {
+ addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);
+ callback(PAGE_4KB, V3_VAddr(page_pa), page_pa, private_data);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+
+int v3_walk_host_pt_64(v3_reg_t host_cr3,
+ void (*callback)(page_type_t type, addr_t page_va, addr_t page_pa, void * private_data),
+ void * private_data) {
+ pml4e64_t * host_pml = (pml3e64_t *)CR3_TO_PML4E64_VA(host_cr3);
+ addr_t pml_pa = CR3_TO_PML4E64_PA(host_cr3);
+ int i, j, k, m;
+
+ if (!callback) {
+ PrintError("Callback was not specified\n");
+ return -1;
+ }
+
+ callback(PAGE_PML464, host_pml, pml_pa, private_data);
+
+ for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
+ if (host_pml[i].present) {
+ addr_t pdpe_pa = BASE_TO_PAGE_ADDR(host_pml[i].pdp_base_addr);
+ pdpe64_t * tmp_pdpe = (pdpe64_t *)V3_VAddr(pdpe_pa);
+
+ callback(PAGE_PDP64, tmp_pdpe, pdpe_pa, private_data);
+
+ for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
+ if (tmp_pdpe[j].present) {
+ if (tmp_pdpe[j].large_page) {
+ pdpe64_1GB_t * large_pdp = (pdpe64_t *)&(tmp_pdpe[j]);
+ addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdp->page_addr);
+
+ callback(PAGE_1GB, V3_VAddr(large_page_pa), large_page_pa, private_data);
+ } else {
+ addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
+ pde64_t * tmp_pde = (pde64_t *)V3_VAddr(pde_pa);
+
+ callback(PAGE_PD64, tmp_pde, pde_pa, private_data);
+
+ for (k = 0; k < MAX_PDE64_ENRIES; k++) {
+ if (tmp_pde[k].present) {
+ if (tmp_pde[k].large_page) {
+ pde64_2MB_t * large_pde = (pde64_2MB_t *)&(tmp_pde[k]);
+ addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_addr);
+
+ callback(PAGE_2MB, V3_VAddr(large_page_pa), large_page_pa, private_data);
+ } else {
+ addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
+ pte64_t * tmp_pte = (pte64_t *)V3_VAddr(pte_pa);
+
+ callback(PAGE_PT64, tmp_pte, pte_pa, private_data);
+
+ for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
+ if (tmp_pte[m].present) {
+ addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);
+ callback(PAGE_4KB, V3_VAddr(page_pa), page_pa, private_data);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ return 0;
+}
static int handle_shadow_pagefault_32pae(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
static int handle_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
+
+static int cache_page_tables_32(struct guest_info * info, addr_t pde);
+
int v3_init_shadow_page_state(struct guest_info * info) {
struct shadow_page_state * state = &(info->shdw_pg_state);
/* Cache the guest page tables referenced by cr3 for the current CPU mode.
 * Dispatches to the mode-specific (now static) helper; only 32 bit
 * protected mode is supported.
 * Returns the helper's result, or -1 for unsupported modes.
 */
int v3_cache_page_tables(struct guest_info * info, addr_t cr3) {
    switch(v3_get_cpu_mode(info)) {
	case PROTECTED:
	    return cache_page_tables_32(info, CR3_TO_PDE32_PA(cr3));
	default:
	    /* PAE / long mode caching not implemented */
	    return -1;
    }
}
-int v3_cache_page_tables32(struct guest_info * info, addr_t pde) {
+static int cache_page_tables_32(struct guest_info * info, addr_t pde) {
struct shadow_page_state * state = &(info->shdw_pg_state);
addr_t pde_host_addr;
pde32_t * tmp_pde;
} else {
// currently unhandled
+ PrintError("Replacing large shadow pages not implemented\n");
return -1;
}
// We assume that shdw_pg_state.guest_cr3 is pointing to the page tables we want to activate
// We also assume that the CPU mode has not changed during this page table transition
static int activate_shadow_pt_32(struct guest_info * info) {
- struct cr3_32 * shadow_cr3 = (struct cr3_32 *)&(info->ctrl_regs.cr3);
- struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3);
- int cached = 0;
-
- // Check if shadow page tables are in the cache
- cached = v3_cache_page_tables32(info, CR3_TO_PDE32_PA(*(addr_t *)guest_cr3));
+ struct cr3_32 * shadow_cr3 = (struct cr3_32 *)&(info->ctrl_regs.cr3);
+ struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3);
+ int cached = 0;
+
+ // Check if shadow page tables are in the cache
+ cached = cache_page_tables_32(info, CR3_TO_PDE32_PA(*(addr_t *)guest_cr3));
+
+ if (cached == -1) {
+ PrintError("CR3 Cache failed\n");
+ return -1;
+ } else if (cached == 0) {
+ addr_t shadow_pt;
- if (cached == -1) {
- PrintError("CR3 Cache failed\n");
- return -1;
- } else if (cached == 0) {
- addr_t shadow_pt;
-
- PrintDebug("New CR3 is different - flushing shadow page table %p\n", shadow_cr3 );
- delete_page_tables_32(CR3_TO_PDE32_VA(*(uint_t*)shadow_cr3));
-
- shadow_pt = v3_create_new_shadow_pt();
-
- shadow_cr3->pdt_base_addr = (addr_t)V3_PAddr((void *)(addr_t)PAGE_BASE_ADDR(shadow_pt));
- PrintDebug( "Created new shadow page table %p\n", (void *)(addr_t)shadow_cr3->pdt_base_addr );
- } else {
- PrintDebug("Reusing cached shadow Page table\n");
- }
-
- shadow_cr3->pwt = guest_cr3->pwt;
- shadow_cr3->pcd = guest_cr3->pcd;
-
- return 0;
+ PrintDebug("New CR3 is different - flushing shadow page table %p\n", shadow_cr3 );
+ delete_page_tables_32(CR3_TO_PDE32_VA(*(uint_t*)shadow_cr3));
+
+ shadow_pt = v3_create_new_shadow_pt();
+
+ shadow_cr3->pdt_base_addr = (addr_t)V3_PAddr((void *)(addr_t)PAGE_BASE_ADDR(shadow_pt));
+ PrintDebug( "Created new shadow page table %p\n", (void *)(addr_t)shadow_cr3->pdt_base_addr );
+ } else {
+ PrintDebug("Reusing cached shadow Page table\n");
+ }
+
+ shadow_cr3->pwt = guest_cr3->pwt;
+ shadow_cr3->pcd = guest_cr3->pcd;
+
+ return 0;
}
static int activate_shadow_pt_32pae(struct guest_info * info) {
}
/* Activate 64 bit (long mode) shadow page tables.
 *
 * Not implemented: always fails with -1.  The guarded block below is the
 * original work-in-progress, which was unreachable (placed after the early
 * return) and cannot build as-is: it calls a nonexistent
 * cache_page_tables_64() and reuses 32 bit macros/fields
 * (CR3_TO_PDE32_PA, delete_page_tables_32, pdt_base_addr) that are
 * presumably wrong for long mode -- TODO confirm and port before enabling.
 */
static int activate_shadow_pt_64(struct guest_info * info) {
    PrintError("Activating 64 bit page tables not implemented\n");
    return -1;

#if 0  /* unreachable work-in-progress, preserved for reference */
    struct cr3_64 * shadow_cr3 = (struct cr3_64 *)&(info->ctrl_regs.cr3);
    struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->shdw_pg_state.guest_cr3);
    int cached = 0;

    // Check if shadow page tables are in the cache
    cached = cache_page_tables_64(info, CR3_TO_PDE32_PA(*(addr_t *)guest_cr3));

    if (cached == -1) {
	PrintError("CR3 Cache failed\n");
	return -1;
    } else if (cached == 0) {
	addr_t shadow_pt;

	PrintDebug("New CR3 is different - flushing shadow page table %p\n", shadow_cr3 );
	delete_page_tables_32(CR3_TO_PDE32_VA(*(uint_t*)shadow_cr3));

	shadow_pt = v3_create_new_shadow_pt();

	shadow_cr3->pdt_base_addr = (addr_t)V3_PAddr((void *)(addr_t)PAGE_BASE_ADDR(shadow_pt));
	PrintDebug( "Created new shadow page table %p\n", (void *)(addr_t)shadow_cr3->pdt_base_addr );
    } else {
	PrintDebug("Reusing cached shadow Page table\n");
    }

    shadow_cr3->pwt = guest_cr3->pwt;
    shadow_cr3->pcd = guest_cr3->pcd;

    return 0;
#endif
}
*/
/* Handle a page fault while running under 64 bit shadow paging.
 *
 * Currently diagnostic only: it logs the guest-side and shadow-side access
 * check results and then always fails with -1, since 64 bit shadow paging
 * is not implemented.
 * NOTE(review): both checks use the 32 bit walkers (v3_check_guest_pt_32 /
 * v3_check_host_pt_32) -- presumably placeholders until 64 bit equivalents
 * exist; confirm before relying on the logged results.
 */
static int handle_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
    pt_access_status_t guest_access;
    pt_access_status_t shadow_access;
    int ret;

    PrintDebug("64 bit shadow page fault\n");

    /* What would the guest's own page tables say about this access? */
    ret = v3_check_guest_pt_32(info, info->shdw_pg_state.guest_cr3, fault_addr, error_code, &guest_access);
    PrintDebug("Guest Access Check: %d (access=%d)\n", ret, guest_access);

    /* And what do the shadow (host-visible) tables say? */
    ret = v3_check_host_pt_32(info->ctrl_regs.cr3, fault_addr, error_code, &shadow_access);
    PrintDebug("Shadow Access Check: %d (access=%d)\n", ret, shadow_access);

    PrintError("64 bit shadow paging not implemented\n");
    return -1;
}