void delete_page_tables_pde32(vmm_pde_t * pde) {
int i, j;
- if (pde==NULL) {
- return ;
+ if (pde == NULL) {
+ return;
}
for (i = 0; (i < MAX_PAGE_DIR_ENTRIES); i++) {
+ // Skip non-present directory entries; their pt_base_addr does not point at a page table.
+ if (!pde[i].present) {
+ continue;
+ }
vmm_pte_t * pte = (vmm_pte_t *)(pde[i].pt_base_addr << PAGE_POWER);
for (j = 0; (j < MAX_PAGE_TABLE_ENTRIES); j++) {
- if ((pte[j].present) && (pte[j].vmm_info & GUEST_PAGE)){
- os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
+ if ((pte[j].present)) {
+ os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
}
}
}
-int init_shadow_paging_state(shadow_paging_state_t *state)
-{
- state->guest_page_directory_type=state->shadow_page_directory_type=PDE32;
+int init_shadow_page_state(shadow_page_state_t * state) {
+ state->guest_mode = PDE32;
+ state->shadow_mode = PDE32;
- state->guest_page_directory=state->shadow_page_directory=NULL;
+ state->guest_cr3.r_reg = 0;
+ state->shadow_cr3.r_reg = 0;
- init_shadow_map(&(state->shadow_map));
return 0;
}
-int wholesale_update_shadow_paging_state(shadow_paging_state_t *state)
-{
+int wholesale_update_shadow_page_state(shadow_page_state_t * state, shadow_map_t * mem_map) {
unsigned i, j;
- vmm_pde_t *cur_guest_pde, *cur_shadow_pde;
- vmm_pte_t *cur_guest_pte, *cur_shadow_pte;
+ vmm_pde_t * guest_pde;
+ vmm_pde_t * shadow_pde;
+
// For now, we'll only work with PDE32
- if (state->guest_page_directory_type!=PDE32) {
+ if (state->guest_mode != PDE32) {
return -1;
}
+
+
- cur_shadow_pde=(vmm_pde_t*)(state->shadow_page_directory);
-
- cur_guest_pde = (vmm_pde_t*)(os_hooks->physical_to_virtual(state->guest_page_directory));
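+ // The shadow CR3 is used directly as a pointer here (allocate_pages() presumably
+ // returns an address the VMM can dereference), while the guest CR3 holds a guest
+ // physical address and must first be translated with paddr_to_vaddr().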
+ shadow_pde = (vmm_pde_t *)(CR3_TO_PDE(state->shadow_cr3.e_reg.low));
+ guest_pde = (vmm_pde_t *)(os_hooks->paddr_to_vaddr((void*)CR3_TO_PDE(state->guest_cr3.e_reg.low)));
// Delete the current page table
- delete_page_tables_pde32(cur_shadow_pde);
+ delete_page_tables_pde32(shadow_pde);
+
+ shadow_pde = os_hooks->allocate_pages(1);
- cur_shadow_pde = os_hooks->allocate_pages(1);
- state->shadow_page_directory = cur_shadow_pde;
- state->shadow_page_directory_type=PDE32;
+ state->shadow_cr3.e_reg.low = (addr_t)shadow_pde;
+
+ state->shadow_mode = PDE32;
- for (i=0;i<MAX_PAGE_DIR_ENTRIES;i++) {
- cur_shadow_pde[i] = cur_guest_pde[i];
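+ // Clone the guest page directory into the shadow, rewriting each present entry
+ // so that guest physical addresses become host physical addresses looked up
+ // through the shadow memory map.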
+ for (i = 0; i < MAX_PAGE_DIR_ENTRIES; i++) {
+ shadow_pde[i] = guest_pde[i];
+
// The shadow can be identical to the guest if it's not present
- if (!cur_shadow_pde[i].present) {
+ if (!shadow_pde[i].present) {
continue;
}
- if (cur_shadow_pde[i].large_pages) {
+
+ if (shadow_pde[i].large_pages) {
// large page - just map it through shadow map to generate its physical location
- addr_t guest_addr = PAGE_ADDR(cur_shadow_pde[i].pt_base_addr);
+ addr_t guest_addr = PAGE_ADDR(shadow_pde[i].pt_base_addr);
addr_t host_addr;
- shadow_map_entry_t *ent;
+ shadow_region_t * ent;
- ent = get_shadow_map_region_by_addr(&(state->shadow_map),guest_addr);
+ ent = get_shadow_region_by_addr(mem_map, guest_addr);
if (!ent) {
// FIXME Panic here - guest is trying to map to physical memory
case HOST_REGION_PHYSICAL_MEMORY:
// points into currently allocated physical memory, so we just
// set up the shadow to point to the mapped location
- if (map_guest_physical_to_host_physical(ent,guest_addr,&host_addr)) {
+ if (guest_paddr_to_host_paddr(ent, guest_addr, &host_addr)) {
// Panic here
return -1;
}
- cur_shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(host_addr);
+
+ shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(host_addr);
// FIXME set vmm_info bits here
break;
case HOST_REGION_UNALLOCATED:
// points to physical memory that is *allowed* but that we
// have not yet allocated. We mark as not present and set a
// bit to remind us to allocate it later
- cur_shadow_pde[i].present=0;
+ shadow_pde[i].present = 0;
// FIXME Set vminfo bits here so that we know that we will be
// allocating it later
break;
// points to physical memory that is NOT ALLOWED.
// We will mark it as not present and set a bit to remind
// us that it's bad later and insert a GPF then
- cur_shadow_pde[i].present=0;
+ shadow_pde[i].present = 0;
break;
case HOST_REGION_MEMORY_MAPPED_DEVICE:
case HOST_REGION_REMOTE:
break;
}
} else {
- addr_t host_addr;
+ vmm_pte_t * guest_pte;
+ vmm_pte_t * shadow_pte;
addr_t guest_addr;
+ addr_t guest_pte_host_addr;
+ shadow_region_t * ent;
// small page - set PDE and follow down to the child table
- cur_shadow_pde[i] = cur_guest_pde[i];
-
- // Allocate a new second level page table for the shadow
- cur_shadow_pte = os_hooks->allocate_pages(1);
+ shadow_pde[i] = guest_pde[i];
- // make our first level page table in teh shadow point to it
- cur_shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(cur_shadow_pte);
+ guest_addr = PAGE_ADDR(guest_pde[i].pt_base_addr);
- shadow_map_entry_t *ent;
-
- guest_addr=PAGE_ADDR(cur_guest_pde[i].pt_base_addr);
+ // Allocate a new second level page table for the shadow
+ shadow_pte = os_hooks->allocate_pages(1);
- ent = get_shadow_map_region_by_addr(&(state->shadow_map),guest_addr);
+ // make our first level page table in the shadow point to it
+ shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(shadow_pte);
- if (!ent) {
+ ent = get_shadow_region_by_addr(mem_map, guest_addr);
+
+
+ /* JRL: This is bad.... */
+ // For now the guest Page Table must always be mapped to host physical memory
+ /* If we swap out a page table or if it isn't present for some reason, this turns real ugly */
+
+ if ((!ent) || (ent->host_type != HOST_REGION_PHYSICAL_MEMORY)) {
// FIXME Panic here - guest is trying to map to physical memory
// it does not own in any way!
return -1;
}
// Address of the relevant second level page table in the guest
- if (map_guest_physical_to_host_physical(ent,guest_addr,&host_addr)) {
+ if (guest_paddr_to_host_paddr(ent, guest_addr, &guest_pte_host_addr)) {
// Panic here
return -1;
}
- // host_addr now contains the host physical address for the guest's 2nd level page table
+
+ // guest_pte_host_addr now contains the host physical address for the guest's 2nd level page table
// Now we transform it to relevant virtual address
- cur_guest_pte = os_hooks->physical_to_virtual((void*)host_addr);
+ guest_pte = os_hooks->paddr_to_vaddr((void *)guest_pte_host_addr);
// Now we walk through the second level guest page table
// and clone it into the shadow
- for (j=0;j<MAX_PAGE_TABLE_ENTRIES;j++) {
- cur_shadow_pte[j] = cur_guest_pte[j];
+ for (j = 0; j < MAX_PAGE_TABLE_ENTRIES; j++) {
+ shadow_pte[j] = guest_pte[j];
- addr_t guest_addr = PAGE_ADDR(cur_shadow_pte[j].page_base_addr);
+ addr_t guest_addr = PAGE_ADDR(shadow_pte[j].page_base_addr);
- shadow_map_entry_t *ent;
+ shadow_region_t * ent;
- ent = get_shadow_map_region_by_addr(&(state->shadow_map),guest_addr);
+ ent = get_shadow_region_by_addr(mem_map, guest_addr);
if (!ent) {
// FIXME Panic here - guest is trying to map to physical memory
switch (ent->host_type) {
case HOST_REGION_PHYSICAL_MEMORY:
- // points into currently allocated physical memory, so we just
- // set up the shadow to point to the mapped location
- if (map_guest_physical_to_host_physical(ent,guest_addr,&host_addr)) {
- // Panic here
- return -1;
+ {
+ addr_t host_addr;
+
+ // points into currently allocated physical memory, so we just
+ // set up the shadow to point to the mapped location
+ if (guest_paddr_to_host_paddr(ent, guest_addr, &host_addr)) {
+ // Panic here
+ return -1;
+ }
+
+ shadow_pte[j].page_base_addr = PAGE_ALIGNED_ADDR(host_addr);
+ // FIXME set vmm_info bits here
+ break;
}
- cur_shadow_pte[j].page_base_addr = PAGE_ALIGNED_ADDR(host_addr);
- // FIXME set vmm_info bits here
- break;
case HOST_REGION_UNALLOCATED:
// points to physical memory that is *allowed* but that we
// have not yet allocated. We mark as not present and set a
// bit to remind us to allocate it later
- cur_shadow_pte[j].present=0;
+ shadow_pte[j].present = 0;
// FIXME Set vminfo bits here so that we know that we will be
// allocating it later
break;
// points to physical memory that is NOT ALLOWED.
// We will mark it as not present and set a bit to remind
// us that it's bad later and insert a GPF then
- cur_shadow_pte[j].present=0;
+ shadow_pte[j].present = 0;
break;
case HOST_REGION_MEMORY_MAPPED_DEVICE:
case HOST_REGION_REMOTE:
-#if 0
+
/* We generate a page table to correspond to a given memory layout
* pulling pages from the mem_list when necessary
* If there are any gaps in the layout, we add them as unmapped pages
*/
-vmm_pde_t * generate_guest_page_tables(vmm_mem_layout_t * layout, vmm_mem_list_t * list) {
+vmm_pde_t * create_passthrough_pde32_pts(shadow_map_t * map) {
ullong_t current_page_addr = 0;
- uint_t layout_index = 0;
- uint_t list_index = 0;
- ullong_t layout_addr = 0;
int i, j;
- uint_t num_entries = layout->num_pages; // The number of pages left in the layout
-
-
vmm_pde_t * pde = os_hooks->allocate_pages(1);
for (i = 0; i < MAX_PAGE_DIR_ENTRIES; i++) {
- if (num_entries == 0) {
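+ // pte_present tracks whether any entry in this table ends up mapping host
+ // memory; if none do, the table is freed below and the directory entry is
+ // left not present.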
+ int pte_present = 0;
+ vmm_pte_t * pte = os_hooks->allocate_pages(1);
+
+
+ for (j = 0; j < MAX_PAGE_TABLE_ENTRIES; j++) {
+ shadow_region_t * region = get_shadow_region_by_addr(map, current_page_addr);
+
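+ // Regions with no backing host physical memory (missing from the map, unallocated,
+ // device, remote, or swapped) are left unmapped so that guest accesses to them
+ // fault instead of being passed through.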
+ if (!region ||
+ (region->host_type == HOST_REGION_NOTHING) ||
+ (region->host_type == HOST_REGION_UNALLOCATED) ||
+ (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
+ (region->host_type == HOST_REGION_REMOTE) ||
+ (region->host_type == HOST_REGION_SWAPPED)) {
+ pte[j].present = 0;
+ pte[j].flags = 0;
+ pte[j].accessed = 0;
+ pte[j].dirty = 0;
+ pte[j].pte_attr = 0;
+ pte[j].global_page = 0;
+ pte[j].vmm_info = 0;
+ pte[j].page_base_addr = 0;
+ } else {
+ addr_t host_addr;
+ pte[j].present = 1;
+ pte[j].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
+
+ pte[j].accessed = 0;
+ pte[j].dirty = 0;
+ pte[j].pte_attr = 0;
+ pte[j].global_page = 0;
+ pte[j].vmm_info = 0;
+
+ if (guest_paddr_to_host_paddr(region, current_page_addr, &host_addr) == -1) {
+ // BIG ERROR
+ // PANIC
+ return NULL;
+ }
+
+ pte[j].page_base_addr = host_addr >> 12;
+
+ pte_present = 1;
+ }
+
+ current_page_addr += PAGE_SIZE;
+ }
+
+ if (pte_present == 0) {
+ os_hooks->free_page(pte);
+
pde[i].present = 0;
pde[i].flags = 0;
pde[i].accessed = 0;
pde[i].vmm_info = 0;
pde[i].pt_base_addr = 0;
} else {
- vmm_pte_t * pte = os_hooks->allocate_pages(1);
-
pde[i].present = 1;
pde[i].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
pde[i].accessed = 0;
pde[i].large_pages = 0;
pde[i].global_page = 0;
pde[i].vmm_info = 0;
- pde[i].pt_base_addr = PAGE_ALLIGNED_ADDR(pte);
-
-
-
- for (j = 0; j < MAX_PAGE_TABLE_ENTRIES; j++) {
- layout_addr = get_mem_layout_addr(layout, layout_index);
-
- if ((current_page_addr < layout_addr) || (num_entries == 0)) {
- // We have a gap in the layout, fill with unmapped page
- pte[j].present = 0;
- pte[j].flags = 0;
- pte[j].accessed = 0;
- pte[j].dirty = 0;
- pte[j].pte_attr = 0;
- pte[j].global_page = 0;
- pte[j].vmm_info = 0;
- pte[j].page_base_addr = 0;
-
- current_page_addr += PAGE_SIZE;
- } else if (current_page_addr == layout_addr) {
- // Set up the Table entry to map correctly to the layout region
- layout_region_t * page_region = get_mem_layout_region(layout, layout_addr);
-
- if (page_region->type == UNMAPPED) {
- pte[j].present = 0;
- pte[j].flags = 0;
- } else {
- pte[j].present = 1;
- pte[j].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
- }
-
- pte[j].accessed = 0;
- pte[j].dirty = 0;
- pte[j].pte_attr = 0;
- pte[j].global_page = 0;
- pte[j].vmm_info = 0;
-
- if (page_region->type == UNMAPPED) {
- pte[j].page_base_addr = 0;
- } else if (page_region->type == SHARED) {
- addr_t host_addr = page_region->host_addr + (layout_addr - page_region->start);
-
- pte[j].page_base_addr = host_addr >> 12;
- pte[j].vmm_info = SHARED_PAGE;
- } else if (page_region->type == GUEST) {
- addr_t list_addr = get_mem_list_addr(list, list_index++);
-
- if (list_addr == -1) {
- // error
- // cleanup...
- free_guest_page_tables(pde);
- return NULL;
- }
- PrintDebug("Adding guest page (%x)\n", list_addr);
- pte[j].page_base_addr = list_addr >> 12;
-
- // Reset this when we move over to dynamic page allocation
- // pte[j].vmm_info = GUEST_PAGE;
- pte[j].vmm_info = SHARED_PAGE;
- }
-
- num_entries--;
- current_page_addr += PAGE_SIZE;
- layout_index++;
- } else {
- // error
- PrintDebug("Error creating page table...\n");
- // cleanup
- free_guest_page_tables(pde);
- return NULL;
- }
- }
+ pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(pte);
}
+
}
return pde;
}
-#endif
+