#include <geekos/vmm.h>
+#include <geekos/vm_guest_mem.h>
extern struct vmm_os_hooks * os_hooks;
+void delete_page_tables_pde32(pde32_t * pde) {
+ int i, j;
+
+ if (pde == NULL) {
+ return;
+ }
+
+ for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
+ if (pde[i].present) {
+ pte32_t * pte = (pte32_t *)(pde[i].pt_base_addr << PAGE_POWER);
+
+ for (j = 0; (j < MAX_PTE32_ENTRIES); j++) {
+ if ((pte[j].present)) {
+ os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
+ }
+ }
+
+ os_hooks->free_page(pte);
+ }
+ }
+
+ os_hooks->free_page(pde);
+}
 * pulling host backing frames from the guest's shadow memory map where available.
 * If there are any gaps in the layout, we add them as unmapped (not-present) pages.
 */
-vmm_pde_t * generate_guest_page_tables(vmm_mem_layout_t * layout, vmm_mem_list_t * list) {
+pde32_t * create_passthrough_pde32_pts(guest_info_t * guest_info) {
ullong_t current_page_addr = 0;
- uint_t layout_index = 0;
- uint_t list_index = 0;
- ullong_t layout_addr = 0;
int i, j;
- uint_t num_entries = layout->num_pages; // The number of pages left in the layout
+ shadow_map_t * map = guest_info->mem_map;
-
+ pde32_t * pde = os_hooks->allocate_pages(1);
- vmm_pde_t * pde = os_hooks->allocate_pages(1);
+ for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
+ int pte_present = 0;
+ pte32_t * pte = os_hooks->allocate_pages(1);
+
+
+ for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
+ shadow_region_t * region = get_shadow_region_by_addr(map, current_page_addr);
+
+ if (!region ||
+ (region->host_type == HOST_REGION_NOTHING) ||
+ (region->host_type == HOST_REGION_UNALLOCATED) ||
+ (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
+ (region->host_type == HOST_REGION_REMOTE) ||
+ (region->host_type == HOST_REGION_SWAPPED)) {
+ pte[j].present = 0;
+ pte[j].flags = 0;
+ pte[j].accessed = 0;
+ pte[j].dirty = 0;
+ pte[j].pte_attr = 0;
+ pte[j].global_page = 0;
+ pte[j].vmm_info = 0;
+ pte[j].page_base_addr = 0;
+ } else {
+ addr_t host_addr;
+ pte[j].present = 1;
+ pte[j].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
+
+ pte[j].accessed = 0;
+ pte[j].dirty = 0;
+ pte[j].pte_attr = 0;
+ pte[j].global_page = 0;
+ pte[j].vmm_info = 0;
+
+ if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
+ // BIG ERROR
+ // PANIC
+ return NULL;
+ }
+
+ pte[j].page_base_addr = host_addr >> 12;
+
+ pte_present = 1;
+ }
+
+ current_page_addr += PAGE_SIZE;
+ }
+
+ if (pte_present == 0) {
+ os_hooks->free_page(pte);
- for (i = 0; i < MAX_PAGE_DIR_ENTRIES; i++) {
- if (num_entries == 0) {
pde[i].present = 0;
pde[i].flags = 0;
pde[i].accessed = 0;
pde[i].vmm_info = 0;
pde[i].pt_base_addr = 0;
} else {
- vmm_pte_t * pte = os_hooks->allocate_pages(1);
-
pde[i].present = 1;
pde[i].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
pde[i].accessed = 0;
pde[i].large_pages = 0;
pde[i].global_page = 0;
pde[i].vmm_info = 0;
- pde[i].pt_base_addr = PAGE_ALLIGNED_ADDR(pte);
-
-
-
- for (j = 0; j < MAX_PAGE_TABLE_ENTRIES; j++) {
- layout_addr = get_mem_layout_addr(layout, layout_index);
-
- if ((current_page_addr < layout_addr) || (num_entries == 0)) {
- // We have a gap in the layout, fill with unmapped page
- pte[j].present = 0;
- pte[j].flags = 0;
- pte[j].accessed = 0;
- pte[j].dirty = 0;
- pte[j].pte_attr = 0;
- pte[j].global_page = 0;
- pte[j].vmm_info = 0;
- pte[j].page_base_addr = 0;
-
- current_page_addr += PAGE_SIZE;
- } else if (current_page_addr == layout_addr) {
- // Set up the Table entry to map correctly to the layout region
- layout_region_t * page_region = get_mem_layout_region(layout, layout_addr);
-
- if (page_region->type == UNMAPPED) {
- pte[j].present = 0;
- pte[j].flags = 0;
- } else {
- pte[j].present = 1;
- pte[j].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
- }
-
- pte[j].accessed = 0;
- pte[j].dirty = 0;
- pte[j].pte_attr = 0;
- pte[j].global_page = 0;
- pte[j].vmm_info = 0;
-
- if (page_region->type == UNMAPPED) {
- pte[j].page_base_addr = 0;
- } else if (page_region->type == SHARED) {
- addr_t host_addr = page_region->host_addr + (layout_addr - page_region->start);
-
- pte[j].page_base_addr = host_addr >> 12;
- pte[j].vmm_info = SHARED_PAGE;
- } else if (page_region->type == GUEST) {
- addr_t list_addr = get_mem_list_addr(list, list_index++);
-
- if (list_addr == -1) {
- // error
- // cleanup...
- free_guest_page_tables(pde);
- return NULL;
- }
- PrintDebug("Adding guest page (%x)\n", list_addr);
- pte[j].page_base_addr = list_addr >> 12;
-
- // Reset this when we move over to dynamic page allocation
- // pte[j].vmm_info = GUEST_PAGE;
- pte[j].vmm_info = SHARED_PAGE;
- }
-
- num_entries--;
- current_page_addr += PAGE_SIZE;
- layout_index++;
- } else {
- // error
- PrintDebug("Error creating page table...\n");
- // cleanup
- free_guest_page_tables(pde);
- return NULL;
- }
- }
+ pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(pte);
}
+
}
return pde;
}
-void free_guest_page_tables(vmm_pde_t * pde) {
- int i, j;
- for (i = 0; (i < MAX_PAGE_DIR_ENTRIES); i++) {
- if (pde[i].present) {
- vmm_pte_t * pte = (vmm_pte_t *)(pde[i].pt_base_addr << PAGE_POWER);
-
- for (j = 0; (j < MAX_PAGE_TABLE_ENTRIES); j++) {
- if ((pte[j].present) && (pte[j].vmm_info & GUEST_PAGE)){
- os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
- }
- }
-
- os_hooks->free_page(pte);
+
+
+/* We can't do a full lookup because we don't know what context the page tables are in...
+ * The entry addresses could be pointing to either guest physical memory or host physical memory
+ * Instead we just return the entry address, and a flag to show if it points to a pte or a large page...
+ */
+pde32_entry_type_t pde32_lookup(pde32_t * pde, addr_t addr, addr_t * entry) {
+ pde32_t * pde_entry = pde[PDE32_INDEX(addr)];
+
+ if (!pde_entry->present) {
+ *entry = 0;
+ return NOT_PRESENT;
+ } else {
+ *entry = PAGE_ADDR(pde_entry->pt_base_addr);
+
+ if (pde_entry->large_pages) {
+ *entry += PAGE_OFFSET(addr);
+ return LARGE_PAGE;
+ } else {
+ return PTE32;
}
+ }
+ return NOT_PRESENT;
+}
+
+
+int pte32_lookup(pte32_t * pte, addr_t addr, addr_t * entry) {
+ pte32_t * pte_entry = pte[PTE32_INDEX(addr)];
+
+ if (!pte_entry->present) {
+ *entry = 0;
+ return -1;
+ } else {
+ *entry = PAGE_ADDR(pte_entry->page_base_addr);
+ *entry += PAGE_OFFSET(addr);
+ return 0;
}
- os_hooks->free_page(pde);
+ return -1;
}
-void PrintPDE(void * virtual_address, vmm_pde_t * pde)
+
+
+
+
+
+void PrintPDE32(void * virtual_address, pde32_t * pde)
{
PrintDebug("PDE %p -> %p : present=%x, flags=%x, accessed=%x, reserved=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
virtual_address,
pde->vmm_info);
}
-void PrintPTE(void * virtual_address, vmm_pte_t * pte)
+void PrintPTE32(void * virtual_address, pte32_t * pte)
{
PrintDebug("PTE %p -> %p : present=%x, flags=%x, accessed=%x, dirty=%x, pteAttribute=%x, globalPage=%x, vmm_info=%x\n",
virtual_address,
-void PrintPD(vmm_pde_t * pde)
+void PrintPD32(pde32_t * pde)
{
int i;
PrintDebug("Page Directory at %p:\n", pde);
- for (i = 0; (i < MAX_PAGE_DIR_ENTRIES) && pde[i].present; i++) {
- PrintPDE((void*)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), &(pde[i]));
+ for (i = 0; (i < MAX_PDE32_ENTRIES) && pde[i].present; i++) {
+ PrintPDE32((void*)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), &(pde[i]));
}
}
-void PrintPT(void * starting_address, vmm_pte_t * pte)
+void PrintPT32(void * starting_address, pte32_t * pte)
{
int i;
PrintDebug("Page Table at %p:\n", pte);
- for (i = 0; (i < MAX_PAGE_TABLE_ENTRIES) && pte[i].present; i++) {
- PrintPTE(starting_address + (PAGE_SIZE * i), &(pte[i]));
+ for (i = 0; (i < MAX_PTE32_ENTRIES) && pte[i].present; i++) {
+ PrintPTE32(starting_address + (PAGE_SIZE * i), &(pte[i]));
}
}
-void PrintDebugPageTables(vmm_pde_t * pde)
+void PrintDebugPageTables(pde32_t * pde)
{
int i;
PrintDebug("Dumping the pages starting with the pde page at %p\n", pde);
- for (i = 0; (i < MAX_PAGE_DIR_ENTRIES) && pde[i].present; i++) {
- PrintPDE((void *)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), &(pde[i]));
- PrintPT((void *)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), (void *)(pde[i].pt_base_addr << PAGE_POWER));
+ for (i = 0; (i < MAX_PDE32_ENTRIES) && pde[i].present; i++) {
+ PrintPDE32((void *)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), &(pde[i]));
+ PrintPT32((void *)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), (void *)(pde[i].pt_base_addr << PAGE_POWER));
}
}
-