#include <geekos/vmm_paging.h>

extern struct vmm_os_hooks * os_hooks;

/* We generate a page table to correspond to a given memory layout,
 * pulling pages from the mem_list when necessary.
 * If there are any gaps in the layout, we add them as unmapped pages.
 */
pde_t * generate_guest_page_tables(vmm_mem_layout_t * layout, vmm_mem_list_t * list) {
  ullong_t current_page_addr = 0;
  uint_t layout_index = 0;
  uint_t list_index = 0;
  ullong_t layout_addr = 0;
  int i, j;
  uint_t num_entries = layout->num_pages;  // The number of pages left in the layout

  pde_t * pde = os_hooks->allocate_pages(1);

  for (i = 0; i < MAX_PAGE_DIR_ENTRIES; i++) {
    if (num_entries == 0) {
      // No layout pages remain, so mark this directory entry not present
      pde[i].present = 0;
      pde[i].large_pages = 0;
      pde[i].global_page = 0;
      pde[i].pt_base_addr = 0;
    } else {
      // Allocate a page table for this directory entry
      pte_t * pte = os_hooks->allocate_pages(1);

      pde[i].present = 1;
      pde[i].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
      pde[i].large_pages = 0;
      pde[i].global_page = 0;
      pde[i].pt_base_addr = PAGE_ALLIGNED_ADDR(pte);

      for (j = 0; j < MAX_PAGE_TABLE_ENTRIES; j++) {
        layout_addr = get_mem_layout_addr(layout, layout_index);

        if ((current_page_addr < layout_addr) || (num_entries == 0)) {
          // We have a gap in the layout, fill with an unmapped page
          pte[j].present = 0;
          pte[j].global_page = 0;
          pte[j].page_base_addr = 0;

          current_page_addr += PAGE_SIZE;
        } else if (current_page_addr == layout_addr) {
          // Set up the table entry to map correctly to the layout region
          layout_region_t * page_region = get_layout_cursor(layout, layout_addr);

          if (page_region->type == UNMAPPED) {
            pte[j].present = 0;
          } else {
            pte[j].present = 1;
            pte[j].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
          }

          pte[j].global_page = 0;

          if (page_region->type == UNMAPPED) {
            pte[j].page_base_addr = 0;
          } else if (page_region->type == SHARED) {
            // Shared regions map straight to an existing host page
            pte[j].page_base_addr = page_region->host_addr >> 12;
          } else if (page_region->type == GUEST) {
            // Guest regions are backed by pages pulled from the mem_list
            addr_t list_addr = get_mem_list_addr(list, list_index++);

            if (list_addr == -1) {
              // The mem_list has no pages left to back this guest region
              return NULL;
            }

            PrintDebug("Adding guest page (%x)\n", list_addr);
            pte[j].page_base_addr = list_addr >> 12;
          }

          num_entries--;
          current_page_addr += PAGE_SIZE;
          layout_index++;
        } else {
          // layout_addr fell behind current_page_addr: the layout is malformed or unaligned
          PrintDebug("Error creating page table...\n");
          return NULL;
        }
      }
    }
  }

  return pde;
}

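/*
 * Minimal usage sketch, not part of the original file: how a caller might
 * generate and then dump a guest page table hierarchy. The helper name and
 * the assumption that the layout/list are already populated are illustrative
 * only, so the sketch is kept inside #if 0.
 */
#if 0
static void example_generate_and_dump(vmm_mem_layout_t * layout, vmm_mem_list_t * list) {
  pde_t * guest_pde = generate_guest_page_tables(layout, list);

  if (guest_pde == NULL) {
    PrintDebug("Could not generate guest page tables\n");
    return;
  }

  // Walk and print every present directory/table entry
  PrintDebugPageTables(guest_pde);
}
#endif
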
void PrintPDE(void * virtual_address, pde_t * pde)
{
  PrintDebug("PDE %p -> %p : present=%x, flags=%x, accessed=%x, reserved=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
             virtual_address,
             (void *) (pde->pt_base_addr << PAGE_POWER),
             pde->present, pde->flags, pde->accessed, pde->reserved,
             pde->large_pages, pde->global_page, pde->vmm_info);
}

void PrintPTE(void * virtual_address, pte_t * pte)
{
  PrintDebug("PTE %p -> %p : present=%x, flags=%x, accessed=%x, dirty=%x, pteAttribute=%x, globalPage=%x, vmm_info=%x\n",
             virtual_address,
             (void*)(pte->page_base_addr << PAGE_POWER),
             pte->present, pte->flags, pte->accessed, pte->dirty,
             pte->pte_attr, pte->global_page, pte->vmm_info);
}

void PrintPD(pde_t * pde)
{
  int i;

  PrintDebug("Page Directory at %p:\n", pde);
  for (i = 0; (i < MAX_PAGE_DIR_ENTRIES) && pde[i].present; i++) {
    PrintPDE((void *)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), &(pde[i]));
  }
}

void PrintPT(void * starting_address, pte_t * pte)
{
  int i;

  // PrintDebug("Page Table at %p:\n", pte);
  for (i = 0; (i < MAX_PAGE_TABLE_ENTRIES) && pte[i].present; i++) {
    PrintPTE(starting_address + (PAGE_SIZE * i), &(pte[i]));
  }
}

void PrintDebugPageTables(pde_t * pde)
{
  int i;

  PrintDebug("Dumping the pages starting with the pde page at %p\n", pde);

  for (i = 0; (i < MAX_PAGE_DIR_ENTRIES) && pde[i].present; i++) {
    PrintPDE((void *)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), &(pde[i]));
    PrintPT((void *)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), (void *)(pde[i].pt_base_addr << PAGE_POWER));
  }
}

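/*
 * Sketch, not in the original source: translating a guest virtual address by
 * walking the two-level structures built above. It relies only on fields used
 * elsewhere in this file (present, pt_base_addr, page_base_addr) and the
 * PAGE_POWER shift; the helper name and the 32-bit 10/10/12 address split are
 * assumptions made for illustration.
 */
#if 0
static void * example_lookup_page(pde_t * pde, addr_t guest_addr) {
  uint_t pde_index = (guest_addr >> 22) & 0x3ff;   // top 10 bits select the PDE
  uint_t pte_index = (guest_addr >> 12) & 0x3ff;   // next 10 bits select the PTE

  if (!pde[pde_index].present) {
    return NULL;                                   // no page table mapped here
  }

  pte_t * pte = (pte_t *)(pde[pde_index].pt_base_addr << PAGE_POWER);

  if (!pte[pte_index].present) {
    return NULL;                                   // page not mapped
  }

  // Recombine the page frame base with the 12-bit offset within the page
  return (void *)((pte[pte_index].page_base_addr << PAGE_POWER) | (guest_addr & 0xfff));
}
#endif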