#include <geekos/vmm_paging.h>

#include <geekos/vmm.h>

extern struct vmm_os_hooks * os_hooks;
/* We generate a page table to correspond to a given memory layout,
 * pulling pages from the mem_list when necessary.
 * If there are any gaps in the layout, we add them as unmapped pages.
 */
vmm_pde_t * generate_guest_page_tables(vmm_mem_layout_t * layout, vmm_mem_list_t * list) {
  ullong_t current_page_addr = 0;
  uint_t layout_index = 0;
  uint_t list_index = 0;
  ullong_t layout_addr = 0;
  int i, j;
  uint_t num_entries = layout->num_pages; // The number of pages left in the layout

  vmm_pde_t * pde = os_hooks->allocate_pages(1);
  for (i = 0; i < MAX_PAGE_DIR_ENTRIES; i++) {
    if (num_entries == 0) {
      // The layout is exhausted, so mark the directory entry not present
      pde[i].present = 0;
      pde[i].flags = 0;
      pde[i].accessed = 0;
      pde[i].reserved = 0;
      pde[i].large_pages = 0;
      pde[i].global_page = 0;
      pde[i].vmm_info = 0;
      pde[i].pt_base_addr = 0;
    } else {
      vmm_pte_t * pte = os_hooks->allocate_pages(1);

      pde[i].present = 1;
      pde[i].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
      pde[i].accessed = 0;
      pde[i].reserved = 0;
      pde[i].large_pages = 0;
      pde[i].global_page = 0;
      pde[i].vmm_info = 0;
      pde[i].pt_base_addr = PAGE_ALLIGNED_ADDR(pte);
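      /* With 4KB pages, pt_base_addr holds the upper 20 bits of the page
       * table's physical address. Assuming PAGE_ALLIGNED_ADDR(x) is
       * effectively (addr >> 12), a table allocated at 0x001a3000 stores
       * pt_base_addr = 0x001a3, and shifting left by PAGE_POWER (as the
       * free/print routines below do) recovers 0x001a3000.
       */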
      for (j = 0; j < MAX_PAGE_TABLE_ENTRIES; j++) {
        layout_addr = get_mem_layout_addr(layout, layout_index);

        if ((current_page_addr < layout_addr) || (num_entries == 0)) {
          // We have a gap in the layout, fill with an unmapped page
          pte[j].present = 0;
          pte[j].flags = 0;
          pte[j].accessed = 0;
          pte[j].dirty = 0;
          pte[j].pte_attr = 0;
          pte[j].global_page = 0;
          pte[j].vmm_info = 0;
          pte[j].page_base_addr = 0;

          current_page_addr += PAGE_SIZE;
        } else if (current_page_addr == layout_addr) {
          // Set up the table entry to map correctly to the layout region
          layout_region_t * page_region = get_mem_layout_region(layout, layout_addr);

          if (page_region->type == UNMAPPED) {
            pte[j].present = 0;
            pte[j].flags = 0;
          } else {
            pte[j].present = 1;
            pte[j].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
          }

          pte[j].accessed = 0;
          pte[j].dirty = 0;
          pte[j].pte_attr = 0;
          pte[j].global_page = 0;
          pte[j].vmm_info = 0;
          if (page_region->type == UNMAPPED) {
            pte[j].page_base_addr = 0;
          } else if (page_region->type == SHARED) {
            addr_t host_addr = page_region->host_addr + (layout_addr - page_region->start);

            pte[j].page_base_addr = host_addr >> 12;
            pte[j].vmm_info = SHARED_PAGE;
          } else if (page_region->type == GUEST) {
            addr_t list_addr = get_mem_list_addr(list, list_index++);
            if (list_addr == -1) {
              // The page list is exhausted; tear down everything built so far
              free_guest_page_tables(pde);
              return NULL;
            }
            PrintDebug("Adding guest page (%x)\n", list_addr);
            pte[j].page_base_addr = list_addr >> 12;

            // Reset this when we move over to dynamic page allocation
            // pte[j].vmm_info = GUEST_PAGE;
            pte[j].vmm_info = SHARED_PAGE;
          }

          num_entries--;
          current_page_addr += PAGE_SIZE;
          layout_index++;
        } else {
          PrintDebug("Error creating page table...\n");
          free_guest_page_tables(pde);
          return NULL;
        }
      }
    }
  }

  return pde;
}
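/* Example usage (sketch only): building an initial guest page directory.
 * The setup calls below are assumptions about the vmm_mem API used
 * elsewhere in this tree (init_mem_layout/init_mem_list and whatever
 * routines populate them), not verified signatures.
 *
 *   vmm_mem_layout_t layout;
 *   vmm_mem_list_t page_list;
 *   vmm_pde_t * guest_pd;
 *
 *   init_mem_layout(&layout);
 *   init_mem_list(&page_list);
 *   // ... add layout regions and free pages here ...
 *
 *   guest_pd = generate_guest_page_tables(&layout, &page_list);
 *   if (guest_pd == NULL) {
 *     PrintDebug("Failed to generate guest page tables\n");
 *   }
 */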
void free_guest_page_tables(vmm_pde_t * pde) {
  int i, j;

  for (i = 0; (i < MAX_PAGE_DIR_ENTRIES); i++) {
    if (pde[i].present) {
      vmm_pte_t * pte = (vmm_pte_t *)(pde[i].pt_base_addr << PAGE_POWER);

      for (j = 0; (j < MAX_PAGE_TABLE_ENTRIES); j++) {
        if ((pte[j].present) && (pte[j].vmm_info & GUEST_PAGE)) {
          os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
        }
      }
      os_hooks->free_page(pte);
    }
  }

  os_hooks->free_page(pde);
}
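/* Note on ownership: only frames tagged GUEST_PAGE in vmm_info are freed
 * above. SHARED pages map host memory we do not own, so freeing them here
 * would be a double free; the page table and directory pages themselves
 * are always ours and are freed unconditionally.
 */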
void PrintPDE(void * virtual_address, vmm_pde_t * pde)
{
  PrintDebug("PDE %p -> %p : present=%x, flags=%x, accessed=%x, reserved=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
             virtual_address,
             (void *)(pde->pt_base_addr << PAGE_POWER),
             pde->present, pde->flags, pde->accessed, pde->reserved,
             pde->large_pages, pde->global_page, pde->vmm_info);
}
void PrintPTE(void * virtual_address, vmm_pte_t * pte)
{
  PrintDebug("PTE %p -> %p : present=%x, flags=%x, accessed=%x, dirty=%x, pteAttribute=%x, globalPage=%x, vmm_info=%x\n",
             virtual_address,
             (void *)(pte->page_base_addr << PAGE_POWER),
             pte->present, pte->flags, pte->accessed, pte->dirty,
             pte->pte_attr, pte->global_page, pte->vmm_info);
}
void PrintPD(vmm_pde_t * pde)
{
  int i;

  PrintDebug("Page Directory at %p:\n", pde);
  for (i = 0; (i < MAX_PAGE_DIR_ENTRIES) && pde[i].present; i++) {
    PrintPDE((void *)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), &(pde[i]));
  }
}
void PrintPT(void * starting_address, vmm_pte_t * pte)
{
  int i;

  PrintDebug("Page Table at %p:\n", pte);
  for (i = 0; (i < MAX_PAGE_TABLE_ENTRIES) && pte[i].present; i++) {
    PrintPTE(starting_address + (PAGE_SIZE * i), &(pte[i]));
  }
}
void PrintDebugPageTables(vmm_pde_t * pde)
{
  int i;

  PrintDebug("Dumping the pages starting with the pde page at %p\n", pde);

  for (i = 0; (i < MAX_PAGE_DIR_ENTRIES) && pde[i].present; i++) {
    PrintPDE((void *)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), &(pde[i]));
    PrintPT((void *)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), (vmm_pte_t *)(pde[i].pt_base_addr << PAGE_POWER));
  }
}
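/* The virtual address handed to PrintPDE/PrintPT above is reconstructed
 * from the directory index alone: with 4KB pages and 1024-entry tables,
 * each directory entry spans PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES = 4MB,
 * so entry 3, for example, starts at 3 * 4MB = 0xC00000.
 */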
pml4e64_t * generate_guest_page_tables_64(vmm_mem_layout_t * layout, vmm_mem_list_t * list) {
  pml4e64_t * pml = os_hooks->allocate_pages(1);

  ullong_t current_page_addr = 0;
  uint_t layout_index = 0;
  uint_t list_index = 0;
  ullong_t layout_addr = 0;
  int i, j, k, m;
  uint_t num_entries = layout->num_pages; // The number of pages left in the layout
  for (m = 0; m < MAX_PAGE_MAP_ENTRIES_64; m++) {
    if (num_entries == 0) {
      pml[m].present = 0;
      pml[m].writable = 0;
      pml[m].user = 0;
      pml[m].pwt = 0;
      pml[m].pcd = 0;
      pml[m].accessed = 0;
      pml[m].reserved = 0;
      pml[m].zero = 0;
      pml[m].vmm_info = 0;
      pml[m].pdp_base_addr_lo = 0;
      pml[m].pdp_base_addr_hi = 0;
      pml[m].available = 0;
      pml[m].no_execute = 0;
    } else {
      pdpe64_t * pdpe = os_hooks->allocate_pages(1);

      pml[m].present = 1;
      pml[m].writable = 1;
      pml[m].user = 1;
      pml[m].pwt = 0;
      pml[m].pcd = 0;
      pml[m].accessed = 0;
      pml[m].reserved = 0;
      pml[m].zero = 0;
      pml[m].vmm_info = 0;
      pml[m].pdp_base_addr_lo = PAGE_ALLIGNED_ADDR(pdpe) & 0xfffff;
      pml[m].pdp_base_addr_hi = 0;
      pml[m].available = 0;
      pml[m].no_execute = 0;
      for (k = 0; k < MAX_PAGE_DIR_PTR_ENTRIES_64; k++) {
        if (num_entries == 0) {
          pdpe[k].present = 0;
          pdpe[k].writable = 0;
          pdpe[k].user = 0;
          pdpe[k].pwt = 0;
          pdpe[k].pcd = 0;
          pdpe[k].accessed = 0;
          pdpe[k].reserved = 0;
          pdpe[k].large_pages = 0;
          pdpe[k].zero = 0;
          pdpe[k].vmm_info = 0;
          pdpe[k].pd_base_addr_lo = 0;
          pdpe[k].pd_base_addr_hi = 0;
          pdpe[k].available = 0;
          pdpe[k].no_execute = 0;
        } else {
          pde64_t * pde = os_hooks->allocate_pages(1);

          pdpe[k].present = 1;
          pdpe[k].writable = 1;
          pdpe[k].user = 1;
          pdpe[k].pwt = 0;
          pdpe[k].pcd = 0;
          pdpe[k].accessed = 0;
          pdpe[k].reserved = 0;
          pdpe[k].large_pages = 0;
          pdpe[k].zero = 0;
          pdpe[k].vmm_info = 0;
          pdpe[k].pd_base_addr_lo = PAGE_ALLIGNED_ADDR(pde) & 0xfffff;
          pdpe[k].pd_base_addr_hi = 0;
          pdpe[k].available = 0;
          pdpe[k].no_execute = 0;
          for (i = 0; i < MAX_PAGE_DIR_ENTRIES_64; i++) {
            if (num_entries == 0) {
              pde[i].present = 0;
              pde[i].flags = 0;
              pde[i].accessed = 0;
              pde[i].reserved = 0;
              pde[i].large_pages = 0;
              pde[i].reserved2 = 0;
              pde[i].vmm_info = 0;
              pde[i].pt_base_addr_lo = 0;
              pde[i].pt_base_addr_hi = 0;
              pde[i].available = 0;
              pde[i].no_execute = 0;
            } else {
              pte64_t * pte = os_hooks->allocate_pages(1);

              pde[i].present = 1;
              pde[i].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
              pde[i].accessed = 0;
              pde[i].reserved = 0;
              pde[i].large_pages = 0;
              pde[i].reserved2 = 0;
              pde[i].vmm_info = 0;
              pde[i].pt_base_addr_lo = PAGE_ALLIGNED_ADDR(pte) & 0xfffff;
              pde[i].pt_base_addr_hi = 0;
              pde[i].available = 0;
              pde[i].no_execute = 0;
              for (j = 0; j < MAX_PAGE_TABLE_ENTRIES_64; j++) {
                layout_addr = get_mem_layout_addr(layout, layout_index);

                if ((current_page_addr < layout_addr) || (num_entries == 0)) {
                  // We have a gap in the layout, fill with an unmapped page
                  pte[j].present = 0;
                  pte[j].flags = 0;
                  pte[j].accessed = 0;
                  pte[j].dirty = 0;
                  pte[j].pte_attr = 0;
                  pte[j].global_page = 0;
                  pte[j].vmm_info = 0;
                  pte[j].page_base_addr_lo = 0;
                  pte[j].page_base_addr_hi = 0;
                  pte[j].available = 0;
                  pte[j].no_execute = 0;

                  current_page_addr += PAGE_SIZE;
                } else if (current_page_addr == layout_addr) {
                  // Set up the table entry to map correctly to the layout region
                  layout_region_t * page_region = get_mem_layout_region(layout, layout_addr);

                  if (page_region->type == UNMAPPED) {
                    pte[j].present = 0;
                    pte[j].flags = 0;
                  } else {
                    pte[j].present = 1;
                    pte[j].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
                  }

                  pte[j].accessed = 0;
                  pte[j].dirty = 0;
                  pte[j].pte_attr = 0;
                  pte[j].global_page = 0;
                  pte[j].vmm_info = 0;
                  pte[j].available = 0;
                  pte[j].no_execute = 0;
                  if (page_region->type == UNMAPPED) {
                    pte[j].page_base_addr_lo = 0;
                    pte[j].page_base_addr_hi = 0;
                  } else if (page_region->type == SHARED) {
                    addr_t host_addr = page_region->host_addr + (layout_addr - page_region->start);

                    pte[j].page_base_addr_lo = PAGE_ALLIGNED_ADDR(host_addr) & 0xfffff;
                    pte[j].page_base_addr_hi = 0;
                    pte[j].vmm_info = SHARED_PAGE;
                  } else if (page_region->type == GUEST) {
                    addr_t list_addr = get_mem_list_addr(list, list_index++);

                    if (list_addr == -1) {
                      // The page list is exhausted; a 64 bit cleanup path is still needed
                      //free_guest_page_tables(pde);
                      return NULL;
                    }
                    PrintDebug("Adding guest page (%x)\n", list_addr);
                    pte[j].page_base_addr_lo = PAGE_ALLIGNED_ADDR(list_addr) & 0xfffff;
                    pte[j].page_base_addr_hi = 0;

                    // Reset this when we move over to dynamic page allocation
                    // pte[j].vmm_info = GUEST_PAGE;
                    pte[j].vmm_info = SHARED_PAGE;
                  }

                  num_entries--;
                  current_page_addr += PAGE_SIZE;
                  layout_index++;
                } else {
                  PrintDebug("Error creating page table...\n");
                  // free_guest_page_tables64(pde);
                  return NULL;
                }
              }
            }
          }
        }
      }
    }
  }

  return pml;
}
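/* The 64 bit entries split each base address into _lo/_hi halves because
 * the bitfields straddle a 32 bit boundary. Assuming PAGE_ALLIGNED_ADDR(x)
 * is effectively (addr >> 12), a table at physical 0x001a3000 yields the
 * 20 bit frame number 0x001a3; masking with 0xfffff keeps the low 20 bits
 * in the _lo field, and the _hi half stays 0 until we map memory above 4GB.
 */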