1 #include <geekos/vmm_paging.h>
3 #include <geekos/vmm.h>
7 extern struct vmm_os_hooks * os_hooks;
12 /* We generate a page table to correspond to a given memory layout,
13 * pulling pages from the mem_list when necessary.
14 * If there are any gaps in the layout, we add them as unmapped pages.
 */
/*
 * Build a two-level guest page table (one PDE page plus one PTE page per
 * present directory entry) that mirrors `layout`, drawing backing frames
 * from `list` for GUEST regions.  Gaps in the layout and UNMAPPED regions
 * become not-present entries.
 *
 * NOTE(review): this excerpt elides several original lines (declarations
 * of i/j, the not-present entry setup, error-path returns, and the final
 * `return pde;`), so only the visible flow is documented here.
 */
16 vmm_pde_t * generate_guest_page_tables(vmm_mem_layout_t * layout, vmm_mem_list_t * list) {
17 ullong_t current_page_addr = 0;
18 uint_t layout_index = 0;
19 uint_t list_index = 0;
20 ullong_t layout_addr = 0;
22 uint_t num_entries = layout->num_pages; // The number of pages left in the layout
// One host page holds the entire page directory.
// NOTE(review): allocate_pages' return is used directly as a pointer —
// presumably it returns a page-aligned host-virtual address; confirm.
27 vmm_pde_t * pde = os_hooks->allocate_pages(1);
29 for (i = 0; i < MAX_PAGE_DIR_ENTRIES; i++) {
// Layout exhausted: remaining directory entries are left not-present
// (the lines clearing present/flags here are elided in this excerpt).
30 if (num_entries == 0) {
35 pde[i].large_pages = 0;
36 pde[i].global_page = 0;
38 pde[i].pt_base_addr = 0;
// Otherwise allocate a fresh page table for this directory entry and
// mark it present/writable/executable/user-accessible.
40 vmm_pte_t * pte = os_hooks->allocate_pages(1);
43 pde[i].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
46 pde[i].large_pages = 0;
47 pde[i].global_page = 0;
// Store the page table's frame number (macro name "ALLIGNED" is the
// project's existing spelling).
49 pde[i].pt_base_addr = PAGE_ALLIGNED_ADDR(pte);
53 for (j = 0; j < MAX_PAGE_TABLE_ENTRIES; j++) {
54 layout_addr = get_mem_layout_addr(layout, layout_index);
// Guest address falls before the next layout entry (or the layout is
// spent): emit a not-present filler PTE.
56 if ((current_page_addr < layout_addr) || (num_entries == 0)) {
57 // We have a gap in the layout, fill with unmapped page
63 pte[j].global_page = 0;
65 pte[j].page_base_addr = 0;
67 current_page_addr += PAGE_SIZE;
68 } else if (current_page_addr == layout_addr) {
69 // Set up the Table entry to map correctly to the layout region
70 layout_region_t * page_region = get_mem_layout_region(layout, layout_addr);
72 if (page_region->type == UNMAPPED) {
77 pte[j].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
83 pte[j].global_page = 0;
86 if (page_region->type == UNMAPPED) {
87 pte[j].page_base_addr = 0;
88 } else if (page_region->type == SHARED) {
// SHARED regions map straight to host memory at a fixed offset
// from the region's start.
89 addr_t host_addr = page_region->host_addr + (layout_addr - page_region->start);
// >> 12 extracts the physical frame number (page size 4KB).
91 pte[j].page_base_addr = host_addr >> 12;
92 pte[j].vmm_info = SHARED_PAGE;
93 } else if (page_region->type == GUEST) {
// GUEST regions consume the next free frame from the mem list.
94 addr_t list_addr = get_mem_list_addr(list, list_index++);
// NOTE(review): addr_t is presumably unsigned, so this relies on
// implicit conversion of -1; verify get_mem_list_addr's failure
// sentinel.  On failure everything allocated so far is torn down
// (the elided lines presumably return an error).
96 if (list_addr == -1) {
99 free_guest_page_tables(pde);
// NOTE(review): "%x" may truncate list_addr on builds where addr_t
// is wider than int — confirm the format specifier.
102 PrintDebug("Adding guest page (%x)\n", list_addr);
103 pte[j].page_base_addr = list_addr >> 12;
105 // Reset this when we move over to dynamic page allocation
106 // pte[j].vmm_info = GUEST_PAGE;
107 pte[j].vmm_info = SHARED_PAGE;
111 current_page_addr += PAGE_SIZE;
// Fallthrough error case: current_page_addr overshot layout_addr,
// meaning the layout is inconsistent — tear down and bail.
115 PrintDebug("Error creating page table...\n");
117 free_guest_page_tables(pde);
/*
 * Tear down a page-table hierarchy built by generate_guest_page_tables:
 * free each guest-owned data frame, then each page table, then the
 * directory page itself.
 * NOTE(review): closing braces/blank lines are elided in this excerpt.
 */
128 void free_guest_page_tables(vmm_pde_t * pde) {
132 for (i = 0; (i < MAX_PAGE_DIR_ENTRIES); i++) {
133 if (pde[i].present) {
// Reconstruct the page-table pointer from the stored frame number.
// NOTE(review): assumes host virtual == host physical for these
// allocations (identity mapping) — confirm against allocate_pages.
134 vmm_pte_t * pte = (vmm_pte_t *)(pde[i].pt_base_addr << PAGE_POWER);
136 for (j = 0; (j < MAX_PAGE_TABLE_ENTRIES); j++) {
// Only frames tagged GUEST_PAGE are owned by this hierarchy;
// SHARED pages belong to the host and must not be freed here.
137 if ((pte[j].present) && (pte[j].vmm_info & GUEST_PAGE)){
138 os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
142 os_hooks->free_page(pte);
146 os_hooks->free_page(pde);
/*
 * Debug-print a single page-directory entry: the guest virtual address it
 * covers, the page table it points to, and its flag bits.
 * NOTE(review): the remaining PrintDebug arguments are elided in this
 * excerpt.
 */
152 void PrintPDE(void * virtual_address, vmm_pde_t * pde)
154 PrintDebug("PDE %p -> %p : present=%x, flags=%x, accessed=%x, reserved=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
156 (void *) (pde->pt_base_addr << PAGE_POWER),
/*
 * Debug-print a single page-table entry: the guest virtual address it
 * maps, the host frame it points to, and its flag bits.
 * NOTE(review): the remaining PrintDebug arguments are elided in this
 * excerpt.
 */
166 void PrintPTE(void * virtual_address, vmm_pte_t * pte)
168 PrintDebug("PTE %p -> %p : present=%x, flags=%x, accessed=%x, dirty=%x, pteAttribute=%x, globalPage=%x, vmm_info=%x\n",
170 (void*)(pte->page_base_addr << PAGE_POWER),
/*
 * Debug-print every leading present entry of a page directory.
 * Note the loop condition: iteration stops at the FIRST non-present
 * entry, so any present entries after a hole are not printed.
 */
182 void PrintPD(vmm_pde_t * pde)
186 PrintDebug("Page Directory at %p:\n", pde);
187 for (i = 0; (i < MAX_PAGE_DIR_ENTRIES) && pde[i].present; i++) {
// Each PDE covers PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES bytes (4MB).
188 PrintPDE((void*)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), &(pde[i]));
/*
 * Debug-print every leading present entry of one page table, labelling
 * each with the guest virtual address it maps (starting_address plus the
 * entry's page offset).  Like PrintPD, iteration stops at the first
 * non-present entry.
 */
192 void PrintPT(void * starting_address, vmm_pte_t * pte)
196 PrintDebug("Page Table at %p:\n", pte);
197 for (i = 0; (i < MAX_PAGE_TABLE_ENTRIES) && pte[i].present; i++) {
198 PrintPTE(starting_address + (PAGE_SIZE * i), &(pte[i]));
/*
 * Debug-dump the full hierarchy: each present directory entry followed by
 * its whole page table.  Stops at the first non-present PDE.
 * NOTE(review): the function's closing lines run past the end of this
 * excerpt.
 */
206 void PrintDebugPageTables(vmm_pde_t * pde)
210 PrintDebug("Dumping the pages starting with the pde page at %p\n", pde);
212 for (i = 0; (i < MAX_PAGE_DIR_ENTRIES) && pde[i].present; i++) {
213 PrintPDE((void *)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), &(pde[i]));
// Recover the table pointer from the stored frame number — assumes
// identity-mapped host addresses, same as free_guest_page_tables.
214 PrintPT((void *)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), (void *)(pde[i].pt_base_addr << PAGE_POWER));