#include <geekos/vmm_paging.h>

#include <geekos/vmm.h>

// Host OS services (page allocation, physical<->virtual translation, etc.),
// supplied by the embedding OS when the VMM is initialized.
extern struct vmm_os_hooks * os_hooks;
void delete_page_tables_pde32(vmm_pde_t * pde) {
  int i, j;

  if (pde == NULL) {
    return;
  }

  for (i = 0; (i < MAX_PAGE_DIR_ENTRIES); i++) {
    if (pde[i].present) {
      vmm_pte_t * pte = (vmm_pte_t *)(pde[i].pt_base_addr << PAGE_POWER);

      for (j = 0; (j < MAX_PAGE_TABLE_ENTRIES); j++) {
        // Only free pages that were allocated on the guest's behalf;
        // shared pages are owned elsewhere
        if ((pte[j].present) && (pte[j].vmm_info & GUEST_PAGE)) {
          os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
        }
      }

      os_hooks->free_page(pte);
    }
  }

  os_hooks->free_page(pde);
}
int init_shadow_paging_state(shadow_paging_state_t * state)
{
  state->guest_page_directory_type = state->shadow_page_directory_type = PDE32;

  state->guest_page_directory = state->shadow_page_directory = NULL;

  init_shadow_map(&(state->shadow_map));

  return 0;
}
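
/*
 * A minimal lifecycle sketch (illustrative only, and assuming the usual
 * declarations in vmm_paging.h): initialize the state, point it at the
 * guest's page directory once that is known, then rebuild the shadow
 * wholesale. The guest_pdir parameter is a hypothetical stand-in for the
 * guest physical address the guest loaded into CR3.
 */
static inline void example_shadow_paging_lifecycle(shadow_paging_state_t * state, void * guest_pdir) {
  init_shadow_paging_state(state);

  state->guest_page_directory = guest_pdir;  // guest physical address of its PDE32 directory

  if (wholesale_update_shadow_paging_state(state) != 0) {
    // the guest referenced physical memory it does not own,
    // or is using an unsupported paging mode
  }
}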
int wholesale_update_shadow_paging_state(shadow_paging_state_t * state)
{
  int i, j;
  vmm_pde_t * cur_guest_pde, * cur_shadow_pde;
  vmm_pte_t * cur_guest_pte, * cur_shadow_pte;

  // For now, we'll only work with PDE32
  if (state->guest_page_directory_type != PDE32) {
    return -1;
  }

  cur_shadow_pde = (vmm_pde_t *)(state->shadow_page_directory);

  cur_guest_pde = (vmm_pde_t *)(os_hooks->physical_to_virtual(state->guest_page_directory));

  // Delete the current page table
  delete_page_tables_pde32(cur_shadow_pde);

  cur_shadow_pde = os_hooks->allocate_pages(1);

  state->shadow_page_directory = cur_shadow_pde;
  state->shadow_page_directory_type = PDE32;

  for (i = 0; i < MAX_PAGE_DIR_ENTRIES; i++) {
    cur_shadow_pde[i] = cur_guest_pde[i];

    // The shadow can be identical to the guest if it's not present
    if (!cur_shadow_pde[i].present) {
      continue;
    }

    if (cur_shadow_pde[i].large_pages) {
      // large page - just map it through shadow map to generate its physical location
      addr_t guest_addr = PAGE_ADDR(cur_shadow_pde[i].pt_base_addr);
      addr_t host_addr;
      shadow_map_entry_t * ent;

      ent = get_shadow_map_region_by_addr(&(state->shadow_map), guest_addr);

      if (!ent) {
        // FIXME Panic here - guest is trying to map to physical memory
        // it does not own in any way!
        return -1;
      }

      // FIXME Bounds check here to see if it's trying to trick us

      switch (ent->host_type) {
      case HOST_REGION_PHYSICAL_MEMORY:
        // points into currently allocated physical memory, so we just
        // set up the shadow to point to the mapped location
        if (map_guest_physical_to_host_physical(ent, guest_addr, &host_addr)) {
          // FIXME Panic here
          return -1;
        }

        cur_shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(host_addr);
        // FIXME set vmm_info bits here
        break;
      case HOST_REGION_UNALLOCATED:
        // points to physical memory that is *allowed* but that we
        // have not yet allocated. We mark it as not present and set a
        // bit to remind us to allocate it later
        cur_shadow_pde[i].present = 0;
        // FIXME Set vmm_info bits here so that we know that we will be
        // allocating it later
        break;
      case HOST_REGION_NOTHING:
        // points to physical memory that is NOT ALLOWED.
        // We will mark it as not present and set a bit to remind
        // us that it's bad later and insert a GPF then
        cur_shadow_pde[i].present = 0;
        break;
      case HOST_REGION_MEMORY_MAPPED_DEVICE:
      case HOST_REGION_REMOTE:
      case HOST_REGION_SWAPPED:
      default:
        // FIXME Panic. Currently unhandled
        return -1;
      }
    } else {
      // small page - set PDE and follow down to the child table
      addr_t guest_addr;
      addr_t host_addr;
      shadow_map_entry_t * ent;

      cur_shadow_pde[i] = cur_guest_pde[i];

      // Allocate a new second level page table for the shadow
      cur_shadow_pte = os_hooks->allocate_pages(1);

      // make our first level page table in the shadow point to it
      cur_shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(cur_shadow_pte);

      guest_addr = PAGE_ADDR(cur_guest_pde[i].pt_base_addr);

      ent = get_shadow_map_region_by_addr(&(state->shadow_map), guest_addr);

      if (!ent) {
        // FIXME Panic here - guest is trying to map to physical memory
        // it does not own in any way!
        return -1;
      }

      // Address of the relevant second level page table in the guest
      if (map_guest_physical_to_host_physical(ent, guest_addr, &host_addr)) {
        // FIXME Panic here
        return -1;
      }

      // host_addr now contains the host physical address for the guest's 2nd level page table
      // Now we transform it to the relevant virtual address
      cur_guest_pte = os_hooks->physical_to_virtual((void *)host_addr);

      // Now we walk through the second level guest page table
      // and clone it into the shadow
      for (j = 0; j < MAX_PAGE_TABLE_ENTRIES; j++) {
        cur_shadow_pte[j] = cur_guest_pte[j];

        // As at the directory level, a non-present entry can be copied verbatim
        if (!cur_shadow_pte[j].present) {
          continue;
        }

        addr_t guest_addr = PAGE_ADDR(cur_shadow_pte[j].page_base_addr);

        shadow_map_entry_t * ent;

        ent = get_shadow_map_region_by_addr(&(state->shadow_map), guest_addr);

        if (!ent) {
          // FIXME Panic here - guest is trying to map to physical memory
          // it does not own in any way!
          return -1;
        }

        switch (ent->host_type) {
        case HOST_REGION_PHYSICAL_MEMORY:
          // points into currently allocated physical memory, so we just
          // set up the shadow to point to the mapped location
          if (map_guest_physical_to_host_physical(ent, guest_addr, &host_addr)) {
            // FIXME Panic here
            return -1;
          }

          cur_shadow_pte[j].page_base_addr = PAGE_ALIGNED_ADDR(host_addr);
          // FIXME set vmm_info bits here
          break;
        case HOST_REGION_UNALLOCATED:
          // points to physical memory that is *allowed* but that we
          // have not yet allocated. We mark it as not present and set a
          // bit to remind us to allocate it later
          cur_shadow_pte[j].present = 0;
          // FIXME Set vmm_info bits here so that we know that we will be
          // allocating it later
          break;
        case HOST_REGION_NOTHING:
          // points to physical memory that is NOT ALLOWED.
          // We will mark it as not present and set a bit to remind
          // us that it's bad later and insert a GPF then
          cur_shadow_pte[j].present = 0;
          break;
        case HOST_REGION_MEMORY_MAPPED_DEVICE:
        case HOST_REGION_REMOTE:
        case HOST_REGION_SWAPPED:
        default:
          // FIXME Panic. Currently unhandled
          return -1;
        }
      }
    }
  }

  return 0;
}
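
/*
 * Note: the host-region switch above appears twice, once for the large-page
 * (PDE) path and once for the 4KB (PTE) path. A shared helper is sketched
 * below (a hypothetical refactor, not part of the current implementation):
 * it resolves a guest physical address against the shadow map and reports
 * whether the shadow entry should be marked present.
 */
static int resolve_shadow_mapping(shadow_paging_state_t * state, addr_t guest_addr,
                                  addr_t * host_addr, char * present) {
  shadow_map_entry_t * ent = get_shadow_map_region_by_addr(&(state->shadow_map), guest_addr);

  if (!ent) {
    return -1;  // guest is mapping physical memory it does not own
  }

  switch (ent->host_type) {
  case HOST_REGION_PHYSICAL_MEMORY:
    // already-allocated host memory: translate the address and mark present
    if (map_guest_physical_to_host_physical(ent, guest_addr, host_addr)) {
      return -1;
    }
    *present = 1;
    return 0;
  case HOST_REGION_UNALLOCATED:
  case HOST_REGION_NOTHING:
    // allowed-but-unallocated or forbidden memory: the caller marks the
    // entry not present (and would set vmm_info bits to distinguish the two)
    *present = 0;
    return 0;
  default:
    // device, remote, and swapped regions are currently unhandled
    return -1;
  }
}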
/* We generate a page table to correspond to a given memory layout,
 * pulling pages from the mem_list when necessary.
 * If there are any gaps in the layout, we add them as unmapped pages.
 */
vmm_pde_t * generate_guest_page_tables(vmm_mem_layout_t * layout, vmm_mem_list_t * list) {
  ullong_t current_page_addr = 0;
  uint_t layout_index = 0;
  uint_t list_index = 0;
  ullong_t layout_addr = 0;
  int i, j;

  uint_t num_entries = layout->num_pages;  // The number of pages left in the layout

  vmm_pde_t * pde = os_hooks->allocate_pages(1);

  for (i = 0; i < MAX_PAGE_DIR_ENTRIES; i++) {
    if (num_entries == 0) {
      // The layout is exhausted; mark the rest of the directory not present
      pde[i].present = 0;
      pde[i].flags = 0;
      pde[i].accessed = 0;
      pde[i].reserved = 0;
      pde[i].large_pages = 0;
      pde[i].global_page = 0;
      pde[i].vmm_info = 0;
      pde[i].pt_base_addr = 0;
    } else {
      vmm_pte_t * pte = os_hooks->allocate_pages(1);

      pde[i].present = 1;
      pde[i].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
      pde[i].accessed = 0;
      pde[i].reserved = 0;
      pde[i].large_pages = 0;
      pde[i].global_page = 0;
      pde[i].vmm_info = 0;
      pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(pte);

      for (j = 0; j < MAX_PAGE_TABLE_ENTRIES; j++) {
        layout_addr = get_mem_layout_addr(layout, layout_index);

        if ((current_page_addr < layout_addr) || (num_entries == 0)) {
          // We have a gap in the layout, fill with an unmapped page
          pte[j].present = 0;
          pte[j].flags = 0;
          pte[j].accessed = 0;
          pte[j].dirty = 0;
          pte[j].pte_attr = 0;
          pte[j].global_page = 0;
          pte[j].vmm_info = 0;
          pte[j].page_base_addr = 0;

          current_page_addr += PAGE_SIZE;
        } else if (current_page_addr == layout_addr) {
          // Set up the table entry to map correctly to the layout region
          layout_region_t * page_region = get_mem_layout_region(layout, layout_addr);

          if (page_region->type == UNMAPPED) {
            pte[j].present = 0;
            pte[j].flags = 0;
          } else {
            pte[j].present = 1;
            pte[j].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
          }

          pte[j].accessed = 0;
          pte[j].dirty = 0;
          pte[j].pte_attr = 0;
          pte[j].global_page = 0;
          pte[j].vmm_info = 0;

          if (page_region->type == UNMAPPED) {
            pte[j].page_base_addr = 0;
          } else if (page_region->type == SHARED) {
            addr_t host_addr = page_region->host_addr + (layout_addr - page_region->start);

            pte[j].page_base_addr = host_addr >> 12;
            pte[j].vmm_info = SHARED_PAGE;
          } else if (page_region->type == GUEST) {
            addr_t list_addr = get_mem_list_addr(list, list_index++);

            if (list_addr == -1) {
              // error: the page list ran out before the layout did
              free_guest_page_tables(pde);
              return NULL;
            }

            PrintDebug("Adding guest page (%x)\n", list_addr);
            pte[j].page_base_addr = list_addr >> 12;

            // Reset this when we move over to dynamic page allocation
            // pte[j].vmm_info = GUEST_PAGE;
            pte[j].vmm_info = SHARED_PAGE;
          }

          num_entries--;
          layout_index++;
          current_page_addr += PAGE_SIZE;
        } else {
          // error: the layout address fell behind the current page address
          PrintDebug("Error creating page table...\n");
          free_guest_page_tables(pde);
          return NULL;
        }
      }
    }
  }

  return pde;
}
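
/*
 * Note: the host_addr >> 12 shifts in the SHARED/GUEST branches above compute
 * the 4KB page frame number. Assuming PAGE_ALIGNED_ADDR(x) is defined as a
 * 12-bit right shift in vmm_paging.h, they are interchangeable with the macro
 * used elsewhere in this file, e.g.:
 *
 *   pte[j].page_base_addr = PAGE_ALIGNED_ADDR(host_addr);  // same as host_addr >> 12
 */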
void PrintPDE(void * virtual_address, vmm_pde_t * pde)
{
  PrintDebug("PDE %p -> %p : present=%x, flags=%x, accessed=%x, reserved=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
             virtual_address,
             (void *)(pde->pt_base_addr << PAGE_POWER),
             pde->present,
             pde->flags,
             pde->accessed,
             pde->reserved,
             pde->large_pages,
             pde->global_page,
             pde->vmm_info);
}

void PrintPTE(void * virtual_address, vmm_pte_t * pte)
{
  PrintDebug("PTE %p -> %p : present=%x, flags=%x, accessed=%x, dirty=%x, pteAttribute=%x, globalPage=%x, vmm_info=%x\n",
             virtual_address,
             (void *)(pte->page_base_addr << PAGE_POWER),
             pte->present,
             pte->flags,
             pte->accessed,
             pte->dirty,
             pte->pte_attr,
             pte->global_page,
             pte->vmm_info);
}
void PrintPD(vmm_pde_t * pde)
{
  int i;

  PrintDebug("Page Directory at %p:\n", pde);

  for (i = 0; (i < MAX_PAGE_DIR_ENTRIES) && pde[i].present; i++) {
    PrintPDE((void *)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), &(pde[i]));
  }
}
void PrintPT(void * starting_address, vmm_pte_t * pte)
{
  int i;

  PrintDebug("Page Table at %p:\n", pte);

  for (i = 0; (i < MAX_PAGE_TABLE_ENTRIES) && pte[i].present; i++) {
    PrintPTE(starting_address + (PAGE_SIZE * i), &(pte[i]));
  }
}
void PrintDebugPageTables(vmm_pde_t * pde)
{
  int i;

  PrintDebug("Dumping the pages starting with the pde page at %p\n", pde);

  for (i = 0; (i < MAX_PAGE_DIR_ENTRIES) && pde[i].present; i++) {
    PrintPDE((void *)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), &(pde[i]));
    PrintPT((void *)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), (vmm_pte_t *)(pde[i].pt_base_addr << PAGE_POWER));
  }
}
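
/*
 * A minimal build-and-dump sketch (illustrative only): generate a guest page
 * table hierarchy from a layout and a backing page list prepared elsewhere
 * (see vmm_mem), then dump it with the printers above. Both parameters are
 * assumed to have been populated by the caller.
 */
static inline vmm_pde_t * example_build_and_dump(vmm_mem_layout_t * layout, vmm_mem_list_t * list) {
  vmm_pde_t * pde = generate_guest_page_tables(layout, list);

  if (pde != NULL) {
    PrintDebugPageTables(pde);  // walk the present entries of both levels
  }

  return pde;
}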
pml4e64_t * generate_guest_page_tables_64(vmm_mem_layout_t * layout, vmm_mem_list_t * list) {
  pml4e64_t * pml = os_hooks->allocate_pages(1);

  ullong_t current_page_addr = 0;
  uint_t layout_index = 0;
  uint_t list_index = 0;
  ullong_t layout_addr = 0;
  int m, k, i, j;

  uint_t num_entries = layout->num_pages;  // The number of pages left in the layout

  for (m = 0; m < MAX_PAGE_MAP_ENTRIES_64; m++) {
    if (num_entries == 0) {
      pml[m].present = 0;
      pml[m].writable = 0;
      pml[m].user = 0;
      pml[m].pwt = 0;
      pml[m].pcd = 0;
      pml[m].accessed = 0;
      pml[m].reserved = 0;
      pml[m].zero = 0;
      pml[m].vmm_info = 0;
      pml[m].pdp_base_addr_lo = 0;
      pml[m].pdp_base_addr_hi = 0;
      pml[m].available = 0;
      pml[m].no_execute = 0;
    } else {
      pdpe64_t * pdpe = os_hooks->allocate_pages(1);

      pml[m].present = 1;
      pml[m].writable = 1;
      pml[m].user = 1;
      pml[m].pwt = 0;
      pml[m].pcd = 0;
      pml[m].accessed = 0;
      pml[m].reserved = 0;
      pml[m].zero = 0;
      pml[m].vmm_info = 0;
      pml[m].pdp_base_addr_lo = PAGE_ALIGNED_ADDR(pdpe) & 0xfffff;
      pml[m].pdp_base_addr_hi = 0;
      pml[m].available = 0;
      pml[m].no_execute = 0;

      for (k = 0; k < MAX_PAGE_DIR_PTR_ENTRIES_64; k++) {
        if (num_entries == 0) {
          pdpe[k].present = 0;
          pdpe[k].writable = 0;
          pdpe[k].user = 0;
          pdpe[k].pwt = 0;
          pdpe[k].pcd = 0;
          pdpe[k].accessed = 0;
          pdpe[k].reserved = 0;
          pdpe[k].large_pages = 0;
          pdpe[k].zero = 0;
          pdpe[k].vmm_info = 0;
          pdpe[k].pd_base_addr_lo = 0;
          pdpe[k].pd_base_addr_hi = 0;
          pdpe[k].available = 0;
          pdpe[k].no_execute = 0;
        } else {
          pde64_t * pde = os_hooks->allocate_pages(1);

          pdpe[k].present = 1;
          pdpe[k].writable = 1;
          pdpe[k].user = 1;
          pdpe[k].pwt = 0;
          pdpe[k].pcd = 0;
          pdpe[k].accessed = 0;
          pdpe[k].reserved = 0;
          pdpe[k].large_pages = 0;
          pdpe[k].zero = 0;
          pdpe[k].vmm_info = 0;
          pdpe[k].pd_base_addr_lo = PAGE_ALIGNED_ADDR(pde) & 0xfffff;
          pdpe[k].pd_base_addr_hi = 0;
          pdpe[k].available = 0;
          pdpe[k].no_execute = 0;

          for (i = 0; i < MAX_PAGE_DIR_ENTRIES_64; i++) {
            if (num_entries == 0) {
              pde[i].present = 0;
              pde[i].flags = 0;
              pde[i].accessed = 0;
              pde[i].reserved = 0;
              pde[i].large_pages = 0;
              pde[i].reserved2 = 0;
              pde[i].vmm_info = 0;
              pde[i].pt_base_addr_lo = 0;
              pde[i].pt_base_addr_hi = 0;
              pde[i].available = 0;
              pde[i].no_execute = 0;
            } else {
              pte64_t * pte = os_hooks->allocate_pages(1);

              pde[i].present = 1;
              pde[i].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
              pde[i].accessed = 0;
              pde[i].reserved = 0;
              pde[i].large_pages = 0;
              pde[i].reserved2 = 0;
              pde[i].vmm_info = 0;
              pde[i].pt_base_addr_lo = PAGE_ALIGNED_ADDR(pte) & 0xfffff;
              pde[i].pt_base_addr_hi = 0;
              pde[i].available = 0;
              pde[i].no_execute = 0;

              for (j = 0; j < MAX_PAGE_TABLE_ENTRIES_64; j++) {
                layout_addr = get_mem_layout_addr(layout, layout_index);

                if ((current_page_addr < layout_addr) || (num_entries == 0)) {
                  // We have a gap in the layout, fill with an unmapped page
                  pte[j].present = 0;
                  pte[j].flags = 0;
                  pte[j].accessed = 0;
                  pte[j].dirty = 0;
                  pte[j].pte_attr = 0;
                  pte[j].global_page = 0;
                  pte[j].vmm_info = 0;
                  pte[j].page_base_addr_lo = 0;
                  pte[j].page_base_addr_hi = 0;
                  pte[j].available = 0;
                  pte[j].no_execute = 0;

                  current_page_addr += PAGE_SIZE;
                } else if (current_page_addr == layout_addr) {
                  // Set up the table entry to map correctly to the layout region
                  layout_region_t * page_region = get_mem_layout_region(layout, layout_addr);

                  if (page_region->type == UNMAPPED) {
                    pte[j].present = 0;
                    pte[j].flags = 0;
                  } else {
                    pte[j].present = 1;
                    pte[j].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
                  }

                  pte[j].accessed = 0;
                  pte[j].dirty = 0;
                  pte[j].pte_attr = 0;
                  pte[j].global_page = 0;
                  pte[j].vmm_info = 0;
                  pte[j].available = 0;
                  pte[j].no_execute = 0;

                  if (page_region->type == UNMAPPED) {
                    pte[j].page_base_addr_lo = 0;
                    pte[j].page_base_addr_hi = 0;
                  } else if (page_region->type == SHARED) {
                    addr_t host_addr = page_region->host_addr + (layout_addr - page_region->start);

                    pte[j].page_base_addr_lo = PAGE_ALIGNED_ADDR(host_addr) & 0xfffff;
                    pte[j].page_base_addr_hi = 0;
                    pte[j].vmm_info = SHARED_PAGE;
                  } else if (page_region->type == GUEST) {
                    addr_t list_addr = get_mem_list_addr(list, list_index++);

                    if (list_addr == -1) {
                      // error: the page list ran out before the layout did
                      //free_guest_page_tables(pde);
                      return NULL;
                    }

                    PrintDebug("Adding guest page (%x)\n", list_addr);
                    pte[j].page_base_addr_lo = PAGE_ALIGNED_ADDR(list_addr) & 0xfffff;
                    pte[j].page_base_addr_hi = 0;

                    // Reset this when we move over to dynamic page allocation
                    // pte[j].vmm_info = GUEST_PAGE;
                    pte[j].vmm_info = SHARED_PAGE;
                  }

                  num_entries--;
                  layout_index++;
                  current_page_addr += PAGE_SIZE;
                } else {
                  // error: the layout address fell behind the current page address
                  PrintDebug("Error creating page table...\n");
                  // free_guest_page_tables64(pde);
                  return NULL;
                }
              }
            }
          }
        }
      }
    }
  }

  return pml;
}
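
/*
 * For reference, a sketch of the long-mode address decomposition that the
 * four-level hierarchy above implements (standard x86-64 paging; the helpers
 * are hypothetical, not used elsewhere in this file): a 48-bit virtual
 * address splits into four 9-bit table indices plus a 12-bit page offset.
 */
static inline uint_t pml4_index64(ullong_t vaddr)  { return (vaddr >> 39) & 0x1ff; }  // bits 47:39
static inline uint_t pdpe_index64(ullong_t vaddr)  { return (vaddr >> 30) & 0x1ff; }  // bits 38:30
static inline uint_t pde_index64(ullong_t vaddr)   { return (vaddr >> 21) & 0x1ff; }  // bits 29:21
static inline uint_t pte_index64(ullong_t vaddr)   { return (vaddr >> 12) & 0x1ff; }  // bits 20:12
static inline uint_t page_offset64(ullong_t vaddr) { return vaddr & 0xfff; }          // bits 11:0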