1 #include <geekos/vmm_paging.h>
3 #include <geekos/vmm.h>
5 #include <geekos/vm_guest_mem.h>
8 extern struct vmm_os_hooks * os_hooks;
10 void delete_page_tables_pde32(pde32_t * pde) {
17 for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
19 pte32_t * pte = (pte32_t *)(pde[i].pt_base_addr << PAGE_POWER);
21 for (j = 0; (j < MAX_PTE32_ENTRIES); j++) {
22 if ((pte[j].present)) {
23 os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
27 os_hooks->free_page(pte);
31 os_hooks->free_page(pde);
36 /* We generate a page table to correspond to a given memory layout,
37  * pulling pages from the mem_list when necessary.
38  * If there are any gaps in the layout, we add them as unmapped pages.
 */
/*
 * create_passthrough_pde32_pts() -- build a 32-bit page directory plus page
 * tables that map the guest's physical address space straight through onto
 * host frames, as described by guest_info->mem_map.
 *
 * NOTE(review): this listing is partially elided (original line numbers are
 * non-contiguous); declarations of i, j, host_addr, pte_present and several
 * braces/else-branches are not visible here.
 */
40 pde32_t * create_passthrough_pde32_pts(guest_info_t * guest_info) {
/* next guest physical address to map; advanced one page per PTE slot */
41 ullong_t current_page_addr = 0;
43 shadow_map_t * map = guest_info->mem_map;
/* one fresh page for the page directory itself */
46 pde32_t * pde = os_hooks->allocate_pages(1);
48 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
/* one candidate page table per directory entry; freed below if it ends
 * up with no present entries */
50 pte32_t * pte = os_hooks->allocate_pages(1);
53 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
54 shadow_region_t * region = get_shadow_region_by_addr(map, current_page_addr);
/* any region type without a directly mappable host frame gets a
 * non-present PTE (base address 0) */
57 (region->host_type == HOST_REGION_NOTHING) ||
58 (region->host_type == HOST_REGION_UNALLOCATED) ||
59 (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
60 (region->host_type == HOST_REGION_REMOTE) ||
61 (region->host_type == HOST_REGION_SWAPPED)) {
67 pte[j].global_page = 0;
69 pte[j].page_base_addr = 0;
/* mappable region: mark the PTE fully accessible to the guest */
73 pte[j].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
78 pte[j].global_page = 0;
/* translate guest physical -> host physical; -1 means no backing
 * frame (error path elided from this listing) */
81 if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
/* store the 4KB host frame number (physical address >> 12) */
87 pte[j].page_base_addr = host_addr >> 12;
92 current_page_addr += PAGE_SIZE;
/* no PTE in this table was made present: discard the table and leave
 * the directory entry non-present */
95 if (pte_present == 0) {
96 os_hooks->free_page(pte);
102 pde[i].large_pages = 0;
103 pde[i].global_page = 0;
105 pde[i].pt_base_addr = 0;
/* at least one PTE is present: publish the table in the directory */
108 pde[i].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
111 pde[i].large_pages = 0;
112 pde[i].global_page = 0;
/* frame number of the page-table page just filled in */
114 pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(pte);
127 /* We can't do a full lookup because we don't know what context the page tables are in:
128  * the entry addresses could be pointing to either guest physical memory or host physical memory.
129  * Instead we just return the entry address, and a flag to show whether it points to a pte or a large page.
 */
131 pde32_entry_type_t pde32_lookup(pde32_t * pde, addr_t addr, addr_t * entry) {
132 pde32_t * pde_entry = pde[PDE32_INDEX(addr)];
134 if (!pde_entry->present) {
138 *entry = PAGE_ADDR(pde_entry->pt_base_addr);
140 if (pde_entry->large_pages) {
141 *entry += PAGE_OFFSET(addr);
151 int pte32_lookup(pte32_t * pte, addr_t addr, addr_t * entry) {
152 pte32_t * pte_entry = pte[PTE32_INDEX(addr)];
154 if (!pte_entry->present) {
158 *entry = PAGE_ADDR(pte_entry->page_base_addr);
159 *entry += PAGE_OFFSET(addr);
/*
 * PrintPDE32() -- debug-print one page-directory entry.
 * virtual_address is the guest-virtual base this entry covers; the second
 * %p is the physical address of the page table (pt_base_addr << PAGE_POWER).
 * NOTE(review): the remaining PrintDebug arguments (present, flags, etc.)
 * are elided from this listing.
 */
174 void PrintPDE32(void * virtual_address, pde32_t * pde)
176 PrintDebug("PDE %p -> %p : present=%x, flags=%x, accessed=%x, reserved=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
178 (void *) (pde->pt_base_addr << PAGE_POWER),
/*
 * PrintPTE32() -- debug-print one page-table entry.
 * virtual_address is the guest-virtual page this entry maps; the second %p
 * is the physical frame address (page_base_addr << PAGE_POWER).
 * NOTE(review): the remaining PrintDebug arguments are elided from this
 * listing.
 */
188 void PrintPTE32(void * virtual_address, pte32_t * pte)
190 PrintDebug("PTE %p -> %p : present=%x, flags=%x, accessed=%x, dirty=%x, pteAttribute=%x, globalPage=%x, vmm_info=%x\n",
192 (void*)(pte->page_base_addr << PAGE_POWER),
204 void PrintPD32(pde32_t * pde)
208 PrintDebug("Page Directory at %p:\n", pde);
209 for (i = 0; (i < MAX_PDE32_ENTRIES) && pde[i].present; i++) {
210 PrintPDE32((void*)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), &(pde[i]));
214 void PrintPT32(void * starting_address, pte32_t * pte)
218 PrintDebug("Page Table at %p:\n", pte);
219 for (i = 0; (i < MAX_PTE32_ENTRIES) && pte[i].present; i++) {
220 PrintPTE32(starting_address + (PAGE_SIZE * i), &(pte[i]));
/*
 * PrintDebugPageTables() -- walk the page directory, printing each present
 * PDE and then the page table it points to (pt_base_addr << PAGE_POWER).
 * NOTE(review): the loop condition stops at the first non-present directory
 * entry, so present entries after a hole are never printed -- likely
 * unintended; confirm.  The function body continues beyond this excerpt.
 */
228 void PrintDebugPageTables(pde32_t * pde)
232 PrintDebug("Dumping the pages starting with the pde page at %p\n", pde);
234 for (i = 0; (i < MAX_PDE32_ENTRIES) && pde[i].present; i++) {
235 PrintPDE32((void *)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), &(pde[i]));
236 PrintPT32((void *)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), (void *)(pde[i].pt_base_addr << PAGE_POWER));