#include <geekos/vmm_paging.h>

#include <geekos/vmm.h>

#include <geekos/vm_guest_mem.h>

extern struct vmm_os_hooks * os_hooks;
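/* The page allocations in this file go through the os_hooks interface above.
 * A minimal sketch of the two members used here (assumption: the full structure is
 * declared elsewhere, e.g. in vmm.h, and contains additional hooks not shown):
 *
 *   struct vmm_os_hooks {
 *     ...
 *     void * (*allocate_pages)(int num_pages);  // page-aligned allocation
 *     void (*free_page)(void * page);           // release a single page
 *     ...
 *   };
 */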
/* Free a page directory, every page table it references, and every data page
 * those tables map. */
void delete_page_tables_pde32(pde32_t * pde) {
  int i, j;

  for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
    if (pde[i].present) {
      pte32_t * pte = (pte32_t *)(pde[i].pt_base_addr << PAGE_POWER);

      for (j = 0; (j < MAX_PTE32_ENTRIES); j++) {
        if (pte[j].present) {
          os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
        }
      }

      os_hooks->free_page(pte);
    }
  }

  os_hooks->free_page(pde);
}
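/* Note that the loops above free the mapped data pages themselves, not just the
 * page-table pages, so the whole hierarchy is released in one call. Minimal sketch
 * (assumption: 'dir' was built by create_passthrough_pde32_pts, defined later in
 * this file):
 *
 *   delete_page_tables_pde32(dir);   // dir, all of its page tables, and every
 *                                    //   page they map are returned to the host
 */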
/* We can't do a full lookup here because we don't know what context the page tables
 * are in: the entry addresses could point into either guest physical memory or host
 * physical memory. Instead we just return the entry address, along with a flag that
 * says whether it refers to a page table or to a large (4MB) page.
 */
pde32_entry_type_t pde32_lookup(pde32_t * pde, addr_t addr, addr_t * entry) {
  pde32_t * pde_entry = &(pde[PDE32_INDEX(addr)]);

  if (!pde_entry->present) {
    *entry = 0;
    return PDE32_ENTRY_NOT_PRESENT;
  }

  *entry = PAGE_ADDR(pde_entry->pt_base_addr);

  if (pde_entry->large_pages) {
    *entry += PAGE_OFFSET(addr);
    return PDE32_ENTRY_LARGE_PAGE;
  }

  return PDE32_ENTRY_PTE32;
}
/* Same caveat as above: the returned address is only meaningful in whatever address
 * space the page table itself lives in. Returns -1 if the page is not present,
 * 0 on success. */
int pte32_lookup(pte32_t * pte, addr_t addr, addr_t * entry) {
  pte32_t * pte_entry = &(pte[PTE32_INDEX(addr)]);

  if (!pte_entry->present) {
    *entry = 0;
    return -1;
  }

  *entry = PAGE_ADDR(pte_entry->page_base_addr);
  *entry += PAGE_OFFSET(addr);
  return 0;
}
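/* A sketch of a full two-level walk built by chaining the two lookups above.
 * Assumptions: the name example_pde32_walk is illustrative, the pde32_entry_type_t
 * constants referenced below match those declared in vmm_paging.h, and 'pde', the
 * page tables it points to, and the returned address all live in the same address
 * space (see the caveat above pde32_lookup). */
int example_pde32_walk(pde32_t * pde, addr_t vaddr, addr_t * paddr) {
  addr_t entry = 0;

  switch (pde32_lookup(pde, vaddr, &entry)) {
  case PDE32_ENTRY_NOT_PRESENT:
    return -1;                                   /* no mapping at the directory level */
  case PDE32_ENTRY_LARGE_PAGE:
    *paddr = entry;                              /* pde32_lookup already added the page offset */
    return 0;
  default:                                       /* PDE32_ENTRY_PTE32: entry addresses a page table */
    return pte32_lookup((pte32_t *)entry, vaddr, paddr);
  }
}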
/* We generate a page table corresponding to a given memory layout,
 * pulling pages from the mem_list when necessary.
 * If there are any gaps in the layout, we add them as unmapped pages.
 */
pde32_t * create_passthrough_pde32_pts(struct guest_info * guest_info) {
  ullong_t current_page_addr = 0;
  int i, j;
  shadow_map_t * map = &(guest_info->mem_map);

  pde32_t * pde = os_hooks->allocate_pages(1);

  for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
    int pte_present = 0;
    pte32_t * pte = os_hooks->allocate_pages(1);

    for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
      shadow_region_t * region = get_shadow_region_by_addr(map, current_page_addr);

      if (!region ||
          (region->host_type == HOST_REGION_NOTHING) ||
          (region->host_type == HOST_REGION_UNALLOCATED) ||
          (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
          (region->host_type == HOST_REGION_REMOTE) ||
          (region->host_type == HOST_REGION_SWAPPED)) {
        /* Gap in the layout, or a region with no host memory behind it: leave the page unmapped */
        pte[j].present = 0;
        pte[j].flags = 0;
        pte[j].global_page = 0;
        pte[j].page_base_addr = 0;
      } else {
        addr_t host_addr = 0;

        pte[j].present = 1;
        pte[j].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
        pte[j].global_page = 0;

        if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
          /* The guest page has no host backing; we cannot build a passthrough mapping for it */
          return NULL;
        }

        pte[j].page_base_addr = host_addr >> 12;
        pte_present = 1;
      }

      current_page_addr += PAGE_SIZE;
    }

    if (pte_present == 0) {
      /* Nothing in this 4MB region was mapped, so drop the empty page table */
      os_hooks->free_page(pte);

      pde[i].present = 0;
      pde[i].flags = 0;
      pde[i].large_pages = 0;
      pde[i].global_page = 0;
      pde[i].pt_base_addr = 0;
    } else {
      pde[i].present = 1;
      pde[i].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
      pde[i].large_pages = 0;
      pde[i].global_page = 0;
      pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(pte);
    }
  }

  return pde;
}
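/* Typical lifecycle of the passthrough tables, as a sketch ('info' is assumed to be
 * a fully initialized struct guest_info whose mem_map already describes the layout
 * of guest physical memory):
 *
 *   pde32_t * dir = create_passthrough_pde32_pts(info);
 *   PrintDebugPageTables(dir);        // optional: dump the generated mappings (see below)
 *   ...                               // run the guest with 'dir' as its page directory
 *   delete_page_tables_pde32(dir);    // frees the directory, its page tables,
 *                                     //   and every host page they map
 */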
void PrintPDE32(void * virtual_address, pde32_t * pde)
{
  PrintDebug("PDE %p -> %p : present=%x, flags=%x, accessed=%x, reserved=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
             virtual_address,
             (void *)(pde->pt_base_addr << PAGE_POWER),
             pde->present, pde->flags, pde->accessed, pde->reserved,
             pde->large_pages, pde->global_page, pde->vmm_info);
}
void PrintPTE32(void * virtual_address, pte32_t * pte)
{
  PrintDebug("PTE %p -> %p : present=%x, flags=%x, accessed=%x, dirty=%x, pteAttribute=%x, globalPage=%x, vmm_info=%x\n",
             virtual_address,
             (void *)(pte->page_base_addr << PAGE_POWER),
             pte->present, pte->flags, pte->accessed, pte->dirty,
             pte->pte_attr, pte->global_page, pte->vmm_info);
}
void PrintPD32(pde32_t * pde)
{
  int i;

  PrintDebug("Page Directory at %p:\n", pde);
  for (i = 0; (i < MAX_PDE32_ENTRIES) && pde[i].present; i++) {
    PrintPDE32((void *)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), &(pde[i]));
  }
}
void PrintPT32(void * starting_address, pte32_t * pte)
{
  int i;

  PrintDebug("Page Table at %p:\n", pte);
  for (i = 0; (i < MAX_PTE32_ENTRIES) && pte[i].present; i++) {
    PrintPTE32(starting_address + (PAGE_SIZE * i), &(pte[i]));
  }
}
void PrintDebugPageTables(pde32_t * pde)
{
  int i;

  PrintDebug("Dumping the pages starting with the pde page at %p\n", pde);

  /* Note: the walk stops at the first non-present directory entry */
  for (i = 0; (i < MAX_PDE32_ENTRIES) && pde[i].present; i++) {
    PrintPDE32((void *)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), &(pde[i]));
    PrintPT32((void *)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), (void *)(pde[i].pt_base_addr << PAGE_POWER));
  }
}
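/* A worked example of the address arithmetic used above (assuming the usual 32 bit
 * values PAGE_SIZE = 4096 and MAX_PTE32_ENTRIES = 1024):
 *
 *   PDE index i covers the virtual addresses starting at
 *       i * MAX_PTE32_ENTRIES * PAGE_SIZE = i * 0x400000        (a 4MB window)
 *   and PTE index j within that table maps the page at
 *       (i * 0x400000) + (j * PAGE_SIZE) = (i * 0x400000) + (j * 0x1000)
 *
 * so PrintPT32 is handed the base of the 4MB window and offsets it by PAGE_SIZE
 * for each successive entry it prints.
 */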