/* Northwestern University */
/* (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> */

#include <palacios/vmm_paging.h>

#include <palacios/vmm.h>

#include <palacios/vm_guest_mem.h>
void delete_page_tables_pde32(pde32_t * pde) {
  int i, j;

  for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
    if (pde[i].present) {
      pte32_t * pte = (pte32_t *)(pde[i].pt_base_addr << PAGE_POWER);

      for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
        if (pte[j].present) {
          os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
        }
      }
      //PrintDebug("Deleting PTE %d (%x)\n", i, pte);
      os_hooks->free_page(pte);
    }
  }
  // PrintDebug("Deleting PDE (%x)\n", pde);
  os_hooks->free_page(pde);
}
int pt32_lookup(pde32_t * pd, addr_t vaddr, addr_t * paddr) {
  addr_t pde_entry;
  pde32_entry_type_t pde_entry_type;

  pde_entry_type = pde32_lookup(pd, vaddr, &pde_entry);

  if (pde_entry_type == PDE32_ENTRY_PTE32) {
    return pte32_lookup((pte32_t *)pde_entry, vaddr, paddr);
  } else if (pde_entry_type == PDE32_ENTRY_LARGE_PAGE) {
    // For a 4MB page, pde32_lookup has already computed the full address
    *paddr = pde_entry;
    return 0;
  }
  return -1;
}
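
/* Usage sketch (illustrative, not part of the original file): pt32_lookup does
 * the full two-level walk itself, so it is only safe when the page tables are
 * directly addressable by the VMM, e.g. the passthrough tables built below.
 * The helper name is hypothetical.
 */
static void example_print_translation(pde32_t * pd, addr_t vaddr) {
  addr_t paddr;

  if (pt32_lookup(pd, vaddr, &paddr) == 0) {
    PrintDebug("%p maps to %p\n", (void *)vaddr, (void *)paddr);
  } else {
    PrintDebug("%p is unmapped\n", (void *)vaddr);
  }
}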
/* We can't do a full lookup here, because we don't know what context the page
 * tables are in: the entry addresses could be pointing to either guest
 * physical memory or host physical memory. Instead we just return the entry
 * address, and a flag indicating whether it points to a pte or a large page.
 */
pde32_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry) {
  pde32_t * pde_entry = &(pd[PDE32_INDEX(addr)]);

  if (!pde_entry->present) {
    *entry = 0;
    return PDE32_ENTRY_NOT_PRESENT;
  }

  if (pde_entry->large_page) {
    pde32_4MB_t * large_pde = (pde32_4MB_t *)pde_entry;

    *entry = PDE32_4MB_T_ADDR(*large_pde);
    *entry += PD32_4MB_PAGE_OFFSET(addr);
    return PDE32_ENTRY_LARGE_PAGE;
  } else {
    *entry = PDE32_T_ADDR(*pde_entry);
    return PDE32_ENTRY_PTE32;
  }
}
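
/* Usage sketch (illustrative, not part of the original file): when the page
 * tables live in guest physical memory, the entry address that pde32_lookup
 * returns must itself be translated before it can be dereferenced. This sketch
 * assumes the guest_pa_to_host_va() helper from vm_guest_mem.h; the function
 * name example_guest_pt_walk is hypothetical.
 */
static int example_guest_pt_walk(struct guest_info * info, pde32_t * pd,
                                 addr_t vaddr, addr_t * guest_paddr) {
  addr_t entry;
  addr_t pte_host_va;

  if (pde32_lookup(pd, vaddr, &entry) != PDE32_ENTRY_PTE32) {
    return -1;  // not present, or a 4MB page (entry is already the full address)
  }

  // The page table address in the PDE is guest-physical; map it into our space
  if (guest_pa_to_host_va(info, entry, &pte_host_va) == -1) {
    return -1;
  }

  return pte32_lookup((pte32_t *)pte_host_va, vaddr, guest_paddr);
}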
/* Takes a virtual addr (addr) and returns the physical addr (entry) as defined
 * in the page table.
 */
int pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry) {
  pte32_t * pte_entry = &(pt[PTE32_INDEX(addr)]);

  if (!pte_entry->present) {
    *entry = 0;
    PrintDebug("Lookup at non-present page (index=%d)\n", PTE32_INDEX(addr));
    return -1;
  } else {
    *entry = PTE32_T_ADDR(*pte_entry) + PT32_PAGE_OFFSET(addr);
    return 0;
  }
}
pt_access_status_t can_access_pde32(pde32_t * pde, addr_t addr, pf_error_t access_type) {
  pde32_t * entry = &pde[PDE32_INDEX(addr)];

  if (entry->present == 0) {
    return PT_ENTRY_NOT_PRESENT;
  } else if ((entry->writable == 0) && (access_type.write == 1)) {
    return PT_WRITE_ERROR;
  } else if ((entry->user_page == 0) && (access_type.user == 1)) {
    // Tried to access a kernel page from user mode
    return PT_USER_ERROR;
  }

  return PT_ACCESS_OK;
}
pt_access_status_t can_access_pte32(pte32_t * pte, addr_t addr, pf_error_t access_type) {
  pte32_t * entry = &pte[PTE32_INDEX(addr)];

  if (entry->present == 0) {
    return PT_ENTRY_NOT_PRESENT;
  } else if ((entry->writable == 0) && (access_type.write == 1)) {
    return PT_WRITE_ERROR;
  } else if ((entry->user_page == 0) && (access_type.user == 1)) {
    // Tried to access a kernel page from user mode
    return PT_USER_ERROR;
  }

  return PT_ACCESS_OK;
}
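
/* Usage sketch (illustrative, not part of the original file): a shadow paging
 * fault handler can use these checks to decide whether a #PF belongs to the
 * guest (its own tables deny the access) or to the VMM (a missing shadow
 * mapping). The function name is hypothetical.
 */
static int example_guest_owns_fault(pde32_t * guest_pd, addr_t fault_addr,
                                    pf_error_t error_code) {
  pt_access_status_t status = can_access_pde32(guest_pd, fault_addr, error_code);

  // Any status other than PT_ACCESS_OK means the guest's own page directory
  // forbids this access, so the fault should be injected back into the guest.
  return (status != PT_ACCESS_OK);
}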
/* We generate a page table to correspond to a given memory layout,
 * pulling pages from the mem_list when necessary.
 * If there are any gaps in the layout, we add them as unmapped pages.
 */
pde32_t * create_passthrough_pde32_pts(struct guest_info * guest_info) {
  ullong_t current_page_addr = 0;
  int i, j;
  struct shadow_map * map = &(guest_info->mem_map);

  pde32_t * pde = V3_AllocPages(1);

  for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
    int pte_present = 0;
    pte32_t * pte = V3_AllocPages(1);

    for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
      struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);

      if (!region ||
          (region->host_type == HOST_REGION_HOOK) ||
          (region->host_type == HOST_REGION_UNALLOCATED) ||
          (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
          (region->host_type == HOST_REGION_REMOTE) ||
          (region->host_type == HOST_REGION_SWAPPED)) {
        // Not directly backed by host memory: leave the page unmapped
        pte[j].present = 0;
        pte[j].writable = 0;
        pte[j].user_page = 0;
        pte[j].write_through = 0;
        pte[j].cache_disable = 0;
        pte[j].global_page = 0;
        pte[j].page_base_addr = 0;
      } else {
        addr_t host_addr;
        pte[j].present = 1;
        pte[j].writable = 1;
        pte[j].user_page = 1;
        pte[j].write_through = 0;
        pte[j].cache_disable = 0;
        pte[j].global_page = 0;

        if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
          // Fatal: the region claims to be backed but has no host page
          return NULL;
        }

        pte[j].page_base_addr = host_addr >> 12;
        pte_present = 1;
      }

      current_page_addr += PAGE_SIZE;
    }

    if (pte_present == 0) {
      // No pages were mapped in this 4MB range: drop the empty page table
      os_hooks->free_page(pte);

      pde[i].present = 0;
      pde[i].writable = 0;
      pde[i].user_page = 0;
      pde[i].write_through = 0;
      pde[i].cache_disable = 0;
      pde[i].large_page = 0;
      pde[i].global_page = 0;
      pde[i].pt_base_addr = 0;
    } else {
      pde[i].present = 1;
      pde[i].writable = 1;
      pde[i].user_page = 1;
      pde[i].write_through = 0;
      pde[i].cache_disable = 0;
      pde[i].large_page = 0;
      pde[i].global_page = 0;
      pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(pte);
    }
  }

  return pde;
}
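
/* Usage sketch (illustrative, not part of the original file): the typical
 * lifecycle of the passthrough tables, paired with the debug dump and delete
 * routines defined in this file. The function name is hypothetical.
 */
static void example_passthrough_lifecycle(struct guest_info * info) {
  pde32_t * pd = create_passthrough_pde32_pts(info);

  if (pd == NULL) {
    PrintDebug("Could not build passthrough page tables\n");
    return;
  }

  PrintDebugPageTables(pd);      // walk and print every present mapping
  delete_page_tables_pde32(pd);  // frees the PTE pages and the directory itself
}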
void PrintPDE32(addr_t virtual_address, pde32_t * pde)
{
  PrintDebug("PDE %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, reserved=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
             (void *)virtual_address,
             (void *)(pde->pt_base_addr << PAGE_POWER),
             pde->present, pde->writable, pde->user_page,
             pde->write_through, pde->cache_disable, pde->accessed,
             pde->reserved, pde->large_page, pde->global_page, pde->vmm_info);
}
void PrintPTE32(addr_t virtual_address, pte32_t * pte)
{
  PrintDebug("PTE %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, dirty=%x, pteAttribute=%x, globalPage=%x, vmm_info=%x\n",
             (void *)virtual_address,
             (void *)(pte->page_base_addr << PAGE_POWER),
             pte->present, pte->writable, pte->user_page,
             pte->write_through, pte->cache_disable, pte->accessed,
             pte->dirty, pte->pte_attr, pte->global_page, pte->vmm_info);
}
void PrintPD32(pde32_t * pde)
{
  int i;

  PrintDebug("Page Directory at %p:\n", pde);
  for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
    if (pde[i].present) {
      PrintPDE32((addr_t)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), &(pde[i]));
    }
  }
}
void PrintPT32(addr_t starting_address, pte32_t * pte)
{
  int i;

  PrintDebug("Page Table at %p:\n", pte);
  for (i = 0; i < MAX_PTE32_ENTRIES; i++) {
    if (pte[i].present) {
      PrintPTE32(starting_address + (PAGE_SIZE * i), &(pte[i]));
    }
  }
}
void PrintDebugPageTables(pde32_t * pde)
{
  int i;

  PrintDebug("Dumping the pages starting with the pde page at %p\n", pde);

  for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
    if (pde[i].present) {
      PrintPDE32((addr_t)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), &(pde[i]));
      PrintPT32((addr_t)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), (pte32_t *)(pde[i].pt_base_addr << PAGE_POWER));
    }
  }
}