/* (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> */
/* (c) 2008, The V3VEE Project <http://www.v3vee.org> */

#include <palacios/vmm_paging.h>
#include <palacios/vmm.h>
#include <palacios/vm_guest_mem.h>

void delete_page_tables_pde32(pde32_t * pde) {
    int i, j;

    if (pde == NULL) {
        return;
    }

    for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
        if (pde[i].present) {
            pte32_t * pte = (pte32_t *)(pde[i].pt_base_addr << PAGE_POWER);

            // Free every data page this table maps, then the table itself
            for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
                if (pte[j].present) {
                    os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
                }
            }

            //PrintDebug("Deleting PTE %d (%x)\n", i, pte);
            os_hooks->free_page(pte);
        }
    }

    // PrintDebug("Deleting PDE (%x)\n", pde);
    os_hooks->free_page(pde);
}

int pt32_lookup(pde32_t * pd, addr_t vaddr, addr_t * paddr) {
    addr_t pde_entry;
    pde32_entry_type_t pde_entry_type;

    if (pd == 0) {
        return -1;
    }

    pde_entry_type = pde32_lookup(pd, vaddr, &pde_entry);

    if (pde_entry_type == PDE32_ENTRY_PTE32) {
        return pte32_lookup((pte32_t *)pde_entry, vaddr, paddr);
    } else if (pde_entry_type == PDE32_ENTRY_LARGE_PAGE) {
        // For a 4MB page, pde32_lookup already produced the full address
        *paddr = pde_entry;
        return 0;
    }

    return -1;
}

/* We can't do a full lookup here because we don't know what context the page
 * tables are in: the entry addresses could point into either guest physical
 * memory or host physical memory. Instead we just return the entry address,
 * plus a flag indicating whether it refers to a pte or to a large page, and
 * let the caller convert it for its own context.
 */
pde32_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry) {
    pde32_t * pde_entry = &(pd[PDE32_INDEX(addr)]);

    if (!pde_entry->present) {
        *entry = 0;
        return PDE32_ENTRY_NOT_PRESENT;
    }

    if (pde_entry->large_page) {
        pde32_4MB_t * large_pde = (pde32_4MB_t *)pde_entry;

        // A 4MB PDE maps the page directly, so we can finish the
        // translation here
        *entry = PDE32_4MB_T_ADDR(*large_pde);
        *entry += PD32_4MB_PAGE_OFFSET(addr);
        return PDE32_ENTRY_LARGE_PAGE;
    }

    // Otherwise the entry holds the (context-dependent) address of a pte32 table
    *entry = PDE32_T_ADDR(*pde_entry);
    return PDE32_ENTRY_PTE32;
}

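/* A sketch (compiled out) of the caller-side context handling the comment
 * above describes: when these page tables live in guest physical memory, the
 * returned entry must be converted to a host virtual address before the walk
 * can continue. Assumes the guest_pa_to_host_va() helper from
 * vm_guest_mem.h; illustration only, not part of the driver path, and it
 * ignores the large-page case for brevity.
 */
#if 0
static int example_guest_ctx_walk(struct guest_info * info, pde32_t * guest_pd,
                                  addr_t vaddr, addr_t * paddr) {
    addr_t pde_entry;
    pte32_t * host_pte;

    if (pde32_lookup(guest_pd, vaddr, &pde_entry) != PDE32_ENTRY_PTE32) {
        return -1;
    }

    // pde_entry is a guest *physical* address: not directly dereferenceable
    if (guest_pa_to_host_va(info, pde_entry, (addr_t *)&host_pte) == -1) {
        return -1;
    }

    return pte32_lookup(host_pte, vaddr, paddr);
}
#endif
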
/* Takes a virtual addr (addr) and returns the physical addr (entry)
 * as defined in the page table
 */
int pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry) {
    pte32_t * pte_entry = &(pt[PTE32_INDEX(addr)]);

    if (!pte_entry->present) {
        *entry = 0;
        PrintDebug("Lookup at non-present page (index=%d)\n", PTE32_INDEX(addr));
        return -1;
    }

    *entry = PTE32_T_ADDR(*pte_entry) + PT32_PAGE_OFFSET(addr);
    return 0;
}

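/* A worked illustration (compiled out) of the index/offset split the lookups
 * above rely on, using the standard IA-32 4KB paging layout: a 10-bit
 * directory index, a 10-bit table index, and a 12-bit page offset. The shift
 * and mask values mirror what the PDE32_INDEX/PTE32_INDEX/PT32_PAGE_OFFSET
 * macros are expected to compute.
 */
#if 0
static void example_decompose(addr_t addr) {
    // For addr = 0x00403025:
    uint_t pd_index = addr >> 22;            // 1    (PDE32_INDEX)
    uint_t pt_index = (addr >> 12) & 0x3ff;  // 3    (PTE32_INDEX)
    uint_t offset   = addr & 0xfff;          // 0x25 (PT32_PAGE_OFFSET)

    // The translated address is (pte[pt_index].page_base_addr << 12) + offset
    PrintDebug("pd=%u pt=%u off=%x\n", pd_index, pt_index, offset);
}
#endif
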
pt_access_status_t can_access_pde32(pde32_t * pde, addr_t addr, pf_error_t access_type) {
    pde32_t * entry = &pde[PDE32_INDEX(addr)];

    if (entry->present == 0) {
        return PT_ENTRY_NOT_PRESENT;
    } else if ((entry->writable == 0) && (access_type.write == 1)) {
        return PT_WRITE_ERROR;
    } else if ((entry->user_page == 0) && (access_type.user == 1)) {
        // Privilege violation: user-mode access to a supervisor page
        return PT_USER_ERROR;
    }

    return PT_ACCESS_OK;
}

pt_access_status_t can_access_pte32(pte32_t * pte, addr_t addr, pf_error_t access_type) {
    pte32_t * entry = &pte[PTE32_INDEX(addr)];

    if (entry->present == 0) {
        return PT_ENTRY_NOT_PRESENT;
    } else if ((entry->writable == 0) && (access_type.write == 1)) {
        return PT_WRITE_ERROR;
    } else if ((entry->user_page == 0) && (access_type.user == 1)) {
        // Privilege violation: user-mode access to a supervisor page
        return PT_USER_ERROR;
    }

    return PT_ACCESS_OK;
}

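/* A sketch (compiled out) of how a shadow page-fault handler might chain
 * these checks against a guest's page tables, reusing the hardware-provided
 * pf_error_t. The guest_pa_to_host_va() conversion reflects the same context
 * issue described above pde32_lookup; illustration only, not the actual
 * fault-handling path.
 */
#if 0
static pt_access_status_t example_check_guest_access(struct guest_info * info,
                                                     pde32_t * guest_pd,
                                                     addr_t fault_addr,
                                                     pf_error_t err) {
    addr_t pde_entry;
    pte32_t * host_pte;
    pt_access_status_t status = can_access_pde32(guest_pd, fault_addr, err);

    if (status != PT_ACCESS_OK) {
        return status;  // the guest's own PDE already forbids this access
    }

    if (pde32_lookup(guest_pd, fault_addr, &pde_entry) != PDE32_ENTRY_PTE32) {
        return PT_ACCESS_OK;  // 4MB page: the PDE check covers everything
    }

    if (guest_pa_to_host_va(info, pde_entry, (addr_t *)&host_pte) == -1) {
        return PT_ENTRY_NOT_PRESENT;
    }

    return can_access_pte32(host_pte, fault_addr, err);
}
#endif
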
/* We generate a page table to correspond to a given memory layout,
 * pulling pages from the mem_list when necessary.
 * If there are any gaps in the layout, we add them as unmapped pages.
 */
pde32_t * create_passthrough_pde32_pts(struct guest_info * guest_info) {
    ullong_t current_page_addr = 0;
    int i, j;
    struct shadow_map * map = &(guest_info->mem_map);

    pde32_t * pde = V3_AllocPages(1);

    for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
        int pte_present = 0;
        pte32_t * pte = V3_AllocPages(1);

        for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
            struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);

            // These fields are the same whether or not the page is mapped
            pte[j].write_through = 0;
            pte[j].cache_disable = 0;
            pte[j].accessed = 0;
            pte[j].dirty = 0;
            pte[j].pte_attr = 0;
            pte[j].global_page = 0;
            pte[j].vmm_info = 0;

            if (!region ||
                (region->host_type == HOST_REGION_HOOK) ||
                (region->host_type == HOST_REGION_UNALLOCATED) ||
                (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
                (region->host_type == HOST_REGION_REMOTE) ||
                (region->host_type == HOST_REGION_SWAPPED)) {
                // No backing host memory here: leave the page unmapped
                pte[j].present = 0;
                pte[j].writable = 0;
                pte[j].user_page = 0;
                pte[j].page_base_addr = 0;
            } else {
                addr_t host_addr;

                if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
                    // The region exists but has no host translation: fatal
                    return NULL;
                }

                pte[j].present = 1;
                pte[j].writable = 1;
                pte[j].user_page = 1;
                pte[j].page_base_addr = host_addr >> 12;

                pte_present = 1;
            }

            current_page_addr += PAGE_SIZE;
        }
        // Fields common to both present and absent directory entries
        pde[i].write_through = 0;
        pde[i].cache_disable = 0;
        pde[i].accessed = 0;
        pde[i].reserved = 0;
        pde[i].large_page = 0;
        pde[i].global_page = 0;
        pde[i].vmm_info = 0;

        if (pte_present == 0) {
            // Nothing was mapped in this 4MB region: drop the empty table
            os_hooks->free_page(pte);

            pde[i].present = 0;
            pde[i].writable = 0;
            pde[i].user_page = 0;
            pde[i].pt_base_addr = 0;
        } else {
            pde[i].present = 1;
            pde[i].writable = 1;
            pde[i].user_page = 1;
            pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(pte);
        }
    }

    return pde;
}

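/* A sketch (compiled out) of how the passthrough directory might be
 * installed once built. The shadow_cr3 field is hypothetical here; the real
 * guest state layout lives elsewhere, and CR3 ultimately wants the
 * page-aligned base of the directory (bits 12-31).
 */
#if 0
static int example_setup_passthrough(struct guest_info * info) {
    pde32_t * pd = create_passthrough_pde32_pts(info);

    if (pd == NULL) {
        return -1;  // some mapped region had no host translation
    }

    info->shadow_cr3 = (addr_t)pd;  // hypothetical field, for illustration
    return 0;
}
#endif
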
void PrintPDE32(addr_t virtual_address, pde32_t * pde)
{
    PrintDebug("PDE %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, reserved=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
               (void *)virtual_address,
               (void *)(pde->pt_base_addr << PAGE_POWER),
               pde->present, pde->writable, pde->user_page,
               pde->write_through, pde->cache_disable, pde->accessed,
               pde->reserved, pde->large_page, pde->global_page,
               pde->vmm_info);
}

void PrintPTE32(addr_t virtual_address, pte32_t * pte)
{
    PrintDebug("PTE %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, dirty=%x, pteAttribute=%x, globalPage=%x, vmm_info=%x\n",
               (void *)virtual_address,
               (void *)(pte->page_base_addr << PAGE_POWER),
               pte->present, pte->writable, pte->user_page,
               pte->write_through, pte->cache_disable, pte->accessed,
               pte->dirty, pte->pte_attr, pte->global_page,
               pte->vmm_info);
}

void PrintPD32(pde32_t * pde)
{
    int i;

    PrintDebug("Page Directory at %p:\n", (void *)pde);
    for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
        if (pde[i].present) {
            PrintPDE32((addr_t)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), &(pde[i]));
        }
    }
}

void PrintPT32(addr_t starting_address, pte32_t * pte)
{
    int i;

    PrintDebug("Page Table at %p:\n", (void *)pte);
    for (i = 0; i < MAX_PTE32_ENTRIES; i++) {
        if (pte[i].present) {
            PrintPTE32(starting_address + (PAGE_SIZE * i), &(pte[i]));
        }
    }
}

void PrintDebugPageTables(pde32_t * pde)
{
    int i;

    PrintDebug("Dumping the pages starting with the pde page at %p\n", (void *)pde);

    for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
        if (pde[i].present) {
            PrintPDE32((addr_t)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), &(pde[i]));
            PrintPT32((addr_t)(PAGE_SIZE * MAX_PTE32_ENTRIES * i),
                      (pte32_t *)(pde[i].pt_base_addr << PAGE_POWER));
        }
    }
}