2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm_paging.h>
22 #include <palacios/vmm.h>
24 #include <palacios/vm_guest_mem.h>
/* Tear down a 32-bit (non-PAE) two-level page table: walk each page
 * directory entry's page table, free the physical page behind every
 * present PTE, then free the page-directory page itself.
 * NOTE(review): this extract elides several lines -- the i/j
 * declarations, the pde[i].present guard, the freeing of each page-table
 * page, and closing braces are not visible here.
 */
29 void delete_page_tables_pde32(pde32_t * pde) {
36     for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
38 // We double cast, first to an addr_t to handle 64 bit issues, then to the pointer
       // Recover a usable pointer to the page table from the frame number
       // stored in the PDE.
39 pte32_t * pte = (pte32_t *)((addr_t)(pde[i].pt_base_addr << PAGE_POWER));
42 for (j = 0; (j < MAX_PTE32_ENTRIES); j++) {
43 if ((pte[j].present)) {
         // Free the backing page of each present PTE.
         // NOTE(review): uses the os_hooks->free_page interface here but
         // V3_FreePage below for the directory page -- mixed allocation
         // APIs; confirm which is the current convention.
44 os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
48 //PrintDebug("Deleting PTE %d (%x)\n", i, pte);
53 // PrintDebug("Deleting PDE (%x)\n", pde);
   // Finally release the page-directory page (V3_PAddr converts the VA
   // back to the physical address the free routine expects).
54 V3_FreePage(V3_PAddr(pde));
/* Resolve guest virtual address vaddr to a physical address (*paddr)
 * via the 32-bit page table rooted at pd.
 * NOTE(review): the declaration of pde_entry, the body of the
 * large-page branch, and the failure return are on elided lines.
 */
61 int pt32_lookup(pde32_t * pd, addr_t vaddr, addr_t * paddr) {
63   pde32_entry_type_t pde_entry_type;
   // Classify the PDE covering vaddr: not present, a 4MB large page, or
   // a pointer to a second-level page table (see pde32_lookup below).
69   pde_entry_type = pde32_lookup(pd, vaddr, &pde_entry);
71   if (pde_entry_type == PDE32_ENTRY_PTE32) {
     // PDE points at a page table: finish the walk at the PTE level.
72     return pte32_lookup((pte32_t *)pde_entry, vaddr, paddr);
73   } else if (pde_entry_type == PDE32_ENTRY_LARGE_PAGE) {
83 /* We can't do a full lookup because we don't know what context the page tables are in...
84 * The entry addresses could be pointing to either guest physical memory or host physical memory
85 * Instead we just return the entry address, and a flag to show if it points to a pte or a large page...
/* Classify the PDE covering addr and return the address it maps to via
 * *entry (context-dependent: may be guest- or host-physical -- see the
 * comment above).  Returns the entry type so the caller knows whether
 * *entry is a page-table base or a translated large-page address.
 */
87 pde32_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry) {
   // PDE slot covering this address.
88 pde32_t * pde_entry = &(pd[PDE32_INDEX(addr)]);
90 if (!pde_entry->present) {
     // NOTE(review): a closing brace (and possibly "*entry = 0;") sits on
     // lines elided from this extract.
92 return PDE32_ENTRY_NOT_PRESENT;
95 if (pde_entry->large_page) {
       // 4MB page: reinterpret the entry and return the large-page base
       // plus addr's offset within the 4MB page.
96 pde32_4MB_t * large_pde = (pde32_4MB_t *)pde_entry;
98 *entry = PDE32_4MB_T_ADDR(*large_pde);
99 *entry += PD32_4MB_PAGE_OFFSET(addr);
100 return PDE32_ENTRY_LARGE_PAGE;
     // Otherwise the entry points at a second-level page table; hand back
     // its base address.  NOTE(review): the "} else {" separating these
     // branches is on an elided line.
102 *entry = PDE32_T_ADDR(*pde_entry);
103 return PDE32_ENTRY_PTE32;
   // NOTE(review): appears unreachable if the branches above are
   // exhaustive -- presumably a defensive fallthrough; confirm.
106 return PDE32_ENTRY_NOT_PRESENT;
111 /* Takes a virtual addr (addr) and returns the physical addr (entry) as defined in the page table
/* Final level of the walk: resolve addr through page table pt, storing
 * the physical address in *entry.  Returns 0/-1 style per pt32_lookup's
 * use of it.  NOTE(review): both return statements and the else
 * structure are on elided lines.
 */
113 int pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry) {
   // PTE slot covering this address within pt.
114 pte32_t * pte_entry = &(pt[PTE32_INDEX(addr)]);
116 if (!pte_entry->present) {
     // Not mapped: log the miss (error return presumably follows on an
     // elided line).
118 PrintDebug("Lookup at non present page (index=%d)\n", PTE32_INDEX(addr));
   // Present: physical frame base plus addr's offset within the page.
121 *entry = PTE32_T_ADDR(*pte_entry) + PT32_PAGE_OFFSET(addr);
/* Check whether an access described by access_type (page-fault error
 * code bits) is permitted by the page directory entry covering addr.
 * NOTE(review): the success return for the all-checks-pass case is on
 * lines elided from this extract.
 */
130 pt_access_status_t can_access_pde32(pde32_t * pde, addr_t addr, pf_error_t access_type) {
131 pde32_t * entry = &pde[PDE32_INDEX(addr)];
133 if (entry->present == 0) {
134 return PT_ENTRY_NOT_PRESENT;
135 } else if ((entry->writable == 0) && (access_type.write == 1)) {
     // Write attempted through a read-only mapping.
136 return PT_WRITE_ERROR;
137 } else if ((entry->user_page == 0) && (access_type.user == 1)) {
     // User-mode access to a supervisor-only mapping.
139 return PT_USER_ERROR;
/* Same permission check as can_access_pde32, but at the PTE level.
 * NOTE(review): the success return for the all-checks-pass case is on
 * lines elided from this extract.
 */
146 pt_access_status_t can_access_pte32(pte32_t * pte, addr_t addr, pf_error_t access_type) {
147 pte32_t * entry = &pte[PTE32_INDEX(addr)];
149 if (entry->present == 0) {
150 return PT_ENTRY_NOT_PRESENT;
151 } else if ((entry->writable == 0) && (access_type.write == 1)) {
     // Write attempted through a read-only mapping.
152 return PT_WRITE_ERROR;
153 } else if ((entry->user_page == 0) && (access_type.user == 1)) {
     // User-mode access to a supervisor-only mapping.
155 return PT_USER_ERROR;
164 /* We generate a page table to correspond to a given memory layout
165 * pulling pages from the mem_list when necessary
166 * If there are any gaps in the layout, we add them as unmapped pages
/* Build a passthrough 32-bit page-table hierarchy covering the guest's
 * shadow memory map: one PTE per guest page, pointing at the host
 * physical frame backing it; gaps and non-directly-mappable regions are
 * left not present.
 * NOTE(review): many lines are elided from this extract -- declarations
 * (i, j, pte_present, host_addr), the head of the region-type condition,
 * the pte[j].present / accessed / dirty assignments, the
 * guest_pa_to_host_pa failure branch, pde_present bookkeeping, and the
 * final return.  Comments below describe only what is visible.
 */
168 pde32_t * create_passthrough_pde32_pts(struct guest_info * guest_info) {
   // Guest-physical address of the page currently being mapped.
169 ullong_t current_page_addr = 0;
171 struct shadow_map * map = &(guest_info->mem_map);
   // Fresh page for the page directory; V3_VAddr gives us a writable VA
   // for the page V3_AllocPages hands back (presumably a PA -- see the
   // matching V3_PAddr conversions below).
173 pde32_t * pde = V3_VAddr(V3_AllocPages(1));
175 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
     // Tentatively allocate a page table for this directory slot; it is
     // freed below if no PTE in it ends up present.
177 pte32_t * pte = V3_VAddr(V3_AllocPages(1));
180 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
       // Which shadow region (if any) backs this guest page?
181 struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
       // Region types that must NOT be mapped straight through (hooked,
       // unbacked, device, remote, or swapped-out memory).
       // NOTE(review): the start of this condition -- presumably
       // including a NULL-region check -- is on an elided line.
184 (region->host_type == HOST_REGION_HOOK) ||
185 (region->host_type == HOST_REGION_UNALLOCATED) ||
186 (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
187 (region->host_type == HOST_REGION_REMOTE) ||
188 (region->host_type == HOST_REGION_SWAPPED)) {
         // Leave the entry inert: supervisor-only, no frame address.
191 pte[j].user_page = 0;
192 pte[j].write_through = 0;
193 pte[j].cache_disable = 0;
197 pte[j].global_page = 0;
199 pte[j].page_base_addr = 0;
       // Otherwise: directly-mappable physical memory -- user-accessible,
       // normal caching.
204 pte[j].user_page = 1;
205 pte[j].write_through = 0;
206 pte[j].cache_disable = 0;
210 pte[j].global_page = 0;
       // Translate the guest-physical page to its host-physical frame;
       // the failure branch body is on elided lines.
213 if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
       // Install the host frame number (4KB frames, hence >> 12).
219 pte[j].page_base_addr = host_addr >> 12;
       // Advance to the next guest page regardless of mapping outcome.
224 current_page_addr += PAGE_SIZE;
     // No PTE in this table became present: release the unused page and
     // mark the PDE not present (supervisor-only, no table base).
227 if (pte_present == 0) {
228 V3_FreePage(V3_PAddr(pte));
232 pde[i].user_page = 0;
233 pde[i].write_through = 0;
234 pde[i].cache_disable = 0;
237 pde[i].large_page = 0;
238 pde[i].global_page = 0;
240 pde[i].pt_base_addr = 0;
       // At least one PTE is present: point the PDE at the page table's
       // physical frame.
244 pde[i].user_page = 1;
245 pde[i].write_through = 0;
246 pde[i].cache_disable = 0;
249 pde[i].large_page = 0;
250 pde[i].global_page = 0;
252 pde[i].pt_base_addr = PAGE_ALIGNED_ADDR((addr_t)V3_PAddr(pte));
/* Dump one page directory entry: the virtual address it covers, the
 * page-table physical address it points at, and its flag bits.
 * NOTE(review): the remaining PrintDebug arguments (the individual flag
 * fields) are on lines elided from this extract.
 */
265 void PrintPDE32(addr_t virtual_address, pde32_t * pde)
267 PrintDebug("PDE %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, reserved=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
268 (void *)virtual_address,
              // Frame number shifted back into a physical address.
269 (void *)(addr_t) (pde->pt_base_addr << PAGE_POWER),
/* Dump one page table entry: the virtual address it maps, the physical
 * page it points at, and its flag bits.
 * NOTE(review): the remaining PrintDebug arguments (the individual flag
 * fields) are on lines elided from this extract.
 */
282 void PrintPTE32(addr_t virtual_address, pte32_t * pte)
284 PrintDebug("PTE %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, dirty=%x, pteAttribute=%x, globalPage=%x, vmm_info=%x\n",
285 (void *)virtual_address,
              // Frame number shifted back into a physical address.
286 (void*)(addr_t)(pte->page_base_addr << PAGE_POWER),
/* Print every present entry in a 32-bit page directory.
 * NOTE(review): the opening brace, loop-variable declaration, and
 * closing braces are on lines elided from this extract.
 */
301 void PrintPD32(pde32_t * pde)
305 PrintDebug("Page Directory at %p:\n", pde);
306 for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
307 if ( pde[i].present) {
       // Entry i covers PAGE_SIZE * MAX_PTE32_ENTRIES (4MB) of VA space.
308 PrintPDE32((addr_t)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), &(pde[i]));
/* Print every present entry of one page table, labeling each with the
 * virtual address it maps (starting_address + page offset).
 * NOTE(review): the opening brace, loop-variable declaration, and
 * closing braces are on lines elided from this extract.
 */
313 void PrintPT32(addr_t starting_address, pte32_t * pte)
317 PrintDebug("Page Table at %p:\n", pte);
318 for (i = 0; (i < MAX_PTE32_ENTRIES) ; i++) {
319 if (pte[i].present) {
320 PrintPTE32(starting_address + (PAGE_SIZE * i), &(pte[i]));
/* Walk a page directory and dump both the present PDEs and the full
 * page tables they reference.
 * NOTE(review): this function continues past the end of this extract;
 * its closing lines are not visible.
 */
329 void PrintDebugPageTables(pde32_t * pde)
333 PrintDebug("Dumping the pages starting with the pde page at %p\n", pde);
335 for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
336 if (pde[i].present) {
337 PrintPDE32((addr_t)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), &(pde[i]));
       // pt_base_addr is a physical frame number: shift it back to a PA
       // and map it (V3_VAddr) before walking the table's entries.
338 PrintPT32((addr_t)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), (pte32_t *)V3_VAddr((void *)(addr_t)(pde[i].pt_base_addr << PAGE_POWER)));