#include <palacios/vmm_paging.h>

#include <palacios/vmm.h>

#include <palacios/vm_guest_mem.h>


extern struct vmm_os_hooks * os_hooks;
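
/* delete_page_tables_pde32 frees every present page table in a 32 bit page
 * directory, along with every page those tables map, and finally the
 * directory page itself. It assumes the entries hold addresses that
 * os_hooks->free_page can accept directly, such as the tables built by
 * create_passthrough_pde32_pts below.
 */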
void delete_page_tables_pde32(pde32_t * pde) {
  int i, j;

  if (pde == NULL) {
    return;
  }

  for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
    // Only walk page tables that are actually present
    if (pde[i].present) {
      pte32_t * pte = (pte32_t *)(pde[i].pt_base_addr << PAGE_POWER);

      for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
        if (pte[j].present) {
          os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
        }
      }

      //PrintDebug("Deleting PTE %d (%x)\n", i, pte);
      os_hooks->free_page(pte);
    }
  }

  // PrintDebug("Deleting PDE (%x)\n", pde);
  os_hooks->free_page(pde);
}

/* We can't do a full lookup because we don't know what context the page tables are in...
 * The entry addresses could be pointing to either guest physical memory or host physical memory.
 * Instead we just return the entry address, and a flag to show if it points to a pte or a large page...
 */
pde32_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry) {
  pde32_t * pde_entry = &(pd[PDE32_INDEX(addr)]);

  if (!pde_entry->present) {
    *entry = 0;
    return PDE32_ENTRY_NOT_PRESENT;
  }

  if (pde_entry->large_page) {
    // The entry maps a large page directly, so include the offset
    *entry = PAGE_ADDR(pde_entry->pt_base_addr) + PAGE_OFFSET(addr);
    return PDE32_ENTRY_LARGE_PAGE;
  }

  // Otherwise the entry points to a second level page table
  *entry = PDE32_T_ADDR(*pde_entry);
  return PDE32_ENTRY_PTE32;
}

/* Takes a virtual addr (addr) and returns the physical addr (entry) as defined in the page table.
 * Returns 0 on success, -1 if the page is not present.
 */
int pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry) {
  pte32_t * pte_entry = &(pt[PTE32_INDEX(addr)]);

  if (!pte_entry->present) {
    *entry = 0;
    PrintDebug("Lookup at non present page (index=%d)\n", PTE32_INDEX(addr));
    return -1;
  }

  *entry = PTE32_T_ADDR(*pte_entry) + PT32_PAGE_OFFSET(addr);
  return 0;
}

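
/* A minimal sketch (not part of the original interface) of how the two
 * lookups compose into a full walk. It is only valid when the directory and
 * its page tables are directly addressable by the VMM, which, per the caveat
 * above pde32_lookup, the general case cannot assume. The name
 * pt32_lookup_example is hypothetical.
 */
static int pt32_lookup_example(pde32_t * pd, addr_t addr, addr_t * paddr) {
  addr_t entry;

  switch (pde32_lookup(pd, addr, &entry)) {
  case PDE32_ENTRY_NOT_PRESENT:
    return -1;
  case PDE32_ENTRY_LARGE_PAGE:
    // entry already includes the page offset
    *paddr = entry;
    return 0;
  case PDE32_ENTRY_PTE32:
    // entry is the address of the second level page table
    return pte32_lookup((pte32_t *)entry, addr, paddr);
  }

  return -1;
}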
pt_access_status_t can_access_pde32(pde32_t * pde, addr_t addr, pf_error_t access_type) {
  pde32_t * entry = &pde[PDE32_INDEX(addr)];

  if (entry->present == 0) {
    return PT_ENTRY_NOT_PRESENT;
  } else if ((entry->writable == 0) && (access_type.write == 1)) {
    return PT_WRITE_ERROR;
  } else if ((entry->user_page == 0) && (access_type.user == 1)) {
    return PT_USER_ERROR;
  }

  return PT_ACCESS_OK;
}

pt_access_status_t can_access_pte32(pte32_t * pte, addr_t addr, pf_error_t access_type) {
  pte32_t * entry = &pte[PTE32_INDEX(addr)];

  if (entry->present == 0) {
    return PT_ENTRY_NOT_PRESENT;
  } else if ((entry->writable == 0) && (access_type.write == 1)) {
    return PT_WRITE_ERROR;
  } else if ((entry->user_page == 0) && (access_type.user == 1)) {
    return PT_USER_ERROR;
  }

  return PT_ACCESS_OK;
}

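
/* Illustrative only: a page fault handler might combine the checks above
 * with the lookups, roughly like this (guest_pd, fault_addr, and error_code
 * are hypothetical names for values taken from the fault exit):
 *
 *   pt_access_status_t status = can_access_pde32(guest_pd, fault_addr, error_code);
 *   if (status == PT_ACCESS_OK) {
 *     addr_t pt_addr;
 *     if (pde32_lookup(guest_pd, fault_addr, &pt_addr) == PDE32_ENTRY_PTE32) {
 *       status = can_access_pte32((pte32_t *)pt_addr, fault_addr, error_code);
 *     }
 *   }
 */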
/* We generate a page table to correspond to a given memory layout,
 * pulling pages from the mem_list when necessary.
 * If there are any gaps in the layout, we add them as unmapped pages.
 */
pde32_t * create_passthrough_pde32_pts(struct guest_info * guest_info) {
  ullong_t current_page_addr = 0;
  int i, j;
  struct shadow_map * map = &(guest_info->mem_map);
  pde32_t * pde = os_hooks->allocate_pages(1);

  for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
    int pte_present = 0;
    pte32_t * pte = os_hooks->allocate_pages(1);

    for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
      struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);

      if (!region ||
          (region->host_type == HOST_REGION_HOOK) ||
          (region->host_type == HOST_REGION_UNALLOCATED) ||
          (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
          (region->host_type == HOST_REGION_REMOTE) ||
          (region->host_type == HOST_REGION_SWAPPED)) {
        // No directly mapped host memory here, so leave the page unmapped
        pte[j].present = 0;
        pte[j].writable = 0;
        pte[j].user_page = 0;
        pte[j].write_through = 0;
        pte[j].cache_disable = 0;
        pte[j].accessed = 0;
        pte[j].dirty = 0;
        pte[j].pte_attr = 0;
        pte[j].global_page = 0;
        pte[j].vmm_info = 0;
        pte[j].page_base_addr = 0;
      } else {
        addr_t host_addr;
        pte[j].present = 1;
        pte[j].writable = 1;
        pte[j].user_page = 1;
        pte[j].write_through = 0;
        pte[j].cache_disable = 0;
        pte[j].accessed = 0;
        pte[j].dirty = 0;
        pte[j].pte_attr = 0;
        pte[j].global_page = 0;
        pte[j].vmm_info = 0;

        if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
          // The region claims to be mapped but has no host translation
          return NULL;
        }

        pte[j].page_base_addr = host_addr >> 12;
        pte_present = 1;
      }

      current_page_addr += PAGE_SIZE;
    }

    if (pte_present == 0) {
      // Nothing in this 4MB range was mapped, so drop the empty page table
      os_hooks->free_page(pte);
      pde[i].present = 0;
      pde[i].writable = 0;
      pde[i].user_page = 0;
      pde[i].write_through = 0;
      pde[i].cache_disable = 0;
      pde[i].accessed = 0;
      pde[i].reserved = 0;
      pde[i].large_page = 0;
      pde[i].global_page = 0;
      pde[i].vmm_info = 0;
      pde[i].pt_base_addr = 0;
    } else {
      pde[i].present = 1;
      pde[i].writable = 1;
      pde[i].user_page = 1;
      pde[i].write_through = 0;
      pde[i].cache_disable = 0;
      pde[i].accessed = 0;
      pde[i].reserved = 0;
      pde[i].large_page = 0;
      pde[i].global_page = 0;
      pde[i].vmm_info = 0;
      pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(pte);
    }
  }

  return pde;
}

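
/* Usage sketch: the directory returned by create_passthrough_pde32_pts is
 * what the shadow paging code installs as the initial paging context, e.g.
 * by placing PAGE_ALIGNED_ADDR(pde) into the page directory base field of
 * the shadow CR3 before the guest runs. The exact wiring is outside this
 * file.
 */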
void PrintPDE32(addr_t virtual_address, pde32_t * pde)
{
  PrintDebug("PDE %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, reserved=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
             (void *)virtual_address, (void *)(pde->pt_base_addr << PAGE_POWER),
             pde->present, pde->writable, pde->user_page, pde->write_through,
             pde->cache_disable, pde->accessed, pde->reserved, pde->large_page,
             pde->global_page, pde->vmm_info);
}

void PrintPTE32(addr_t virtual_address, pte32_t * pte)
{
  PrintDebug("PTE %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, dirty=%x, pteAttribute=%x, globalPage=%x, vmm_info=%x\n",
             (void *)virtual_address, (void *)(pte->page_base_addr << PAGE_POWER),
             pte->present, pte->writable, pte->user_page, pte->write_through,
             pte->cache_disable, pte->accessed, pte->dirty, pte->pte_attr,
             pte->global_page, pte->vmm_info);
}

void PrintPD32(pde32_t * pde)
{
  int i;

  PrintDebug("Page Directory at %p:\n", pde);
  for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
    if (pde[i].present) {
      PrintPDE32((addr_t)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), &(pde[i]));
    }
  }
}

void PrintPT32(addr_t starting_address, pte32_t * pte)
{
  int i;

  PrintDebug("Page Table at %p:\n", pte);
  for (i = 0; i < MAX_PTE32_ENTRIES; i++) {
    if (pte[i].present) {
      PrintPTE32(starting_address + (PAGE_SIZE * i), &(pte[i]));
    }
  }
}

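
/* PrintDebugPageTables dumps an entire hierarchy by following the
 * pt_base_addr pointers. Like delete_page_tables_pde32, it treats those as
 * directly addressable, so it is only safe on tables the VMM built itself.
 */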
void PrintDebugPageTables(pde32_t * pde)
{
  int i;

  PrintDebug("Dumping the pages starting with the pde page at %p\n", pde);

  for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
    if (pde[i].present) {
      PrintPDE32((addr_t)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), &(pde[i]));
      PrintPT32((addr_t)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), (pte32_t *)(pde[i].pt_base_addr << PAGE_POWER));
    }
  }
}