1 #include <palacios/vmm_paging.h>
3 #include <palacios/vmm.h>
5 #include <palacios/vm_guest_mem.h>
8 extern struct vmm_os_hooks * os_hooks;
/*
 * delete_page_tables_pde32 -- tear down an entire 32-bit page hierarchy.
 * Walks every page-directory entry, frees each data page referenced by a
 * present PTE, frees each page-table page, and finally frees the page
 * directory page itself.
 * NOTE(review): several interior lines are elided in this view (loop index
 * declarations and, presumably, a pde[i].present guard before dereferencing
 * pt_base_addr) -- confirm against the full source.
 */
10 void delete_page_tables_pde32(pde32_t * pde) {
17 for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
/* Recover the page table's address from the frame number stored in the PDE. */
19 pte32_t * pte = (pte32_t *)(pde[i].pt_base_addr << PAGE_POWER);
21 for (j = 0; (j < MAX_PTE32_ENTRIES); j++) {
22 if ((pte[j].present)) {
/* Present mapping: release the backing physical data page. */
23 os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
/* Release the page-table page itself once its entries are freed. */
27 os_hooks->free_page(pte);
/* Finally release the page-directory page. */
31 os_hooks->free_page(pde);
40 /* We can't do a full lookup because we don't know what context the page tables are in...
41 * The entry addresses could be pointing to either guest physical memory or host physical memory
42 * Instead we just return the entry address, and a flag to show if it points to a pte or a large page...
 */
/*
 * pde32_lookup -- resolve the page-directory entry that covers `addr`.
 * On success, stores the entry's target address in *entry and returns a
 * flag saying whether it points at a PTE page or is a 4MB large page
 * (for a large page, the in-page offset of `addr` is folded into *entry).
 * Returns PDE32_ENTRY_NOT_PRESENT when the slot is unmapped.
 */
44 pde32_entry_type_t pde32_lookup(pde32_t * pde, addr_t addr, addr_t * entry) {
45 pde32_t * pde_entry = &(pde[PDE32_INDEX(addr)]);
47 if (!pde_entry->present) {
/* Nothing mapped at this directory slot. */
49 return PDE32_ENTRY_NOT_PRESENT;
/* Convert the stored frame number back into an address.
   NOTE(review): assumes PAGE_ADDR() shifts by the page size -- confirm
   against vmm_paging.h. */
51 *entry = PAGE_ADDR(pde_entry->pt_base_addr);
53 if (pde_entry->large_page) {
/* 4MB page: add the offset so *entry is the complete target address. */
54 *entry += PAGE_OFFSET(addr);
55 return PDE32_ENTRY_LARGE_PAGE;
/* Present, not a large page: *entry is the address of a PTE page. */
57 return PDE32_ENTRY_PTE32;
/* Fallback return; appears unreachable given the branches above. */
60 return PDE32_ENTRY_NOT_PRESENT;
65 /* Takes a virtual addr (addr) and returns the physical addr (entry) as defined in the page table
 */
/*
 * pte32_lookup -- resolve `addr` through one page-table page.
 * On success, writes the full physical address (page base + in-page
 * offset) into *entry.  The not-present branch's return value is on an
 * elided line -- presumably an error code; confirm in the full source.
 */
67 int pte32_lookup(pte32_t * pte, addr_t addr, addr_t * entry) {
68 pte32_t * pte_entry = &(pte[PTE32_INDEX(addr)]);
70 if (!pte_entry->present) {
/* Present mapping: compose the physical address from frame + offset. */
74 *entry = PAGE_ADDR(pte_entry->page_base_addr);
75 *entry += PAGE_OFFSET(addr);
/*
 * can_access_pde32 -- validate a faulting access against a page-directory
 * entry.  Checks, in order: presence, write permission, and user/supervisor
 * privilege.  Visible paths return PT_ENTRY_NOT_PRESENT or PT_WRITE_ERROR;
 * the user-privilege branch body and the success return are on elided
 * lines (presumably PT_USER_ERROR / PT_ACCESS_OK -- confirm in full source).
 */
84 pt_access_status_t can_access_pde32(pde32_t * pde, addr_t addr, pf_error_t access_type) {
85 pde32_t * entry = &pde[PDE32_INDEX(addr)];
87 if (entry->present == 0) {
88 return PT_ENTRY_NOT_PRESENT;
/* Write fault against a read-only entry. */
89 } else if ((entry->writable == 0) && (access_type.write == 1)) {
90 return PT_WRITE_ERROR;
/* User-mode access to a supervisor-only entry. */
91 } else if ((entry->user_page == 0) && (access_type.user == 1)) {
/*
 * can_access_pte32 -- validate a faulting access against a page-table
 * entry.  Mirrors can_access_pde32: checks presence, then write
 * permission, then user/supervisor privilege.  The success return is on
 * an elided line (presumably PT_ACCESS_OK -- confirm in full source).
 */
100 pt_access_status_t can_access_pte32(pte32_t * pte, addr_t addr, pf_error_t access_type) {
101 pte32_t * entry = &pte[PTE32_INDEX(addr)];
103 if (entry->present == 0) {
104 return PT_ENTRY_NOT_PRESENT;
/* Write fault against a read-only entry. */
105 } else if ((entry->writable == 0) && (access_type.write == 1)) {
106 return PT_WRITE_ERROR;
/* User-mode access to a supervisor-only entry. */
107 } else if ((entry->user_page == 0) && (access_type.user == 1)) {
109 return PT_USER_ERROR;
118 /* We generate a page table to correspond to a given memory layout
119 * pulling pages from the mem_list when necessary
120 * If there are any gaps in the layout, we add them as unmapped pages
 */
/*
 * create_passthrough_pde32_pts -- build identity-style shadow page tables
 * covering the guest's memory map.  For each 4KB guest page, looks up the
 * shadow region covering it: unbacked/unmappable region types get a
 * zeroed, non-present PTE; allocated regions get a PTE pointing at the
 * translated host physical page.  Page tables containing no present PTE
 * are freed and their PDE left non-present.  Returns the new PDE page.
 * NOTE(review): many lines are elided here (i/j/host_addr/pte_present
 * declarations, the if/else structure, present-bit assignments, error
 * handling for failed translation, and the final return) -- comments
 * below describe only the visible statements.
 */
122 pde32_t * create_passthrough_pde32_pts(struct guest_info * guest_info) {
123 ullong_t current_page_addr = 0;
125 struct shadow_map * map = &(guest_info->mem_map);
/* One page for the page directory itself. */
128 pde32_t * pde = os_hooks->allocate_pages(1);
130 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
/* Tentatively allocate a page table for this directory slot. */
132 pte32_t * pte = os_hooks->allocate_pages(1);
135 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
/* Which shadow region (if any) covers this guest page? */
136 shadow_region_t * region = get_shadow_region_by_addr(map, current_page_addr);
/* Region types that cannot be backed by a host page right now ->
   emit a zeroed, unmapped PTE. */
139 (region->host_type == HOST_REGION_NOTHING) ||
140 (region->host_type == HOST_REGION_UNALLOCATED) ||
141 (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
142 (region->host_type == HOST_REGION_REMOTE) ||
143 (region->host_type == HOST_REGION_SWAPPED)) {
146 pte[j].user_page = 0;
147 pte[j].write_through = 0;
148 pte[j].cache_disable = 0;
152 pte[j].global_page = 0;
154 pte[j].page_base_addr = 0;
/* Backed region: fill in a user-accessible mapping. */
159 pte[j].user_page = 1;
160 pte[j].write_through = 0;
161 pte[j].cache_disable = 0;
165 pte[j].global_page = 0;
/* Translate guest physical -> host physical for this page. */
168 if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
/* Store the host frame number (address >> 12 == 4KB pages). */
174 pte[j].page_base_addr = host_addr >> 12;
179 current_page_addr += PAGE_SIZE;
/* No PTE in this table was marked present: discard the empty table
   and leave the PDE unmapped. */
182 if (pte_present == 0) {
183 os_hooks->free_page(pte);
187 pde[i].user_page = 0;
188 pde[i].write_through = 0;
189 pde[i].cache_disable = 0;
192 pde[i].large_page = 0;
193 pde[i].global_page = 0;
195 pde[i].pt_base_addr = 0;
/* At least one present PTE: wire the table into the directory. */
199 pde[i].user_page = 1;
200 pde[i].write_through = 0;
201 pde[i].cache_disable = 0;
204 pde[i].large_page = 0;
205 pde[i].global_page = 0;
/* PAGE_ALIGNED_ADDR presumably converts the pte pointer to a frame
   number -- confirm against vmm_paging.h. */
207 pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(pte);
/*
 * PrintPDE32 -- dump one page-directory entry: the virtual address it
 * maps, the page-table address it points to, and each flag bit.
 * The remaining PrintDebug arguments are on elided lines.
 */
220 void PrintPDE32(addr_t virtual_address, pde32_t * pde)
222 PrintDebug("PDE %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, reserved=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
224 (void *) (pde->pt_base_addr << PAGE_POWER),
/*
 * PrintPTE32 -- dump one page-table entry: the virtual address it maps,
 * the physical page it points to, and each flag bit.
 * The remaining PrintDebug arguments are on elided lines.
 */
237 void PrintPTE32(addr_t virtual_address, pte32_t * pte)
239 PrintDebug("PTE %p -> %p : present=%x, writable=%x, user=%x, wt=%x, cd=%x, accessed=%x, dirty=%x, pteAttribute=%x, globalPage=%x, vmm_info=%x\n",
241 (void*)(pte->page_base_addr << PAGE_POWER),
/*
 * PrintPD32 -- dump every present entry of a page directory.
 * Each PDE covers MAX_PTE32_ENTRIES 4KB pages, hence the virtual address
 * passed to PrintPDE32 is i * PAGE_SIZE * MAX_PTE32_ENTRIES.
 */
256 void PrintPD32(pde32_t * pde)
260 PrintDebug("Page Directory at %p:\n", pde);
261 for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
262 if ( pde[i].present) {
263 PrintPDE32((addr_t)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), &(pde[i]));
/*
 * PrintPT32 -- dump every present entry of one page table, labelling each
 * with the virtual address it maps (starting_address + i * PAGE_SIZE).
 */
268 void PrintPT32(addr_t starting_address, pte32_t * pte)
272 PrintDebug("Page Table at %p:\n", pte);
273 for (i = 0; (i < MAX_PTE32_ENTRIES) ; i++) {
274 if (pte[i].present) {
275 PrintPTE32(starting_address + (PAGE_SIZE * i), &(pte[i]));
284 void PrintDebugPageTables(pde32_t * pde)
288 PrintDebug("Dumping the pages starting with the pde page at %p\n", pde);
290 for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
291 if (pde[i].present) {
292 PrintPDE32((addr_t)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), &(pde[i]));
293 PrintPT32((addr_t)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), (pte32_t *)(pde[i].pt_base_addr << PAGE_POWER));