/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
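
/* activate_shadow_pt_32():
 * Installs a freshly allocated top-level shadow page directory into the real CR3.
 * The guest's own CR3 value lives separately in shdw_pg_state.guest_cr3; only the
 * PWT/PCD caching bits are propagated from the guest CR3 into the shadow CR3.
 */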
static inline int activate_shadow_pt_32(struct guest_info * info) {
    struct cr3_32 * shadow_cr3 = (struct cr3_32 *)&(info->ctrl_regs.cr3);
    struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3);
    struct shadow_page_data * shdw_page = create_new_shadow_pt(info);

    shdw_page->cr3 = shdw_page->page_pa;

    shadow_cr3->pdt_base_addr = PAGE_BASE_ADDR_4KB(shdw_page->page_pa);
    PrintDebug("Created new shadow page table %p\n", (void *)BASE_TO_PAGE_ADDR(shadow_cr3->pdt_base_addr));

    shadow_cr3->pwt = guest_cr3->pwt;
    shadow_cr3->pcd = guest_cr3->pcd;

#ifdef CONFIG_SYMBIOTIC_SWAP
    /* Assumed hook from the symbiotic swap interface: flush stale swap
     * mappings whenever a new shadow page table context is activated. */
    v3_swap_flush(info);
#endif

    return 0;
}


/*
 * 32 bit Page table fault handlers
 */
static int handle_4MB_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                           pte32_t * shadow_pt, pde32_4MB_t * large_guest_pde);

static int handle_pte_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                           pte32_t * shadow_pt, pte32_t * guest_pt);
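
/* Top-level fault handler for 32-bit shadow paging.
 * It walks the first (PDE) level: faults caused by the guest's own page tables are
 * re-injected into the guest, shadow-only faults are repaired by allocating or
 * updating the shadow PDE, and the remaining work is delegated to the PTE-level or
 * 4MB large-page handlers below.
 */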
static inline int handle_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
    pde32_t * guest_pd = NULL;
    pde32_t * shadow_pd = CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
    addr_t guest_cr3 = CR3_TO_PDE32_PA(info->shdw_pg_state.guest_cr3);
    pt_access_status_t guest_pde_access;
    pt_access_status_t shadow_pde_access;
    pde32_t * guest_pde = NULL;
    pde32_t * shadow_pde = (pde32_t *)&(shadow_pd[PDE32_INDEX(fault_addr)]);

    PrintDebug("Shadow page fault handler: %p\n", (void *)fault_addr);
    PrintDebug("Handling PDE32 Fault\n");

    if (guest_pa_to_host_va(info, guest_cr3, (addr_t *)&guest_pd) == -1) {
        PrintError("Invalid Guest PDE Address: 0x%p\n", (void *)guest_cr3);
        return -1;
    }

    guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(fault_addr)]);

    // Check the guest page permissions
    guest_pde_access = v3_can_access_pde32(guest_pd, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pde_access = v3_can_access_pde32(shadow_pd, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pde_access, shadow_pde_access) == 1) {
        PrintDebug("Injecting PDE pf to guest: (guest access error=%d) (shdw access error=%d) (pf error code=%d)\n",
                   *(uint_t *)&guest_pde_access, *(uint_t *)&shadow_pde_access, *(uint_t *)&error_code);

        if (inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError("Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
            return -1;
        }

        return 0;
    }

    if (shadow_pde_access == PT_ACCESS_USER_ERROR) {
        // PDE Entry marked non-user
        PrintDebug("Shadow Paging User access error (shadow_pde_access=0x%x, guest_pde_access=0x%x)\n",
                   shadow_pde_access, guest_pde_access);

        if (inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError("Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
            return -1;
        }

        return 0;
    } else if ((shadow_pde_access == PT_ACCESS_WRITE_ERROR) &&
               (guest_pde->large_page == 1)) {

        ((pde32_4MB_t *)guest_pde)->dirty = 1;
        shadow_pde->writable = guest_pde->writable;
        return 0;
    } else if ((shadow_pde_access != PT_ACCESS_NOT_PRESENT) &&
               (shadow_pde_access != PT_ACCESS_OK)) {
        // inject page fault in guest
        if (inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError("Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
            return -1;
        }

        PrintDebug("Unknown Error occurred (shadow_pde_access=%d)\n", shadow_pde_access);
        PrintDebug("Manual Says to inject page fault into guest\n");
        return 0;
    }
    pte32_t * shadow_pt = NULL;
    pte32_t * guest_pt = NULL;

    // Get the next shadow page level, allocate if not present
    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
        struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
        shadow_pt = (pte32_t *)V3_VAddr((void *)shdw_page->page_pa);

        shadow_pde->present = 1;
        shadow_pde->user_page = guest_pde->user_page;

        if (guest_pde->large_page == 0) {
            shadow_pde->writable = guest_pde->writable;
        } else {
            // This large page flag is temporary until we can get a working cache....
            ((pde32_4MB_t *)guest_pde)->vmm_info = V3_LARGE_PG;

            if (error_code.write) {
                shadow_pde->writable = guest_pde->writable;
                ((pde32_4MB_t *)guest_pde)->dirty = 1;
            } else {
                shadow_pde->writable = 0;
                ((pde32_4MB_t *)guest_pde)->dirty = 0;
            }
        }

        // VMM Specific options
        shadow_pde->write_through = guest_pde->write_through;
        shadow_pde->cache_disable = guest_pde->cache_disable;
        shadow_pde->global_page = guest_pde->global_page;

        guest_pde->accessed = 1;

        shadow_pde->pt_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pt = (pte32_t *)V3_VAddr((void *)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr));
    }
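
    /* Second level: walk the guest PTE for normal 4KB mappings, or handle the fault
     * directly when the guest PDE maps a 4MB large page (large guest pages are still
     * backed by 4KB shadow PTEs). */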
    if (guest_pde->large_page == 0) {
        if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t *)&guest_pt) == -1) {
            // Machine check the guest
            PrintDebug("Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
            v3_raise_exception(info, MC_EXCEPTION);
            return 0;
        }

        if (handle_pte_shadow_pagefault_32(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) {
            PrintError("Error handling Page fault caused by PTE\n");
            return -1;
        }
    } else {
        if (handle_4MB_shadow_pagefault_32(info, fault_addr, error_code, shadow_pt, (pde32_4MB_t *)guest_pde) == -1) {
            PrintError("Error handling large pagefault\n");
            return -1;
        }
    }

    return 0;
}
static int handle_pte_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                           pte32_t * shadow_pt, pte32_t * guest_pt) {
    pt_access_status_t guest_pte_access;
    pt_access_status_t shadow_pte_access;
    pte32_t * guest_pte = (pte32_t *)&(guest_pt[PTE32_INDEX(fault_addr)]);
    pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
    addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);

    struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_pa);

    if (shdw_reg == NULL) {
        // Inject a machine check in the guest
        PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_pa);
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    // Check the guest page permissions
    guest_pte_access = v3_can_access_pte32(guest_pt, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pte_access = v3_can_access_pte32(shadow_pt, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pte_access, shadow_pte_access) == 1) {

        PrintDebug("Access error injecting pf to guest (guest access error=%d) (pf error code=%d)\n",
                   guest_pte_access, *(uint_t *)&error_code);
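
        /* With symbiotic swap support, a guest fault on a swapped-out PTE can sometimes
         * be satisfied by mapping the swapped page directly, avoiding the injection below. */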
#ifdef CONFIG_SYMBIOTIC_SWAP
        if (is_swapped_pte32(guest_pte)) {
            pf_error_t swap_perms;

#ifdef CONFIG_SYMBIOTIC_SWAP_TELEMETRY
            if (error_code.write == 0) {
                info->swap_state.read_faults++;
            } else {
                info->swap_state.write_faults++;
            }
#endif

            if (v3_get_vaddr_perms(info, fault_addr, guest_pte, &swap_perms) == -1) {
                PrintError("Error getting Swapped page permissions\n");
                return -1;
            }

            // The access is allowed under the swapped-out page's permissions when:
            //   swap_perms.present == 1
            //   swap_perms.write == 1  || error_code.write == 0
            //   swap_perms.user == 1   || error_code.user == 0
            // In that case we map the swapped page directly instead of injecting a guest PF.
            if ( (swap_perms.present == 1) &&
                 ( (swap_perms.write == 1) ||
                   (error_code.write == 0) ) &&
                 ( (swap_perms.user == 1) ||
                   (error_code.user == 0) ) ) {
                addr_t swp_pg_addr = 0;

                PrintDebug("Page fault on swapped out page (vaddr=%p) (pte=%x) (error_code=%x)\n",
                           (void *)fault_addr, *(uint32_t *)guest_pte, *(uint32_t *)&error_code);

                swp_pg_addr = v3_get_swapped_pg_addr(info, shadow_pte, guest_pte);

                PrintDebug("Swapped page address=%p\n", (void *)swp_pg_addr);

                if (swp_pg_addr != 0) {
                    shadow_pte->writable = swap_perms.write;
                    shadow_pte->user_page = swap_perms.user;

                    shadow_pte->write_through = 0;
                    shadow_pte->cache_disable = 0;
                    shadow_pte->global_page = 0;

                    shadow_pte->present = 1;

                    shadow_pte->page_base_addr = swp_pg_addr;

#ifdef CONFIG_SYMBIOTIC_SWAP_TELEMETRY
                    info->swap_state.mapped_pages++;
#endif
                    return 0;
                }
            }
        } else {
            PrintDebug("Not a sym swappable page\n");
        }
#endif

        if (inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError("Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
            return -1;
        }

        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest re-entry will flush page tables and everything should now work
        PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
        return 0;
    }
    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Page Table Entry Not Present
        PrintDebug("guest_pa =%p\n", (void *)guest_pa);

        if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
            (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
            addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, guest_pa);

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);

            PrintDebug("\tMapping shadow page (%p)\n", (void *)BASE_TO_PAGE_ADDR(shadow_pte->page_base_addr));

            shadow_pte->present = guest_pte->present;
            shadow_pte->user_page = guest_pte->user_page;

            // set according to VMM policy
            shadow_pte->write_through = guest_pte->write_through;
            shadow_pte->cache_disable = guest_pte->cache_disable;
            shadow_pte->global_page = guest_pte->global_page;

            guest_pte->accessed = 1;

            if (guest_pte->dirty == 1) {
                shadow_pte->writable = guest_pte->writable;
            } else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
                shadow_pte->writable = guest_pte->writable;
                guest_pte->dirty = 1;
            } else if ((guest_pte->dirty == 0) && (error_code.write == 0)) {
                shadow_pte->writable = 0;
            }

            // Write hooks trump all, and are set Read Only
            if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
                shadow_pte->writable = 0;
            }

        } else {
            // Page fault handled by hook functions
            if (v3_handle_mem_full_hook(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
        guest_pte->dirty = 1;

        if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
            if (v3_handle_mem_wr_hook(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        } else {
            PrintDebug("Shadow PTE Write Error\n");
            shadow_pte->writable = guest_pte->writable;
        }

        return 0;

    } else {
        // Inject page fault into the guest
        if (inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError("Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
            return -1;
        }

        PrintError("PTE Page fault fell through... Not sure if this should ever happen\n");
        PrintError("Manual Says to inject page fault into guest\n");
        return -1;
    }

    return 0;
}
static int handle_4MB_shadow_pagefault_32(struct guest_info * info,
                                           addr_t fault_addr, pf_error_t error_code,
                                           pte32_t * shadow_pt, pde32_4MB_t * large_guest_pde)
{
    pt_access_status_t shadow_pte_access = v3_can_access_pte32(shadow_pt, fault_addr, error_code);
    pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
    addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_4MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_4MB(fault_addr);

    PrintDebug("Handling 4MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t *)&error_code);
    PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);

    struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_fault_pa);

    if (shdw_reg == NULL) {
        // Inject a machine check in the guest
        PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest re-entry will flush tables and everything should now work
        PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Get the guest physical address of the fault

        if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
            (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
            addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, guest_fault_pa);

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);

            PrintDebug("\tMapping shadow page (%p)\n", (void *)BASE_TO_PAGE_ADDR(shadow_pte->page_base_addr));

            shadow_pte->present = 1;

            /* We are assuming that the PDE entry has precedence,
             * so the shadow PDE will mirror the guest PDE settings
             * and we don't have to worry about them here.
             */
            shadow_pte->user_page = 1;

            if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
                shadow_pte->writable = 0;
            } else {
                shadow_pte->writable = 1;
            }

            // set according to VMM policy
            shadow_pte->write_through = large_guest_pde->write_through;
            shadow_pte->cache_disable = large_guest_pde->cache_disable;
            shadow_pte->global_page = large_guest_pde->global_page;

        } else {
            if (v3_handle_mem_full_hook(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {

        if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {

            if (v3_handle_mem_wr_hook(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else {
        PrintError("Error in large page fault handler...\n");
        PrintError("This case should have been handled at the top level handler\n");
        return -1;
    }

    PrintDebug("Returning from large page fault handler\n");
    return 0;
}


/* If we start to optimize we should look up the guest pages in the cache... */
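/* Emulates a guest INVLPG: large-page mappings are dropped at the shadow PDE, while
 * 4KB mappings are dropped at the shadow PTE; the entry is rebuilt on the next fault.
 */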
static inline int handle_shadow_invlpg_32(struct guest_info * info, addr_t vaddr) {
    pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
    pde32_t * shadow_pde = (pde32_t *)&shadow_pd[PDE32_INDEX(vaddr)];

    addr_t guest_cr3 = CR3_TO_PDE32_PA(info->shdw_pg_state.guest_cr3);
    pde32_t * guest_pd = NULL;
    pde32_t * guest_pde = NULL;

    if (guest_pa_to_host_va(info, guest_cr3, (addr_t *)&guest_pd) == -1) {
        PrintError("Invalid Guest PDE Address: 0x%p\n", (void *)guest_cr3);
        return -1;
    }

    guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(vaddr)]);

    if (guest_pde->large_page == 1) {
        shadow_pde->present = 0;
        PrintDebug("Invalidating Large Page\n");
    } else if (shadow_pde->present == 1) {
        pte32_t * shadow_pt = (pte32_t *)(addr_t)BASE_TO_PAGE_ADDR_4KB(shadow_pde->pt_base_addr);
        pte32_t * shadow_pte = (pte32_t *)V3_VAddr((void *)&shadow_pt[PTE32_INDEX(vaddr)]);

        PrintDebug("Setting not present\n");

        shadow_pte->present = 0;
    }

    return 0;
}