/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
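
/* 32-bit (non-PAE) shadow page table implementation.
 *
 * Shadow paging maintains two sets of page tables: the guest's own
 * tables, which the hardware never walks, and the shadow tables that
 * are actually loaded into CR3. The handlers below build shadow
 * entries lazily on page faults and emulate the guest tables'
 * accessed/dirty bits along the way.
 */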
static inline int activate_shadow_pt_32(struct guest_info * core) {
    struct cr3_32 * shadow_cr3 = (struct cr3_32 *)&(core->ctrl_regs.cr3);
    struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(core->shdw_pg_state.guest_cr3);
    struct shadow_page_data * shdw_page = create_new_shadow_pt(core);

    shdw_page->cr3 = shdw_page->page_pa;

    shadow_cr3->pdt_base_addr = PAGE_BASE_ADDR_4KB(shdw_page->page_pa);
    PrintDebug("Created new shadow page table %p\n", (void *)BASE_TO_PAGE_ADDR(shadow_cr3->pdt_base_addr));

    shadow_cr3->pwt = guest_cr3->pwt;
    shadow_cr3->pcd = guest_cr3->pcd;

    return 0;
}


/*
 * 32 bit Page table fault handlers
 */
static int handle_4MB_shadow_pagefault_pde_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                              pt_access_status_t shadow_pde_access, pde32_4MB_t * large_shadow_pde,
                                              pde32_4MB_t * large_guest_pde);
static int handle_4MB_shadow_pagefault_pte_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                              pte32_t * shadow_pt, pde32_4MB_t * large_guest_pde);

static int handle_pte_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pte32_t * shadow_pt, pte32_t * guest_pt);
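
/* Top-level page fault handler for 32-bit guests.
 *
 * The decision at each level of the walk is:
 *   1. If the guest's own page tables would have faulted on this access,
 *      inject the fault into the guest and let its OS handle it.
 *   2. Otherwise the fault is an artifact of the shadow tables (entry
 *      not yet built, or deliberately write-protected for dirty-bit
 *      tracking), so fix the shadow entry up from the guest entry.
 */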
static inline int handle_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
    pde32_t * guest_pd = NULL;
    pde32_t * shadow_pd = CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
    addr_t guest_cr3 = CR3_TO_PDE32_PA(info->shdw_pg_state.guest_cr3);
    pt_access_status_t guest_pde_access;
    pt_access_status_t shadow_pde_access;
    pde32_t * guest_pde = NULL;
    pde32_t * shadow_pde = (pde32_t *)&(shadow_pd[PDE32_INDEX(fault_addr)]);

    PrintDebug("Shadow page fault handler: %p\n", (void *)fault_addr);
    PrintDebug("Handling PDE32 Fault\n");

    if (v3_gpa_to_hva(info, guest_cr3, (addr_t *)&guest_pd) == -1) {
        PrintError("Invalid Guest PDE Address: 0x%p\n", (void *)guest_cr3);
        return -1;
    }

    guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(fault_addr)]);
    // Check the guest page permissions
    guest_pde_access = v3_can_access_pde32(guest_pd, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pde_access = v3_can_access_pde32(shadow_pd, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (v3_is_guest_pf(guest_pde_access, shadow_pde_access) == 1) {
        PrintDebug("Injecting PDE pf to guest: (guest access error=%d) (shdw access error=%d) (pf error code=%d)\n",
                   *(uint_t *)&guest_pde_access, *(uint_t *)&shadow_pde_access, *(uint_t *)&error_code);

        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError("Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
            return -1;
        }

        return 0;
    }
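
    /* If we get here the guest tables permit the access, so the fault
     * is an artifact of the shadow tables themselves. */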
    if (shadow_pde_access == PT_ACCESS_USER_ERROR) {

        // PDE Entry marked non-user

        PrintDebug("Shadow Paging User access error (shadow_pde_access=0x%x, guest_pde_access=0x%x)\n",
                   shadow_pde_access, guest_pde_access);

        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError("Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
            return -1;
        }
        return 0;
    } else if ((shadow_pde_access == PT_ACCESS_WRITE_ERROR) &&
               (guest_pde->large_page == 1)) {

        ((pde32_4MB_t *)guest_pde)->dirty = 1;
        shadow_pde->writable = guest_pde->writable;
        return 0;
    } else if ((shadow_pde_access != PT_ACCESS_NOT_PRESENT) &&
               (shadow_pde_access != PT_ACCESS_OK)) {
        // inject page fault in guest
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError("Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
            return -1;
        }
        PrintDebug("Unknown Error occurred (shadow_pde_access=%d)\n", shadow_pde_access);
        PrintDebug("Manual Says to inject page fault into guest\n");
        return 0;
    }
    pte32_t * shadow_pt = NULL;
    pte32_t * guest_pt = NULL;

    // Get the next shadow page level, allocate if not present

    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {

        if ((info->use_large_pages == 1) && (guest_pde->large_page == 1)) {
            // Check underlying physical memory map to see if a large page is viable
            addr_t guest_pa = BASE_TO_PAGE_ADDR_4MB(((pde32_4MB_t *)guest_pde)->page_base_addr);
            uint32_t page_size = v3_get_max_page_size(info, guest_pa, PROTECTED);

            if (page_size == PAGE_SIZE_4MB) {
                PrintDebug("using large page for fault_addr %p (gpa=%p)\n", (void *)fault_addr, (void *)guest_pa);
                if (handle_4MB_shadow_pagefault_pde_32(info, fault_addr, error_code, shadow_pde_access,
                                                       (pde32_4MB_t *)shadow_pde, (pde32_4MB_t *)guest_pde) == -1) {
                    PrintError("Error handling large pagefault with large page\n");
                    return -1;
                }

                return 0;
            }
        }
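
        // No viable large page mapping: fall back to backing the guest's
        // 4MB region with a 4KB-granularity shadow page table.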
        struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
        shadow_pt = (pte32_t *)V3_VAddr((void *)shdw_page->page_pa);

        shadow_pde->present = 1;
        shadow_pde->user_page = guest_pde->user_page;

        if (guest_pde->large_page == 0) {
            shadow_pde->writable = guest_pde->writable;
        } else {
            // This large page flag is temporary until we can get a working cache....
            ((pde32_4MB_t *)guest_pde)->vmm_info = V3_LARGE_PG;

            if (error_code.write) {
                shadow_pde->writable = guest_pde->writable;
                ((pde32_4MB_t *)guest_pde)->dirty = 1;
            } else {
                shadow_pde->writable = 0;
                ((pde32_4MB_t *)guest_pde)->dirty = 0;
            }
        }

        // VMM Specific options
        shadow_pde->write_through = guest_pde->write_through;
        shadow_pde->cache_disable = guest_pde->cache_disable;
        shadow_pde->global_page = guest_pde->global_page;

        guest_pde->accessed = 1;

        shadow_pde->pt_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pt = (pte32_t *)V3_VAddr((void *)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr));
    }
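
    /* Second level of the walk: for a normal guest page table, translate
     * its guest physical address and handle the fault at the PTE level.
     * A guest 4MB page that was not backed by a real large page above is
     * instead filled in one 4KB shadow PTE at a time. */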
    if (guest_pde->large_page == 0) {
        if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t *)&guest_pt) == -1) {
            // Machine check the guest
            PrintDebug("Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
            v3_raise_exception(info, MC_EXCEPTION);
            return 0;
        }
        if (handle_pte_shadow_pagefault_32(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) {
            PrintError("Error handling Page fault caused by PTE\n");
            return -1;
        }
    } else {
        if (handle_4MB_shadow_pagefault_pte_32(info, fault_addr, error_code, shadow_pt, (pde32_4MB_t *)guest_pde) == -1) {
            PrintError("Error handling large pagefault\n");
            return -1;
        }
    }

    return 0;
}
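
/* PTE-level handler for 4KB guest pages.
 *
 * Note how the guest's dirty bit is emulated below: a page whose guest
 * PTE is still clean is mapped read-only in the shadow PTE, so the first
 * write traps back here, letting us set guest_pte->dirty before making
 * the shadow entry writable.
 */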
static int handle_pte_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pte32_t * shadow_pt, pte32_t * guest_pt) {

    pt_access_status_t guest_pte_access;
    pt_access_status_t shadow_pte_access;
    pte32_t * guest_pte = (pte32_t *)&(guest_pt[PTE32_INDEX(fault_addr)]);
    pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
    addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);

    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_pa);

    if (shdw_reg == NULL) {
        // Inject a machine check in the guest
        PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_pa);
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }
    // Check the guest page permissions
    guest_pte_access = v3_can_access_pte32(guest_pt, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pte_access = v3_can_access_pte32(shadow_pt, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (v3_is_guest_pf(guest_pte_access, shadow_pte_access) == 1) {

        PrintDebug("Access error injecting pf to guest (guest access error=%d) (pf error code=%d)\n",
                   guest_pte_access, *(uint_t *)&error_code);

        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError("Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
            return -1;
        }

        return 0;
    }
    if (shadow_pte_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest re-entry will flush page tables and everything should now work
        PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
        return 0;
    }
    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Page Table Entry Not Present
        PrintDebug("guest_pa = %p\n", (void *)guest_pa);

        if ((shdw_reg->flags.alloced == 1) && (shdw_reg->flags.read == 1)) {
            addr_t shadow_pa = 0;

            if (v3_gpa_to_hpa(info, guest_pa, &shadow_pa) == -1) {
                PrintError("could not translate page fault address (%p)\n", (void *)guest_pa);
                return -1;
            }

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);

            PrintDebug("\tMapping shadow page (%p)\n", (void *)BASE_TO_PAGE_ADDR(shadow_pte->page_base_addr));

            shadow_pte->present = guest_pte->present;
            shadow_pte->user_page = guest_pte->user_page;

            // Set according to VMM policy
            shadow_pte->write_through = guest_pte->write_through;
            shadow_pte->cache_disable = guest_pte->cache_disable;
            shadow_pte->global_page = guest_pte->global_page;

            guest_pte->accessed = 1;

            if (guest_pte->dirty == 1) {
                shadow_pte->writable = guest_pte->writable;
            } else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
                shadow_pte->writable = guest_pte->writable;
                guest_pte->dirty = 1;
            } else if ((guest_pte->dirty == 0) && (error_code.write == 0)) {
                shadow_pte->writable = 0;
            }
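
            // Write-hooked regions trump the guest's permissions: they
            // stay read-only so that every write traps to the VMM.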
            if (shdw_reg->flags.write == 0) {
                shadow_pte->writable = 0;
            }
        } else {
            // Page fault on unhandled memory region
            if (shdw_reg->unhandled(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
        guest_pte->dirty = 1;

        if (shdw_reg->flags.write == 1) {
            PrintDebug("Shadow PTE Write Error\n");
            shadow_pte->writable = guest_pte->writable;
        } else {
            if (shdw_reg->unhandled(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else {
        // Inject page fault into the guest
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError("Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
            return -1;
        }
        PrintError("PTE Page fault fell through... Not sure if this should ever happen\n");
        PrintError("Manual Says to inject page fault into guest\n");
        return -1;
    }

    return 0;
}
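
/* A guest 4MB page is not necessarily backed by contiguous, aligned host
 * memory, so it may have to be shattered into 4KB shadow PTEs. This
 * handler fills in the single 4KB shadow entry covering fault_addr.
 */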
// Handle a 4MB page fault with small pages in the PTE
static int handle_4MB_shadow_pagefault_pte_32(struct guest_info * info,
                                              addr_t fault_addr, pf_error_t error_code,
                                              pte32_t * shadow_pt, pde32_4MB_t * large_guest_pde)
{
    pt_access_status_t shadow_pte_access = v3_can_access_pte32(shadow_pt, fault_addr, error_code);
    pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
    addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_4MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_4MB(fault_addr);

    PrintDebug("Handling 4MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t *)&error_code);
    PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);

    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_fault_pa);

    if (shdw_reg == NULL) {
        // Inject a machine check in the guest
        PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }
    if (shadow_pte_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest re-entry will flush tables and everything should now work
        PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
        return 0;
    }
    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Get the guest physical address of the fault

        if ((shdw_reg->flags.alloced == 1) &&
            (shdw_reg->flags.read == 1)) {
            addr_t shadow_pa = 0;

            if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) {
                PrintError("could not translate page fault address (%p)\n", (void *)guest_fault_pa);
                return -1;
            }

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);

            PrintDebug("\tMapping shadow page (%p)\n", (void *)BASE_TO_PAGE_ADDR(shadow_pte->page_base_addr));

            shadow_pte->present = 1;

            /* We are assuming that the PDE entry has precedence,
             * so the shadow PDE will mirror the guest PDE settings
             * and we don't have to worry about them here.
             */
            shadow_pte->user_page = 1;

            // Set according to VMM policy
            shadow_pte->write_through = large_guest_pde->write_through;
            shadow_pte->cache_disable = large_guest_pde->cache_disable;
            shadow_pte->global_page = large_guest_pde->global_page;

            if (shdw_reg->flags.write == 0) {
                shadow_pte->writable = 0;
            } else {
                shadow_pte->writable = 1;
            }
        } else {
            if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {

        if (shdw_reg->flags.write == 0) {
            if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else {
        PrintError("Error in large page fault handler...\n");
        PrintError("This case should have been handled at the top level handler\n");
        return -1;
    }

    PrintDebug("Returning from large page->small page fault handler\n");
    return 0;
}
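
/* When the backing host memory is itself 4MB-aligned and contiguous
 * (as checked via v3_get_max_page_size() in the top-level handler), the
 * guest's 4MB page can be mapped with a single large shadow PDE.
 */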
// Handle a 4MB page fault with a 4MB page in the PDE
static int handle_4MB_shadow_pagefault_pde_32(struct guest_info * info,
                                              addr_t fault_addr, pf_error_t error_code,
                                              pt_access_status_t shadow_pde_access,
                                              pde32_4MB_t * large_shadow_pde, pde32_4MB_t * large_guest_pde)
{
    addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_4MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_4MB(fault_addr);

    PrintDebug("Handling 4MB fault with large page (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t *)&error_code);
    PrintDebug("LargeShadowPDE=%p, LargeGuestPDE=%p\n", large_shadow_pde, large_guest_pde);

    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_fault_pa);

    if (shdw_reg == NULL) {
        // Inject a machine check in the guest
        PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }
    if (shadow_pde_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest re-entry will flush tables and everything should now work
        PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
        return 0;
    }
    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
        // Get the guest physical address of the fault

        if ((shdw_reg->flags.alloced == 1) &&
            (shdw_reg->flags.read == 1)) {
            addr_t shadow_pa = 0;

            if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) {
                PrintError("could not translate page fault address (%p)\n", (void *)guest_fault_pa);
                return -1;
            }

            PrintDebug("shadow PA = %p\n", (void *)shadow_pa);

            large_guest_pde->vmm_info = V3_LARGE_PG; /* For invalidations */
            large_shadow_pde->page_base_addr = PAGE_BASE_ADDR_4MB(shadow_pa);
            large_shadow_pde->large_page = 1;
            large_shadow_pde->present = 1;
            large_shadow_pde->user_page = 1;

            PrintDebug("\tMapping shadow page (%p)\n", (void *)BASE_TO_PAGE_ADDR_4MB(large_shadow_pde->page_base_addr));

            if (shdw_reg->flags.write == 0) {
                large_shadow_pde->writable = 0;
            } else {
                large_shadow_pde->writable = 1;
            }

            // Set according to VMM policy
            large_shadow_pde->write_through = large_guest_pde->write_through;
            large_shadow_pde->cache_disable = large_guest_pde->cache_disable;
            large_shadow_pde->global_page = large_guest_pde->global_page;
        } else {
            if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pde_access == PT_ACCESS_WRITE_ERROR) {

        if (shdw_reg->flags.write == 0) {
            if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else {
        PrintError("Error in large page fault handler...\n");
        PrintError("This case should have been handled at the top level handler\n");
        return -1;
    }

    PrintDebug("Returning from large page->large page fault handler\n");
    return 0;
}
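
/* Emulate the guest's INVLPG against the shadow tables: rather than
 * rebuilding the affected entry immediately, just mark it not-present
 * and let the next access fault it back in from the guest tables.
 */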
/* If we start to optimize we should look up the guest pages in the cache... */
static inline int handle_shadow_invlpg_32(struct guest_info * info, addr_t vaddr) {
    pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
    pde32_t * shadow_pde = (pde32_t *)&shadow_pd[PDE32_INDEX(vaddr)];

    addr_t guest_cr3 = CR3_TO_PDE32_PA(info->shdw_pg_state.guest_cr3);
    pde32_t * guest_pd = NULL;
    pde32_t * guest_pde = NULL;

    if (v3_gpa_to_hva(info, guest_cr3, (addr_t *)&guest_pd) == -1) {
        PrintError("Invalid Guest PDE Address: 0x%p\n", (void *)guest_cr3);
        return -1;
    }

    guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(vaddr)]);
    if (guest_pde->large_page == 1) {
        shadow_pde->present = 0;
        PrintDebug("Invalidating Large Page\n");
    } else if (shadow_pde->present == 1) {
        pte32_t * shadow_pt = (pte32_t *)(addr_t)BASE_TO_PAGE_ADDR_4KB(shadow_pde->pt_base_addr);
        pte32_t * shadow_pte = (pte32_t *)V3_VAddr((void *)&shadow_pt[PTE32_INDEX(vaddr)]);

        PrintDebug("Setting not present\n");

        shadow_pte->present = 0;
    }

    return 0;
}