/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

static inline int activate_shadow_pt_64(struct guest_info * info) {
    struct cr3_64 * shadow_cr3 = (struct cr3_64 *)&(info->ctrl_regs.cr3);
    struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->shdw_pg_state.guest_cr3);
    struct shadow_page_data * shadow_pt = create_new_shadow_pt(info);
    addr_t shadow_pt_addr = shadow_pt->page_pa;

    // Because this is a new CR3 load, the freshly allocated page becomes the new CR3 value
    shadow_pt->cr3 = shadow_pt->page_pa;

    PrintDebug("Top level Shadow page pa=%p\n", (void *)shadow_pt_addr);

    shadow_cr3->pml4t_base_addr = PAGE_BASE_ADDR_4KB(shadow_pt_addr);
    PrintDebug("Creating new 64 bit shadow page table %p\n", (void *)BASE_TO_PAGE_ADDR(shadow_cr3->pml4t_base_addr));

    // Preserve the guest's cache-control bits in the shadow CR3
    shadow_cr3->pwt = guest_cr3->pwt;
    shadow_cr3->pcd = guest_cr3->pcd;

    return 0;
}
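
/* A note on this activation path: after it runs, the hardware CR3 points at
 * the shadow PML4 allocated above, while the guest's own CR3 value continues
 * to be tracked separately in shdw_pg_state.guest_cr3. (The call site that
 * triggers this on a guest CR3 load lives in the generic shadow paging layer,
 * outside this file.)
 */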

/*
 * 64 bit Page table fault handlers
 */
static int handle_2MB_shadow_pagefault_pde_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                              pt_access_status_t shadow_pde_access, pde64_2MB_t * shadow_pt,
                                              pde64_2MB_t * large_guest_pde);
static int handle_2MB_shadow_pagefault_pte_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                              pte64_t * shadow_pt, pde64_2MB_t * large_guest_pde);

static int handle_pte_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pte64_t * shadow_pt, pte64_t * guest_pt);

static int handle_pde_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pde64_t * shadow_pd, pde64_t * guest_pd);

static int handle_pdpe_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                           pdpe64_t * shadow_pdp, pdpe64_t * guest_pdp);
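
/* Overview: handle_shadow_pagefault_64() below walks the four levels of the
 * 64 bit page tables (PML4 -> PDP -> PD -> PT), delegating one level to each
 * of the handlers declared above. Every level follows the same pattern:
 * compare guest and shadow access rights, inject the fault into the guest if
 * the guest's own tables caused it, allocate and link a new shadow page if
 * the shadow entry is not present, and otherwise recurse into the next level.
 */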
static inline int handle_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
    pml4e64_t * guest_pml = NULL;
    pml4e64_t * shadow_pml = CR3_TO_PML4E64_VA(info->ctrl_regs.cr3);
    addr_t guest_cr3 = CR3_TO_PML4E64_PA(info->shdw_pg_state.guest_cr3);
    pt_access_status_t guest_pml4e_access;
    pt_access_status_t shadow_pml4e_access;
    pml4e64_t * guest_pml4e = NULL;
    pml4e64_t * shadow_pml4e = (pml4e64_t *)&(shadow_pml[PML4E64_INDEX(fault_addr)]);

    PrintDebug("64 bit Shadow page fault handler: %p\n", (void *)fault_addr);
    PrintDebug("Handling PML fault\n");

    if (v3_gpa_to_hva(info, guest_cr3, (addr_t *)&guest_pml) == -1) {
        PrintError("Invalid Guest PML4E Address: 0x%p\n", (void *)guest_cr3);
        return -1;
    }

    guest_pml4e = (pml4e64_t *)&(guest_pml[PML4E64_INDEX(fault_addr)]);

    PrintDebug("Checking Guest %p\n", (void *)guest_pml);
    // Check the guest page permissions
    guest_pml4e_access = v3_can_access_pml4e64(guest_pml, fault_addr, error_code);

    PrintDebug("Checking shadow %p\n", (void *)shadow_pml);
    // Check the shadow page permissions
    shadow_pml4e_access = v3_can_access_pml4e64(shadow_pml, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (v3_is_guest_pf(guest_pml4e_access, shadow_pml4e_access) == 1) {
        PrintDebug("Injecting PML4E pf to guest: (guest access error=%d) (pf error code=%d)\n",
                   *(uint_t *)&guest_pml4e_access, *(uint_t *)&error_code);
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError("Could not inject guest page fault\n");
            return -1;
        }
        return 0;
    }

    if (shadow_pml4e_access == PT_ACCESS_USER_ERROR) {
        // PML4 Entry marked non-user
        PrintDebug("Shadow Paging User access error (shadow_pml4e_access=0x%x, guest_pml4e_access=0x%x)\n",
                   shadow_pml4e_access, guest_pml4e_access);
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError("Could not inject guest page fault\n");
            return -1;
        }
        return 0;
    } else if ((shadow_pml4e_access != PT_ACCESS_NOT_PRESENT) &&
               (shadow_pml4e_access != PT_ACCESS_OK)) {
        // Inject a page fault into the guest
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError("Could not inject guest page fault\n");
            return -1;
        }
        PrintDebug("Unknown Error occurred (shadow_pml4e_access=%d)\n", shadow_pml4e_access);
        PrintDebug("Manual says to inject page fault into guest\n");
        return 0;
    }

    pdpe64_t * shadow_pdp = NULL;
    pdpe64_t * guest_pdp = NULL;

    // Get the next shadow page level, allocating it if not present
    if (shadow_pml4e_access == PT_ACCESS_NOT_PRESENT) {
        struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
        shadow_pdp = (pdpe64_t *)V3_VAddr((void *)shdw_page->page_pa);

        shadow_pml4e->present = 1;
        shadow_pml4e->user_page = guest_pml4e->user_page;
        shadow_pml4e->writable = guest_pml4e->writable;
        shadow_pml4e->cache_disable = guest_pml4e->cache_disable;
        shadow_pml4e->write_through = guest_pml4e->write_through;

        guest_pml4e->accessed = 1;

        shadow_pml4e->pdp_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pdp = (pdpe64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pml4e->pdp_base_addr));
    }

    // Continue processing at the next level
    if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr), (addr_t *)&guest_pdp) == -1) {
        // Machine check the guest
        PrintError("Invalid Guest PDP Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr));
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    if (handle_pdpe_shadow_pagefault_64(info, fault_addr, error_code, shadow_pdp, guest_pdp) == -1) {
        PrintError("Error handling Page fault caused by PDPE\n");
        return -1;
    }

    return 0;
}

// For now we are not going to handle 1 Gigabyte pages
static int handle_pdpe_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                           pdpe64_t * shadow_pdp, pdpe64_t * guest_pdp) {
    pt_access_status_t guest_pdpe_access;
    pt_access_status_t shadow_pdpe_access;
    pdpe64_t * guest_pdpe = (pdpe64_t *)&(guest_pdp[PDPE64_INDEX(fault_addr)]);
    pdpe64_t * shadow_pdpe = (pdpe64_t *)&(shadow_pdp[PDPE64_INDEX(fault_addr)]);

    PrintDebug("Handling PDP fault\n");

    if (fault_addr == 0) {
        PrintDebug("Guest Page Tree for guest virtual address zero fault\n");
        PrintGuestPageTree(info, fault_addr, (addr_t)(info->shdw_pg_state.guest_cr3));
        PrintDebug("Host Page Tree for guest virtual address zero fault\n");
        PrintHostPageTree(info, fault_addr, (addr_t)(info->ctrl_regs.cr3));
    }

    // Check the guest page permissions
    guest_pdpe_access = v3_can_access_pdpe64(guest_pdp, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pdpe_access = v3_can_access_pdpe64(shadow_pdp, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (v3_is_guest_pf(guest_pdpe_access, shadow_pdpe_access) == 1) {
        PrintDebug("Injecting PDPE pf to guest: (guest access error=%d) (pf error code=%d)\n",
                   *(uint_t *)&guest_pdpe_access, *(uint_t *)&error_code);
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError("Could not inject guest page fault\n");
            return -1;
        }
        return 0;
    }

    if (shadow_pdpe_access == PT_ACCESS_USER_ERROR) {
        // PDP Entry marked non-user
        PrintDebug("Shadow Paging User access error (shadow_pdpe_access=0x%x, guest_pdpe_access=0x%x)\n",
                   shadow_pdpe_access, guest_pdpe_access);
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError("Could not inject guest page fault\n");
            return -1;
        }
        return 0;
    } else if ((shadow_pdpe_access != PT_ACCESS_NOT_PRESENT) &&
               (shadow_pdpe_access != PT_ACCESS_OK)) {
        // Inject a page fault into the guest
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError("Could not inject guest page fault\n");
            return -1;
        }
        PrintDebug("Unknown Error occurred (shadow_pdpe_access=%d)\n", shadow_pdpe_access);
        PrintDebug("Manual says to inject page fault into guest\n");
        return 0;
    }

    pde64_t * shadow_pd = NULL;
    pde64_t * guest_pd = NULL;

    // Get the next shadow page level, allocating it if not present
    if (shadow_pdpe_access == PT_ACCESS_NOT_PRESENT) {
        struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
        shadow_pd = (pde64_t *)V3_VAddr((void *)shdw_page->page_pa);

        shadow_pdpe->present = 1;
        shadow_pdpe->user_page = guest_pdpe->user_page;
        shadow_pdpe->writable = guest_pdpe->writable;
        shadow_pdpe->write_through = guest_pdpe->write_through;
        shadow_pdpe->cache_disable = guest_pdpe->cache_disable;

        guest_pdpe->accessed = 1;

        shadow_pdpe->pd_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pd = (pde64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pdpe->pd_base_addr));
    }

    // Continue processing at the next level
    if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr), (addr_t *)&guest_pd) == -1) {
        // Machine check the guest
        PrintError("Invalid Guest PD Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr));
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    if (handle_pde_shadow_pagefault_64(info, fault_addr, error_code, shadow_pd, guest_pd) == -1) {
        PrintError("Error handling Page fault caused by PDE\n");
        return -1;
    }

    return 0;
}

// For an address on a page of size page_size, compute the actual alignment
// of the physical page it maps to
int compute_physical_alignment(addr_t va, addr_t pa, uint32_t page_size)
{
    addr_t va_offset, pa_base;

    switch (page_size) {
        case PAGE_SIZE_1GB:
            va_offset = PAGE_OFFSET_1GB(va);
            break;
        case PAGE_SIZE_4MB:
            va_offset = PAGE_OFFSET_4MB(va);
            break;
        case PAGE_SIZE_2MB:
            va_offset = PAGE_OFFSET_2MB(va);
            break;
        case PAGE_SIZE_4KB:
            va_offset = PAGE_OFFSET_4KB(va);
            break;
        default:
            PrintError("Invalid page size in %s.\n", __FUNCTION__);
            // Conservative fallback: claim only the smallest alignment
            return PAGE_SIZE_4KB;
    }

    // The base of the physical page: where the physical page would have to
    // start for the guest's mapping to be honored exactly
    pa_base = pa - va_offset;

    // Return the largest page size that pa_base is aligned to
    if (PAGE_OFFSET_1GB(pa_base) == 0) {
        return PAGE_SIZE_1GB;
    } else if (PAGE_OFFSET_4MB(pa_base) == 0) {
        return PAGE_SIZE_4MB;
    } else if (PAGE_OFFSET_2MB(pa_base) == 0) {
        return PAGE_SIZE_2MB;
    } else if (PAGE_OFFSET_4KB(pa_base) == 0) {
        return PAGE_SIZE_4KB;
    } else {
        PrintError("Incorrect alignment setup or calculation in %s.\n", __FUNCTION__);
        // Conservative fallback: claim only the smallest alignment
        return PAGE_SIZE_4KB;
    }
}

static int handle_pde_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pde64_t * shadow_pd, pde64_t * guest_pd) {
    pt_access_status_t guest_pde_access;
    pt_access_status_t shadow_pde_access;
    pde64_t * guest_pde = (pde64_t *)&(guest_pd[PDE64_INDEX(fault_addr)]);
    pde64_t * shadow_pde = (pde64_t *)&(shadow_pd[PDE64_INDEX(fault_addr)]);

    PrintDebug("Handling PDE fault\n");

    // Check the guest page permissions
    guest_pde_access = v3_can_access_pde64(guest_pd, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pde_access = v3_can_access_pde64(shadow_pd, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (v3_is_guest_pf(guest_pde_access, shadow_pde_access) == 1) {
        PrintDebug("Injecting PDE pf to guest: (guest access error=%d) (pf error code=%d)\n",
                   *(uint_t *)&guest_pde_access, *(uint_t *)&error_code);
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError("Could not inject guest page fault\n");
            return -1;
        }
        return 0;
    }

    if (shadow_pde_access == PT_ACCESS_USER_ERROR) {
        // PDE Entry marked non-user
        PrintDebug("Shadow Paging User access error (shadow_pde_access=0x%x, guest_pde_access=0x%x)\n",
                   shadow_pde_access, guest_pde_access);
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError("Could not inject guest page fault\n");
            return -1;
        }
        return 0;
    } else if ((shadow_pde_access == PT_ACCESS_WRITE_ERROR) &&
               (guest_pde->large_page == 1)) {
        // Emulate the dirty bit for a writable guest large page
        ((pde64_2MB_t *)guest_pde)->dirty = 1;
        shadow_pde->writable = guest_pde->writable;

        //PrintDebug("Returning due to large page Write Error\n");
        //PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);

        return 0;
    } else if ((shadow_pde_access != PT_ACCESS_NOT_PRESENT) &&
               (shadow_pde_access != PT_ACCESS_OK)) {
        // Inject a page fault into the guest
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError("Could not inject guest page fault\n");
            return -1;
        }
        PrintDebug("Unknown Error occurred (shadow_pde_access=%d)\n", shadow_pde_access);
        PrintDebug("Manual says to inject page fault into guest\n");
        return 0;
    }

    pte64_t * shadow_pt = NULL;
    pte64_t * guest_pt = NULL;

    // Get the next shadow page level, allocating it if not present
    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
        // Check if we can use large pages and the guest memory is properly aligned
        // to potentially use a large page
        if (info->use_large_pages && guest_pde->large_page) {
            // Check the underlying physical memory map to see if a large page is viable
            addr_t guest_pa = BASE_TO_PAGE_ADDR_2MB(((pde64_2MB_t *)guest_pde)->page_base_addr);

            if ((compute_physical_alignment(fault_addr, guest_pa, PAGE_SIZE_2MB) >= PAGE_SIZE_2MB) &&
                (v3_get_max_page_size(info, guest_pa, PAGE_SIZE_2MB) >= PAGE_SIZE_2MB)) {
                // We should be able to use a large page
                if (handle_2MB_shadow_pagefault_pde_64(info, fault_addr, error_code, shadow_pde_access,
                                                       (pde64_2MB_t *)shadow_pde, (pde64_2MB_t *)guest_pde) == 0) {
                    return 0;
                } else {
                    PrintError("Error handling large pagefault with large page\n");
                    return -1;
                }
            } else {
                PrintDebug("Alignment or underlying physical memory map doesn't allow use of a large page.\n");
            }
        }

        // Fall through to handle the region with small pages
        struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
        shadow_pt = (pte64_t *)V3_VAddr((void *)shdw_page->page_pa);

        PrintDebug("Creating new shadow PT: %p\n", shadow_pt);

        shadow_pde->present = 1;
        shadow_pde->user_page = guest_pde->user_page;

        if (guest_pde->large_page == 0) {
            shadow_pde->writable = guest_pde->writable;
        } else {
            // This large page flag is temporary until we can get a working cache....
            ((pde64_2MB_t *)guest_pde)->vmm_info = V3_LARGE_PG;

            if (error_code.write) {
                shadow_pde->writable = guest_pde->writable;
                ((pde64_2MB_t *)guest_pde)->dirty = 1;
            } else {
                shadow_pde->writable = 0;
                ((pde64_2MB_t *)guest_pde)->dirty = 0;
            }
        }

        // VMM Specific options
        shadow_pde->write_through = guest_pde->write_through;
        shadow_pde->cache_disable = guest_pde->cache_disable;
        shadow_pde->global_page = guest_pde->global_page;

        guest_pde->accessed = 1;

        shadow_pde->pt_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pt = (pte64_t *)V3_VAddr((void *)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr));
    }

    // Continue processing at the next level
    if (guest_pde->large_page == 0) {
        if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t *)&guest_pt) == -1) {
            // Machine check the guest
            PrintError("Invalid Guest PT Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
            v3_raise_exception(info, MC_EXCEPTION);
            return 0;
        }

        if (handle_pte_shadow_pagefault_64(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) {
            PrintError("Error handling Page fault caused by PTE\n");
            return -1;
        }
    } else {
        // A guest large page backed by a shadow page table of small pages
        if (handle_2MB_shadow_pagefault_pte_64(info, fault_addr, error_code, shadow_pt, (pde64_2MB_t *)guest_pde) == -1) {
            PrintError("Error handling large pagefault with small page\n");
            return -1;
        }
    }

    return 0;
}

static int handle_pte_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pte64_t * shadow_pt, pte64_t * guest_pt) {
    pt_access_status_t guest_pte_access;
    pt_access_status_t shadow_pte_access;
    pte64_t * guest_pte = (pte64_t *)&(guest_pt[PTE64_INDEX(fault_addr)]);
    pte64_t * shadow_pte = (pte64_t *)&(shadow_pt[PTE64_INDEX(fault_addr)]);
    addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);
    // struct shadow_page_state * state = &(info->shdw_pg_state);

    PrintDebug("Handling PTE fault\n");

    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_pa);

    if (shdw_reg == NULL) {
        // Inject a machine check into the guest
        PrintError("Invalid Guest Address in page table (0x%p)\n", (void *)guest_pa);
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    // Check the guest page permissions
    guest_pte_access = v3_can_access_pte64(guest_pt, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pte_access = v3_can_access_pte64(shadow_pt, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (v3_is_guest_pf(guest_pte_access, shadow_pte_access) == 1) {
        PrintDebug("Access error injecting pf to guest (guest access error=%d) (pf error code=%d)\n",
                   guest_pte_access, *(uint_t *)&error_code);

        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError("Could not inject guest page fault\n");
            return -1;
        }
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest re-entry will flush the page tables and everything should now work
        PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Page Table Entry Not Present
        PrintDebug("guest_pa = %p\n", (void *)guest_pa);

        if ((shdw_reg->flags.alloced == 1) ||
            (shdw_reg->flags.read == 1)) {
            addr_t shadow_pa = 0;

            if (v3_gpa_to_hpa(info, guest_pa, &shadow_pa) == -1) {
                PrintError("Could not translate page fault address (%p)\n", (void *)guest_pa);
                return -1;
            }

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);

            shadow_pte->present = guest_pte->present;
            shadow_pte->user_page = guest_pte->user_page;

            // Set according to VMM policy
            shadow_pte->write_through = guest_pte->write_through;
            shadow_pte->cache_disable = guest_pte->cache_disable;
            shadow_pte->global_page = guest_pte->global_page;

            guest_pte->accessed = 1;

            if (guest_pte->dirty == 1) {
                shadow_pte->writable = guest_pte->writable;
            } else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
                shadow_pte->writable = guest_pte->writable;
                guest_pte->dirty = 1;
            } else if ((guest_pte->dirty == 0) && (error_code.write == 0)) {
                shadow_pte->writable = 0;
            }
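
            /* Dirty-bit emulation (a reading of the chain above, for clarity):
             * the hardware walks only the shadow tables, so it can never set
             * the dirty bit in the guest's own page tables. A clean guest
             * page is therefore mapped read-only; the guest's first write
             * faults back into the VMM, which is the moment guest_pte->dirty
             * is set and the shadow PTE finally becomes writable.
             */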
            // Write hooks trump all, and are set Read Only
            if (shdw_reg->flags.write == 0) {
                shadow_pte->writable = 0;
            }
        } else {
            // Pass to the unhandled callback
            if (shdw_reg->unhandled(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
        guest_pte->dirty = 1;

        if (shdw_reg->flags.write == 1) {
            PrintDebug("Shadow PTE Write Error\n");
            shadow_pte->writable = guest_pte->writable;
        } else {
            if (shdw_reg->unhandled(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }

        return 0;
    } else {
        // Inject a page fault into the guest
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError("Could not inject guest page fault\n");
            return -1;
        }
        PrintError("PTE Page fault fell through... Not sure if this should ever happen\n");
        PrintError("Manual says to inject page fault into guest\n");
    }

    return 0;
}

static int handle_2MB_shadow_pagefault_pde_64(struct guest_info * info,
                                              addr_t fault_addr, pf_error_t error_code,
                                              pt_access_status_t shadow_pde_access,
                                              pde64_2MB_t * large_shadow_pde, pde64_2MB_t * large_guest_pde)
{
    addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_2MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_2MB(fault_addr);
    // struct shadow_page_state * state = &(info->shdw_pg_state);

    PrintDebug("Handling 2MB fault with large page (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t *)&error_code);
    PrintDebug("LargeShadowPDE=%p, LargeGuestPDE=%p\n", large_shadow_pde, large_guest_pde);

    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_fault_pa);

    if (shdw_reg == NULL) {
        // Inject a machine check into the guest
        PrintError("Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    if (shadow_pde_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest re-entry will flush the tables and everything should now work
        PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
        //PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
        return 0;
    }

    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
        // Get the guest physical address of the fault
        if ((shdw_reg->flags.alloced == 1) ||
            (shdw_reg->flags.read == 1)) {
            addr_t shadow_pa = 0;

            if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) {
                PrintError("Could not translate page fault address (%p)\n", (void *)guest_fault_pa);
                return -1;
            }

            large_guest_pde->vmm_info = V3_LARGE_PG; /* For invalidations */
            large_shadow_pde->page_base_addr = PAGE_BASE_ADDR_2MB(shadow_pa);
            large_shadow_pde->large_page = 1;
            large_shadow_pde->present = 1;
            large_shadow_pde->user_page = 1;

            if (shdw_reg->flags.write == 0) {
                large_shadow_pde->writable = 0;
            } else {
                large_shadow_pde->writable = 1;
            }

            // Set according to VMM policy
            large_shadow_pde->write_through = large_guest_pde->write_through;
            large_shadow_pde->cache_disable = large_guest_pde->cache_disable;
            large_shadow_pde->global_page = large_guest_pde->global_page;
        } else {
            if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pde_access == PT_ACCESS_WRITE_ERROR) {
        if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
            PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
            return -1;
        }
    } else {
        PrintError("Error in large page fault handler...\n");
        PrintError("This case should have been handled at the top level handler\n");
        return -1;
    }

    //PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
    PrintDebug("Returning from large page->large page fault handler\n");
    return 0;
}

static int handle_2MB_shadow_pagefault_pte_64(struct guest_info * info,
                                              addr_t fault_addr, pf_error_t error_code,
                                              pte64_t * shadow_pt, pde64_2MB_t * large_guest_pde)
{
    pt_access_status_t shadow_pte_access = v3_can_access_pte64(shadow_pt, fault_addr, error_code);
    pte64_t * shadow_pte = (pte64_t *)&(shadow_pt[PTE64_INDEX(fault_addr)]);
    addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_2MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_2MB(fault_addr);
    // struct shadow_page_state * state = &(info->shdw_pg_state);

    PrintDebug("Handling 2MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t *)&error_code);
    PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);

    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_fault_pa);

    if (shdw_reg == NULL) {
        // Inject a machine check into the guest
        PrintError("Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest re-entry will flush the tables and everything should now work
        PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
        //PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Get the guest physical address of the fault
        if ((shdw_reg->flags.alloced == 1) ||
            (shdw_reg->flags.read == 1)) {
            addr_t shadow_pa = 0;

            if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) {
                PrintError("Could not translate page fault address (%p)\n", (void *)guest_fault_pa);
                return -1;
            }

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);

            shadow_pte->present = 1;

            /* We are assuming that the PDE entry has precedence,
             * so the Shadow PDE will mirror the guest PDE settings
             * and we don't have to worry about them here.
             * Allow everything.
             */
            shadow_pte->user_page = 1;

            if (shdw_reg->flags.write == 0) {
                shadow_pte->writable = 0;
            } else {
                shadow_pte->writable = 1;
            }

            // Set according to VMM policy
            shadow_pte->write_through = large_guest_pde->write_through;
            shadow_pte->cache_disable = large_guest_pde->cache_disable;
            shadow_pte->global_page = large_guest_pde->global_page;
        } else {
            if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
        if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
            PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
            return -1;
        }
    } else {
        PrintError("Error in large page fault handler...\n");
        PrintError("This case should have been handled at the top level handler\n");
        return -1;
    }

    //PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
    PrintDebug("Returning from large page->small page fault handler\n");
    return 0;
}

static int invalidation_cb_64(struct guest_info * info, page_type_t type,
                              addr_t vaddr, addr_t page_ptr, addr_t page_pa,
                              void * private_data) {

    switch (type) {
        case PAGE_PML464:
            {
                pml4e64_t * pml = (pml4e64_t *)page_ptr;

                if (pml[PML4E64_INDEX(vaddr)].present == 0) {
                    return 1;
                }
                return 0;
            }
        case PAGE_PDP64:
            {
                pdpe64_t * pdp = (pdpe64_t *)page_ptr;
                pdpe64_t * pdpe = &(pdp[PDPE64_INDEX(vaddr)]);

                if (pdpe->present == 0) {
                    return 1;
                }

                if (pdpe->vmm_info == V3_LARGE_PG) {
                    PrintError("1 Gigabyte pages not supported\n");
                    return -1;
                }

                return 0;
            }
        case PAGE_PD64:
            {
                pde64_t * pd = (pde64_t *)page_ptr;
                pde64_t * pde = &(pd[PDE64_INDEX(vaddr)]);

                if (pde->present == 0) {
                    return 1;
                }

                if (pde->vmm_info == V3_LARGE_PG) {
                    // A large page mapping: clear it here, there is no PT level below
                    pde->present = 0;
                    return 1;
                }

                return 0;
            }
        case PAGE_PT64:
            {
                pte64_t * pt = (pte64_t *)page_ptr;

                pt[PTE64_INDEX(vaddr)].present = 0;

                return 1;
            }
        default:
            PrintError("Invalid Page Type\n");
            return -1;
    }

    // should not get here
    PrintError("Should not get here....\n");
    return -1;
}

static inline int handle_shadow_invlpg_64(struct guest_info * info, addr_t vaddr) {
    PrintDebug("INVLPG64 - %p\n", (void *)vaddr);

    int ret = v3_drill_host_pt_64(info, info->ctrl_regs.cr3, vaddr, invalidation_cb_64, NULL);

    if (ret == -1) {
        PrintError("Page table drill returned error.... \n");
        PrintHostPageTree(info, vaddr, info->ctrl_regs.cr3);
    }

    return (ret == -1) ? -1 : 0;
}
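
/* How the drill/callback pair works: v3_drill_host_pt_64() walks the shadow
 * (host) page tables for vaddr, invoking invalidation_cb_64() once per level.
 * Judging from the callback's usage here, its return value steers the walk:
 * 1 stops it (the translation is already dead, or a large-page mapping was
 * just cleared), 0 continues to the next level, and -1 signals an error.
 * Only the entry that actually maps vaddr has its present bit cleared, so a
 * later fault on that address rebuilds the mapping lazily.
 */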