/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
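
/*
 * 64 bit shadow paging support: builds shadow page tables that mirror
 * the guest's four-level (PML4 -> PDP -> PD -> PT) tables, and keeps
 * them consistent on page faults and INVLPG.
 */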
static inline int activate_shadow_pt_64(struct guest_info * info) {
    struct cr3_64 * shadow_cr3 = (struct cr3_64 *)&(info->ctrl_regs.cr3);
    struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->shdw_pg_state.guest_cr3);
    struct shadow_page_data * shadow_pt = create_new_shadow_pt(info);
    addr_t shadow_pt_addr = shadow_pt->page_pa;

    // Because this is a new CR3 load, the allocated page becomes the new CR3 value
    shadow_pt->cr3 = shadow_pt->page_pa;

    PrintDebug("Top level Shadow page pa=%p\n", (void *)shadow_pt_addr);

    shadow_cr3->pml4t_base_addr = PAGE_BASE_ADDR_4KB(shadow_pt_addr);
    PrintDebug("Creating new 64 bit shadow page table %p\n", (void *)BASE_TO_PAGE_ADDR(shadow_cr3->pml4t_base_addr));

    shadow_cr3->pwt = guest_cr3->pwt;
    shadow_cr3->pcd = guest_cr3->pcd;

    return 0;
}
/*
 * 64 bit Page table fault handlers
 */
static int handle_2MB_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pte64_t * shadow_pt, pde64_2MB_t * large_guest_pde, int speculative);

static int handle_pte_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pte64_t * shadow_pt, pte64_t * guest_pt, int speculative);

static int handle_pde_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pde64_t * shadow_pd, pde64_t * guest_pd, int speculative);

static int handle_pdpe_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                           pdpe64_t * shadow_pdp, pdpe64_t * guest_pdp, int speculative);
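
/*
 * Each handler below processes one level of the walk. At its level it:
 *   1. checks the faulting access against both the guest entry and the
 *      shadow entry,
 *   2. injects the fault into the guest when the guest's own entry
 *      denies the access, and
 *   3. otherwise allocates or refreshes the shadow entry and recurses
 *      into the next level.
 */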
static inline int handle_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code, int speculative) {
    pml4e64_t * guest_pml = NULL;
    pml4e64_t * shadow_pml = CR3_TO_PML4E64_VA(info->ctrl_regs.cr3);
    addr_t guest_cr3 = CR3_TO_PML4E64_PA(info->shdw_pg_state.guest_cr3);
    pt_access_status_t guest_pml4e_access;
    pt_access_status_t shadow_pml4e_access;
    pml4e64_t * guest_pml4e = NULL;
    pml4e64_t * shadow_pml4e = (pml4e64_t *)&(shadow_pml[PML4E64_INDEX(fault_addr)]);

    PrintDebug("64 bit Shadow page fault handler: %p\n", (void *)fault_addr);
    PrintDebug("Handling PML fault\n");

    if (guest_pa_to_host_va(info, guest_cr3, (addr_t *)&guest_pml) == -1) {
        PrintError("Invalid Guest PML4 Address: 0x%p\n", (void *)guest_cr3);
        return -1;
    }
    guest_pml4e = (pml4e64_t *)&(guest_pml[PML4E64_INDEX(fault_addr)]);

    PrintDebug("Checking Guest %p\n", (void *)guest_pml);
    // Check the guest page permissions
    guest_pml4e_access = v3_can_access_pml4e64(guest_pml, fault_addr, error_code);

    PrintDebug("Checking shadow %p\n", (void *)shadow_pml);
    // Check the shadow page permissions
    shadow_pml4e_access = v3_can_access_pml4e64(shadow_pml, fault_addr, error_code);
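
    /* is_guest_pf() compares the two results: if the guest's own entry
     * would have denied this access, the fault belongs to the guest and
     * is injected; a shadow-only discrepancy is repaired silently. */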
    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pml4e_access, shadow_pml4e_access) == 1) {
        PrintDebug("Injecting PML4E pf to guest: (guest access error=%d) (pf error code=%d)\n",
                   *(uint_t *)&guest_pml4e_access, *(uint_t *)&error_code);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    }
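
    /* From here on the discrepancy is in the shadow entry itself:
     * user-access and unexpected permission errors are reflected to the
     * guest; a not-present shadow entry is filled in below. */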
    if (shadow_pml4e_access == PT_ACCESS_USER_ERROR) {
        // PML4 Entry marked non-user
        PrintDebug("Shadow Paging User access error (shadow_pml4e_access=0x%x, guest_pml4e_access=0x%x)\n",
                   shadow_pml4e_access, guest_pml4e_access);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    } else if ((shadow_pml4e_access != PT_ACCESS_NOT_PRESENT) &&
               (shadow_pml4e_access != PT_ACCESS_OK)) {
        // inject page fault in guest
        inject_guest_pf(info, fault_addr, error_code);
        PrintDebug("Unknown Error occurred (shadow_pml4e_access=%d)\n", shadow_pml4e_access);
        PrintDebug("Manual says to inject a page fault into the guest\n");
        return 0;
    }
    pdpe64_t * shadow_pdp = NULL;
    pdpe64_t * guest_pdp = NULL;

    // Get the next shadow page level, allocate if not present
    if (shadow_pml4e_access == PT_ACCESS_NOT_PRESENT) {
        struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
        shadow_pdp = (pdpe64_t *)V3_VAddr((void *)shdw_page->page_pa);

        shadow_pml4e->present = 1;
        shadow_pml4e->user_page = guest_pml4e->user_page;
        shadow_pml4e->writable = guest_pml4e->writable;

        // VMM Specific options
        shadow_pml4e->write_through = 0;
        shadow_pml4e->cache_disable = 0;

        guest_pml4e->accessed = 1;

        shadow_pml4e->pdp_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pdp = (pdpe64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pml4e->pdp_base_addr));
    }

    // Continue processing at the next level

    if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr), (addr_t *)&guest_pdp) == -1) {
        // Machine check the guest
        PrintDebug("Invalid Guest PDP Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr));
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    if (handle_pdpe_shadow_pagefault_64(info, fault_addr, error_code, shadow_pdp, guest_pdp, speculative) == -1) {
        PrintError("Error handling Page fault caused by PDPE\n");
        return -1;
    }

    return 0;
}
// For now we are not going to handle 1 Gigabyte pages
static int handle_pdpe_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                           pdpe64_t * shadow_pdp, pdpe64_t * guest_pdp, int speculative) {
    pt_access_status_t guest_pdpe_access;
    pt_access_status_t shadow_pdpe_access;
    pdpe64_t * guest_pdpe = (pdpe64_t *)&(guest_pdp[PDPE64_INDEX(fault_addr)]);
    pdpe64_t * shadow_pdpe = (pdpe64_t *)&(shadow_pdp[PDPE64_INDEX(fault_addr)]);

    PrintDebug("Handling PDP fault\n");

    if (fault_addr == 0) {
        PrintDebug("Guest Page Tree for guest virtual address zero fault\n");
        PrintGuestPageTree(info, fault_addr, (addr_t)(info->shdw_pg_state.guest_cr3));
        PrintDebug("Host Page Tree for guest virtual address zero fault\n");
        PrintHostPageTree(info, fault_addr, (addr_t)(info->ctrl_regs.cr3));
    }
    // Check the guest page permissions
    guest_pdpe_access = v3_can_access_pdpe64(guest_pdp, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pdpe_access = v3_can_access_pdpe64(shadow_pdp, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pdpe_access, shadow_pdpe_access) == 1) {
        PrintDebug("Injecting PDPE pf to guest: (guest access error=%d) (pf error code=%d)\n",
                   *(uint_t *)&guest_pdpe_access, *(uint_t *)&error_code);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    }

    if (shadow_pdpe_access == PT_ACCESS_USER_ERROR) {
        // PDP Entry marked non-user
        PrintDebug("Shadow Paging User access error (shadow_pdpe_access=0x%x, guest_pdpe_access=0x%x)\n",
                   shadow_pdpe_access, guest_pdpe_access);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    } else if ((shadow_pdpe_access != PT_ACCESS_NOT_PRESENT) &&
               (shadow_pdpe_access != PT_ACCESS_OK)) {
        // inject page fault in guest
        inject_guest_pf(info, fault_addr, error_code);
        PrintDebug("Unknown Error occurred (shadow_pdpe_access=%d)\n", shadow_pdpe_access);
        PrintDebug("Manual says to inject a page fault into the guest\n");
        return 0;
    }
    pde64_t * shadow_pd = NULL;
    pde64_t * guest_pd = NULL;

    // Get the next shadow page level, allocate if not present
    if (shadow_pdpe_access == PT_ACCESS_NOT_PRESENT) {
        struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
        shadow_pd = (pde64_t *)V3_VAddr((void *)shdw_page->page_pa);

        shadow_pdpe->present = 1;
        shadow_pdpe->user_page = guest_pdpe->user_page;
        shadow_pdpe->writable = guest_pdpe->writable;

        // VMM Specific options
        shadow_pdpe->write_through = 0;
        shadow_pdpe->cache_disable = 0;

        guest_pdpe->accessed = 1;

        shadow_pdpe->pd_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pd = (pde64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pdpe->pd_base_addr));
    }

    // Continue processing at the next level

    if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr), (addr_t *)&guest_pd) == -1) {
        // Machine check the guest
        PrintDebug("Invalid Guest PD Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr));
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    if (handle_pde_shadow_pagefault_64(info, fault_addr, error_code, shadow_pd, guest_pd, speculative) == -1) {
        PrintError("Error handling Page fault caused by PDE\n");
        return -1;
    }

    return 0;
}
static int handle_pde_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pde64_t * shadow_pd, pde64_t * guest_pd, int speculative) {
    pt_access_status_t guest_pde_access;
    pt_access_status_t shadow_pde_access;
    pde64_t * guest_pde = (pde64_t *)&(guest_pd[PDE64_INDEX(fault_addr)]);
    pde64_t * shadow_pde = (pde64_t *)&(shadow_pd[PDE64_INDEX(fault_addr)]);

    PrintDebug("Handling PDE fault\n");

    // Check the guest page permissions
    guest_pde_access = v3_can_access_pde64(guest_pd, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pde_access = v3_can_access_pde64(shadow_pd, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pde_access, shadow_pde_access) == 1) {
        PrintDebug("Injecting PDE pf to guest: (guest access error=%d) (pf error code=%d)\n",
                   *(uint_t *)&guest_pde_access, *(uint_t *)&error_code);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    }

    if (shadow_pde_access == PT_ACCESS_USER_ERROR) {
        // PDE Entry marked non-user
        PrintDebug("Shadow Paging User access error (shadow_pde_access=0x%x, guest_pde_access=0x%x)\n",
                   shadow_pde_access, guest_pde_access);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    } else if ((shadow_pde_access == PT_ACCESS_WRITE_ERROR) &&
               (guest_pde->large_page == 1)) {

        // A write to a 2MB guest page we had mapped read-only for dirty
        // tracking: set the guest dirty bit and restore write permission
        ((pde64_2MB_t *)guest_pde)->dirty = 1;
        shadow_pde->writable = guest_pde->writable;

        //PrintDebug("Returning due to large page Write Error\n");
        //PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);

        return 0;
    } else if ((shadow_pde_access != PT_ACCESS_NOT_PRESENT) &&
               (shadow_pde_access != PT_ACCESS_OK)) {
        // inject page fault in guest
        inject_guest_pf(info, fault_addr, error_code);
        PrintDebug("Unknown Error occurred (shadow_pde_access=%d)\n", shadow_pde_access);
        PrintDebug("Manual says to inject a page fault into the guest\n");
        return 0;
    }
    pte64_t * shadow_pt = NULL;
    pte64_t * guest_pt = NULL;

    // Get the next shadow page level, allocate if not present
    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
        struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
        shadow_pt = (pte64_t *)V3_VAddr((void *)shdw_page->page_pa);

        PrintDebug("Creating new shadow PT: %p\n", shadow_pt);

        shadow_pde->present = 1;
        shadow_pde->user_page = guest_pde->user_page;

        if (guest_pde->large_page == 0) {
            shadow_pde->writable = guest_pde->writable;
        } else {
            // This large page flag is temporary until we can get a working cache....
            ((pde64_2MB_t *)guest_pde)->vmm_info = V3_LARGE_PG;
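
            /* Emulate the dirty bit for the 2MB page: leave it read-only
             * until the guest actually writes, then set the guest dirty
             * bit and enable writes (mirrors the PTE-level dirty
             * tracking in handle_pte_shadow_pagefault_64 below). */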
            if (error_code.write) {
                shadow_pde->writable = guest_pde->writable;
                ((pde64_2MB_t *)guest_pde)->dirty = 1;
            } else {
                shadow_pde->writable = 0;
                ((pde64_2MB_t *)guest_pde)->dirty = 0;
            }
        }

        // VMM Specific options
        shadow_pde->write_through = 0;
        shadow_pde->cache_disable = 0;
        shadow_pde->global_page = 0;

        guest_pde->accessed = 1;

        shadow_pde->pt_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pt = (pte64_t *)V3_VAddr((void *)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr));
    }

    // Continue processing at the next level
    if (guest_pde->large_page == 0) {
        if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t *)&guest_pt) == -1) {
            // Machine check the guest
            PrintDebug("Invalid Guest PT Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
            v3_raise_exception(info, MC_EXCEPTION);
            return 0;
        }

        if (handle_pte_shadow_pagefault_64(info, fault_addr, error_code, shadow_pt, guest_pt, speculative) == -1) {
            PrintError("Error handling Page fault caused by PTE\n");
            return -1;
        }
    } else {
        if (handle_2MB_shadow_pagefault_64(info, fault_addr, error_code, shadow_pt, (pde64_2MB_t *)guest_pde, speculative) == -1) {
            PrintError("Error handling large pagefault\n");
            return -1;
        }
    }

    return 0;
}
static int handle_pte_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pte64_t * shadow_pt, pte64_t * guest_pt, int speculative) {
    pt_access_status_t guest_pte_access;
    pt_access_status_t shadow_pte_access;
    pte64_t * guest_pte = (pte64_t *)&(guest_pt[PTE64_INDEX(fault_addr)]);
    pte64_t * shadow_pte = (pte64_t *)&(shadow_pt[PTE64_INDEX(fault_addr)]);
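    // guest_pa: the guest-physical address being touched, reconstructed
    // from the guest PTE's frame number plus the in-page fault offset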
    addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);
    // struct shadow_page_state * state = &(info->shdw_pg_state);

    PrintDebug("Handling PTE fault\n");

    struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_pa);
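
    /* The shadow region describes how this guest-physical page is backed
     * on the host (allocated RAM, a write hook, a full hook, etc.). */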
    if ((shdw_reg == NULL) ||
        (shdw_reg->host_type == SHDW_REGION_INVALID)) {
        // Inject a machine check in the guest
        PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_pa);
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    // Check the guest page permissions
    guest_pte_access = v3_can_access_pte64(guest_pt, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pte_access = v3_can_access_pte64(shadow_pt, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pte_access, shadow_pte_access) == 1) {
        PrintDebug("Access error injecting pf to guest (guest access error=%d) (pf error code=%d)\n",
                   guest_pte_access, *(uint_t *)&error_code);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest re-entry will flush page tables and everything should now work
        PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
        return 0;
    }
    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Page Table Entry Not Present
        PrintDebug("guest_pa = %p\n", (void *)guest_pa);

        if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
            (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
            addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, guest_pa);

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);

            shadow_pte->present = guest_pte->present;
            shadow_pte->user_page = guest_pte->user_page;

            // set according to VMM policy
            shadow_pte->write_through = 0;
            shadow_pte->cache_disable = 0;
            shadow_pte->global_page = 0;

            guest_pte->accessed = 1;
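
            /* Dirty-bit emulation: only hand out write permission once the
             * guest dirty bit is (or can be) set. A read fault on a clean
             * page is mapped read-only so the next write traps here and we
             * get to set guest_pte->dirty before enabling writes. */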
            if (guest_pte->dirty == 1) {
                shadow_pte->writable = guest_pte->writable;
            } else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
                shadow_pte->writable = guest_pte->writable;
                guest_pte->dirty = 1;
            } else if ((guest_pte->dirty == 0) && (error_code.write == 0)) {
                shadow_pte->writable = 0;
            }

            // dirty flag has been set, check if its in the cache
            /* if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_pa)) != NULL) { */
            /*     if (error_code.write == 1) { */
            /*         state->cached_cr3 = 0; */
            /*         shadow_pte->writable = guest_pte->writable; */
            /*     } else { */
            /*         shadow_pte->writable = 0; */
            /*     } */
            /* } */

            // Write hooks trump all, and are set Read Only
            if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
                shadow_pte->writable = 0;
            }

        } else {
            // Page fault handled by hook functions
            if (v3_handle_mem_full_hook(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
        guest_pte->dirty = 1;

        if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
            if (v3_handle_mem_wr_hook(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        } else {
            PrintDebug("Shadow PTE Write Error\n");
            shadow_pte->writable = guest_pte->writable;
        }

        /* if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_pa)) != NULL) { */
        /*     struct shadow_page_state * state = &(info->shdw_pg_state); */
        /*     PrintDebug("Write operation on Guest Page Table Page\n"); */
        /*     state->cached_cr3 = 0; */
        /* } */

        return 0;

    } else {
        // Inject page fault into the guest
        inject_guest_pf(info, fault_addr, error_code);
        PrintError("PTE Page fault fell through... Not sure if this should ever happen\n");
        PrintError("Manual says to inject a page fault into the guest\n");
        return -1;
    }

    return 0;
}
static int handle_2MB_shadow_pagefault_64(struct guest_info * info,
                                          addr_t fault_addr, pf_error_t error_code,
                                          pte64_t * shadow_pt, pde64_2MB_t * large_guest_pde, int speculative)
{
    pt_access_status_t shadow_pte_access = v3_can_access_pte64(shadow_pt, fault_addr, error_code);
    pte64_t * shadow_pte = (pte64_t *)&(shadow_pt[PTE64_INDEX(fault_addr)]);
    addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_2MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_2MB(fault_addr);
    // struct shadow_page_state * state = &(info->shdw_pg_state);

    PrintDebug("Handling 2MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t *)&error_code);
    PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);

    struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_fault_pa);

    if ((shdw_reg == NULL) ||
        (shdw_reg->host_type == SHDW_REGION_INVALID)) {
        // Inject a machine check in the guest
        PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest re-entry will flush tables and everything should now work
        PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
        //PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
        return 0;
    }
    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Get the guest physical address of the fault

        if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
            (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
            addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, guest_fault_pa);

            PrintDebug("Shadow PA=%p, ShadowPTE=%p\n", (void *)shadow_pa, (void *)shadow_pte);

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
            PrintDebug("Updated shadow PTE page base address\n");

            shadow_pte->present = 1;

            /* We are assuming that the PDE entry has precedence,
             * so the Shadow PDE will mirror the guest PDE settings
             * and we don't have to worry about them here.
             */
            shadow_pte->user_page = 1;
            /* if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_fault_pa)) != NULL) { */
            /*     // Check if the entry is a page table... */
            /*     PrintDebug("Marking page as Guest Page Table (large page)\n"); */
            /*     shadow_pte->writable = 0; */
            /* } else */ if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
                shadow_pte->writable = 0;
            } else {
                shadow_pte->writable = 1;
            }

            // set according to VMM policy
            shadow_pte->write_through = 0;
            shadow_pte->cache_disable = 0;
            shadow_pte->global_page = 0;

        } else {
            // Handle hooked pages as well as other special pages
            // if (handle_special_page_fault(info, fault_addr, guest_fault_pa, error_code) == -1) {
            if (v3_handle_mem_full_hook(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {

        if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
            if (v3_handle_mem_wr_hook(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }

        /* if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_fault_pa)) != NULL) { */
        /*     struct shadow_page_state * state = &(info->shdw_pg_state); */
        /*     PrintDebug("Write operation on Guest Page Table Page (large page)\n"); */
        /*     state->cached_cr3 = 0; */
        /*     shadow_pte->writable = 1; */
        /* } */

    } else {
        PrintError("Error in large page fault handler...\n");
        PrintError("This case should have been handled at the top level handler\n");
        return -1;
    }

    // PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
    PrintDebug("Returning from large page fault handler\n");
    return 0;
}
static int invalidation_cb_64(struct guest_info * info, page_type_t type,
                              addr_t vaddr, addr_t page_ptr, addr_t page_pa,
                              void * private_data) {

    switch (type) {
        case PAGE_PML464: {
            pml4e64_t * pml = (pml4e64_t *)page_ptr;

            if (pml[PML4E64_INDEX(vaddr)].present == 0) {
                return 1;
            }
            return 0;
        }
        case PAGE_PDP64: {
            pdpe64_t * pdp = (pdpe64_t *)page_ptr;
            pdpe64_t * pdpe = &(pdp[PDPE64_INDEX(vaddr)]);

            if (pdpe->present == 0) {
                return 1;
            }
            if (pdpe->vmm_info == V3_LARGE_PG) {
                PrintError("1 Gigabyte pages not supported\n");
                return -1;
            }
            return 0;
        }
        case PAGE_PD64: {
            pde64_t * pd = (pde64_t *)page_ptr;
            pde64_t * pde = &(pd[PDE64_INDEX(vaddr)]);

            if (pde->present == 0) {
                return 1;
            }
            // A splintered 2MB mapping: invalidate the whole PDE
            if (pde->vmm_info == V3_LARGE_PG) {
                pde->present = 0;
                return 1;
            }
            return 0;
        }
        case PAGE_PT64: {
            pte64_t * pt = (pte64_t *)page_ptr;

            pt[PTE64_INDEX(vaddr)].present = 0;
            return 1;
        }
        default:
            PrintError("Invalid Page Type\n");
            return -1;
    }

    // should not get here
    PrintError("Should not get here....\n");
    return -1;
}
static inline int handle_shadow_invlpg_64(struct guest_info * info, addr_t vaddr) {
    PrintDebug("INVLPG64 - %p\n", (void *)vaddr);

    int ret = v3_drill_host_pt_64(info, info->ctrl_regs.cr3, vaddr, invalidation_cb_64, NULL);
    if (ret == -1) {
        PrintError("Page table drill returned error.... \n");
        PrintHostPageTree(info, vaddr, info->ctrl_regs.cr3);
    }

    return (ret == -1) ? -1 : 0;
}