static inline int activate_shadow_pt_64(struct guest_info * info) {
    struct cr3_64 * shadow_cr3 = (struct cr3_64 *)&(info->ctrl_regs.cr3);
    struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->shdw_pg_state.guest_cr3);
    struct shadow_page_data * shadow_pt = create_new_shadow_pt(info);
    addr_t shadow_pt_addr = shadow_pt->page_pa;

    // Because this is a new CR3 load, the allocated page becomes the new CR3 value
    shadow_pt->cr3 = shadow_pt->page_pa;

    PrintDebug("Top level Shadow page pa=%p\n", (void *)shadow_pt_addr);

    shadow_cr3->pml4t_base_addr = PAGE_BASE_ADDR_4KB(shadow_pt_addr);
    PrintDebug("Creating new 64 bit shadow page table %p\n", (void *)BASE_TO_PAGE_ADDR(shadow_cr3->pml4t_base_addr));

    shadow_cr3->pwt = guest_cr3->pwt;
    shadow_cr3->pcd = guest_cr3->pcd;

    return 0;
}


/*
 * 64 bit Page table fault handlers
 */
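
/* The handlers below walk the guest and shadow page tables in lock step,
 * one level at a time: PML4E, then PDPE, then PDE, and finally either a
 * 4KB PTE or a 2MB large-page PDE. At each level the fault is reflected
 * into the guest, satisfied by filling in the missing shadow entry, or
 * passed down to the next-level handler.
 */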
static int handle_2MB_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
					  pte64_t * shadow_pt, pde64_2MB_t * large_guest_pde);

static int handle_pte_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
					  pte64_t * shadow_pt, pte64_t * guest_pt);

static int handle_pde_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
					  pde64_t * shadow_pd, pde64_t * guest_pd);

static int handle_pdpe_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
					   pdpe64_t * shadow_pdp, pdpe64_t * guest_pdp);
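
/* All four levels share the same classification step: v3_can_access_*()
 * is evaluated against both the guest and the shadow entry, and
 * is_guest_pf() decides whether the guest's own page tables would have
 * faulted on this access -- if so, the fault is injected back into the
 * guest with inject_guest_pf() instead of being handled by the VMM.
 */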

static inline int handle_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
    pml4e64_t * guest_pml = NULL;
    pml4e64_t * shadow_pml = CR3_TO_PML4E64_VA(info->ctrl_regs.cr3);
    addr_t guest_cr3 = CR3_TO_PML4E64_PA(info->shdw_pg_state.guest_cr3);
    pt_access_status_t guest_pml4e_access;
    pt_access_status_t shadow_pml4e_access;
    pml4e64_t * guest_pml4e = NULL;
    pml4e64_t * shadow_pml4e = (pml4e64_t *)&(shadow_pml[PML4E64_INDEX(fault_addr)]);

    PrintDebug("64 bit Shadow page fault handler: %p\n", (void *)fault_addr);
    PrintDebug("Handling PML fault\n");

    if (guest_pa_to_host_va(info, guest_cr3, (addr_t *)&guest_pml) == -1) {
	PrintError("Invalid Guest PML4E Address: 0x%p\n", (void *)guest_cr3);
	return -1;
    }

    guest_pml4e = (pml4e64_t *)&(guest_pml[PML4E64_INDEX(fault_addr)]);

    PrintDebug("Checking Guest %p\n", (void *)guest_pml);
    // Check the guest page permissions
    guest_pml4e_access = v3_can_access_pml4e64(guest_pml, fault_addr, error_code);

    PrintDebug("Checking shadow %p\n", (void *)shadow_pml);
    // Check the shadow page permissions
    shadow_pml4e_access = v3_can_access_pml4e64(shadow_pml, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pml4e_access, shadow_pml4e_access) == 1) {
	PrintDebug("Injecting PML4E pf to guest: (guest access error=%d) (pf error code=%d)\n",
		   *(uint_t *)&guest_pml4e_access, *(uint_t *)&error_code);
	inject_guest_pf(info, fault_addr, error_code);
	return 0;
    }

    if (shadow_pml4e_access == PT_ACCESS_USER_ERROR) {
	// PML4 Entry marked non-user
	PrintDebug("Shadow Paging User access error (shadow_pml4e_access=0x%x, guest_pml4e_access=0x%x)\n",
		   shadow_pml4e_access, guest_pml4e_access);
	inject_guest_pf(info, fault_addr, error_code);
	return 0;
    } else if ((shadow_pml4e_access != PT_ACCESS_NOT_PRESENT) &&
	       (shadow_pml4e_access != PT_ACCESS_OK)) {
	// Inject page fault into the guest
	inject_guest_pf(info, fault_addr, error_code);
	PrintDebug("Unknown Error occurred (shadow_pml4e_access=%d)\n", shadow_pml4e_access);
	PrintDebug("Manual Says to inject page fault into guest\n");
	return 0;
    }
    pdpe64_t * shadow_pdp = NULL;
    pdpe64_t * guest_pdp = NULL;

    // Get the next shadow page level, allocate if not present
    if (shadow_pml4e_access == PT_ACCESS_NOT_PRESENT) {
	struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
	shadow_pdp = (pdpe64_t *)V3_VAddr((void *)shdw_page->page_pa);

	shadow_pml4e->present = 1;
	shadow_pml4e->user_page = guest_pml4e->user_page;
	shadow_pml4e->writable = guest_pml4e->writable;

	// VMM Specific options
	shadow_pml4e->write_through = 0;
	shadow_pml4e->cache_disable = 0;

	guest_pml4e->accessed = 1;

	shadow_pml4e->pdp_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
	shadow_pdp = (pdpe64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pml4e->pdp_base_addr));
    }

    // Continue processing at the next level
    if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr), (addr_t *)&guest_pdp) == -1) {
	// Machine check the guest
	PrintDebug("Invalid Guest PDP Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr));
	v3_raise_exception(info, MC_EXCEPTION);
	return 0;
    }

    if (handle_pdpe_shadow_pagefault_64(info, fault_addr, error_code, shadow_pdp, guest_pdp) == -1) {
	PrintError("Error handling Page fault caused by PDPE\n");
	return -1;
    }

    return 0;
}

// For now we are not going to handle 1 Gigabyte pages
static int handle_pdpe_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
					   pdpe64_t * shadow_pdp, pdpe64_t * guest_pdp) {
    pt_access_status_t guest_pdpe_access;
    pt_access_status_t shadow_pdpe_access;
    pdpe64_t * guest_pdpe = (pdpe64_t *)&(guest_pdp[PDPE64_INDEX(fault_addr)]);
    pdpe64_t * shadow_pdpe = (pdpe64_t *)&(shadow_pdp[PDPE64_INDEX(fault_addr)]);

    PrintDebug("Handling PDP fault\n");

    // Check the guest page permissions
    guest_pdpe_access = v3_can_access_pdpe64(guest_pdp, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pdpe_access = v3_can_access_pdpe64(shadow_pdp, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pdpe_access, shadow_pdpe_access) == 1) {
	PrintDebug("Injecting PDPE pf to guest: (guest access error=%d) (pf error code=%d)\n",
		   *(uint_t *)&guest_pdpe_access, *(uint_t *)&error_code);
	inject_guest_pf(info, fault_addr, error_code);
	return 0;
    }

    if (shadow_pdpe_access == PT_ACCESS_USER_ERROR) {
	// PDP Entry marked non-user
	PrintDebug("Shadow Paging User access error (shadow_pdpe_access=0x%x, guest_pdpe_access=0x%x)\n",
		   shadow_pdpe_access, guest_pdpe_access);
	inject_guest_pf(info, fault_addr, error_code);
	return 0;
    } else if ((shadow_pdpe_access != PT_ACCESS_NOT_PRESENT) &&
	       (shadow_pdpe_access != PT_ACCESS_OK)) {
	// Inject page fault into the guest
	inject_guest_pf(info, fault_addr, error_code);
	PrintDebug("Unknown Error occurred (shadow_pdpe_access=%d)\n", shadow_pdpe_access);
	PrintDebug("Manual Says to inject page fault into guest\n");
	return 0;
    }

    pde64_t * shadow_pd = NULL;
    pde64_t * guest_pd = NULL;

    // Get the next shadow page level, allocate if not present
    if (shadow_pdpe_access == PT_ACCESS_NOT_PRESENT) {
	struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
	shadow_pd = (pde64_t *)V3_VAddr((void *)shdw_page->page_pa);

	shadow_pdpe->present = 1;
	shadow_pdpe->user_page = guest_pdpe->user_page;
	shadow_pdpe->writable = guest_pdpe->writable;

	// VMM Specific options
	shadow_pdpe->write_through = 0;
	shadow_pdpe->cache_disable = 0;

	guest_pdpe->accessed = 1;

	shadow_pdpe->pd_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
	shadow_pd = (pde64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pdpe->pd_base_addr));
    }

    // Continue processing at the next level
    if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr), (addr_t *)&guest_pd) == -1) {
	// Machine check the guest
	PrintDebug("Invalid Guest PD Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr));
	v3_raise_exception(info, MC_EXCEPTION);
	return 0;
    }

    if (handle_pde_shadow_pagefault_64(info, fault_addr, error_code, shadow_pd, guest_pd) == -1) {
	PrintError("Error handling Page fault caused by PDE\n");
	return -1;
    }

    return 0;
}

static int handle_pde_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
					  pde64_t * shadow_pd, pde64_t * guest_pd) {
    pt_access_status_t guest_pde_access;
    pt_access_status_t shadow_pde_access;
    pde64_t * guest_pde = (pde64_t *)&(guest_pd[PDE64_INDEX(fault_addr)]);
    pde64_t * shadow_pde = (pde64_t *)&(shadow_pd[PDE64_INDEX(fault_addr)]);

    PrintDebug("Handling PDE fault\n");

    // Check the guest page permissions
    guest_pde_access = v3_can_access_pde64(guest_pd, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pde_access = v3_can_access_pde64(shadow_pd, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pde_access, shadow_pde_access) == 1) {
	PrintDebug("Injecting PDE pf to guest: (guest access error=%d) (pf error code=%d)\n",
		   *(uint_t *)&guest_pde_access, *(uint_t *)&error_code);
	inject_guest_pf(info, fault_addr, error_code);
	return 0;
    }

    if (shadow_pde_access == PT_ACCESS_USER_ERROR) {
	// PDE Entry marked non-user
	PrintDebug("Shadow Paging User access error (shadow_pde_access=0x%x, guest_pde_access=0x%x)\n",
		   shadow_pde_access, guest_pde_access);
	inject_guest_pf(info, fault_addr, error_code);
	return 0;
    } else if ((shadow_pde_access == PT_ACCESS_WRITE_ERROR) &&
	       (guest_pde->large_page == 1)) {
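	/* A write to a 2MB guest page whose shadow mapping is read-only:
	 * record the write in the guest's dirty bit and grant write access.
	 * (The allocation path below initially maps large pages read-only
	 * on a read fault, precisely so that this case will trigger.)
	 */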
	((pde64_2MB_t *)guest_pde)->dirty = 1;
	shadow_pde->writable = guest_pde->writable;

	PrintDebug("Returning due to large page Write Error\n");
	PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);

	return 0;
    } else if ((shadow_pde_access != PT_ACCESS_NOT_PRESENT) &&
	       (shadow_pde_access != PT_ACCESS_OK)) {
	// Inject page fault into the guest
	inject_guest_pf(info, fault_addr, error_code);
	PrintDebug("Unknown Error occurred (shadow_pde_access=%d)\n", shadow_pde_access);
	PrintDebug("Manual Says to inject page fault into guest\n");
	return 0;
    }

    pte64_t * shadow_pt = NULL;
    pte64_t * guest_pt = NULL;

    // Get the next shadow page level, allocate if not present
    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
	struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
	shadow_pt = (pte64_t *)V3_VAddr((void *)shdw_page->page_pa);

	PrintDebug("Creating new shadow PT: %p\n", shadow_pt);

	shadow_pde->present = 1;
	shadow_pde->user_page = guest_pde->user_page;

	if (guest_pde->large_page == 0) {
	    shadow_pde->writable = guest_pde->writable;
	} else {
	    if (error_code.write) {
		shadow_pde->writable = guest_pde->writable;
		((pde64_2MB_t *)guest_pde)->dirty = 1;
	    } else {
		shadow_pde->writable = 0;
		((pde64_2MB_t *)guest_pde)->dirty = 0;
	    }
	}
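	/* A clean large page is mapped read-only so the guest's first write
	 * traps back into this handler (the WRITE_ERROR case above), letting
	 * the VMM emulate the hardware dirty-bit tracking that the guest
	 * expects from its own page tables.
	 */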
	// VMM Specific options
	shadow_pde->write_through = 0;
	shadow_pde->cache_disable = 0;
	shadow_pde->global_page = 0;

	guest_pde->accessed = 1;

	shadow_pde->pt_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
	shadow_pt = (pte64_t *)V3_VAddr((void *)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr));
    }

    // Continue processing at the next level
    if (guest_pde->large_page == 0) {
	if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t *)&guest_pt) == -1) {
	    // Machine check the guest
	    PrintDebug("Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
	    v3_raise_exception(info, MC_EXCEPTION);
	    return 0;
	}

	if (handle_pte_shadow_pagefault_64(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) {
	    PrintError("Error handling Page fault caused by PTE\n");
	    return -1;
	}
    } else {
	if (handle_2MB_shadow_pagefault_64(info, fault_addr, error_code, shadow_pt, (pde64_2MB_t *)guest_pde) == -1) {
	    PrintError("Error handling large pagefault\n");
	    return -1;
	}
    }

    return 0;
}

static int handle_pte_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
					  pte64_t * shadow_pt, pte64_t * guest_pt) {
    pt_access_status_t guest_pte_access;
    pt_access_status_t shadow_pte_access;
    pte64_t * guest_pte = (pte64_t *)&(guest_pt[PTE64_INDEX(fault_addr)]);
    pte64_t * shadow_pte = (pte64_t *)&(shadow_pt[PTE64_INDEX(fault_addr)]);
    addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);
    // struct shadow_page_state * state = &(info->shdw_pg_state);

    PrintDebug("Handling PTE fault\n");

    struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_pa);
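
    /* Map the faulting guest physical address to the host memory region
     * backing it; a lookup failure means the guest touched physical memory
     * that was never mapped into the VM, which is treated as a hardware
     * error below.
     */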
    if ((shdw_reg == NULL) ||
	(shdw_reg->host_type == SHDW_REGION_INVALID)) {
	// Inject a machine check in the guest
	PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_pa);
	v3_raise_exception(info, MC_EXCEPTION);
	return 0;
    }

    // Check the guest page permissions
    guest_pte_access = v3_can_access_pte64(guest_pt, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pte_access = v3_can_access_pte64(shadow_pt, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pte_access, shadow_pte_access) == 1) {
	PrintDebug("Access error injecting pf to guest (guest access error=%d) (pf error code=%d)\n",
		   guest_pte_access, *(uint_t *)&error_code);
	inject_guest_pf(info, fault_addr, error_code);
	return 0;
    }

    if (shadow_pte_access == PT_ACCESS_OK) {
	// Inconsistent state...
	// Guest Re-Entry will flush page tables and everything should now work
	PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
	return 0;
    }

    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
	// Page Table Entry Not Present
	PrintDebug("guest_pa=%p\n", (void *)guest_pa);

	if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
	    (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
	    addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, guest_pa);

	    shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);

	    shadow_pte->present = guest_pte->present;
	    shadow_pte->user_page = guest_pte->user_page;

	    // Set according to VMM policy
	    shadow_pte->write_through = 0;
	    shadow_pte->cache_disable = 0;
	    shadow_pte->global_page = 0;

	    guest_pte->accessed = 1;

	    if (guest_pte->dirty == 1) {
		shadow_pte->writable = guest_pte->writable;
	    } else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
		shadow_pte->writable = guest_pte->writable;
		guest_pte->dirty = 1;
	    } else if ((guest_pte->dirty == 0) && (error_code.write == 0)) {
		shadow_pte->writable = 0;
	    }
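	    /* Dirty-bit emulation at 4KB granularity: a clean page is mapped
	     * read-only so that the guest's first write refaults into the
	     * WRITE_ERROR case below, where the guest dirty bit is set and
	     * write access restored.
	     */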

	    // The dirty flag has been set; check whether the page is in the PTE cache
	    /* if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_pa)) != NULL) { */
	    /*     if (error_code.write == 1) { */
	    /*         state->cached_cr3 = 0; */
	    /*         shadow_pte->writable = guest_pte->writable; */
	    /*     } else { */
	    /*         shadow_pte->writable = 0; */
	    /*     } */
	    /* } */

	    // Write hooks trump all, and are set Read Only
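	    /* (Every guest write to a hooked region must trap so the hook
	     * can run, so its pages stay read-only in the shadow tables even
	     * if the guest marked them writable.)
	     */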
	    if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
		shadow_pte->writable = 0;
	    }
	} else {
	    // Page fault handled by hook functions
	    if (v3_handle_mem_full_hook(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
		PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
		return -1;
	    }
	}
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
	guest_pte->dirty = 1;

	if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
	    if (v3_handle_mem_wr_hook(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
		PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
		return -1;
	    }
	} else {
	    PrintDebug("Shadow PTE Write Error\n");
	    shadow_pte->writable = guest_pte->writable;
	}

	/* if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_pa)) != NULL) { */
	/*     struct shadow_page_state * state = &(info->shdw_pg_state); */
	/*     PrintDebug("Write operation on Guest Page Table Page\n"); */
	/*     state->cached_cr3 = 0; */
	/* } */

	return 0;
    } else {
	// Inject page fault into the guest
	inject_guest_pf(info, fault_addr, error_code);
	PrintError("PTE Page fault fell through... Not sure if this should ever happen\n");
	PrintError("Manual Says to inject page fault into guest\n");
	return -1;
    }

    return 0;
}

static int handle_2MB_shadow_pagefault_64(struct guest_info * info,
					  addr_t fault_addr, pf_error_t error_code,
					  pte64_t * shadow_pt, pde64_2MB_t * large_guest_pde)
{
    pt_access_status_t shadow_pte_access = v3_can_access_pte64(shadow_pt, fault_addr, error_code);
    pte64_t * shadow_pte = (pte64_t *)&(shadow_pt[PTE64_INDEX(fault_addr)]);
    addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_2MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_2MB(fault_addr);
    // struct shadow_page_state * state = &(info->shdw_pg_state);

    PrintDebug("Handling 2MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t *)&error_code);
    PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);

    struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_fault_pa);

    if ((shdw_reg == NULL) ||
	(shdw_reg->host_type == SHDW_REGION_INVALID)) {
	// Inject a machine check in the guest
	PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
	v3_raise_exception(info, MC_EXCEPTION);
	return 0;
    }

    if (shadow_pte_access == PT_ACCESS_OK) {
	// Inconsistent state...
	// Guest Re-Entry will flush tables and everything should now work
	PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
	PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
	return 0;
    }

    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
	// Get the guest physical address of the fault

	if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
	    (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
	    addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, guest_fault_pa);

	    PrintDebug("Shadow PA=%p, ShadowPTE=%p\n", (void *)shadow_pa, (void *)shadow_pte);

	    shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);

	    shadow_pte->present = 1;

	    /* We are assuming that the PDE entry has precedence,
	     * so the Shadow PDE will mirror the guest PDE settings
	     * and we don't have to worry about them here.
	     * Allow everything.
	     */
	    shadow_pte->user_page = 1;

	    /* if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_fault_pa)) != NULL) { */
	    /*     // Check if the entry is a page table... */
	    /*     PrintDebug("Marking page as Guest Page Table (large page)\n"); */
	    /*     shadow_pte->writable = 0; */
	    /* } else */ if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
		shadow_pte->writable = 0;
	    } else {
		shadow_pte->writable = 1;
	    }

	    // Set according to VMM policy
	    shadow_pte->write_through = 0;
	    shadow_pte->cache_disable = 0;
	    shadow_pte->global_page = 0;
	} else {
	    // Handle hooked pages as well as other special pages
	    // if (handle_special_page_fault(info, fault_addr, guest_fault_pa, error_code) == -1) {
	    if (v3_handle_mem_full_hook(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
		PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
		return -1;
	    }
	}
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
	if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
	    if (v3_handle_mem_wr_hook(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
		PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
		return -1;
	    }
	}

	/* if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_fault_pa)) != NULL) { */
	/*     struct shadow_page_state * state = &(info->shdw_pg_state); */
	/*     PrintDebug("Write operation on Guest Page Table Page (large page)\n"); */
	/*     state->cached_cr3 = 0; */
	/*     shadow_pte->writable = 1; */
	/* } */
    } else {
	PrintError("Error in large page fault handler...\n");
	PrintError("This case should have been handled at the top level handler\n");
	return -1;
    }

    PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
    PrintDebug("Returning from large page fault handler\n");
    return 0;
}

static inline int handle_shadow_invlpg_64(struct guest_info * info, addr_t vaddr) {
    PrintError("64 bit shadow paging not implemented\n");
    return -1;
}