/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
#ifdef CONFIG_SHADOW_CACHE

#define PT64_NX_MASK (1ULL << 63)
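
/* Note: the fault handlers below stash the accumulated ("inherited") guest
 * access rights in each shadow PTE's otherwise-unused vmm_info field,
 * reusing the architectural bit positions, e.g.:
 *
 *     shadow_pte->vmm_info = (inherited_ar_writable << 1) | (inherited_ar_user << 2);
 *
 * so the fix_read_pf_64()/fix_write_pf_64() fast paths can test them
 * directly with PT_WRITABLE_MASK and PT_USER_MASK.
 */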

static inline int activate_shadow_pt_64(struct guest_info * core) {
    struct cr3_64 * shadow_cr3 = (struct cr3_64 *)&(core->ctrl_regs.cr3);
    struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(core->shdw_pg_state.guest_cr3);

    struct shadow_page_cache_data * shadow_pt;

    if (core->n_free_shadow_pages < MIN_FREE_SHADOW_PAGES) {
        shadow_free_some_pages(core);
    }

    // Look up (or allocate) the cached top-level shadow page for this guest CR3
    // (without the shadow page cache this would instead be create_new_shadow_pt(core))
    shadow_pt = shadow_page_get_page(core, (addr_t)(guest_cr3->pml4t_base_addr), 4, 0, 0, 0, 0);
    PrintDebug("Activate shadow_pt %p\n", (void *)BASE_TO_PAGE_ADDR(guest_cr3->pml4t_base_addr));

    addr_t shadow_pt_addr = shadow_pt->page_pa;

    // Because this is a new CR3 load the allocated page is the new CR3 value
    shadow_pt->cr3 = shadow_pt->page_pa;

    PrintDebug("Top level Shadow page pa=%p\n", (void *)shadow_pt_addr);

    shadow_cr3->pml4t_base_addr = PAGE_BASE_ADDR_4KB(shadow_pt_addr);
    PrintDebug("Creating new 64 bit shadow page table %p\n", (void *)BASE_TO_PAGE_ADDR(shadow_cr3->pml4t_base_addr));

    shadow_cr3->pwt = guest_cr3->pwt;
    shadow_cr3->pcd = guest_cr3->pcd;

    shadow_topup_caches(core);

    return 0;
}
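
/* A rough sketch of a guest CR3 load with the shadow cache enabled:
 *
 *     guest:  mov %rax, %cr3            ; intercepted by the VMM
 *     vmm:    shdw_pg_state.guest_cr3 = %rax
 *             activate_shadow_pt_64()   ; hardware CR3 now points at the
 *                                       ; cached shadow PML4, while the
 *                                       ; guest continues to see its own CR3
 */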

/*
 * *
 * * 64 bit Page table fault handlers
 * *
 */
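
/* Debug helper: walk every active shadow page table page and dump its
 * non-zero entries. Useful when diffing shadow state against the guest's
 * own page tables.
 */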
static inline void burst_64 (struct guest_info * core) {
    int idx;
    pte64_t * pt, * pte;
    struct shadow_page_cache_data * sp, * node;

    list_for_each_entry_safe(sp, node, &core->active_shadow_pages, link) {
        pt = (pte64_t *)V3_VAddr((void *)sp->page_pa);
        PrintDebug("burst: pt %p\n", (void *)pt);

        for (idx = 0; idx < PT_ENT_PER_PAGE; ++idx) {
            pte = (pte64_t *)&(pt[idx]);
            if (*((uint64_t *)pte)) {
                PrintDebug("%d: s %p\n", idx, (void *)*((uint64_t *)pte));
            }
        }
    }
}

static inline int fix_read_pf_64(pte64_t * shadow_pte, uint_t vmm_info) {

    PrintDebug("\tReadPf, start vmm_info %d\n", vmm_info);

    if ((vmm_info & PT_USER_MASK) && !(shadow_pte->user_page)) {
        shadow_pte->user_page = 1;
        shadow_pte->writable = 0;
        return 1;
    }

    return 0;
}
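
/* A read fault is "fixed" by making the entry user-visible but read-only;
 * the first subsequent write then refaults into fix_write_pf_64(), which
 * can update dirty state and detect writes to shadowed page tables.
 */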
static inline int fix_write_pf_64(struct guest_info * core, pte64_t * shadow_pte, pte64_t * guest_pte,
                                  int user, int * write_pt, addr_t guest_fn, uint_t vmm_info) {

    int writable_shadow;
    struct cr0_64 * guest_cr0;
    struct shadow_page_cache_data * page;

    PrintDebug("\tWritePf, start vmm_info %d\n", vmm_info);

    // Already writable: nothing to fix
    if (shadow_pte->writable) {
        return 0;
    }

    PrintDebug("\tWritePf, pass writable\n");

    writable_shadow = vmm_info & PT_WRITABLE_MASK;
    PrintDebug("\tWritePf, writable_shadow %d\n", writable_shadow);

    if (user) {
        // A user write is only fixable on a user-visible, guest-writable mapping
        if (!(vmm_info & PT_USER_MASK) || !writable_shadow) {
            PrintDebug("\tWritePf: 1st User Check\n");
            return 0;
        }
    } else {
        if (!writable_shadow) {
            guest_cr0 = (struct cr0_64 *)&(core->shdw_pg_state.guest_cr0);
            PrintDebug("\tWritePf: WP %d\n", guest_cr0->wp);

            // With CR0.WP set, supervisor writes to read-only pages must fault
            if (guest_cr0->wp) {
                return 0;
            }

            shadow_pte->user_page = 0;
        }
    }

    if (guest_pte->present == 0) {
        memset((void *)shadow_pte, 0, sizeof(struct pte64));
        PrintDebug("\tWritePf: Guest Not Present\n");
        return 0;
    }

    if (user) {
        // Stop shadowing the target frame if it was being used as a page table
        while ((page = shadow_page_lookup_page(core, guest_fn, 0)) != NULL) {
            shadow_zap_page(core, page);
        }

        PrintDebug("\tWritePf: Zap Page\n");
    } else if ((page = shadow_page_lookup_page(core, guest_fn, 0)) != NULL) {
        // The frame is a shadowed page table: the write must be emulated
        guest_pte->dirty = 1;
        *write_pt = 1;
        PrintDebug("\tWritePf: Write Needed\n");
        return 0;
    }

    shadow_pte->writable = 1;
    guest_pte->dirty = 1;

    rmap_add(core, (addr_t)shadow_pte);

    PrintDebug("\tWritePf: On Writable\n");
    return 1;
}
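
/* Write-fault fixup rules (closely following the classic KVM shadow MMU):
 *   - a user write succeeds only if the mapping is user-visible and
 *     guest-writable (vmm_info carries those inherited rights);
 *   - a supervisor write to a read-only mapping is allowed only when the
 *     guest runs with CR0.WP clear;
 *   - a write targeting a frame currently shadowed as a page table either
 *     zaps the shadow (user) or sets *write_pt so the caller emulates the
 *     write (supervisor).
 */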

static int handle_2MB_shadow_pagefault_64(struct guest_info * core, addr_t fault_addr, pf_error_t error_code,
                                          pte64_t * shadow_pt, pde64_2MB_t * large_guest_pde, uint32_t inherited_ar);

static int handle_pte_shadow_pagefault_64(struct guest_info * core, addr_t fault_addr, pf_error_t error_code,
                                          pte64_t * shadow_pt, pte64_t * guest_pt, uint32_t inherited_ar);

static int handle_pde_shadow_pagefault_64(struct guest_info * core, addr_t fault_addr, pf_error_t error_code,
                                          pde64_t * shadow_pd, pde64_t * guest_pd, uint32_t inherited_ar);

static int handle_pdpe_shadow_pagefault_64(struct guest_info * core, addr_t fault_addr, pf_error_t error_code,
                                           pdpe64_t * shadow_pdp, pdpe64_t * guest_pdp, uint32_t inherited_ar);
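
/* Each handler below follows the same per-level pattern:
 *   1. check the guest entry; if the guest itself caused the violation
 *      (not present, write, user, or NX), inject the page fault back into
 *      the guest;
 *   2. otherwise set the guest entry's accessed bit and narrow the
 *      inherited access rights by this level's permissions;
 *   3. materialize the shadow entry (from the shadow page cache) if it is
 *      not present, and recurse into the next level.
 */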

static inline int handle_shadow_pagefault_64(struct guest_info * core, addr_t fault_addr, pf_error_t error_code) {
    pml4e64_t * guest_pml = NULL;
    pml4e64_t * shadow_pml = CR3_TO_PML4E64_VA(core->ctrl_regs.cr3);
    addr_t guest_cr3 = CR3_TO_PML4E64_PA(core->shdw_pg_state.guest_cr3);
    pt_access_status_t guest_pml4e_access;
    pt_access_status_t shadow_pml4e_access;
    pml4e64_t * guest_pml4e = NULL;
    pml4e64_t * shadow_pml4e = (pml4e64_t *)&(shadow_pml[PML4E64_INDEX(fault_addr)]);

    PrintDebug("64 bit Shadow page fault handler: %p\n", (void *)fault_addr);
    PrintDebug("Handling PML fault\n");

    int metaphysical = 0;
    unsigned hugepage_access = 0;
    addr_t pml4e_base_addr = 0;
    uint32_t inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;

    if (core->n_free_shadow_pages < MIN_FREE_SHADOW_PAGES) {
        shadow_free_some_pages(core);
    }

    shadow_topup_caches(core);

    if (guest_pa_to_host_va(core, guest_cr3, (addr_t *)&guest_pml) == -1) {
        PrintError("Invalid Guest PML4E Address: 0x%p\n", (void *)guest_cr3);
        return -1;
    }

    guest_pml4e = (pml4e64_t *)&(guest_pml[PML4E64_INDEX(fault_addr)]);

    pml4e_base_addr = (addr_t)(guest_pml4e->pdp_base_addr);

    PrintDebug("Checking Guest %p\n", (void *)guest_pml);
    // Check the guest page permissions
    guest_pml4e_access = v3_can_access_pml4e64(guest_pml, fault_addr, error_code);

    PrintDebug("Checking shadow %p\n", (void *)shadow_pml);
    // Check the shadow page permissions
    shadow_pml4e_access = v3_can_access_pml4e64(shadow_pml, fault_addr, error_code);

    // If the guest itself caused the access violation, reflect the fault back
    if (guest_pml4e_access == PT_ACCESS_NOT_PRESENT) {
        error_code.present = 0;
        goto pml4e_error;
    }

    if (guest_pml4e_access == PT_ACCESS_WRITE_ERROR) {
        struct cr0_64 * guest_cr0 = (struct cr0_64 *)&(core->shdw_pg_state.guest_cr0);
        if (error_code.user || guest_cr0->wp) {
            error_code.present = 1;
            goto pml4e_error;
        }
    }

    if (guest_pml4e_access == PT_ACCESS_USER_ERROR) {
        error_code.present = 1;
        goto pml4e_error;
    }

    // NX violations are only architecturally meaningful in long mode
    if (error_code.ifetch == 1 && ((*(uint64_t *)guest_pml4e) & PT64_NX_MASK)) {
        struct efer_64 * guest_efer = (struct efer_64 *)&(core->shdw_pg_state.guest_efer);
        if (guest_efer->lma == 1) {
            goto pml4e_error;
        }
    }

    goto pml4e_cont;

pml4e_error:

    PrintDebug("Injecting PML Pf to Guest: (Guest Access Error = %d) (SHDW Access Error = %d) (Pf Error Code = %d)\n",
               *(uint_t *)&guest_pml4e_access, *(uint_t *)&shadow_pml4e_access, *(uint_t *)&error_code);
    if (inject_guest_pf(core, fault_addr, error_code) == -1) {
        PrintError("Could Not Inject Guest Page Fault\n");
        return -1;
    }
    return 0;

pml4e_cont:

    if (guest_pml4e->accessed == 0) {
        guest_pml4e->accessed = 1;
    }

    // Narrow the inherited access rights by this level's permissions
    inherited_ar &= *(uint64_t *)guest_pml4e;
    PrintDebug("PML: inherited %x\n", inherited_ar);

    pdpe64_t * shadow_pdp = NULL;
    pdpe64_t * guest_pdp = NULL;

    // Get the next shadow page level, allocate if not present

    if (shadow_pml4e_access == PT_ACCESS_NOT_PRESENT) {
        struct shadow_page_cache_data * shdw_page = shadow_page_get_page(core, pml4e_base_addr, 3, metaphysical,
                                                                         hugepage_access, (addr_t)shadow_pml4e, 0);
        shadow_pdp = (pdpe64_t *)V3_VAddr((void *)shdw_page->page_pa);

        shadow_pml4e->present = 1;
        shadow_pml4e->accessed = 1;
        shadow_pml4e->writable = 1;
        shadow_pml4e->user_page = 1;

        shadow_pml4e->pdp_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pdp = (pdpe64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pml4e->pdp_base_addr));
    }

    // Continue processing at the next level

    if (guest_pa_to_host_va(core, BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr), (addr_t *)&guest_pdp) == -1) {
        // Machine check the guest
        PrintError("Invalid Guest PDP Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr));
        v3_raise_exception(core, MC_EXCEPTION);
        return 0;
    }

    if (handle_pdpe_shadow_pagefault_64(core, fault_addr, error_code, shadow_pdp, guest_pdp, inherited_ar) == -1) {
        PrintError("Error handling Page fault caused by PDPE\n");
        return -1;
    }

    return 0;
}

// For now we are not going to handle 1 Gigabyte pages
static int handle_pdpe_shadow_pagefault_64(struct guest_info * core, addr_t fault_addr, pf_error_t error_code,
                                           pdpe64_t * shadow_pdp, pdpe64_t * guest_pdp, uint32_t inherited_ar) {
    pt_access_status_t guest_pdpe_access;
    pt_access_status_t shadow_pdpe_access;
    pdpe64_t * guest_pdpe = (pdpe64_t *)&(guest_pdp[PDPE64_INDEX(fault_addr)]);
    pdpe64_t * shadow_pdpe = (pdpe64_t *)&(shadow_pdp[PDPE64_INDEX(fault_addr)]);

    PrintDebug("Handling PDP fault\n");

    if (fault_addr == 0) {
        PrintDebug("Guest Page Tree for guest virtual address zero fault\n");
        PrintGuestPageTree(core, fault_addr, (addr_t)(core->shdw_pg_state.guest_cr3));
        PrintDebug("Host Page Tree for guest virtual address zero fault\n");
        PrintHostPageTree(core, fault_addr, (addr_t)(core->ctrl_regs.cr3));
    }

    int metaphysical = 0;
    unsigned hugepage_access = 0;

    addr_t pdpe_base_addr = (addr_t)(guest_pdpe->pd_base_addr);

    // Check the guest page permissions
    guest_pdpe_access = v3_can_access_pdpe64(guest_pdp, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pdpe_access = v3_can_access_pdpe64(shadow_pdp, fault_addr, error_code);

    if (guest_pdpe_access == PT_ACCESS_NOT_PRESENT) {
        PrintDebug("Guest PDPE Not Present\n");
        error_code.present = 0;
        goto pdpe_error;
    }

    if (guest_pdpe_access == PT_ACCESS_WRITE_ERROR) {
        struct cr0_64 * guest_cr0 = (struct cr0_64 *)&(core->shdw_pg_state.guest_cr0);
        if (error_code.user || guest_cr0->wp) {
            error_code.present = 1;
            goto pdpe_error;
        }
    }

    if (guest_pdpe_access == PT_ACCESS_USER_ERROR) {
        error_code.present = 1;
        goto pdpe_error;
    }

    if (error_code.ifetch == 1 && ((*(uint64_t *)guest_pdpe) & PT64_NX_MASK)) {
        struct efer_64 * guest_efer = (struct efer_64 *)&(core->shdw_pg_state.guest_efer);
        if (guest_efer->lma == 1) {
            goto pdpe_error;
        }
    }

    goto pdpe_cont;

pdpe_error:

    PrintDebug("Injecting PDPE Pf to Guest: (Guest Access Error = %d) (SHDW Access Error = %d) (Pf Error Code = %d)\n",
               *(uint_t *)&guest_pdpe_access, *(uint_t *)&shadow_pdpe_access, *(uint_t *)&error_code);
    if (inject_guest_pf(core, fault_addr, error_code) == -1) {
        PrintError("Could Not Inject Guest Page Fault\n");
        return -1;
    }
    return 0;

pdpe_cont:

    if (guest_pdpe->accessed == 0) {
        guest_pdpe->accessed = 1;
    }

    inherited_ar &= *(uint64_t *)guest_pdpe;
    PrintDebug("PDPE: inherited %x\n", inherited_ar);

    pde64_t * shadow_pd = NULL;
    pde64_t * guest_pd = NULL;

    // Get the next shadow page level, allocate if not present

    if (shadow_pdpe_access == PT_ACCESS_NOT_PRESENT) {
        struct shadow_page_cache_data * shdw_page = shadow_page_get_page(core, pdpe_base_addr, 2, metaphysical,
                                                                         hugepage_access, (addr_t)shadow_pdpe, 0);

        shadow_pd = (pde64_t *)V3_VAddr((void *)shdw_page->page_pa);

        shadow_pdpe->present = 1;
        shadow_pdpe->accessed = 1;
        shadow_pdpe->writable = 1;
        shadow_pdpe->user_page = 1;

        shadow_pdpe->pd_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pd = (pde64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pdpe->pd_base_addr));
    }

    // Continue processing at the next level

    if (guest_pa_to_host_va(core, BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr), (addr_t *)&guest_pd) == -1) {
        // Machine check the guest
        PrintError("Invalid Guest PD Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr));
        v3_raise_exception(core, MC_EXCEPTION);
        return 0;
    }

    if (handle_pde_shadow_pagefault_64(core, fault_addr, error_code, shadow_pd, guest_pd, inherited_ar) == -1) {
        PrintError("Error handling Page fault caused by PDE\n");
        return -1;
    }

    return 0;
}

static int handle_pde_shadow_pagefault_64(struct guest_info * core, addr_t fault_addr, pf_error_t error_code,
                                          pde64_t * shadow_pd, pde64_t * guest_pd, uint32_t inherited_ar) {
    pt_access_status_t guest_pde_access;
    pt_access_status_t shadow_pde_access;
    pde64_t * guest_pde = (pde64_t *)&(guest_pd[PDE64_INDEX(fault_addr)]);
    pde64_t * shadow_pde = (pde64_t *)&(shadow_pd[PDE64_INDEX(fault_addr)]);

    PrintDebug("Handling PDE fault\n");

    int metaphysical = 0;
    unsigned hugepage_access = 0;

    addr_t pde_base_addr = (addr_t)(guest_pde->pt_base_addr);

    if (guest_pde->large_page == 1) {
        pde_base_addr = (addr_t)PAGE_BASE_ADDR(BASE_TO_PAGE_ADDR_2MB(((pde64_2MB_t *)guest_pde)->page_base_addr));
        metaphysical = 1;
        hugepage_access = (((pde64_2MB_t *)guest_pde)->writable | (((pde64_2MB_t *)guest_pde)->user_page << 1));
    }

    // Check the guest page permissions
    guest_pde_access = v3_can_access_pde64(guest_pd, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pde_access = v3_can_access_pde64(shadow_pd, fault_addr, error_code);

    if (guest_pde_access == PT_ACCESS_NOT_PRESENT) {
        error_code.present = 0;
        goto pde_error;
    }

    if (guest_pde_access == PT_ACCESS_WRITE_ERROR) {
        struct cr0_64 * guest_cr0 = (struct cr0_64 *)&(core->shdw_pg_state.guest_cr0);
        if (error_code.user || guest_cr0->wp) {
            error_code.present = 1;
            goto pde_error;
        }
    }

    if (guest_pde_access == PT_ACCESS_USER_ERROR) {
        error_code.present = 1;
        goto pde_error;
    }

    if (error_code.ifetch == 1 && ((*(uint64_t *)guest_pde) & PT64_NX_MASK)) {
        struct efer_64 * guest_efer = (struct efer_64 *)&(core->shdw_pg_state.guest_efer);
        if (guest_efer->lma == 1) {
            goto pde_error;
        }
    }

    goto pde_cont;

pde_error:

    PrintDebug("Injecting PDE Pf to Guest: (Guest Access Error = %d) (SHDW Access Error = %d) (Pf Error Code = %d)\n",
               *(uint_t *)&guest_pde_access, *(uint_t *)&shadow_pde_access, *(uint_t *)&error_code);
    if (inject_guest_pf(core, fault_addr, error_code) == -1) {
        PrintError("Could Not Inject Guest Page Fault\n");
        return -1;
    }
    return 0;

pde_cont:

    if (guest_pde->accessed == 0) {
        guest_pde->accessed = 1;
    }

    inherited_ar &= *(uint64_t *)guest_pde;
    PrintDebug("PDE: inherited %x\n", inherited_ar);

    pte64_t * shadow_pt = NULL;
    pte64_t * guest_pt = NULL;

    // Get the next shadow page level, allocate if not present

    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
        struct shadow_page_cache_data * shdw_page = shadow_page_get_page(core, pde_base_addr, 1, metaphysical,
                                                                         hugepage_access, (addr_t)shadow_pde, 0);
        shadow_pt = (pte64_t *)V3_VAddr((void *)shdw_page->page_pa);

        PrintDebug("Creating new shadow PT: %p\n", shadow_pt);

        shadow_pde->present = 1;
        shadow_pde->accessed = 1;
        shadow_pde->writable = 1;
        shadow_pde->user_page = 1;

        shadow_pde->pt_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pt = (pte64_t *)V3_VAddr((void *)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr));
    }

    // Continue processing at the next level
    if (guest_pde->large_page == 0) {
        if (guest_pa_to_host_va(core, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t *)&guest_pt) == -1) {
            // Machine check the guest
            PrintError("Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
            v3_raise_exception(core, MC_EXCEPTION);
            return 0;
        }

        if (handle_pte_shadow_pagefault_64(core, fault_addr, error_code, shadow_pt, guest_pt, inherited_ar) == -1) {
            PrintError("Error handling Page fault caused by PTE\n");
            return -1;
        }
    } else {
        if (handle_2MB_shadow_pagefault_64(core, fault_addr, error_code, shadow_pt,
                                           (pde64_2MB_t *)guest_pde, inherited_ar) == -1) {
            PrintError("Error handling large pagefault\n");
            return -1;
        }
    }

    return 0;
}

static int handle_pte_shadow_pagefault_64(struct guest_info * core, addr_t fault_addr, pf_error_t error_code,
                                          pte64_t * shadow_pt, pte64_t * guest_pt, uint32_t inherited_ar) {
    pt_access_status_t guest_pte_access;
    pt_access_status_t shadow_pte_access;
    pte64_t * guest_pte = (pte64_t *)&(guest_pt[PTE64_INDEX(fault_addr)]);
    pte64_t * shadow_pte = (pte64_t *)&(shadow_pt[PTE64_INDEX(fault_addr)]);
    addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);
    // struct shadow_page_state * state = &(core->shdw_pg_state);

    PrintDebug("Handling PTE fault\n");

    struct v3_shadow_region * shdw_reg = v3_get_shadow_region(core->vm_info, core->cpu_id, guest_pa);

    if (shdw_reg == NULL) {
        // Inject a machine check in the guest
        PrintError("Invalid Guest Address in page table (0x%p)\n", (void *)guest_pa);
        v3_raise_exception(core, MC_EXCEPTION);
        return 0;
    }

    // Check the guest page permissions
    guest_pte_access = v3_can_access_pte64(guest_pt, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pte_access = v3_can_access_pte64(shadow_pt, fault_addr, error_code);

    if (guest_pte_access == PT_ACCESS_NOT_PRESENT) {
        error_code.present = 0;
        goto pte_error;
    }

    if (guest_pte_access == PT_ACCESS_WRITE_ERROR) {
        struct cr0_64 * guest_cr0 = (struct cr0_64 *)&(core->shdw_pg_state.guest_cr0);
        if (error_code.user || guest_cr0->wp) {
            error_code.present = 1;
            goto pte_error;
        }
    }

    if (guest_pte_access == PT_ACCESS_USER_ERROR) {
        error_code.present = 1;
        goto pte_error;
    }

    if (error_code.ifetch == 1 && ((*(uint64_t *)guest_pte) & PT64_NX_MASK)) {
        struct efer_64 * guest_efer = (struct efer_64 *)&(core->shdw_pg_state.guest_efer);
        if (guest_efer->lma == 1) {
            goto pte_error;
        }
    }

    goto pte_cont;

pte_error:

    PrintDebug("Injecting PTE Pf to Guest: (Guest Access Error = %d) (SHDW Access Error = %d) (Pf Error Code = %d)\n",
               *(uint_t *)&guest_pte_access, *(uint_t *)&shadow_pte_access, *(uint_t *)&error_code);
    if (inject_guest_pf(core, fault_addr, error_code) == -1) {
        PrintError("Could Not Inject Guest Page Fault\n");
        return -1;
    }
    return 0;

pte_cont:

    if (guest_pte->accessed == 0) {
        guest_pte->accessed = 1;
    }

    inherited_ar &= *(uint64_t *)guest_pte;
    PrintDebug("PTE: inherited %x\n", inherited_ar);

    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Page Table Entry Not Present
        PrintDebug("guest_pa =%p\n", (void *)guest_pa);

        if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
            (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {

            int inherited_ar_user = ((inherited_ar & PT_USER_MASK) == PT_USER_MASK) ? 1 : 0;
            int inherited_ar_writable = ((inherited_ar & PT_WRITABLE_MASK) == PT_WRITABLE_MASK) ? 1 : 0;

            addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, core->cpu_id, guest_pa);

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);

            shadow_pte->present = guest_pte->present;

            shadow_pte->user_page = inherited_ar_user;
            PrintDebug("PTE: inherited shadow_pte_user %d, guest_pte_user %d\n", shadow_pte->user_page, guest_pte->user_page);

            // set according to VMM policy
            shadow_pte->global_page = guest_pte->global_page;

            shadow_pte->accessed = guest_pte->accessed;
            shadow_pte->dirty = guest_pte->dirty;
            shadow_pte->writable = inherited_ar_writable;

            PrintDebug("PTE: inherited shadow_pte_writable %d, guest_pte_writable %d\n", shadow_pte->writable, guest_pte->writable);

            // Write hooks trump all, and are set Read Only
            if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
                shadow_pte->writable = 0;
            }

            // Stash the inherited rights for the fix_*_pf_64() fast paths
            shadow_pte->vmm_info = (inherited_ar_writable << 1) | (inherited_ar_user << 2);

            if (inherited_ar_writable & guest_pte->writable) {
                // If this frame is itself a shadowed page table, map it read-only
                struct shadow_page_cache_data * shadow;
                shadow = shadow_page_lookup_page(core, PAGE_BASE_ADDR(guest_pa), 0);
                if (shadow != NULL) {
                    if (shadow_pte->writable) {
                        shadow_pte->writable = 0;
                    }
                }
            }

            PrintDebug("PTE: Updated Shadow Present %d, Write %d, User %d, Dirty %d, Accessed %d, Global %d\n",
                       shadow_pte->present, shadow_pte->writable, shadow_pte->user_page, shadow_pte->dirty, shadow_pte->accessed,
                       shadow_pte->global_page);
            PrintDebug("PTE: Shadow %p\n", (void *)*((addr_t *)shadow_pte));
            rmap_add(core, (addr_t)shadow_pte);

        } else {
            // Page fault handled by hook functions

            if (v3_handle_mem_full_hook(core, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
        guest_pte->dirty = 1;

        if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
            if (v3_handle_mem_wr_hook(core, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
            // The hooked write has been emulated; don't make the PTE writable
            return 0;
        }

        PrintDebug("Shadow PTE Write Error\n");
        shadow_pte->writable = guest_pte->writable;
    }

    int fixed = 0;
    int write_pt = 0;
    uint_t vmm_info = shadow_pte->vmm_info;

    if (error_code.write == 1) {
        fixed = fix_write_pf_64(core, shadow_pte, guest_pte, (int)error_code.user, &write_pt, PAGE_BASE_ADDR(guest_pa), vmm_info);
    } else {
        fixed = fix_read_pf_64(shadow_pte, vmm_info);
    }

    PrintDebug("PTE: Fixed %d Write_Pt %d\n", fixed, write_pt);
    PrintDebug("PTE: Shadow Present %d, Write %d, User %d, Dirty %d, Accessed %d, Global %d\n",
               shadow_pte->present, shadow_pte->writable, shadow_pte->user_page, shadow_pte->dirty, shadow_pte->accessed,
               shadow_pte->global_page);
    PrintDebug("PTE: Shadow %p\n", (void *)*((addr_t *)shadow_pte));

    // A write to a shadowed guest page table must be emulated so the
    // corresponding shadow entries stay in sync
    if (shdw_reg->host_type == SHDW_REGION_ALLOCATED && write_pt == 1) {
        PrintDebug("PTE: Emul\n");
        if (v3_handle_mem_wr_hook(core, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
            shadow_unprotect_page(core, (addr_t)guest_pte->page_base_addr);
        }
    }

    PrintDebug("PTE: PTE end\n");
    PrintDebug("PTE: Updated Shadow Present %d, Write %d, User %d, Dirty %d, Accessed %d, Global %d\n",
               shadow_pte->present, shadow_pte->writable, shadow_pte->user_page, shadow_pte->dirty, shadow_pte->accessed,
               shadow_pte->global_page);
    PrintDebug("PTE: Updated Shadow %p\n", (void *)*((addr_t *)shadow_pte));
    PrintDebug("PTE: Guest PA %p, Host PA %p\n", (void *)guest_pa, (void *)BASE_TO_PAGE_ADDR(shadow_pte->page_base_addr));

    return 0;
}

static int handle_2MB_shadow_pagefault_64(struct guest_info * core,
                                          addr_t fault_addr, pf_error_t error_code,
                                          pte64_t * shadow_pt, pde64_2MB_t * large_guest_pde, uint32_t inherited_ar)
{
    pt_access_status_t shadow_pte_access = v3_can_access_pte64(shadow_pt, fault_addr, error_code);
    pte64_t * shadow_pte = (pte64_t *)&(shadow_pt[PTE64_INDEX(fault_addr)]);
    addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_2MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_2MB(fault_addr);
    // struct shadow_page_state * state = &(core->shdw_pg_state);

    PrintDebug("Handling 2MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t *)&error_code);
    PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);

    struct v3_shadow_region * shdw_reg = v3_get_shadow_region(core->vm_info, core->cpu_id, guest_fault_pa);

    if (shdw_reg == NULL) {
        // Inject a machine check in the guest
        PrintError("Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
        v3_raise_exception(core, MC_EXCEPTION);
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Get the guest physical address of the fault
        int inherited_ar_user = ((inherited_ar & PT_USER_MASK) == PT_USER_MASK) ? 1 : 0;
        int inherited_ar_writable = ((inherited_ar & PT_WRITABLE_MASK) == PT_WRITABLE_MASK) ? 1 : 0;

        if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
            (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
            addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, core->cpu_id, guest_fault_pa);

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);

            PrintDebug("LPTE: inherited_ar %d\n", inherited_ar);
            shadow_pte->user_page = inherited_ar_user;
            PrintDebug("LPTE: inherited shadow_pte_user %d\n", shadow_pte->user_page);

            /* We are assuming that the PDE entry has precedence,
             * so the Shadow PDE mirrors the guest PDE settings
             * and the remaining permissions are tracked here.
             */
            shadow_pte->present = large_guest_pde->present;
            shadow_pte->dirty = large_guest_pde->dirty;

            if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
                shadow_pte->writable = 0;
            } else {
                shadow_pte->writable = inherited_ar_writable;
                PrintDebug("LPTE: inherited shadow_pte_writable %d, PT_WRITABLE_MASK %p, inherited_ar & PT_WRITABLE_MASK %p\n",
                           shadow_pte->writable, (void *)PT_WRITABLE_MASK, (void *)(inherited_ar & PT_WRITABLE_MASK));
            }

            // set according to VMM policy
            shadow_pte->global_page = large_guest_pde->global_page;

            // Stash the inherited rights for the fix_*_pf_64() fast paths
            shadow_pte->vmm_info = (inherited_ar_writable << 1) | (inherited_ar_user << 2);

            if (large_guest_pde->writable) {
                // If this frame is itself a shadowed page table, map it read-only
                struct shadow_page_cache_data * shadow;
                shadow = shadow_page_lookup_page(core, PAGE_BASE_ADDR(guest_fault_pa), 0);
                if (shadow != NULL) {
                    if (shadow_pte->writable) {
                        shadow_pte->writable = 0;
                    }
                }
            }

            rmap_add(core, (addr_t)shadow_pte);
        } else {
            if (v3_handle_mem_full_hook(core, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {

        if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {

            if (v3_handle_mem_wr_hook(core, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
            // The hooked write has been emulated; don't make the PTE writable
            return 0;
        }
    }

    int fixed = 0;
    int write_pt = 0;
    addr_t guest_fn;    // note: guest_fn is currently computed but unused below

    struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(core->shdw_pg_state.guest_cr3);
    guest_fn = (addr_t)guest_cr3->pml4t_base_addr;

    uint_t vmm_info = shadow_pte->vmm_info;

    if (error_code.write == 1) {
        fixed = fix_write_pf_64(core, shadow_pte, (pte64_t *)large_guest_pde, (int)error_code.user,
                                &write_pt, PAGE_BASE_ADDR(guest_fault_pa), vmm_info);
    } else {
        fixed = fix_read_pf_64(shadow_pte, vmm_info);
    }

    PrintDebug("LPTE: Fixed %d, Write_Pt %d\n", fixed, write_pt);
    PrintDebug("LPTE: Shadow Present %d, Write %d, User %d, Dirty %d, Accessed %d, Global %d\n",
               shadow_pte->present, shadow_pte->writable, shadow_pte->user_page, shadow_pte->dirty,
               shadow_pte->accessed, shadow_pte->global_page);
    PrintDebug("LPTE: Shadow %p\n", (void *)*((addr_t *)shadow_pte));

    if (shdw_reg->host_type == SHDW_REGION_ALLOCATED && write_pt == 1) {
        if (v3_handle_mem_wr_hook(core, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
            shadow_unprotect_page(core, (addr_t)PAGE_BASE_ADDR(guest_fault_pa));
        }
    }

    PrintDebug("Updated LPTE: Shadow Present %d, Write %d, User %d, Dirty %d, Accessed %d, Global %d\n",
               shadow_pte->present, shadow_pte->writable, shadow_pte->user_page, shadow_pte->dirty, shadow_pte->accessed,
               shadow_pte->global_page);
    PrintDebug("LPTE: Updated Shadow %p\n", (void *)*((addr_t *)shadow_pte));
    PrintDebug("LPTE: Guest PA %p Host PA %p\n",
               (void *)BASE_TO_PAGE_ADDR(PAGE_BASE_ADDR(guest_fault_pa)),
               (void *)BASE_TO_PAGE_ADDR(shadow_pte->page_base_addr));

    // PrintHostPageTree(core, fault_addr, core->ctrl_regs.cr3);
    PrintDebug("Returning from large page fault handler\n");
    return 0;
}
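
/* Note that 2MB guest pages are "splintered": the shadow side always maps
 * them with 4KB PTEs (shadow_pt is a pte64_t page), with the shadow PDE
 * treated as a metaphysical mapping of the large guest page.
 */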

static int invalidation_cb_64(struct guest_info * core, page_type_t type,
                              addr_t vaddr, addr_t page_ptr, addr_t page_pa,
                              void * private_data) {

    switch (type) {
        case PAGE_PML464:
            {
                pml4e64_t * pml = (pml4e64_t *)page_ptr;

                if (pml[PML4E64_INDEX(vaddr)].present == 0) {
                    return 1;
                }
                return 0;
            }
        case PAGE_PDP64:
            {
                pdpe64_t * pdp = (pdpe64_t *)page_ptr;
                pdpe64_t * pdpe = &(pdp[PDPE64_INDEX(vaddr)]);

                if (pdpe->present == 0) {
                    return 1;
                }

                if (pdpe->vmm_info == V3_LARGE_PG) {
                    PrintError("1 Gigabyte pages not supported\n");
                    return -1;
                }

                return 0;
            }
        case PAGE_PD64:
            {
                pde64_t * pd = (pde64_t *)page_ptr;
                pde64_t * pde = &(pd[PDE64_INDEX(vaddr)]);

                if (pde->present == 0) {
                    return 1;
                }

                if (pde->vmm_info == V3_LARGE_PG) {
                    pde->present = 0;
                    return 1;
                }

                return 0;
            }
        case PAGE_PT64:
            {
                pte64_t * pt = (pte64_t *)page_ptr;

                pt[PTE64_INDEX(vaddr)].present = 0;

                return 0;
            }
        default:
            PrintError("Invalid Page Type\n");
            return -1;
    }

    // should not get here
    PrintError("Should not get here....\n");
    return -1;
}
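
/* Callback return convention for v3_drill_host_pt_64(), inferred from the
 * uses above (the authoritative definition lives with v3_drill_host_pt_64()):
 *    1 == this level's entry was not present, stop the walk;
 *    0 == entry invalidated or walk should continue;
 *   -1 == error.
 */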

static inline int handle_shadow_invlpg_64(struct guest_info * core, addr_t vaddr) {
    PrintDebug("INVLPG64 - %p\n", (void *)vaddr);

    int ret = v3_drill_host_pt_64(core, core->ctrl_regs.cr3, vaddr, invalidation_cb_64, NULL);
    if (ret == -1) {
        PrintError("Page table drill returned error.... \n");
        PrintHostPageTree(core, vaddr, core->ctrl_regs.cr3);
    }

    return (ret == -1) ? -1 : 0;
}

#endif