#ifdef V3_CONFIG_SHADOW_CACHE

#define PT64_NX_MASK (1ULL << 63)
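/* Load a new 64 bit shadow page table in response to a guest CR3 write.
 * The top level shadow page comes from the shadow page cache, and its
 * physical address becomes the new shadow CR3 value. */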
static inline int activate_shadow_pt_64(struct guest_info * core) {
    struct cr3_64 * shadow_cr3 = (struct cr3_64 *)&(core->ctrl_regs.cr3);
    struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(core->shdw_pg_state.guest_cr3);

    struct shadow_page_cache_data * shadow_pt;

    if (core->n_free_shadow_pages < MIN_FREE_SHADOW_PAGES) {
        shadow_free_some_pages(core);
    }

    shadow_pt = shadow_page_get_page(core, (addr_t)(guest_cr3->pml4t_base_addr), 4, 0, 0, 0, 0);
    PrintDebug(core->vm_info, core, "Activate shadow_pt %p\n", (void *)BASE_TO_PAGE_ADDR(guest_cr3->pml4t_base_addr));

    addr_t shadow_pt_addr = shadow_pt->page_pa;

    // Because this is a new CR3 load the allocated page is the new CR3 value
    shadow_pt->cr3 = shadow_pt->page_pa;

    PrintDebug(core->vm_info, core, "Top level Shadow page pa=%p\n", (void *)shadow_pt_addr);

    shadow_cr3->pml4t_base_addr = PAGE_BASE_ADDR_4KB(shadow_pt_addr);
    PrintDebug(core->vm_info, core, "Creating new 64 bit shadow page table %p\n", (void *)BASE_TO_PAGE_ADDR(shadow_cr3->pml4t_base_addr));

    shadow_cr3->pwt = guest_cr3->pwt;
    shadow_cr3->pcd = guest_cr3->pcd;

    shadow_topup_caches(core);

    return 0;
}
/*
 * 64 bit Page table fault handlers
 */
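/* Debug helper: walk every active shadow page and dump its non-zero PTEs. */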
static inline void burst_64(struct guest_info * core) {
    int idx;
    pte64_t *pt, *pte;
    struct shadow_page_cache_data *sp, *node;

    list_for_each_entry_safe(sp, node, &core->active_shadow_pages, link) {
        pt = (pte64_t *)V3_VAddr((void *)sp->page_pa);
        PrintDebug(core->vm_info, core, "burst: pt %p\n", (void *)pt);
        for (idx = 0; idx < PT_ENT_PER_PAGE; ++idx) {
            pte = (pte64_t *)&(pt[idx]);
            if (*((uint64_t *)pte)) {
                PrintDebug(core->vm_info, core, "%d: s %p\n", idx, (void *)*((uint64_t *)pte));
            }
        }
    }
}
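/* On a read fault, propagate the guest's user bit into the shadow PTE.
 * The writable bit is deliberately left clear so a subsequent write still
 * faults and can be vetted by fix_write_pf_64. Returns 1 if the entry was
 * fixed up. */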
static inline int fix_read_pf_64(pte64_t * shadow_pte, uint_t vmm_info) {
    PrintDebug(VM_NONE, VCORE_NONE, "\tReadPf, start vmm_info %d\n", vmm_info);

    if ((vmm_info & PT_USER_MASK) && !(shadow_pte->user_page)) {
        shadow_pte->user_page = 1;
        shadow_pte->writable = 0;
        return 1;
    }
    return 0;
}
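/* On a write fault, decide whether the shadow PTE can be made writable.
 * Supervisor writes honor CR0.WP, and writes that would hit a cached
 * shadow page table are reported through *write_pt so the caller can
 * emulate the update instead. Returns 1 if the entry was made writable. */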
static inline int fix_write_pf_64(struct guest_info * core, pte64_t * shadow_pte, pte64_t * guest_pte,
                                  int user, int * write_pt, addr_t guest_fn, uint_t vmm_info) {
    int writable_shadow;
    struct cr0_64 * guest_cr0;
    struct shadow_page_cache_data * page;

    *write_pt = 0;
    PrintDebug(core->vm_info, core, "\tWritePf, start vmm_info %d\n", vmm_info);

    if (shadow_pte->writable) {
        return 0;
    }

    PrintDebug(core->vm_info, core, "\tWritePf, pass writable\n");
    writable_shadow = vmm_info & PT_WRITABLE_MASK;
    PrintDebug(core->vm_info, core, "\tWritePf, writable_shadow %d\n", writable_shadow);

    if (user) {
        if (!(vmm_info & PT_USER_MASK) || !writable_shadow) {
            PrintDebug(core->vm_info, core, "\tWritePf: 1st User Check\n");
            return 0;
        }
    } else {
        if (!writable_shadow) {
            guest_cr0 = (struct cr0_64 *)&(core->shdw_pg_state.guest_cr0);
            PrintDebug(core->vm_info, core, "\tWritePf: WP %d\n", guest_cr0->wp);
            if (guest_cr0->wp) {
                return 0;
            }
            shadow_pte->user_page = 0;
        }
    }

    if (guest_pte->present == 0) {
        memset((void *)shadow_pte, 0, sizeof(uint64_t));
        PrintDebug(core->vm_info, core, "\tWritePf: Guest Not Present\n");
        return 0;
    }

    if (user) {
        // User faults are never page table updates, so zap any shadow pages
        while ((page = shadow_page_lookup_page(core, guest_fn, 0)) != NULL) {
            shadow_zap_page(core, page);
        }
        PrintDebug(core->vm_info, core, "\tWritePf: Zap Page\n");
    } else if ((page = shadow_page_lookup_page(core, guest_fn, 0)) != NULL) {
        // The write hits a cached shadow page table: emulate it instead
        guest_pte->dirty = 1;
        *write_pt = 1;
        PrintDebug(core->vm_info, core, "\tWritePf: Write Needed\n");
        return 0;
    }

    shadow_pte->writable = 1;
    guest_pte->dirty = 1;
    rmap_add(core, (addr_t)shadow_pte);

    PrintDebug(core->vm_info, core, "\tWritePf: On Writable\n");
    return 1;
}
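/* Forward declarations for the per-level fault handlers. Each level
 * validates the guest entry, builds or repairs the matching shadow entry,
 * and then recurses one level down the paging hierarchy. */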
static int handle_2MB_shadow_pagefault_64(struct guest_info * core, addr_t fault_addr, pf_error_t error_code,
                                          pte64_t * shadow_pt, pde64_2MB_t * large_guest_pde, uint32_t inherited_ar);
static int handle_pte_shadow_pagefault_64(struct guest_info * core, addr_t fault_addr, pf_error_t error_code,
                                          pte64_t * shadow_pt, pte64_t * guest_pt, uint32_t inherited_ar);
static int handle_pde_shadow_pagefault_64(struct guest_info * core, addr_t fault_addr, pf_error_t error_code,
                                          pde64_t * shadow_pd, pde64_t * guest_pd, uint32_t inherited_ar);
static int handle_pdpe_shadow_pagefault_64(struct guest_info * core, addr_t fault_addr, pf_error_t error_code,
                                           pdpe64_t * shadow_pdp, pdpe64_t * guest_pdp, uint32_t inherited_ar);
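/* Top level page fault handler: checks the PML4 level, injects a fault
 * into the guest when the guest's own permissions deny the access, and
 * otherwise hands the fault down to the PDPE handler. */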
static inline int handle_shadow_pagefault_64(struct guest_info * core, addr_t fault_addr, pf_error_t error_code) {
    pml4e64_t * guest_pml = NULL;
    pml4e64_t * shadow_pml = CR3_TO_PML4E64_VA(core->ctrl_regs.cr3);
    addr_t guest_cr3 = CR3_TO_PML4E64_PA(core->shdw_pg_state.guest_cr3);
    pt_access_status_t guest_pml4e_access;
    pt_access_status_t shadow_pml4e_access;
    pml4e64_t * guest_pml4e = NULL;
    pml4e64_t * shadow_pml4e = (pml4e64_t *)&(shadow_pml[PML4E64_INDEX(fault_addr)]);

    PrintDebug(core->vm_info, core, "64 bit Shadow page fault handler: %p\n", (void *)fault_addr);
    PrintDebug(core->vm_info, core, "Handling PML fault\n");

    int metaphysical = 0;
    unsigned hugepage_access = 0;
    addr_t pml4e_base_addr = 0;
    uint32_t inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;

    if (core->n_free_shadow_pages < MIN_FREE_SHADOW_PAGES) {
        shadow_free_some_pages(core);
    }
    shadow_topup_caches(core);

    if (guest_pa_to_host_va(core, guest_cr3, (addr_t *)&guest_pml) == -1) {
        PrintError(core->vm_info, core, "Invalid Guest PML4E Address: 0x%p\n", (void *)guest_cr3);
        return -1;
    }

    guest_pml4e = (pml4e64_t *)&(guest_pml[PML4E64_INDEX(fault_addr)]);

    pml4e_base_addr = (addr_t)(guest_pml4e->pdp_base_addr);

    PrintDebug(core->vm_info, core, "Checking Guest %p\n", (void *)guest_pml);
    // Check the guest page permissions
    guest_pml4e_access = v3_can_access_pml4e64(guest_pml, fault_addr, error_code);

    PrintDebug(core->vm_info, core, "Checking shadow %p\n", (void *)shadow_pml);
    // Check the shadow page permissions
    shadow_pml4e_access = v3_can_access_pml4e64(shadow_pml, fault_addr, error_code);

    if (guest_pml4e_access == PT_ACCESS_NOT_PRESENT) {
        error_code.present = 0;
        goto pml4e_error;
    }

    if (guest_pml4e_access == PT_ACCESS_WRITE_ERROR) {
        struct cr0_64 * guest_cr0 = (struct cr0_64 *)&(core->shdw_pg_state.guest_cr0);
        if (error_code.user || guest_cr0->wp) {
            error_code.present = 1;
            goto pml4e_error;
        }
    }

    if (guest_pml4e_access == PT_ACCESS_USER_ERROR) {
        error_code.present = 1;
        goto pml4e_error;
    }

    // NX violations only fault the guest in long mode
    if (error_code.ifetch == 1 && ((*(uint64_t *)guest_pml4e) & PT64_NX_MASK)) {
        struct efer_64 * guest_efer = (struct efer_64 *)&(core->shdw_pg_state.guest_efer);
        if (guest_efer->lma == 1) {
            goto pml4e_error;
        }
    }

    goto pml4e_noerror;

pml4e_error:

    PrintDebug(core->vm_info, core, "Injecting PML Pf to Guest: (Guest Access Error = %d) (SHDW Access Error = %d) (Pf Error Code = %d)\n",
               *(uint_t *)&guest_pml4e_access, *(uint_t *)&shadow_pml4e_access, *(uint_t *)&error_code);
    if (inject_guest_pf(core, fault_addr, error_code) == -1) {
        PrintError(core->vm_info, core, "Could Not Inject Guest Page Fault\n");
        return -1;
    }
    return 0;

pml4e_noerror:

    if (guest_pml4e->accessed == 0) {
        guest_pml4e->accessed = 1;
    }

    inherited_ar &= *(uint64_t *)guest_pml4e;
    PrintDebug(core->vm_info, core, "PML: inherited %x\n", inherited_ar);

    pdpe64_t * shadow_pdp = NULL;
    pdpe64_t * guest_pdp = NULL;

    // Get the next shadow page level, allocate if not present
    if (shadow_pml4e_access == PT_ACCESS_NOT_PRESENT) {
        struct shadow_page_cache_data * shdw_page = shadow_page_get_page(core, pml4e_base_addr, 3, metaphysical,
                                                                         hugepage_access, (addr_t)shadow_pml4e, 0);
        shadow_pdp = (pdpe64_t *)V3_VAddr((void *)shdw_page->page_pa);

        shadow_pml4e->present = 1;
        shadow_pml4e->accessed = 1;
        shadow_pml4e->writable = 1;
        shadow_pml4e->user_page = 1;

        shadow_pml4e->pdp_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pdp = (pdpe64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pml4e->pdp_base_addr));
    }

    // Continue processing at the next level

    if (guest_pa_to_host_va(core, BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr), (addr_t *)&guest_pdp) == -1) {
        // Machine check the guest
        PrintError(core->vm_info, core, "Invalid Guest PDP Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr));
        v3_raise_exception(core, MC_EXCEPTION);
        return 0;
    }

    if (handle_pdpe_shadow_pagefault_64(core, fault_addr, error_code, shadow_pdp, guest_pdp, inherited_ar) == -1) {
        PrintError(core->vm_info, core, "Error handling Page fault caused by PDPE\n");
        return -1;
    }

    return 0;
}
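/* PDPE level handler: the same guest/shadow permission checks one level
 * down, followed by recursion into the PDE handler. */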
// For now we are not going to handle 1 Gigabyte pages
static int handle_pdpe_shadow_pagefault_64(struct guest_info * core, addr_t fault_addr, pf_error_t error_code,
                                           pdpe64_t * shadow_pdp, pdpe64_t * guest_pdp, uint32_t inherited_ar) {
    pt_access_status_t guest_pdpe_access;
    pt_access_status_t shadow_pdpe_access;
    pdpe64_t * guest_pdpe = (pdpe64_t *)&(guest_pdp[PDPE64_INDEX(fault_addr)]);
    pdpe64_t * shadow_pdpe = (pdpe64_t *)&(shadow_pdp[PDPE64_INDEX(fault_addr)]);

    PrintDebug(core->vm_info, core, "Handling PDP fault\n");

    if (fault_addr == 0) {
        PrintDebug(core->vm_info, core, "Guest Page Tree for guest virtual address zero fault\n");
        PrintGuestPageTree(core, fault_addr, (addr_t)(core->shdw_pg_state.guest_cr3));
        PrintDebug(core->vm_info, core, "Host Page Tree for guest virtual address zero fault\n");
        PrintHostPageTree(core, fault_addr, (addr_t)(core->ctrl_regs.cr3));
    }

    int metaphysical = 0;
    unsigned hugepage_access = 0;

    addr_t pdpe_base_addr = (addr_t)(guest_pdpe->pd_base_addr);

    // Check the guest page permissions
    guest_pdpe_access = v3_can_access_pdpe64(guest_pdp, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pdpe_access = v3_can_access_pdpe64(shadow_pdp, fault_addr, error_code);

    if (guest_pdpe_access == PT_ACCESS_NOT_PRESENT) {
        error_code.present = 0;
        goto pdpe_error;
    }

    if (guest_pdpe_access == PT_ACCESS_WRITE_ERROR) {
        struct cr0_64 * guest_cr0 = (struct cr0_64 *)&(core->shdw_pg_state.guest_cr0);
        if (error_code.user || guest_cr0->wp) {
            error_code.present = 1;
            goto pdpe_error;
        }
    }

    if (guest_pdpe_access == PT_ACCESS_USER_ERROR) {
        error_code.present = 1;
        goto pdpe_error;
    }

    if (error_code.ifetch == 1 && ((*(uint64_t *)guest_pdpe) & PT64_NX_MASK)) {
        struct efer_64 * guest_efer = (struct efer_64 *)&(core->shdw_pg_state.guest_efer);
        if (guest_efer->lma == 1) {
            goto pdpe_error;
        }
    }

    goto pdpe_noerror;

pdpe_error:

    PrintDebug(core->vm_info, core, "Injecting PDPE Pf to Guest: (Guest Access Error = %d) (SHDW Access Error = %d) (Pf Error Code = %d)\n",
               *(uint_t *)&guest_pdpe_access, *(uint_t *)&shadow_pdpe_access, *(uint_t *)&error_code);
    if (inject_guest_pf(core, fault_addr, error_code) == -1) {
        PrintError(core->vm_info, core, "Could Not Inject Guest Page Fault\n");
        return -1;
    }
    return 0;

pdpe_noerror:

    if (guest_pdpe->accessed == 0) {
        guest_pdpe->accessed = 1;
    }

    inherited_ar &= *(uint64_t *)guest_pdpe;
    PrintDebug(core->vm_info, core, "PDPE: inherited %x\n", inherited_ar);

    pde64_t * shadow_pd = NULL;
    pde64_t * guest_pd = NULL;

    // Get the next shadow page level, allocate if not present
    if (shadow_pdpe_access == PT_ACCESS_NOT_PRESENT) {
        struct shadow_page_cache_data * shdw_page = shadow_page_get_page(core, pdpe_base_addr, 2, metaphysical,
                                                                         hugepage_access, (addr_t)shadow_pdpe, 0);

        shadow_pd = (pde64_t *)V3_VAddr((void *)shdw_page->page_pa);

        shadow_pdpe->present = 1;
        shadow_pdpe->accessed = 1;
        shadow_pdpe->writable = 1;
        shadow_pdpe->user_page = 1;

        shadow_pdpe->pd_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pd = (pde64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pdpe->pd_base_addr));
    }

    // Continue processing at the next level

    if (guest_pa_to_host_va(core, BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr), (addr_t *)&guest_pd) == -1) {
        // Machine check the guest
        PrintError(core->vm_info, core, "Invalid Guest PD Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr));
        v3_raise_exception(core, MC_EXCEPTION);
        return 0;
    }

    if (handle_pde_shadow_pagefault_64(core, fault_addr, error_code, shadow_pd, guest_pd, inherited_ar) == -1) {
        PrintError(core->vm_info, core, "Error handling Page fault caused by PDE\n");
        return -1;
    }

    return 0;
}
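/* PDE level handler: distinguishes 4KB mappings, which recurse into the
 * PTE handler, from 2MB large pages, which go to the dedicated handler. */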
static int handle_pde_shadow_pagefault_64(struct guest_info * core, addr_t fault_addr, pf_error_t error_code,
                                          pde64_t * shadow_pd, pde64_t * guest_pd, uint32_t inherited_ar) {
    pt_access_status_t guest_pde_access;
    pt_access_status_t shadow_pde_access;
    pde64_t * guest_pde = (pde64_t *)&(guest_pd[PDE64_INDEX(fault_addr)]);
    pde64_t * shadow_pde = (pde64_t *)&(shadow_pd[PDE64_INDEX(fault_addr)]);

    PrintDebug(core->vm_info, core, "Handling PDE fault\n");

    int metaphysical = 0;
    unsigned hugepage_access = 0;

    addr_t pde_base_addr = (addr_t)(guest_pde->pt_base_addr);

    if (guest_pde->large_page == 1) {
        pde_base_addr = (addr_t)PAGE_BASE_ADDR(BASE_TO_PAGE_ADDR_2MB(((pde64_2MB_t *)guest_pde)->page_base_addr));
        metaphysical = 1; // large pages get a direct-mapped shadow page table
        hugepage_access = (((pde64_2MB_t *)guest_pde)->writable | (((pde64_2MB_t *)guest_pde)->user_page << 1));
    }

    // Check the guest page permissions
    guest_pde_access = v3_can_access_pde64(guest_pd, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pde_access = v3_can_access_pde64(shadow_pd, fault_addr, error_code);

    if (guest_pde_access == PT_ACCESS_NOT_PRESENT) {
        error_code.present = 0;
        goto pde_error;
    }

    if (guest_pde_access == PT_ACCESS_WRITE_ERROR) {
        struct cr0_64 * guest_cr0 = (struct cr0_64 *)&(core->shdw_pg_state.guest_cr0);
        if (error_code.user || guest_cr0->wp) {
            error_code.present = 1;
            goto pde_error;
        }
    }

    if (guest_pde_access == PT_ACCESS_USER_ERROR) {
        error_code.present = 1;
        goto pde_error;
    }

    if (error_code.ifetch == 1 && ((*(uint64_t *)guest_pde) & PT64_NX_MASK)) {
        struct efer_64 * guest_efer = (struct efer_64 *)&(core->shdw_pg_state.guest_efer);
        if (guest_efer->lma == 1) {
            goto pde_error;
        }
    }

    goto pde_noerror;

pde_error:

    PrintDebug(core->vm_info, core, "Injecting PDE Pf to Guest: (Guest Access Error = %d) (SHDW Access Error = %d) (Pf Error Code = %d)\n",
               *(uint_t *)&guest_pde_access, *(uint_t *)&shadow_pde_access, *(uint_t *)&error_code);
    if (inject_guest_pf(core, fault_addr, error_code) == -1) {
        PrintError(core->vm_info, core, "Could Not Inject Guest Page Fault\n");
        return -1;
    }
    return 0;

pde_noerror:

    if (guest_pde->accessed == 0) {
        guest_pde->accessed = 1;
    }

    inherited_ar &= *(uint64_t *)guest_pde;
    PrintDebug(core->vm_info, core, "PDE: inherited %x\n", inherited_ar);

    pte64_t * shadow_pt = NULL;
    pte64_t * guest_pt = NULL;

    // Get the next shadow page level, allocate if not present
    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
        struct shadow_page_cache_data * shdw_page = shadow_page_get_page(core, pde_base_addr, 1, metaphysical,
                                                                         hugepage_access, (addr_t)shadow_pde, 0);
        shadow_pt = (pte64_t *)V3_VAddr((void *)shdw_page->page_pa);

        PrintDebug(core->vm_info, core, "Creating new shadow PT: %p\n", (void *)shadow_pt);

        shadow_pde->present = 1;
        shadow_pde->accessed = 1;
        shadow_pde->writable = 1;
        shadow_pde->user_page = 1;

        shadow_pde->pt_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pt = (pte64_t *)V3_VAddr((void *)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr));
    }

    // Continue processing at the next level
    if (guest_pde->large_page == 0) {
        if (guest_pa_to_host_va(core, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t *)&guest_pt) == -1) {
            // Machine check the guest
            PrintError(core->vm_info, core, "Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
            v3_raise_exception(core, MC_EXCEPTION);
            return 0;
        }

        if (handle_pte_shadow_pagefault_64(core, fault_addr, error_code, shadow_pt, guest_pt, inherited_ar) == -1) {
            PrintError(core->vm_info, core, "Error handling Page fault caused by PTE\n");
            return -1;
        }
    } else {
        if (handle_2MB_shadow_pagefault_64(core, fault_addr, error_code, shadow_pt,
                                           (pde64_2MB_t *)guest_pde, inherited_ar) == -1) {
            PrintError(core->vm_info, core, "Error handling large pagefault\n");
            return -1;
        }
    }

    return 0;
}
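/* PTE level handler: translates the faulting guest physical address
 * through the VM's memory map, fills in the shadow PTE on first touch,
 * and otherwise lets fix_read_pf_64/fix_write_pf_64 repair permissions. */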
static int handle_pte_shadow_pagefault_64(struct guest_info * core, addr_t fault_addr, pf_error_t error_code,
                                          pte64_t * shadow_pt, pte64_t * guest_pt, uint32_t inherited_ar) {
    pt_access_status_t guest_pte_access;
    pt_access_status_t shadow_pte_access;
    pte64_t * guest_pte = (pte64_t *)&(guest_pt[PTE64_INDEX(fault_addr)]);
    pte64_t * shadow_pte = (pte64_t *)&(shadow_pt[PTE64_INDEX(fault_addr)]);
    addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);
    //  struct shadow_page_state * state = &(core->shdw_pg_state);

    PrintDebug(core->vm_info, core, "Handling PTE fault\n");

    struct v3_mem_region * shdw_reg = v3_get_mem_region(core->vm_info, core->vcpu_id, guest_pa);

    if (shdw_reg == NULL) {
        // Inject a machine check in the guest
        PrintError(core->vm_info, core, "Invalid Guest Address in page table (0x%p)\n", (void *)guest_pa);
        v3_raise_exception(core, MC_EXCEPTION);
        return 0;
    }

    // Check the guest page permissions
    guest_pte_access = v3_can_access_pte64(guest_pt, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pte_access = v3_can_access_pte64(shadow_pt, fault_addr, error_code);

    if (guest_pte_access == PT_ACCESS_NOT_PRESENT) {
        error_code.present = 0;
        goto pte_error;
    }

    if (guest_pte_access == PT_ACCESS_WRITE_ERROR) {
        struct cr0_64 * guest_cr0 = (struct cr0_64 *)&(core->shdw_pg_state.guest_cr0);
        if (error_code.user || guest_cr0->wp) {
            error_code.present = 1;
            goto pte_error;
        }
    }

    if (guest_pte_access == PT_ACCESS_USER_ERROR) {
        error_code.present = 1;
        goto pte_error;
    }

    if (error_code.ifetch == 1 && ((*(uint64_t *)guest_pte) & PT64_NX_MASK)) {
        struct efer_64 * guest_efer = (struct efer_64 *)&(core->shdw_pg_state.guest_efer);
        if (guest_efer->lma == 1) {
            goto pte_error;
        }
    }

    goto pte_noerror;

pte_error:

    PrintDebug(core->vm_info, core, "Injecting PTE Pf to Guest: (Guest Access Error = %d) (SHDW Access Error = %d) (Pf Error Code = %d)\n",
               *(uint_t *)&guest_pte_access, *(uint_t *)&shadow_pte_access, *(uint_t *)&error_code);
    if (inject_guest_pf(core, fault_addr, error_code) == -1) {
        PrintError(core->vm_info, core, "Could Not Inject Guest Page Fault\n");
        return -1;
    }
    return 0;

pte_noerror:

    if (guest_pte->accessed == 0) {
        guest_pte->accessed = 1;
    }

    inherited_ar &= *(uint64_t *)guest_pte;
    PrintDebug(core->vm_info, core, "PTE: inherited %x\n", inherited_ar);

    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Page Table Entry Not Present
        PrintDebug(core->vm_info, core, "guest_pa =%p\n", (void *)guest_pa);

        if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
            (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {

            int inherited_ar_user = ((inherited_ar & PT_USER_MASK) == PT_USER_MASK) ? 1 : 0;
            int inherited_ar_writable = ((inherited_ar & PT_WRITABLE_MASK) == PT_WRITABLE_MASK) ? 1 : 0;

            addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, core->vcpu_id, guest_pa);

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);

            shadow_pte->present = guest_pte->present;

            shadow_pte->user_page = inherited_ar_user;
            PrintDebug(core->vm_info, core, "PTE: inherited shadow_pte_user %d, guest_pte_user %d\n", shadow_pte->user_page, guest_pte->user_page);

            // set according to VMM policy
            shadow_pte->global_page = guest_pte->global_page;

            shadow_pte->accessed = guest_pte->accessed;
            shadow_pte->dirty = guest_pte->dirty;
            shadow_pte->writable = inherited_ar_writable;

            PrintDebug(core->vm_info, core, "PTE: inherited shadow_pte_writable %d, guest_pte_writable %d\n", shadow_pte->writable, guest_pte->writable);

            // Write hooks trump all, and are set Read Only
            if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
                shadow_pte->writable = 0;
            }

            shadow_pte->vmm_info = (inherited_ar_writable << 1) | (inherited_ar_user << 2);

            // Write protect the frame if it backs a shadowed guest page table
            if (inherited_ar_writable & guest_pte->writable) {
                struct shadow_page_cache_data * shadow;
                shadow = shadow_page_lookup_page(core, PAGE_BASE_ADDR(guest_pa), 0);
                if (shadow) {
                    if (shadow_pte->writable) {
                        shadow_pte->writable = 0;
                    }
                }
            }

            PrintDebug(core->vm_info, core, "PTE: Updated Shadow Present %d, Write %d, User %d, Dirty %d, Accessed %d, Global %d\n",
                       shadow_pte->present, shadow_pte->writable, shadow_pte->user_page, shadow_pte->dirty, shadow_pte->accessed,
                       shadow_pte->global_page);
            PrintDebug(core->vm_info, core, "PTE: Shadow %p\n", (void *)*((addr_t *)shadow_pte));
            rmap_add(core, (addr_t)shadow_pte);
        } else {
            // Page fault handled by hook functions
            if (v3_handle_mem_full_hook(core, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                PrintError(core->vm_info, core, "Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
        guest_pte->dirty = 1;

        if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
            if (v3_handle_mem_wr_hook(core, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                PrintError(core->vm_info, core, "Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        } else {
            PrintDebug(core->vm_info, core, "Shadow PTE Write Error\n");
            shadow_pte->writable = guest_pte->writable;
        }
    }

    int fixed = 0;
    int write_pt = 0;
    uint_t vmm_info = shadow_pte->vmm_info;

    if (error_code.write == 1) {
        fixed = fix_write_pf_64(core, shadow_pte, guest_pte, (int)error_code.user, &write_pt, PAGE_BASE_ADDR(guest_pa), vmm_info);
    } else {
        fixed = fix_read_pf_64(shadow_pte, vmm_info);
    }

    PrintDebug(core->vm_info, core, "PTE: Fixed %d Write_Pt %d\n", fixed, write_pt);
    PrintDebug(core->vm_info, core, "PTE: Shadow Present %d, Write %d, User %d, Dirty %d, Accessed %d, Global %d\n",
               shadow_pte->present, shadow_pte->writable, shadow_pte->user_page, shadow_pte->dirty, shadow_pte->accessed,
               shadow_pte->global_page);
    PrintDebug(core->vm_info, core, "PTE: Shadow %p\n", (void *)*((addr_t *)shadow_pte));

    if (shdw_reg->host_type == SHDW_REGION_ALLOCATED && write_pt == 1) {
        PrintDebug(core->vm_info, core, "PTE: Emul\n");
        if (v3_handle_mem_wr_hook(core, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
            shadow_unprotect_page(core, (addr_t)guest_pte->page_base_addr);
        }
    }

    PrintDebug(core->vm_info, core, "PTE: PTE end\n");
    PrintDebug(core->vm_info, core, "PTE: Updated Shadow Present %d, Write %d, User %d, Dirty %d, Accessed %d, Global %d\n",
               shadow_pte->present, shadow_pte->writable, shadow_pte->user_page, shadow_pte->dirty, shadow_pte->accessed,
               shadow_pte->global_page);
    PrintDebug(core->vm_info, core, "PTE: Updated Shadow %p\n", (void *)*((addr_t *)shadow_pte));
    PrintDebug(core->vm_info, core, "PTE: Guest PA %p, Host PA %p\n", (void *)guest_pa, (void *)BASE_TO_PAGE_ADDR(shadow_pte->page_base_addr));

    return 0;
}
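/* 2MB large page handler: the guest maps 2MB at the PDE level, but the
 * shadow always backs it with 4KB PTEs, so only the single faulting 4KB
 * frame is filled in here. */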
static int handle_2MB_shadow_pagefault_64(struct guest_info * core,
                                          addr_t fault_addr, pf_error_t error_code,
                                          pte64_t * shadow_pt, pde64_2MB_t * large_guest_pde, uint32_t inherited_ar)
{
    pt_access_status_t shadow_pte_access = v3_can_access_pte64(shadow_pt, fault_addr, error_code);
    pte64_t * shadow_pte = (pte64_t *)&(shadow_pt[PTE64_INDEX(fault_addr)]);
    addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_2MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_2MB(fault_addr);
    //  struct shadow_page_state * state = &(core->shdw_pg_state);

    PrintDebug(core->vm_info, core, "Handling 2MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t *)&error_code);
    PrintDebug(core->vm_info, core, "ShadowPT=%p, LargeGuestPDE=%p\n", (void *)shadow_pt, (void *)large_guest_pde);

    struct v3_mem_region * shdw_reg = v3_get_mem_region(core->vm_info, core->vcpu_id, guest_fault_pa);

    int fixed = 0;
    int write_pt = 0;
    addr_t guest_fn;

    if (shdw_reg == NULL) {
        // Inject a machine check in the guest
        PrintError(core->vm_info, core, "Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
        v3_raise_exception(core, MC_EXCEPTION);
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Get the guest physical address of the fault
        int inherited_ar_user = ((inherited_ar & PT_USER_MASK) == PT_USER_MASK) ? 1 : 0;
        int inherited_ar_writable = ((inherited_ar & PT_WRITABLE_MASK) == PT_WRITABLE_MASK) ? 1 : 0;

        if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
            (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
            addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, core->vcpu_id, guest_fault_pa);

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);

            PrintDebug(core->vm_info, core, "LPTE: inherited_ar %d\n", inherited_ar);
            shadow_pte->user_page = inherited_ar_user;
            PrintDebug(core->vm_info, core, "LPTE: inherited shadow_pte_user %d\n", shadow_pte->user_page);

            shadow_pte->dirty = large_guest_pde->dirty;
            shadow_pte->present = 1;

            /* We are assuming that the PDE entry has precedence
             * so the Shadow PDE will mirror the guest PDE settings,
             * and we don't have to worry about them here
             * Allow everything
             */
            shadow_pte->user_page = 1;

            if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
                shadow_pte->writable = 0;
            } else {
                shadow_pte->writable = inherited_ar_writable;
                PrintDebug(core->vm_info, core, "LPTE: inherited shadow_pte_writable %d, PT_WRITABLE_MASK %p, inherited_ar & PT_WRITABLE_MASK %p\n",
                           shadow_pte->writable, (void *)PT_WRITABLE_MASK, (void *)(inherited_ar & PT_WRITABLE_MASK));
            }

            // set according to VMM policy
            shadow_pte->global_page = large_guest_pde->global_page;

            shadow_pte->vmm_info = (inherited_ar_writable << 1) | (inherited_ar_user << 2);

            // Write protect the frame if it backs a shadowed guest page table
            if (large_guest_pde->writable) {
                struct shadow_page_cache_data * shadow;
                shadow = shadow_page_lookup_page(core, PAGE_BASE_ADDR(guest_fault_pa), 0);
                if (shadow) {
                    if (shadow_pte->writable) {
                        shadow_pte->writable = 0;
                    }
                }
            }

            rmap_add(core, (addr_t)shadow_pte);
        } else {
            if (v3_handle_mem_full_hook(core, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError(core->vm_info, core, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
        if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
            if (v3_handle_mem_wr_hook(core, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError(core->vm_info, core, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    }

    struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(core->shdw_pg_state.guest_cr3);
    guest_fn = (addr_t)guest_cr3->pml4t_base_addr;
    uint_t vmm_info = shadow_pte->vmm_info;

    if (error_code.write == 1) {
        fixed = fix_write_pf_64(core, shadow_pte, (pte64_t *)large_guest_pde, (int)error_code.user,
                                &write_pt, PAGE_BASE_ADDR(guest_fault_pa), vmm_info);
    } else {
        fixed = fix_read_pf_64(shadow_pte, vmm_info);
    }

    PrintDebug(core->vm_info, core, "LPTE: Fixed %d, Write_Pt %d\n", fixed, write_pt);
    PrintDebug(core->vm_info, core, "LPTE: Shadow Present %d, Write %d, User %d, Dirty %d, Accessed %d, Global %d\n",
               shadow_pte->present, shadow_pte->writable, shadow_pte->user_page, shadow_pte->dirty,
               shadow_pte->accessed, shadow_pte->global_page);
    PrintDebug(core->vm_info, core, "LPTE: Shadow %p\n", (void *)*((addr_t *)shadow_pte));

    if (shdw_reg->host_type == SHDW_REGION_ALLOCATED && write_pt == 1) {
        if (v3_handle_mem_wr_hook(core, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
            shadow_unprotect_page(core, (addr_t)PAGE_BASE_ADDR(guest_fault_pa));
        }
    }

    PrintDebug(core->vm_info, core, "Updated LPTE: Shadow Present %d, Write %d, User %d, Dirty %d, Accessed %d, Global %d\n",
               shadow_pte->present, shadow_pte->writable, shadow_pte->user_page, shadow_pte->dirty, shadow_pte->accessed,
               shadow_pte->global_page);
    PrintDebug(core->vm_info, core, "LPTE: Updated Shadow %p\n", (void *)*((addr_t *)shadow_pte));
    PrintDebug(core->vm_info, core, "LPTE: Guest PA %p Host PA %p\n",
               (void *)BASE_TO_PAGE_ADDR(PAGE_BASE_ADDR(guest_fault_pa)),
               (void *)BASE_TO_PAGE_ADDR(shadow_pte->page_base_addr));

    //  PrintHostPageTree(core, fault_addr, core->ctrl_regs.cr3);
    PrintDebug(core->vm_info, core, "Returning from large page fault handler\n");
    return 0;
}
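/* Callback for v3_drill_host_pt_64: invalidates the shadow entry that
 * maps vaddr at whichever paging level the walk reaches. Returning 1
 * stops the walk early, -1 signals an error. */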
static int invalidation_cb_64(struct guest_info * core, page_type_t type,
                              addr_t vaddr, addr_t page_ptr, addr_t page_pa,
                              void * private_data) {

    switch (type) {
        case PAGE_PML464: {
            pml4e64_t * pml = (pml4e64_t *)page_ptr;

            if (pml[PML4E64_INDEX(vaddr)].present == 0) {
                return 1;
            }
            return 0;
        }
        case PAGE_PDP64: {
            pdpe64_t * pdp = (pdpe64_t *)page_ptr;
            pdpe64_t * pdpe = &(pdp[PDPE64_INDEX(vaddr)]);

            if (pdpe->present == 0) {
                return 1;
            }
            if (pdpe->vmm_info == V3_LARGE_PG) {
                PrintError(core->vm_info, core, "1 Gigabyte pages not supported\n");
                return -1;
            }
            return 0;
        }
        case PAGE_PD64: {
            pde64_t * pd = (pde64_t *)page_ptr;
            pde64_t * pde = &(pd[PDE64_INDEX(vaddr)]);

            if (pde->present == 0) {
                return 1;
            }
            if (pde->vmm_info == V3_LARGE_PG) {
                pde->present = 0;
                return 1;
            }
            return 0;
        }
        case PAGE_PT64: {
            pte64_t * pt = (pte64_t *)page_ptr;

            pt[PTE64_INDEX(vaddr)].present = 0;
            return 0;
        }
        default:
            PrintError(core->vm_info, core, "Invalid Page Type\n");
            return -1;
    }

    // should not get here
    PrintError(core->vm_info, core, "Should not get here....\n");
    return -1;
}
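/* INVLPG emulation: walk the shadow (host) page tables for vaddr and
 * clear the affected shadow entries so they are rebuilt on the next fault. */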
static inline int handle_shadow_invlpg_64(struct guest_info * core, addr_t vaddr) {
    PrintDebug(core->vm_info, core, "INVLPG64 - %p\n", (void *)vaddr);

    int ret = v3_drill_host_pt_64(core, core->ctrl_regs.cr3, vaddr, invalidation_cb_64, NULL);
    if (ret == -1) {
        PrintError(core->vm_info, core, "Page table drill returned error.... \n");
        PrintHostPageTree(core, vaddr, core->ctrl_regs.cr3);
    }

    return (ret == -1) ? -1 : 0;
}

#endif