/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
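/* Hooks for the optional transactional memory extension; when
 * V3_CONFIG_TM_FUNC is enabled these are invoked from the PTE fault
 * path below. */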
#ifdef V3_CONFIG_TM_FUNC
#include <extensions/trans_mem.h>
#endif
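/* Activate shadow paging by installing a fresh top-level (PML4) shadow page
 * as the real CR3. Only the PWT/PCD cache-control bits are copied through
 * from the guest's CR3, since the hardware now walks the shadow tables
 * rather than the guest's. */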
static inline int activate_shadow_pt_64(struct guest_info * info) {
    struct cr3_64 * shadow_cr3 = (struct cr3_64 *)&(info->ctrl_regs.cr3);
    struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->shdw_pg_state.guest_cr3);
    struct shadow_page_data * shadow_pt = create_new_shadow_pt(info);
    addr_t shadow_pt_addr = shadow_pt->page_pa;

    // Because this is a new CR3 load, the allocated page is the new CR3 value
    shadow_pt->cr3 = shadow_pt->page_pa;

    PrintDebug(info->vm_info, info, "Top level Shadow page pa=%p\n", (void *)shadow_pt_addr);

    shadow_cr3->pml4t_base_addr = PAGE_BASE_ADDR_4KB(shadow_pt_addr);
    PrintDebug(info->vm_info, info, "Creating new 64 bit shadow page table %p\n", (void *)BASE_TO_PAGE_ADDR(shadow_cr3->pml4t_base_addr));

    shadow_cr3->pwt = guest_cr3->pwt;
    shadow_cr3->pcd = guest_cr3->pcd;

    return 0;
}
/*
 * 64 bit Page table fault handlers
 */
static int handle_2MB_shadow_pagefault_pde_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                              pt_access_status_t shadow_pde_access, pde64_2MB_t * large_shadow_pde,
                                              pde64_2MB_t * large_guest_pde);
static int handle_2MB_shadow_pagefault_pte_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                              pte64_t * shadow_pt, pde64_2MB_t * large_guest_pde);

static int handle_pte_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pte64_t * shadow_pt, pte64_t * guest_pt);

static int handle_pde_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pde64_t * shadow_pd, pde64_t * guest_pd);

static int handle_pdpe_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                           pdpe64_t * shadow_pdp, pdpe64_t * guest_pdp);
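/* Top level page fault handler for 64 bit shadow paging. Walks the guest and
 * shadow PML4s in parallel: faults caused by the guest's own page tables are
 * re-injected into the guest, a not-present shadow PML4E gets a newly
 * allocated shadow PDP page, and the fault is then pushed down to the PDPE
 * handler. */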
static inline int handle_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
    pml4e64_t * guest_pml = NULL;
    pml4e64_t * shadow_pml = CR3_TO_PML4E64_VA(info->ctrl_regs.cr3);
    addr_t guest_cr3 = CR3_TO_PML4E64_PA(info->shdw_pg_state.guest_cr3);
    pt_access_status_t guest_pml4e_access;
    pt_access_status_t shadow_pml4e_access;
    pml4e64_t * guest_pml4e = NULL;
    pml4e64_t * shadow_pml4e = (pml4e64_t *)&(shadow_pml[PML4E64_INDEX(fault_addr)]);

    PrintDebug(info->vm_info, info, "64 bit Shadow page fault handler: %p\n", (void *)fault_addr);
    PrintDebug(info->vm_info, info, "Handling PML fault\n");

    if (v3_gpa_to_hva(info, guest_cr3, (addr_t *)&guest_pml) == -1) {
        PrintError(info->vm_info, info, "Invalid Guest PML4E Address: 0x%p\n", (void *)guest_cr3);
        return -1;
    }

    guest_pml4e = (pml4e64_t *)&(guest_pml[PML4E64_INDEX(fault_addr)]);

    PrintDebug(info->vm_info, info, "Checking Guest %p\n", (void *)guest_pml);
    // Check the guest page permissions
    guest_pml4e_access = v3_can_access_pml4e64(guest_pml, fault_addr, error_code);

    PrintDebug(info->vm_info, info, "Checking shadow %p\n", (void *)shadow_pml);
    // Check the shadow page permissions
    shadow_pml4e_access = v3_can_access_pml4e64(shadow_pml, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (v3_is_guest_pf(guest_pml4e_access, shadow_pml4e_access) == 1) {
        PrintDebug(info->vm_info, info, "Injecting PML4E pf to guest: (guest access error=%d) (pf error code=%d)\n",
                   *(uint_t *)&guest_pml4e_access, *(uint_t *)&error_code);
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError(info->vm_info, info, "Could not inject guest page fault\n");
            return -1;
        }
        return 0;
    }

    if (shadow_pml4e_access == PT_ACCESS_USER_ERROR) {
        //
        // PML4 Entry marked non-user
        //
        PrintDebug(info->vm_info, info, "Shadow Paging User access error (shadow_pml4e_access=0x%x, guest_pml4e_access=0x%x)\n",
                   shadow_pml4e_access, guest_pml4e_access);
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError(info->vm_info, info, "Could not inject guest page fault\n");
            return -1;
        }
        return 0;
    } else if ((shadow_pml4e_access != PT_ACCESS_NOT_PRESENT) &&
               (shadow_pml4e_access != PT_ACCESS_OK)) {
        // inject page fault in guest
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError(info->vm_info, info, "Could not inject guest page fault\n");
            return -1;
        }
        PrintDebug(info->vm_info, info, "Unknown Error occurred (shadow_pml4e_access=%d)\n", shadow_pml4e_access);
        PrintDebug(info->vm_info, info, "Manual Says to inject page fault into guest\n");
        return 0;
    }

    pdpe64_t * shadow_pdp = NULL;
    pdpe64_t * guest_pdp = NULL;

    // Get the next shadow page level, allocate if not present

    if (shadow_pml4e_access == PT_ACCESS_NOT_PRESENT) {
        struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
        shadow_pdp = (pdpe64_t *)V3_VAddr((void *)shdw_page->page_pa);

        shadow_pml4e->present = 1;
        shadow_pml4e->user_page = guest_pml4e->user_page;
        shadow_pml4e->writable = guest_pml4e->writable;
        shadow_pml4e->cache_disable = guest_pml4e->cache_disable;
        shadow_pml4e->write_through = guest_pml4e->write_through;

        guest_pml4e->accessed = 1;

        shadow_pml4e->pdp_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pdp = (pdpe64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pml4e->pdp_base_addr));
    }

    // Continue processing at the next level

    if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr), (addr_t *)&guest_pdp) == -1) {
        // Machine check the guest
        PrintError(info->vm_info, info, "Invalid Guest PDP Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr));
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    if (handle_pdpe_shadow_pagefault_64(info, fault_addr, error_code, shadow_pdp, guest_pdp) == -1) {
        PrintError(info->vm_info, info, "Error handling Page fault caused by PDPE\n");
        return -1;
    }

    return 0;
}
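/* PDPE-level handler. Mirrors the PML4 logic one level down: re-inject
 * guest-induced faults, fill in a not-present shadow PDPE (allocating a
 * shadow page directory if needed), then recurse into the PDE handler. */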
// For now we are not going to handle 1 Gigabyte pages
static int handle_pdpe_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                           pdpe64_t * shadow_pdp, pdpe64_t * guest_pdp) {
    pt_access_status_t guest_pdpe_access;
    pt_access_status_t shadow_pdpe_access;
    pdpe64_t * guest_pdpe = (pdpe64_t *)&(guest_pdp[PDPE64_INDEX(fault_addr)]);
    pdpe64_t * shadow_pdpe = (pdpe64_t *)&(shadow_pdp[PDPE64_INDEX(fault_addr)]);

    PrintDebug(info->vm_info, info, "Handling PDP fault\n");

    if (fault_addr == 0) {
        PrintDebug(info->vm_info, info, "Guest Page Tree for guest virtual address zero fault\n");
        PrintGuestPageTree(info, fault_addr, (addr_t)(info->shdw_pg_state.guest_cr3));
        PrintDebug(info->vm_info, info, "Host Page Tree for guest virtual address zero fault\n");
        PrintHostPageTree(info, fault_addr, (addr_t)(info->ctrl_regs.cr3));
    }

    // Check the guest page permissions
    guest_pdpe_access = v3_can_access_pdpe64(guest_pdp, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pdpe_access = v3_can_access_pdpe64(shadow_pdp, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (v3_is_guest_pf(guest_pdpe_access, shadow_pdpe_access) == 1) {
        PrintDebug(info->vm_info, info, "Injecting PDPE pf to guest: (guest access error=%d) (pf error code=%d)\n",
                   *(uint_t *)&guest_pdpe_access, *(uint_t *)&error_code);
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError(info->vm_info, info, "Could not inject guest page fault\n");
            return -1;
        }
        return 0;
    }

    if (shadow_pdpe_access == PT_ACCESS_USER_ERROR) {
        //
        // PDP Entry marked non-user
        //
        PrintDebug(info->vm_info, info, "Shadow Paging User access error (shadow_pdpe_access=0x%x, guest_pdpe_access=0x%x)\n",
                   shadow_pdpe_access, guest_pdpe_access);
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError(info->vm_info, info, "Could not inject guest page fault\n");
            return -1;
        }
        return 0;
    } else if ((shadow_pdpe_access != PT_ACCESS_NOT_PRESENT) &&
               (shadow_pdpe_access != PT_ACCESS_OK)) {
        // inject page fault in guest
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError(info->vm_info, info, "Could not inject guest page fault\n");
            return -1;
        }
        PrintDebug(info->vm_info, info, "Unknown Error occurred (shadow_pdpe_access=%d)\n", shadow_pdpe_access);
        PrintDebug(info->vm_info, info, "Manual Says to inject page fault into guest\n");
        return 0;
    }

    pde64_t * shadow_pd = NULL;
    pde64_t * guest_pd = NULL;

    // Get the next shadow page level, allocate if not present

    if (shadow_pdpe_access == PT_ACCESS_NOT_PRESENT) {
        struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
        shadow_pd = (pde64_t *)V3_VAddr((void *)shdw_page->page_pa);

        shadow_pdpe->present = 1;
        shadow_pdpe->user_page = guest_pdpe->user_page;
        shadow_pdpe->writable = guest_pdpe->writable;
        shadow_pdpe->write_through = guest_pdpe->write_through;
        shadow_pdpe->cache_disable = guest_pdpe->cache_disable;

        guest_pdpe->accessed = 1;

        shadow_pdpe->pd_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pd = (pde64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pdpe->pd_base_addr));
    }

    // Continue processing at the next level

    if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr), (addr_t *)&guest_pd) == -1) {
        // Machine check the guest
        PrintError(info->vm_info, info, "Invalid Guest PD Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr));
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    if (handle_pde_shadow_pagefault_64(info, fault_addr, error_code, shadow_pd, guest_pd) == -1) {
        PrintError(info->vm_info, info, "Error handling Page fault caused by PDE\n");
        return -1;
    }

    return 0;
}
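/* PDE-level handler. This is the first level where 2MB guest pages appear:
 * a write fault on a present large page just propagates the dirty bit, while
 * a not-present shadow PDE is backed either by a real 2MB shadow page (when
 * v3_get_max_page_size() allows it) or by a newly allocated 4KB shadow page
 * table. */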
static int handle_pde_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pde64_t * shadow_pd, pde64_t * guest_pd) {
    pt_access_status_t guest_pde_access;
    pt_access_status_t shadow_pde_access;
    pde64_t * guest_pde = (pde64_t *)&(guest_pd[PDE64_INDEX(fault_addr)]);
    pde64_t * shadow_pde = (pde64_t *)&(shadow_pd[PDE64_INDEX(fault_addr)]);

    PrintDebug(info->vm_info, info, "Handling PDE fault\n");

    // Check the guest page permissions
    guest_pde_access = v3_can_access_pde64(guest_pd, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pde_access = v3_can_access_pde64(shadow_pd, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (v3_is_guest_pf(guest_pde_access, shadow_pde_access) == 1) {
        PrintDebug(info->vm_info, info, "Injecting PDE pf to guest: (guest access error=%d) (pf error code=%d)\n",
                   *(uint_t *)&guest_pde_access, *(uint_t *)&error_code);
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError(info->vm_info, info, "Could not inject guest page fault\n");
            return -1;
        }
        return 0;
    }

    if (shadow_pde_access == PT_ACCESS_USER_ERROR) {
        //
        // PDE Entry marked non-user
        //
        PrintDebug(info->vm_info, info, "Shadow Paging User access error (shadow_pde_access=0x%x, guest_pde_access=0x%x)\n",
                   shadow_pde_access, guest_pde_access);
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError(info->vm_info, info, "Could not inject guest page fault\n");
            return -1;
        }
        return 0;
    } else if ((shadow_pde_access == PT_ACCESS_WRITE_ERROR) &&
               (guest_pde->large_page == 1)) {

        ((pde64_2MB_t *)guest_pde)->dirty = 1;
        shadow_pde->writable = guest_pde->writable;

        //PrintDebug(info->vm_info, info, "Returning due to large page Write Error\n");
        //PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);

        return 0;
    } else if ((shadow_pde_access != PT_ACCESS_NOT_PRESENT) &&
               (shadow_pde_access != PT_ACCESS_OK)) {
        // inject page fault in guest
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError(info->vm_info, info, "Could not inject guest page fault\n");
            return -1;
        }
        PrintDebug(info->vm_info, info, "Unknown Error occurred (shadow_pde_access=%d)\n", shadow_pde_access);
        PrintDebug(info->vm_info, info, "Manual Says to inject page fault into guest\n");
        return 0;
    }

    pte64_t * shadow_pt = NULL;
    pte64_t * guest_pt = NULL;

    // get the next shadow page level, allocate if not present
    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
        // Check if we can use large pages and the guest memory is properly aligned
        // to potentially use a large page

        if ((info->use_large_pages == 1) && (guest_pde->large_page == 1)) {
            addr_t guest_pa = BASE_TO_PAGE_ADDR_2MB(((pde64_2MB_t *)guest_pde)->page_base_addr);
            uint32_t page_size = v3_get_max_page_size(info, guest_pa, LONG);

            if (page_size == PAGE_SIZE_2MB) {
                if (handle_2MB_shadow_pagefault_pde_64(info, fault_addr, error_code, shadow_pde_access,
                                                       (pde64_2MB_t *)shadow_pde, (pde64_2MB_t *)guest_pde) == -1) {
                    PrintError(info->vm_info, info, "Error handling large pagefault with large page\n");
                    return -1;
                }

                return 0;
            }
            // Fallthrough to handle the region with small pages
        }

        struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
        shadow_pt = (pte64_t *)V3_VAddr((void *)shdw_page->page_pa);

        PrintDebug(info->vm_info, info, "Creating new shadow PT: %p\n", shadow_pt);

        shadow_pde->present = 1;
        shadow_pde->user_page = guest_pde->user_page;

        if (guest_pde->large_page == 0) {
            shadow_pde->writable = guest_pde->writable;
        } else {
            // This large page flag is temporary until we can get a working cache....
            ((pde64_2MB_t *)guest_pde)->vmm_info = V3_LARGE_PG;

            if (error_code.write) {
                shadow_pde->writable = guest_pde->writable;
                ((pde64_2MB_t *)guest_pde)->dirty = 1;
            } else {
                shadow_pde->writable = 0;
                ((pde64_2MB_t *)guest_pde)->dirty = 0;
            }
        }

        // VMM Specific options
        shadow_pde->write_through = guest_pde->write_through;
        shadow_pde->cache_disable = guest_pde->cache_disable;
        shadow_pde->global_page = guest_pde->global_page;

        guest_pde->accessed = 1;

        shadow_pde->pt_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pt = (pte64_t *)V3_VAddr((void *)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr));
    }

    // Continue processing at the next level
    if (guest_pde->large_page == 0) {
        if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t *)&guest_pt) == -1) {
            // Machine check the guest
            PrintError(info->vm_info, info, "Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
            v3_raise_exception(info, MC_EXCEPTION);
            return 0;
        }

        if (handle_pte_shadow_pagefault_64(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) {
            PrintError(info->vm_info, info, "Error handling Page fault caused by PTE\n");
            return -1;
        }
    } else {
        if (handle_2MB_shadow_pagefault_pte_64(info, fault_addr, error_code, shadow_pt, (pde64_2MB_t *)guest_pde) == -1) {
            PrintError(info->vm_info, info, "Error handling large pagefault with small page\n");
            return -1;
        }
    }

    return 0;
}
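/* PTE-level (4KB leaf) handler. On a not-present shadow PTE the guest
 * physical address is translated to a host physical address and a shadow PTE
 * is built that emulates the guest's accessed/dirty bit semantics; write
 * hooks on the memory region force the entry read-only. A write fault on a
 * present entry performs the dirty-bit update. */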
static int handle_pte_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pte64_t * shadow_pt, pte64_t * guest_pt) {
    pt_access_status_t guest_pte_access;
    pt_access_status_t shadow_pte_access;
    pte64_t * guest_pte = (pte64_t *)&(guest_pt[PTE64_INDEX(fault_addr)]);
    pte64_t * shadow_pte = (pte64_t *)&(shadow_pt[PTE64_INDEX(fault_addr)]);
    addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);
    // struct shadow_page_state * state = &(info->shdw_pg_state);

    PrintDebug(info->vm_info, info, "Handling PTE fault\n");

    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_pa);

    if (shdw_reg == NULL) {
        // Inject a machine check in the guest
        PrintError(info->vm_info, info, "Invalid Guest Address in page table (0x%p)\n", (void *)guest_pa);
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    // Check the guest page permissions
    guest_pte_access = v3_can_access_pte64(guest_pt, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pte_access = v3_can_access_pte64(shadow_pt, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (v3_is_guest_pf(guest_pte_access, shadow_pte_access) == 1) {

        PrintDebug(info->vm_info, info, "Access error injecting pf to guest (guest access error=%d) (pf error code=%d)\n",
                   guest_pte_access, *(uint_t *)&error_code);

        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError(info->vm_info, info, "Could not inject guest page fault\n");
            return -1;
        }

        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest Re-Entry will flush page tables and everything should now work
        PrintDebug(info->vm_info, info, "Inconsistent state... Guest re-entry should flush tlb\n");
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Page Table Entry Not Present
        PrintDebug(info->vm_info, info, "guest_pa =%p\n", (void *)guest_pa);

#ifdef V3_CONFIG_TM_FUNC
        addr_t page_to_use;

        if (v3_tm_handle_pf_64(info, error_code, fault_addr, &page_to_use) == -1) {
            return -1;
        }
#endif

        if ((shdw_reg->flags.alloced == 1) ||
            (shdw_reg->flags.read == 1)) {
            addr_t shadow_pa = 0;

            if (v3_gpa_to_hpa(info, guest_pa, &shadow_pa) == -1) {
                PrintError(info->vm_info, info, "could not translate page fault address (%p)\n", (void *)guest_pa);
                return -1;
            }

#ifdef V3_CONFIG_TM_FUNC
            v3_tm_handle_usr_tlb_miss(info, error_code, page_to_use, &shadow_pa);
#endif

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);

            shadow_pte->present = guest_pte->present;
            shadow_pte->user_page = guest_pte->user_page;

            // set according to VMM policy
            shadow_pte->write_through = guest_pte->write_through;
            shadow_pte->cache_disable = guest_pte->cache_disable;
            shadow_pte->global_page = guest_pte->global_page;

            guest_pte->accessed = 1;

            if (guest_pte->dirty == 1) {
                shadow_pte->writable = guest_pte->writable;
            } else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
                shadow_pte->writable = guest_pte->writable;
                guest_pte->dirty = 1;
            } else if ((guest_pte->dirty == 0) && (error_code.write == 0)) {
                shadow_pte->writable = 0;
            }

            // Write hooks trump all, and are set Read Only
            if (shdw_reg->flags.write == 0) {
                shadow_pte->writable = 0;
            }

#ifdef V3_CONFIG_TM_FUNC
            v3_tm_handle_read_fault(info, error_code, shadow_pte);
#endif
        } else {
            // Pass to unhandled call back
            if (shdw_reg->unhandled(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                PrintError(info->vm_info, info, "Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
        guest_pte->dirty = 1;

        if (shdw_reg->flags.write == 1) {
            PrintDebug(info->vm_info, info, "Shadow PTE Write Error\n");
            shadow_pte->writable = guest_pte->writable;
        } else {
            if (shdw_reg->unhandled(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                PrintError(info->vm_info, info, "Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }

        return 0;
    } else {
        // Inject page fault into the guest
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError(info->vm_info, info, "Could not inject guest page fault\n");
            return -1;
        }
        PrintError(info->vm_info, info, "PTE Page fault fell through... Not sure if this should ever happen\n");
        PrintError(info->vm_info, info, "Manual Says to inject page fault into guest\n");
        return -1;
    }

    return 0;
}
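/* Handle a fault on a 2MB guest page that can also be backed by a full 2MB
 * shadow page: the shadow PDE maps the large page directly and no shadow
 * page table level is used. */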
static int handle_2MB_shadow_pagefault_pde_64(struct guest_info * info,
                                              addr_t fault_addr, pf_error_t error_code,
                                              pt_access_status_t shadow_pde_access,
                                              pde64_2MB_t * large_shadow_pde, pde64_2MB_t * large_guest_pde)
{
    addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_2MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_2MB(fault_addr);
    // struct shadow_page_state * state = &(info->shdw_pg_state);

    PrintDebug(info->vm_info, info, "Handling 2MB fault with large page (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t *)&error_code);
    PrintDebug(info->vm_info, info, "LargeShadowPDE=%p, LargeGuestPDE=%p\n", large_shadow_pde, large_guest_pde);

    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_fault_pa);

    if (shdw_reg == NULL) {
        // Inject a machine check in the guest
        PrintError(info->vm_info, info, "Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    if (shadow_pde_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest Re-Entry will flush tables and everything should now work
        PrintDebug(info->vm_info, info, "Inconsistent state... Guest re-entry should flush tlb\n");
        //PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
        return 0;
    }

    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
        // Get the guest physical address of the fault

        if ((shdw_reg->flags.alloced == 1) ||
            (shdw_reg->flags.read == 1)) {
            addr_t shadow_pa = 0;

            if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) {
                PrintError(info->vm_info, info, "could not translate page fault address (%p)\n", (void *)guest_fault_pa);
                return -1;
            }

            large_guest_pde->vmm_info = V3_LARGE_PG; /* For invalidations */
            large_shadow_pde->page_base_addr = PAGE_BASE_ADDR_2MB(shadow_pa);
            large_shadow_pde->large_page = 1;
            large_shadow_pde->present = 1;
            large_shadow_pde->user_page = 1;

            if (shdw_reg->flags.write == 0) {
                large_shadow_pde->writable = 0;
            } else {
                large_shadow_pde->writable = 1;
            }

            // set according to VMM policy
            large_shadow_pde->write_through = large_guest_pde->write_through;
            large_shadow_pde->cache_disable = large_guest_pde->cache_disable;
            large_shadow_pde->global_page = large_guest_pde->global_page;
        } else {
            if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pde_access == PT_ACCESS_WRITE_ERROR) {
        if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
            PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
            return -1;
        }
    } else {
        PrintError(info->vm_info, info, "Error in large page fault handler...\n");
        PrintError(info->vm_info, info, "This case should have been handled at the top level handler\n");
        return -1;
    }

    // PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
    PrintDebug(info->vm_info, info, "Returning from large page->large page fault handler\n");
    return 0;
}
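/* Handle a fault on a 2MB guest page that must be backed by 4KB shadow pages
 * (used when the host side cannot supply an aligned 2MB page). Only the
 * faulting 4KB slice of the large page is mapped in; permissions are derived
 * from the guest's large PDE, which is assumed to take precedence. */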
static int handle_2MB_shadow_pagefault_pte_64(struct guest_info * info,
                                              addr_t fault_addr, pf_error_t error_code,
                                              pte64_t * shadow_pt, pde64_2MB_t * large_guest_pde)
{
    pt_access_status_t shadow_pte_access = v3_can_access_pte64(shadow_pt, fault_addr, error_code);
    pte64_t * shadow_pte = (pte64_t *)&(shadow_pt[PTE64_INDEX(fault_addr)]);
    addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_2MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_2MB(fault_addr);
    // struct shadow_page_state * state = &(info->shdw_pg_state);

    PrintDebug(info->vm_info, info, "Handling 2MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t *)&error_code);
    PrintDebug(info->vm_info, info, "ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);

    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_fault_pa);

    if (shdw_reg == NULL) {
        // Inject a machine check in the guest
        PrintError(info->vm_info, info, "Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest Re-Entry will flush tables and everything should now work
        PrintDebug(info->vm_info, info, "Inconsistent state... Guest re-entry should flush tlb\n");
        //PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Get the guest physical address of the fault

        if ((shdw_reg->flags.alloced == 1) ||
            (shdw_reg->flags.read == 1)) {
            addr_t shadow_pa = 0;

            if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) {
                PrintError(info->vm_info, info, "could not translate page fault address (%p)\n", (void *)guest_fault_pa);
                return -1;
            }

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);

            shadow_pte->present = 1;

            /* We are assuming that the PDE entry has precedence
             * so the Shadow PDE will mirror the guest PDE settings,
             * and we don't have to worry about them here
             */
            shadow_pte->user_page = 1;

            if (shdw_reg->flags.write == 0) {
                shadow_pte->writable = 0;
            } else {
                shadow_pte->writable = 1;
            }

            // set according to VMM policy
            shadow_pte->write_through = large_guest_pde->write_through;
            shadow_pte->cache_disable = large_guest_pde->cache_disable;
            shadow_pte->global_page = large_guest_pde->global_page;
        } else {
            if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
        if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
            PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
            return -1;
        }
    } else {
        PrintError(info->vm_info, info, "Error in large page fault handler...\n");
        PrintError(info->vm_info, info, "This case should have been handled at the top level handler\n");
        return -1;
    }

    // PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
    PrintDebug(info->vm_info, info, "Returning from large page->small page fault handler\n");
    return 0;
}
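/* Callback invoked at each level by v3_drill_host_pt_64() during INVLPG
 * emulation: it clears the shadow entry mapping vaddr. It returns 1 once the
 * mapping is invalidated (or was never present), 0 to keep drilling into the
 * next level, and -1 on error. */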
static int invalidation_cb_64(struct guest_info * info, page_type_t type,
                              addr_t vaddr, addr_t page_ptr, addr_t page_pa,
                              void * private_data) {

    switch (type) {
        case PAGE_PML464: {
            pml4e64_t * pml = (pml4e64_t *)page_ptr;

            if (pml[PML4E64_INDEX(vaddr)].present == 0) {
                return 1;
            }
            return 0;
        }
        case PAGE_PDP64: {
            pdpe64_t * pdp = (pdpe64_t *)page_ptr;
            pdpe64_t * pdpe = &(pdp[PDPE64_INDEX(vaddr)]);

            if (pdpe->present == 0) {
                return 1;
            }

            if (pdpe->vmm_info == V3_LARGE_PG) {
                PrintError(info->vm_info, info, "1 Gigabyte pages not supported\n");
                return -1;
            }
            return 0;
        }
        case PAGE_PD64: {
            pde64_t * pd = (pde64_t *)page_ptr;
            pde64_t * pde = &(pd[PDE64_INDEX(vaddr)]);

            if (pde->present == 0) {
                return 1;
            }

            if (pde->vmm_info == V3_LARGE_PG) {
                pde->present = 0;
                return 1;
            }
            return 0;
        }
        case PAGE_PT64: {
            pte64_t * pt = (pte64_t *)page_ptr;

            pt[PTE64_INDEX(vaddr)].present = 0;
            return 1;
        }
        default:
            PrintError(info->vm_info, info, "Invalid Page Type\n");
            return -1;
    }

    // should not get here
    PrintError(info->vm_info, info, "Should not get here....\n");
    return -1;
}
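/* INVLPG emulation entry point: drill the shadow page table for vaddr and
 * invalidate whatever maps it, forcing a re-shadow on the next access. */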
static inline int handle_shadow_invlpg_64(struct guest_info * info, addr_t vaddr) {
    PrintDebug(info->vm_info, info, "INVLPG64 - %p\n", (void *)vaddr);

    int ret = v3_drill_host_pt_64(info, info->ctrl_regs.cr3, vaddr, invalidation_cb_64, NULL);

    if (ret == -1) {
        PrintError(info->vm_info, info, "Page table drill returned error....\n");
        PrintHostPageTree(info, vaddr, info->ctrl_regs.cr3);
    }

    return (ret == -1) ? -1 : 0;
}