return 0;
}
-
static int handle_pde_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
pde64_t * shadow_pd, pde64_t * guest_pd) {
pt_access_status_t guest_pde_access;
pt_access_status_t shadow_pde_access;
pde64_t * guest_pde = (pde64_t *)&(guest_pd[PDE64_INDEX(fault_addr)]);
pde64_t * shadow_pde = (pde64_t *)&(shadow_pd[PDE64_INDEX(fault_addr)]);

// Check the guest and shadow page table permissions for this access
guest_pde_access = v3_can_access_pde64(guest_pd, fault_addr, error_code);
shadow_pde_access = v3_can_access_pde64(shadow_pd, fault_addr, error_code);
- // Handle as a shadow large page if possible
- if (guest_pde->large_page
- && (info->vm_info->mem_align >= PAGE_SIZE_2MB)) {
- if (handle_2MB_shadow_pagefault_pde_64(info, fault_addr, error_code, shadow_pde_access,
- (pde64_2MB_t *)shadow_pde, (pde64_2MB_t *)guest_pde) == -1) {
- PrintError("Error handling large pagefault with large page\n");
- return -1;
- } else {
- return 0;
- }
- }
-
pte64_t * shadow_pt = NULL;
pte64_t * guest_pt = NULL;
// Get the next level shadow page table, allocating it if not present
if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
+ // Check whether large pages are enabled and the guest maps a large page here;
+ // if the backing guest memory is properly aligned we can shadow it with a 2MB page too
+
+ if ((info->use_large_pages == 1) && (guest_pde->large_page == 1)) {
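+ // Recover the guest physical address mapped by this 2MB guest PDE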
+ addr_t guest_pa = BASE_TO_PAGE_ADDR_2MB(((pde64_2MB_t *)guest_pde)->page_base_addr);
+ uint32_t page_size = v3_get_max_page_size(info, guest_pa, LONG);
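+ // v3_get_max_page_size() reports the largest page size that can back this
+ // guest physical address; it only returns PAGE_SIZE_2MB when the whole 2MB
+ // region is contiguously backed and suitably aligned on the host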
+
+ if (page_size == PAGE_SIZE_2MB) {
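+ // The full 2MB region is safely backed, so mirror the guest's
+ // large page directly in the shadow PDE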
+ if (handle_2MB_shadow_pagefault_pde_64(info, fault_addr, error_code, shadow_pde_access,
+ (pde64_2MB_t *)shadow_pde, (pde64_2MB_t *)guest_pde) == -1) {
+ PrintError("Error handling large pagefault with large page\n");
+ return -1;
+ }
+
+ return 0;
+ }
+ // Fallthrough to handle the region with small pages
+ }
+
struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
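+ // V3_VAddr() converts the new shadow page table's host physical address
+ // into a host virtual address we can write through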
shadow_pt = (pte64_t *)V3_VAddr((void *)shdw_page->page_pa);
shadow_pde->present = 1;
shadow_pde->user_page = guest_pde->user_page;
-
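+ // For a 4KB-page guest PDE the writable bit can simply mirror the guest's;
+ // a 2MB guest page shadowed with 4KB pages is handled separately below so
+ // the guest PDE's dirty bit can be emulated on the first write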
if (guest_pde->large_page == 0) {
shadow_pde->writable = guest_pde->writable;
} else {