* *
*/
-static int handle_2MB_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
- pte64_t * shadow_pt, pde64_2MB_t * large_guest_pde);
+static int handle_2MB_shadow_pagefault_pde_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
+ pt_access_status_t shadow_pde_access, pde64_2MB_t * large_shadow_pde,
+ pde64_2MB_t * large_guest_pde);
+static int handle_2MB_shadow_pagefault_pte_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
+ pte64_t * shadow_pt, pde64_2MB_t * large_guest_pde);
static int handle_pte_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
pte64_t * shadow_pt, pte64_t * guest_pt);
return 0;
}
+ // Handle as a shadow large page if possible
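+ // A direct 2MB shadow mapping is only safe when the host memory backing
+ // the guest is at least 2MB aligned (vm->mem_align, set from the config)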
+ if (guest_pde->large_page
+ && (info->vm_info->mem_align >= PAGE_SIZE_2MB)) {
+ if (handle_2MB_shadow_pagefault_pde_64(info, fault_addr, error_code, shadow_pde_access,
+ (pde64_2MB_t *)shadow_pde, (pde64_2MB_t *)guest_pde) == -1) {
+ PrintError("Error handling large pagefault with large page\n");
+ return -1;
+ } else {
+ return 0;
+ }
+ }
pte64_t * shadow_pt = NULL;
pte64_t * guest_pt = NULL;
- // Get the next shadow page level, allocate if not present
-
+ // Get the next shadow page level, allocating it if not present
if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
shadow_pt = (pte64_t *)V3_VAddr((void *)shdw_page->page_pa);
return -1;
}
} else {
- if (handle_2MB_shadow_pagefault_64(info, fault_addr, error_code, shadow_pt, (pde64_2MB_t *)guest_pde) == -1) {
- PrintError("Error handling large pagefault\n");
+ if (handle_2MB_shadow_pagefault_pte_64(info, fault_addr, error_code, shadow_pt, (pde64_2MB_t *)guest_pde) == -1) {
+ PrintError("Error handling large pagefault with small page\n");
return -1;
}
}
}
+static int handle_2MB_shadow_pagefault_pde_64(struct guest_info * info,
+ addr_t fault_addr, pf_error_t error_code,
+ pt_access_status_t shadow_pde_access,
+ pde64_2MB_t * large_shadow_pde, pde64_2MB_t * large_guest_pde)
+{
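+ // Guest physical address of the fault: the 2MB frame base taken from
+ // the guest PDE plus the offset within that 2MB frame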
+ addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_2MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_2MB(fault_addr);
+ // struct shadow_page_state * state = &(info->shdw_pg_state);
+
+ PrintDebug("Handling 2MB fault with large page (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
+ PrintDebug("LargeShadowPDE=%p, LargeGuestPDE=%p\n", large_shadow_pde, large_guest_pde);
+
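+ // Look up the memory region that backs the faulting guest physical address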
+ struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_fault_pa);
+
+ if (shdw_reg == NULL) {
+ // Inject a machine check in the guest
+ PrintError("Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
+ v3_raise_exception(info, MC_EXCEPTION);
+ return 0;
+ }
+
+ if (shadow_pde_access == PT_ACCESS_OK) {
+ // Inconsistent state...
+ // Guest re-entry will flush the tables and everything should then work
+ PrintDebug("Inconsistent state... Guest re-entry should flush the TLB\n");
+ //PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
+ return 0;
+ }
+
+
+ if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
+ // The shadow PDE is not present yet, so build the 2MB shadow mapping
+
+ if ((shdw_reg->flags.alloced == 1) ||
+ (shdw_reg->flags.read == 1)) {
+ addr_t shadow_pa = 0;
+
+ if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) {
+ PrintError("could not translate page fault address (%p)\n", (void *)guest_fault_pa);
+ return -1;
+ }
+
+ large_guest_pde->vmm_info = V3_LARGE_PG; /* For invalidations */
+ large_shadow_pde->page_base_addr = PAGE_BASE_ADDR_2MB(shadow_pa);
+ large_shadow_pde->large_page = 1;
+ large_shadow_pde->present = 1;
+ large_shadow_pde->user_page = 1;
+
+ if (shdw_reg->flags.write == 0) {
+ large_shadow_pde->writable = 0;
+ } else {
+ large_shadow_pde->writable = 1;
+ }
+
+ // Set caching/TLB attributes according to VMM policy
+ // (currently mirrored straight from the guest PDE)
+ large_shadow_pde->write_through = large_guest_pde->write_through;
+ large_shadow_pde->cache_disable = large_guest_pde->cache_disable;
+ large_shadow_pde->global_page = large_guest_pde->global_page;
+
+ } else {
+ if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
+ PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
+ return -1;
+ }
+ }
+ } else if (shadow_pde_access == PT_ACCESS_WRITE_ERROR) {
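+ // The shadow PDE was mapped read-only (the region disallows direct
+ // writes), so hand the write fault to the region's handler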
+ if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
+ PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
+ return -1;
+ }
+ } else {
+ PrintError("Error in large page fault handler...\n");
+ PrintError("This case should have been handled at the top level handler\n");
+ return -1;
+ }
+
+ // PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
+ PrintDebug("Returning from large page->large page fault handler\n");
+ return 0;
+}
-static int handle_2MB_shadow_pagefault_64(struct guest_info * info,
- addr_t fault_addr, pf_error_t error_code,
- pte64_t * shadow_pt, pde64_2MB_t * large_guest_pde)
+static int handle_2MB_shadow_pagefault_pte_64(struct guest_info * info,
+ addr_t fault_addr, pf_error_t error_code,
+ pte64_t * shadow_pt, pde64_2MB_t * large_guest_pde)
{
pt_access_status_t shadow_pte_access = v3_can_access_pte64(shadow_pt, fault_addr, error_code);
pte64_t * shadow_pte = (pte64_t *)&(shadow_pt[PTE64_INDEX(fault_addr)]);
}
// PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
- PrintDebug("Returning from large page fault handler\n");
+ PrintDebug("Returning from large page->small page fault handler\n");
return 0;
}
return cfg;
}
+
+static inline uint32_t get_alignment(char * align_str) {
+ // default is 4KB alignment
+ uint32_t alignment = PAGE_SIZE_4KB;
+
+ if (align_str != NULL) {
+ if (strcasecmp(align_str, "2MB") == 0) {
+ alignment = PAGE_SIZE_2MB;
+ } else if (strcasecmp(align_str, "4MB") == 0) {
+ alignment = PAGE_SIZE_4MB;
+ }
+ }
+
+#ifndef CONFIG_ALIGNED_PG_ALLOC
+ if (alignment != PAGE_SIZE_4KB) {
+ PrintError("Aligned page allocations are not supported in this host (requested alignment=%d)\n", alignment);
+ PrintError("Ignoring alignment request\n");
+ alignment = PAGE_SIZE_4KB;
+ }
+#endif
+
+ return alignment;
+}
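+
+/* Example guest configuration using the new attribute -- a sketch, assuming
+ * the usual child-element form of the <memory> tag; the attribute name
+ * matches the v3_cfg_val() lookup in pre_config_vm(), and the element body
+ * is the guest memory size in MB:
+ *
+ *   <memory alignment="2MB">256</memory>
+ *
+ * Values other than "2MB" or "4MB" fall back to 4KB alignment.
+ */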
static int pre_config_vm(struct v3_vm_info * vm, v3_cfg_tree_t * vm_cfg) {
char * memory_str = v3_cfg_val(vm_cfg, "memory");
char * schedule_hz_str = v3_cfg_val(vm_cfg, "schedule_hz");
char * vm_class = v3_cfg_val(vm_cfg, "class");
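+ // the "alignment" attribute of the <memory> tag is optional (may be NULL)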
+ char * align_str = v3_cfg_val(v3_cfg_subtree(vm_cfg, "memory"), "alignment");
uint32_t sched_hz = 100; // set the schedule frequency to 100 HZ
if (!memory_str) {
}
PrintDebug("Memory=%s\n", memory_str);
+ if (align_str) {
+ PrintDebug("Alignment=%s\n", align_str);
+ } else {
+ PrintDebug("Alignment defaulted to 4KB.\n");
+ }
// Amount of ram the Guest will have, always in MB
vm->mem_size = atoi(memory_str) * 1024 * 1024;
-
+ vm->mem_align = get_alignment(align_str);
+
+ PrintDebug("Alignment computed as 0x%x\n", vm->mem_align);
+
if (strcasecmp(vm_class, "PC") == 0) {
vm->vm_class = V3_PC_VM;
} else {
return -1;
}
-
-static inline uint32_t get_alignment(char * align_str) {
- if (align_str != NULL) {
- if (strncasecmp(align_str, "2MB", strlen("2MB")) == 0) {
- return PAGE_SIZE_2MB;
- } else if (strncasecmp(align_str, "4MB", strlen("4MB")) == 0) {
- return PAGE_SIZE_4MB;
- }
- }
-
- // default is 4KB alignment
- return PAGE_SIZE_4KB;
-}
-
int v3_init_mem_map(struct v3_vm_info * vm) {
struct v3_mem_map * map = &(vm->mem_map);
- v3_cfg_tree_t * pg_cfg = v3_cfg_subtree(vm->cfg_data->cfg, "memory");
- uint32_t alignment = get_alignment(v3_cfg_val(pg_cfg, "alignment"));
addr_t mem_pages = vm->mem_size >> 12;
memset(&(map->base_region), 0, sizeof(struct v3_mem_region));
map->mem_regions.rb_node = NULL;
-
// There is an underlying region that contains all of the guest memory
// PrintDebug("Mapping %d pages of memory (%u bytes)\n", (int)mem_pages, (uint_t)info->mem_size);
map->base_region.guest_start = 0;
map->base_region.guest_end = mem_pages * PAGE_SIZE_4KB;
-#ifdef ALIGNED_PG_ALLOC
- map->base_region.host_addr = (addr_t)V3_AllocAlignedPages(mem_pages, alignment);
+#ifdef CONFIG_ALIGNED_PG_ALLOC
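+ // Allocate the guest's base memory region with the configured alignment
+ // so that 2MB large pages can be mapped through the shadow page tables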
+ map->base_region.host_addr = (addr_t)V3_AllocAlignedPages(mem_pages, vm->mem_align);
#else
- if (alignment != PAGE_SIZE_4KB) {
- PrintError("Aligned page allocations are not supported in this host (requested alignment=%d)\n", alignment);
- PrintError("Ignoring alignment request\n");
- }
map->base_region.host_addr = (addr_t)V3_AllocPages(mem_pages);
#endif