X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm_mem.c;h=dd262c0d02c75d1b20d753e5de0654fd4eb76d52;hb=cfd5e43722796b4951faf721c64f3a74ab81ff2b;hp=cb3f1446f7d4675080d50fbda1813497b8bb00d8;hpb=9959b08bfa0a2a0ee51866199c5b1d7b2b690237;p=palacios.git diff --git a/palacios/src/palacios/vmm_mem.c b/palacios/src/palacios/vmm_mem.c index cb3f144..dd262c0 100644 --- a/palacios/src/palacios/vmm_mem.c +++ b/palacios/src/palacios/vmm_mem.c @@ -27,9 +27,6 @@ #include -static inline -struct v3_shadow_region * insert_shadow_region(struct v3_vm_info * vm, - struct v3_shadow_region * region); static int mem_offset_hypercall(struct guest_info * info, uint_t hcall_id, void * private_data) { @@ -41,32 +38,51 @@ static int mem_offset_hypercall(struct guest_info * info, uint_t hcall_id, void return 0; } +static int unhandled_err(struct guest_info * core, addr_t guest_va, addr_t guest_pa, + struct v3_mem_region * reg, pf_error_t access_info) { -int v3_init_mem_map(struct v3_vm_info * vm) { - struct v3_mem_map * map = &(vm->mem_map); - addr_t mem_pages = vm->mem_size >> 12; + PrintError("Unhandled memory access error (gpa=%p, gva=%p, error_code=%d)\n", + (void *)guest_pa, (void *)guest_va, *(uint32_t *)&access_info); - memset(&(map->base_region), 0, sizeof(struct v3_shadow_region)); + v3_print_mem_map(core->vm_info); - map->shdw_regions.rb_node = NULL; + v3_print_guest_state(core); + + return -1; +} +int v3_init_mem_map(struct v3_vm_info * vm) { + struct v3_mem_map * map = &(vm->mem_map); + addr_t mem_pages = vm->mem_size >> 12; - map->hook_hvas = V3_VAddr(V3_AllocPages(vm->num_cores)); + memset(&(map->base_region), 0, sizeof(struct v3_mem_region)); + map->mem_regions.rb_node = NULL; // There is an underlying region that contains all of the guest memory // PrintDebug("Mapping %d pages of memory (%u bytes)\n", (int)mem_pages, (uint_t)info->mem_size); + // 2MB page alignment needed for 2MB hardware nested paging map->base_region.guest_start = 0; map->base_region.guest_end = mem_pages * PAGE_SIZE_4KB; - map->base_region.host_type = SHDW_REGION_ALLOCATED; + +#ifdef V3_CONFIG_ALIGNED_PG_ALLOC + map->base_region.host_addr = (addr_t)V3_AllocAlignedPages(mem_pages, vm->mem_align); +#else map->base_region.host_addr = (addr_t)V3_AllocPages(mem_pages); +#endif + + // Clear the memory... 
+ memset(V3_VAddr((void *)map->base_region.host_addr), 0, mem_pages * PAGE_SIZE_4KB); + map->base_region.flags.read = 1; map->base_region.flags.write = 1; map->base_region.flags.exec = 1; map->base_region.flags.base = 1; map->base_region.flags.alloced = 1; + + map->base_region.unhandled = unhandled_err; if ((void *)map->base_region.host_addr == NULL) { PrintError("Could not allocate Guest memory\n"); @@ -81,114 +97,60 @@ int v3_init_mem_map(struct v3_vm_info * vm) { } -static inline addr_t get_hook_hva(struct guest_info * info) { - return (addr_t)(info->vm_info->mem_map.hook_hvas + (PAGE_SIZE_4KB * info->cpu_id)); -} - -void v3_delete_shadow_map(struct v3_vm_info * vm) { - struct rb_node * node = v3_rb_first(&(vm->mem_map.shdw_regions)); - struct v3_shadow_region * reg; +void v3_delete_mem_map(struct v3_vm_info * vm) { + struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions)); + struct v3_mem_region * reg; struct rb_node * tmp_node = NULL; + addr_t mem_pages = vm->mem_size >> 12; while (node) { - reg = rb_entry(node, struct v3_shadow_region, tree_node); + reg = rb_entry(node, struct v3_mem_region, tree_node); tmp_node = node; node = v3_rb_next(node); - v3_delete_shadow_region(vm, reg); + v3_delete_mem_region(vm, reg); } - V3_FreePage((void *)(vm->mem_map.base_region.host_addr)); - V3_FreePage(V3_PAddr((void *)(vm->mem_map.hook_hvas))); + V3_FreePages((void *)(vm->mem_map.base_region.host_addr), mem_pages); } - - -int v3_add_shadow_mem( struct v3_vm_info * vm, uint16_t core_id, - addr_t guest_addr_start, - addr_t guest_addr_end, - addr_t host_addr) -{ - struct v3_shadow_region * entry = (struct v3_shadow_region *)V3_Malloc(sizeof(struct v3_shadow_region)); - memset(entry, 0, sizeof(struct v3_shadow_region)); +struct v3_mem_region * v3_create_mem_region(struct v3_vm_info * vm, uint16_t core_id, + addr_t guest_addr_start, addr_t guest_addr_end) { + + struct v3_mem_region * entry = (struct v3_mem_region *)V3_Malloc(sizeof(struct v3_mem_region)); + memset(entry, 0, sizeof(struct v3_mem_region)); entry->guest_start = guest_addr_start; entry->guest_end = guest_addr_end; - entry->host_type = SHDW_REGION_ALLOCATED; - entry->host_addr = host_addr; - entry->write_hook = NULL; - entry->read_hook = NULL; - entry->priv_data = NULL; entry->core_id = core_id; + entry->unhandled = unhandled_err; - entry->flags.read = 1; - entry->flags.write = 1; - entry->flags.exec = 1; - entry->flags.alloced = 1; - - if (insert_shadow_region(vm, entry)) { - V3_Free(entry); - return -1; - } - - return 0; + return entry; } -int v3_hook_write_mem(struct v3_vm_info * vm, uint16_t core_id, - addr_t guest_addr_start, addr_t guest_addr_end, addr_t host_addr, - int (*write)(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data), - void * priv_data) { - struct v3_shadow_region * entry = (struct v3_shadow_region *)V3_Malloc(sizeof(struct v3_shadow_region)); - memset(entry, 0, sizeof(struct v3_shadow_region)); +int v3_add_shadow_mem( struct v3_vm_info * vm, uint16_t core_id, + addr_t guest_addr_start, + addr_t guest_addr_end, + addr_t host_addr) +{ + struct v3_mem_region * entry = NULL; + + entry = v3_create_mem_region(vm, core_id, + guest_addr_start, + guest_addr_end); - entry->guest_start = guest_addr_start; - entry->guest_end = guest_addr_end; - entry->host_type = SHDW_REGION_WRITE_HOOK; entry->host_addr = host_addr; - entry->write_hook = write; - entry->read_hook = NULL; - entry->priv_data = priv_data; - entry->core_id = core_id; - entry->flags.hook = 1; entry->flags.read = 1; + 
entry->flags.write = 1; entry->flags.exec = 1; entry->flags.alloced = 1; - - if (insert_shadow_region(vm, entry)) { - V3_Free(entry); - return -1; - } - - return 0; -} - -int v3_hook_full_mem(struct v3_vm_info * vm, uint16_t core_id, - addr_t guest_addr_start, addr_t guest_addr_end, - int (*read)(struct guest_info * core, addr_t guest_addr, void * dst, uint_t length, void * priv_data), - int (*write)(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data), - void * priv_data) { - - struct v3_shadow_region * entry = (struct v3_shadow_region *)V3_Malloc(sizeof(struct v3_shadow_region)); - memset(entry, 0, sizeof(struct v3_shadow_region)); - - entry->guest_start = guest_addr_start; - entry->guest_end = guest_addr_end; - entry->host_type = SHDW_REGION_FULL_HOOK; - entry->host_addr = (addr_t)NULL; - entry->write_hook = write; - entry->read_hook = read; - entry->priv_data = priv_data; - entry->core_id = core_id; - - entry->flags.hook = 1; - - if (insert_shadow_region(vm, entry)) { + if (v3_insert_mem_region(vm, entry) == -1) { V3_Free(entry); return -1; } @@ -197,33 +159,17 @@ int v3_hook_full_mem(struct v3_vm_info * vm, uint16_t core_id, } -// This will unhook the memory hook registered at start address -// We do not support unhooking subregions -int v3_unhook_mem(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr_start) { - struct v3_shadow_region * reg = v3_get_shadow_region(vm, core_id, guest_addr_start); - - if (!reg->flags.hook) { - PrintError("Trying to unhook a non hooked memory region (addr=%p)\n", (void *)guest_addr_start); - return -1; - } - - v3_delete_shadow_region(vm, reg); - - return 0; -} - - static inline -struct v3_shadow_region * __insert_shadow_region(struct v3_vm_info * vm, - struct v3_shadow_region * region) { - struct rb_node ** p = &(vm->mem_map.shdw_regions.rb_node); +struct v3_mem_region * __insert_mem_region(struct v3_vm_info * vm, + struct v3_mem_region * region) { + struct rb_node ** p = &(vm->mem_map.mem_regions.rb_node); struct rb_node * parent = NULL; - struct v3_shadow_region * tmp_region; + struct v3_mem_region * tmp_region; while (*p) { parent = *p; - tmp_region = rb_entry(parent, struct v3_shadow_region, tree_node); + tmp_region = rb_entry(parent, struct v3_mem_region, tree_node); if (region->guest_end <= tmp_region->guest_start) { p = &(*p)->rb_left; @@ -238,7 +184,7 @@ struct v3_shadow_region * __insert_shadow_region(struct v3_vm_info * vm, return tmp_region; } else if (region->core_id < tmp_region->core_id) { p = &(*p)->rb_left; - } else { + } else { p = &(*p)->rb_right; } } @@ -250,17 +196,16 @@ struct v3_shadow_region * __insert_shadow_region(struct v3_vm_info * vm, } -static inline -struct v3_shadow_region * insert_shadow_region(struct v3_vm_info * vm, - struct v3_shadow_region * region) { - struct v3_shadow_region * ret; + +int v3_insert_mem_region(struct v3_vm_info * vm, struct v3_mem_region * region) { + struct v3_mem_region * ret; int i = 0; - if ((ret = __insert_shadow_region(vm, region))) { - return ret; + if ((ret = __insert_mem_region(vm, region))) { + return -1; } - v3_rb_insert_color(&(region->tree_node), &(vm->mem_map.shdw_regions)); + v3_rb_insert_color(&(region->tree_node), &(vm->mem_map.mem_regions)); @@ -297,103 +242,197 @@ struct v3_shadow_region * insert_shadow_region(struct v3_vm_info * vm, } } - return NULL; + return 0; } +struct v3_mem_region * v3_get_mem_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) { + struct rb_node * n = vm->mem_map.mem_regions.rb_node; + struct 
v3_mem_region * reg = NULL; + while (n) { -int v3_handle_mem_hook(struct guest_info * info, addr_t guest_va, addr_t guest_pa, - struct v3_shadow_region * reg, pf_error_t access_info) { - - addr_t op_addr = 0; + reg = rb_entry(n, struct v3_mem_region, tree_node); - if (reg->flags.alloced == 0) { - op_addr = get_hook_hva(info); - } else { - op_addr = (addr_t)V3_VAddr((void *)v3_get_shadow_addr(reg, info->cpu_id, guest_pa)); + if (guest_addr < reg->guest_start) { + n = n->rb_left; + } else if (guest_addr >= reg->guest_end) { + n = n->rb_right; + } else { + if (reg->core_id == V3_MEM_CORE_ANY) { + // found relevant region, it's available on all cores + return reg; + } else if (core_id == reg->core_id) { + // found relevant region, it's available on the indicated core + return reg; + } else if (core_id < reg->core_id) { + // go left, core too big + n = n->rb_left; + } else if (core_id > reg->core_id) { + // go right, core too small + n = n->rb_right; + } else { + PrintDebug("v3_get_mem_region: Impossible!\n"); + return NULL; + } + } } - - if (access_info.write == 1) { - // Write Operation - if (v3_emulate_write_op(info, guest_va, guest_pa, op_addr, - reg->write_hook, reg->priv_data) == -1) { - PrintError("Write Full Hook emulation failed\n"); - return -1; - } - } else { - // Read Operation - - if (reg->flags.read == 1) { - PrintError("Tried to emulate read for a guest Readable page\n"); - return -1; - } + // There is not registered region, so we check if its a valid address in the base region - if (v3_emulate_read_op(info, guest_va, guest_pa, op_addr, - reg->read_hook, reg->write_hook, - reg->priv_data) == -1) { - PrintError("Read Full Hook emulation failed\n"); - return -1; - } + if (guest_addr > vm->mem_map.base_region.guest_end) { + PrintError("Guest Address Exceeds Base Memory Size (ga=0x%p), (limit=0x%p) (core=0x%x)\n", + (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end, core_id); + v3_print_mem_map(vm); + return NULL; } - - return 0; + return &(vm->mem_map.base_region); } +/* This returns the next memory region based on a given address. + * If the address falls inside a sub region, that region is returned. + * If the address falls outside a sub region, the next sub region is returned + * NOTE that we have to be careful about core_ids here... 
+ */ +static struct v3_mem_region * get_next_mem_region( struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) { + struct rb_node * n = vm->mem_map.mem_regions.rb_node; + struct v3_mem_region * reg = NULL; + struct v3_mem_region * parent = NULL; -struct v3_shadow_region * v3_get_shadow_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) { - struct rb_node * n = vm->mem_map.shdw_regions.rb_node; - struct v3_shadow_region * reg = NULL; + if (n == NULL) { + return NULL; + } while (n) { - reg = rb_entry(n, struct v3_shadow_region, tree_node); + + reg = rb_entry(n, struct v3_mem_region, tree_node); if (guest_addr < reg->guest_start) { n = n->rb_left; } else if (guest_addr >= reg->guest_end) { n = n->rb_right; } else { - if ((core_id == reg->core_id) || - (reg->core_id == V3_MEM_CORE_ANY)) { - return reg; - } else { + if (reg->core_id == V3_MEM_CORE_ANY) { + // found relevant region, it's available on all cores + return reg; + } else if (core_id == reg->core_id) { + // found relevant region, it's available on the indicated core + return reg; + } else if (core_id < reg->core_id) { + // go left, core too big + n = n->rb_left; + } else if (core_id > reg->core_id) { + // go right, core too small n = n->rb_right; + } else { + PrintError("v3_get_mem_region: Impossible!\n"); + return NULL; } } + + if ((reg->core_id == core_id) || (reg->core_id == V3_MEM_CORE_ANY)) { + parent = reg; + } } - // There is not registered region, so we check if its a valid address in the base region + if (parent->guest_start > guest_addr) { + return parent; + } else if (parent->guest_end < guest_addr) { + struct rb_node * node = &(parent->tree_node); + + while ((node = v3_rb_next(node)) != NULL) { + struct v3_mem_region * next_reg = rb_entry(node, struct v3_mem_region, tree_node); + + if ((next_reg->core_id == V3_MEM_CORE_ANY) || + (next_reg->core_id == core_id)) { + + // This check is not strictly necessary, but it makes it clearer + if (next_reg->guest_start > guest_addr) { + return next_reg; + } + } + } + } + + return NULL; +} + + - if (guest_addr > vm->mem_map.base_region.guest_end) { - PrintError("Guest Address Exceeds Base Memory Size (ga=%p), (limit=%p)\n", - (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end); - v3_print_mem_map(vm); +/* Given an address region of memory, find if there are any regions that overlap with it. + * This checks that the range lies in a single region, and returns that region if it does, + * this can be either the base region or a sub region. 
+ * IF there are multiple regions in the range then it returns NULL + */ +static struct v3_mem_region * get_overlapping_region(struct v3_vm_info * vm, uint16_t core_id, + addr_t start_gpa, addr_t end_gpa) { + struct v3_mem_region * start_region = v3_get_mem_region(vm, core_id, start_gpa); + + if (start_region == NULL) { + PrintError("Invalid memory region\n"); return NULL; } - - return &(vm->mem_map.base_region); + + + if (start_region->guest_end < end_gpa) { + // Region ends before range + return NULL; + } else if (start_region->flags.base == 0) { + // sub region overlaps range + return start_region; + } else { + // Base region, now we have to scan forward for the next sub region + struct v3_mem_region * next_reg = get_next_mem_region(vm, core_id, start_gpa); + + if (next_reg == NULL) { + // no sub regions after start_addr, base region is ok + return start_region; + } else if (next_reg->guest_start >= end_gpa) { + // Next sub region begins outside range + return start_region; + } else { + return NULL; + } + } + + + // Should never get here + return NULL; } -void v3_delete_shadow_region(struct v3_vm_info * vm, struct v3_shadow_region * reg) { + +void v3_delete_mem_region(struct v3_vm_info * vm, struct v3_mem_region * reg) { int i = 0; if (reg == NULL) { return; } + + v3_rb_erase(&(reg->tree_node), &(vm->mem_map.mem_regions)); + + + + // If the guest isn't running then there shouldn't be anything to invalidate. + // Page tables should __always__ be created on demand during execution + // NOTE: This is a sanity check, and can be removed if that assumption changes + if (vm->run_state != VM_RUNNING) { + V3_Free(reg); + return; + } + for (i = 0; i < vm->num_cores; i++) { struct guest_info * info = &(vm->cores[i]); @@ -427,8 +466,6 @@ void v3_delete_shadow_region(struct v3_vm_info * vm, struct v3_shadow_region * r } } - v3_rb_erase(&(reg->tree_node), &(vm->mem_map.shdw_regions)); - V3_Free(reg); // flush virtual page tables @@ -436,30 +473,83 @@ void v3_delete_shadow_region(struct v3_vm_info * vm, struct v3_shadow_region * r } +// Determine if a given address can be handled by a large page of the requested size +uint32_t v3_get_max_page_size(struct guest_info * core, addr_t page_addr, v3_cpu_mode_t mode) { + addr_t pg_start = 0; + addr_t pg_end = 0; + uint32_t page_size = PAGE_SIZE_4KB; + struct v3_mem_region * reg = NULL; + + switch (mode) { + case PROTECTED: + if (core->use_large_pages == 1) { + pg_start = PAGE_ADDR_4MB(page_addr); + pg_end = (pg_start + PAGE_SIZE_4MB); + + reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end); + if ((reg) && ((reg->host_addr % PAGE_SIZE_4MB) == 0)) { + page_size = PAGE_SIZE_4MB; + } + } + break; + case PROTECTED_PAE: + if (core->use_large_pages == 1) { + pg_start = PAGE_ADDR_2MB(page_addr); + pg_end = (pg_start + PAGE_SIZE_2MB); + reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end); -addr_t v3_get_shadow_addr(struct v3_shadow_region * reg, uint16_t core_id, addr_t guest_addr) { - if (reg && (reg->flags.alloced == 1)) { - return (guest_addr - reg->guest_start) + reg->host_addr; - } else { - // PrintError("MEM Region Invalid\n"); - return 0; + if ((reg) && ((reg->host_addr % PAGE_SIZE_2MB) == 0)) { + page_size = PAGE_SIZE_2MB; + } + } + break; + case LONG: + case LONG_32_COMPAT: + case LONG_16_COMPAT: + if (core->use_giant_pages == 1) { + pg_start = PAGE_ADDR_1GB(page_addr); + pg_end = (pg_start + PAGE_SIZE_1GB); + + reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end); + + if ((reg) && 
((reg->host_addr % PAGE_SIZE_1GB) == 0)) { + page_size = PAGE_SIZE_1GB; + break; + } + } + + if (core->use_large_pages == 1) { + pg_start = PAGE_ADDR_2MB(page_addr); + pg_end = (pg_start + PAGE_SIZE_2MB); + + reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end); + + if ((reg) && ((reg->host_addr % PAGE_SIZE_2MB) == 0)) { + page_size = PAGE_SIZE_2MB; + } + } + break; + default: + PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core))); + return -1; } + return page_size; } void v3_print_mem_map(struct v3_vm_info * vm) { - struct rb_node * node = v3_rb_first(&(vm->mem_map.shdw_regions)); - struct v3_shadow_region * reg = &(vm->mem_map.base_region); + struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions)); + struct v3_mem_region * reg = &(vm->mem_map.base_region); int i = 0; - V3_Print("Memory Layout:\n"); + V3_Print("Memory Layout (all cores):\n"); - V3_Print("Base Region: 0x%p - 0x%p -> 0x%p\n", + V3_Print("Base Region (all cores): 0x%p - 0x%p -> 0x%p\n", (void *)(reg->guest_start), (void *)(reg->guest_end - 1), (void *)(reg->host_addr)); @@ -471,17 +561,17 @@ void v3_print_mem_map(struct v3_vm_info * vm) { } do { - reg = rb_entry(node, struct v3_shadow_region, tree_node); + reg = rb_entry(node, struct v3_mem_region, tree_node); V3_Print("%d: 0x%p - 0x%p -> 0x%p\n", i, (void *)(reg->guest_start), (void *)(reg->guest_end - 1), (void *)(reg->host_addr)); - V3_Print("\t(flags=%x) (WriteHook = 0x%p) (ReadHook = 0x%p)\n", - reg->flags.value, - (void *)(reg->write_hook), - (void *)(reg->read_hook)); + V3_Print("\t(flags=0x%x) (core=0x%x) (unhandled = 0x%p)\n", + reg->flags.value, + reg->core_id, + reg->unhandled); i++; } while ((node = v3_rb_next(node)));
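
Editor's note: the sketch below is illustrative only and is not part of the patch. It shows how external code might use the region API that this commit introduces in place of the old shadow-region calls: v3_create_mem_region() plus v3_insert_mem_region() to register a region, v3_get_mem_region() to look one up, and the per-region unhandled callback. The include list, map_device_region(), and device_region_fault() are assumptions made for the example.

/* Sketch, assuming the declarations live in palacios/vmm_mem.h as in the rest of the tree */
#include <palacios/vmm.h>
#include <palacios/vmm_mem.h>   /* struct v3_mem_region, V3_MEM_CORE_ANY (assumed header) */

/* Same signature as reg->unhandled; called when an access to the region is not handled */
static int device_region_fault(struct guest_info * core, addr_t guest_va, addr_t guest_pa,
                               struct v3_mem_region * reg, pf_error_t access_info) {
    PrintError("Unexpected access to device region (gpa=%p)\n", (void *)guest_pa);
    return -1;
}

static int map_device_region(struct v3_vm_info * vm, addr_t gpa_start, addr_t gpa_end) {
    /* V3_MEM_CORE_ANY makes the region visible to every core;
     * pass a specific core id instead for a per-core mapping. */
    struct v3_mem_region * reg = v3_create_mem_region(vm, V3_MEM_CORE_ANY,
                                                      gpa_start, gpa_end);

    if (reg == NULL) {
        return -1;
    }

    /* v3_create_mem_region() installs the default unhandled_err callback;
     * override it so faults land in a device-specific handler instead. */
    reg->unhandled = device_region_fault;

    if (v3_insert_mem_region(vm, reg) == -1) {
        /* Overlaps a region already registered for this core */
        V3_Free(reg);
        return -1;
    }

    /* Later, the region can be found by guest physical address, e.g. from core 0 */
    return (v3_get_mem_region(vm, 0, gpa_start) == reg) ? 0 : -1;
}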
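
Editor's note: a second illustrative sketch, also not part of the patch. It shows the intended use of the new v3_get_max_page_size() helper: a paging handler asks what the largest page backing a faulting gpa can be, given the guest CPU mode. The surrounding pick_page_size() function and the branch comments are assumptions; the return values and alignment conditions are taken from the patch above.

static int pick_page_size(struct guest_info * core, addr_t fault_gpa) {
    v3_cpu_mode_t mode = v3_get_vm_cpu_mode(core);
    uint32_t pg_size = v3_get_max_page_size(core, fault_gpa, mode);

    if (pg_size == PAGE_SIZE_1GB) {
        /* long mode with use_giant_pages: the whole 1GB range lies in a
         * single region and the host backing is 1GB aligned */
    } else if ((pg_size == PAGE_SIZE_2MB) || (pg_size == PAGE_SIZE_4MB)) {
        /* a large page is safe: the full large-page range maps to one
         * region whose host_addr has the matching alignment */
    } else {
        /* default: fall back to a 4KB mapping */
    }

    return 0;
}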