X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm_mem.c;h=dd262c0d02c75d1b20d753e5de0654fd4eb76d52;hb=cfd5e43722796b4951faf721c64f3a74ab81ff2b;hp=ab8fc2cbb42341ef60c50d93fff6d0c258bbaefe;hpb=2c0f0d4f13ebf9071bede458a0d682513082beb3;p=palacios.git

diff --git a/palacios/src/palacios/vmm_mem.c b/palacios/src/palacios/vmm_mem.c
index ab8fc2c..dd262c0 100644
--- a/palacios/src/palacios/vmm_mem.c
+++ b/palacios/src/palacios/vmm_mem.c
@@ -41,7 +41,8 @@ static int mem_offset_hypercall(struct guest_info * info, uint_t hcall_id, void
 static int unhandled_err(struct guest_info * core, addr_t guest_va, addr_t guest_pa,
                          struct v3_mem_region * reg, pf_error_t access_info) {
 
-    PrintError("Unhandled memory access error\n");
+    PrintError("Unhandled memory access error (gpa=%p, gva=%p, error_code=%d)\n",
+               (void *)guest_pa, (void *)guest_va, *(uint32_t *)&access_info);
 
     v3_print_mem_map(core->vm_info);
 
@@ -50,8 +51,6 @@ static int unhandled_err(struct guest_info * core, addr_t guest_va, addr_t guest
     return -1;
 }
 
-
-
 int v3_init_mem_map(struct v3_vm_info * vm) {
     struct v3_mem_map * map = &(vm->mem_map);
     addr_t mem_pages = vm->mem_size >> 12;
@@ -60,13 +59,22 @@ int v3_init_mem_map(struct v3_vm_info * vm) {
 
     map->mem_regions.rb_node = NULL;
 
-
     // There is an underlying region that contains all of the guest memory
     // PrintDebug("Mapping %d pages of memory (%u bytes)\n", (int)mem_pages, (uint_t)info->mem_size);
 
+    // 2MB page alignment needed for 2MB hardware nested paging
     map->base_region.guest_start = 0;
     map->base_region.guest_end = mem_pages * PAGE_SIZE_4KB;
+
+#ifdef V3_CONFIG_ALIGNED_PG_ALLOC
+    map->base_region.host_addr = (addr_t)V3_AllocAlignedPages(mem_pages, vm->mem_align);
+#else
     map->base_region.host_addr = (addr_t)V3_AllocPages(mem_pages);
+#endif
+
+    // Clear the memory...
+    memset(V3_VAddr((void *)map->base_region.host_addr), 0, mem_pages * PAGE_SIZE_4KB);
+
 
     map->base_region.flags.read = 1;
     map->base_region.flags.write = 1;
@@ -93,6 +101,7 @@ void v3_delete_mem_map(struct v3_vm_info * vm) {
     struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
     struct v3_mem_region * reg;
     struct rb_node * tmp_node = NULL;
+    addr_t mem_pages = vm->mem_size >> 12;
 
     while (node) {
         reg = rb_entry(node, struct v3_mem_region, tree_node);
@@ -102,7 +111,7 @@ void v3_delete_mem_map(struct v3_vm_info * vm) {
         v3_delete_mem_region(vm, reg);
     }
 
-    V3_FreePage((void *)(vm->mem_map.base_region.host_addr));
+    V3_FreePages((void *)(vm->mem_map.base_region.host_addr), mem_pages);
 }
 
 
@@ -136,7 +145,6 @@ int v3_add_shadow_mem( struct v3_vm_info * vm, uint16_t core_id,
 
     entry->host_addr = host_addr;
 
-
     entry->flags.read = 1;
     entry->flags.write = 1;
     entry->flags.exec = 1;
@@ -154,7 +162,7 @@ int v3_add_shadow_mem( struct v3_vm_info * vm, uint16_t core_id,
 
 static inline
 struct v3_mem_region * __insert_mem_region(struct v3_vm_info * vm,
-                                           struct v3_mem_region * region) {
+                                          struct v3_mem_region * region) {
     struct rb_node ** p = &(vm->mem_map.mem_regions.rb_node);
     struct rb_node * parent = NULL;
     struct v3_mem_region * tmp_region;
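
/* ------------------------------------------------------------------------
 * [Editor's aside -- illustration, not part of the patch]
 * The PrintError() change in the first hunk above logs the pf_error_t
 * argument by type-punning the flags struct to a uint32_t. Below is a
 * minimal standalone sketch of that idiom. The bitfield layout is a
 * hypothetical stand-in for Palacios' pf_error_t, mirroring the x86
 * page-fault error code; it is assumed, not taken from the source.
 * ------------------------------------------------------------------------ */

#include <stdio.h>
#include <stdint.h>

typedef struct {                 /* hypothetical layout, LSB-first on GCC */
    uint32_t present     : 1;    /* bit 0: fault on a present page        */
    uint32_t write       : 1;    /* bit 1: fault caused by a write        */
    uint32_t user        : 1;    /* bit 2: fault in user mode             */
    uint32_t rsvd_access : 1;    /* bit 3: reserved bit violation         */
    uint32_t ifetch      : 1;    /* bit 4: instruction fetch              */
    uint32_t rsvd        : 27;
} pf_error_sketch_t;

int main(void) {
    pf_error_sketch_t err = { .present = 1, .write = 1 };

    /* Same idiom as the patch: reinterpret the 32-bit flags struct as a
     * raw integer so the whole error code prints as one value (3 here,
     * on a little-endian GCC-style ABI). */
    printf("error_code=%u\n", *(uint32_t *)&err);

    return 0;
}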
@@ -288,44 +296,123 @@ struct v3_mem_region * v3_get_mem_region(struct v3_vm_info * vm, uint16_t core_i
 
 
 
-/* Search the "hooked" memory regions for a region that ends after the given address. If the
- * address is invalid, return NULL. Else, return the first region found or the base region if no
- * region ends after the given address.
+/* This returns the next memory region based on a given address.
+ * If the address falls inside a sub region, that region is returned.
+ * If the address falls outside a sub region, the next sub region is returned.
+ * NOTE that we have to be careful about core_ids here...
  */
-struct v3_mem_region * v3_get_next_mem_region( struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
+static struct v3_mem_region * get_next_mem_region( struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
     struct rb_node * n = vm->mem_map.mem_regions.rb_node;
     struct v3_mem_region * reg = NULL;
+    struct v3_mem_region * parent = NULL;
+
+    if (n == NULL) {
+        return NULL;
+    }
 
-    // Keep going to the right in the tree while the address is greater than the current region's
-    // end address.
     while (n) {
-        reg = rb_entry(n, struct v3_mem_region, tree_node);
-        if (guest_addr >= reg->guest_end) { // reg is [start,end)
-            n = n->rb_right;
-        } else {
-            if ((core_id == reg->core_id) || (reg->core_id == V3_MEM_CORE_ANY)) {
+
+        reg = rb_entry(n, struct v3_mem_region, tree_node);
+
+        if (guest_addr < reg->guest_start) {
+            n = n->rb_left;
+        } else if (guest_addr >= reg->guest_end) {
+            n = n->rb_right;
+        } else {
+            if (reg->core_id == V3_MEM_CORE_ANY) {
+                // found relevant region, it's available on all cores
                 return reg;
-            } else {
+            } else if (core_id == reg->core_id) {
+                // found relevant region, it's available on the indicated core
+                return reg;
+            } else if (core_id < reg->core_id) {
+                // go left, core too big
+                n = n->rb_left;
+            } else if (core_id > reg->core_id) {
+                // go right, core too small
                 n = n->rb_right;
+            } else {
+                PrintError("get_next_mem_region: Impossible!\n");
+                return NULL;
             }
-        }
+        }
+
+        if ((reg->core_id == core_id) || (reg->core_id == V3_MEM_CORE_ANY)) {
+            parent = reg;
+        }
     }
 
-    // There is no registered region, so we check if it's a valid address in the base region
-    if (guest_addr >= vm->mem_map.base_region.guest_end) {
-        PrintError("%s: Guest Address Exceeds Base Memory Size (ga=%p), (limit=%p)\n",
-                   __FUNCTION__, (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end);
-        v3_print_mem_map(vm);
-        return NULL;
+    if (parent->guest_start > guest_addr) {
+        return parent;
+    } else if (parent->guest_end < guest_addr) {
+        struct rb_node * node = &(parent->tree_node);
+
+        while ((node = v3_rb_next(node)) != NULL) {
+            struct v3_mem_region * next_reg = rb_entry(node, struct v3_mem_region, tree_node);
+
+            if ((next_reg->core_id == V3_MEM_CORE_ANY) ||
+                (next_reg->core_id == core_id)) {
+
+                // This check is not strictly necessary, but it makes it clearer
+                if (next_reg->guest_start > guest_addr) {
+                    return next_reg;
+                }
+            }
+        }
     }
 
-    return &(vm->mem_map.base_region);
+    return NULL;
+}
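
/* ------------------------------------------------------------------------
 * [Editor's aside -- illustration, not part of the patch]
 * The contract of get_next_mem_region() above, restated over a sorted
 * array instead of the red-black tree so it can be checked in isolation:
 * regions are disjoint [start, end) intervals ordered by start, and the
 * lookup yields the region containing the address, else the first region
 * beginning above it, else NULL. All names here are illustrative, and the
 * per-core filtering done by the real code is omitted.
 * ------------------------------------------------------------------------ */

#include <stddef.h>

typedef unsigned long gpa_sketch_t;

struct region_sketch {
    gpa_sketch_t start;    /* inclusive */
    gpa_sketch_t end;      /* exclusive */
};

static struct region_sketch * next_region_sketch(struct region_sketch * regs,
                                                 size_t num_regs,
                                                 gpa_sketch_t addr) {
    size_t i;

    for (i = 0; i < num_regs; i++) {
        /* The first region whose end lies above addr is either the
         * containing region (addr >= start) or the next one along
         * (addr < start) -- exactly the two cases the tree walk returns. */
        if (addr < regs[i].end) {
            return &regs[i];
        }
    }

    return NULL;    /* addr lies past every region */
}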
+
+
+
+/* Given an address region of memory, find if there are any regions that overlap with it.
+ * This checks that the range lies in a single region, and returns that region if it does;
+ * it can be either the base region or a sub region.
+ * If there are multiple regions in the range then it returns NULL.
+ */
+static struct v3_mem_region * get_overlapping_region(struct v3_vm_info * vm, uint16_t core_id,
+                                                     addr_t start_gpa, addr_t end_gpa) {
+    struct v3_mem_region * start_region = v3_get_mem_region(vm, core_id, start_gpa);
+
+    if (start_region == NULL) {
+        PrintError("Invalid memory region\n");
+        return NULL;
+    }
+
+
+    if (start_region->guest_end < end_gpa) {
+        // Region ends before range
+        return NULL;
+    } else if (start_region->flags.base == 0) {
+        // sub region contains the range
+        return start_region;
+    } else {
+        // Base region, now we have to scan forward for the next sub region
+        struct v3_mem_region * next_reg = get_next_mem_region(vm, core_id, start_gpa);
+
+        if (next_reg == NULL) {
+            // no sub regions after start_gpa, base region is ok
+            return start_region;
+        } else if (next_reg->guest_start >= end_gpa) {
+            // Next sub region begins outside range
+            return start_region;
+        } else {
+            return NULL;
+        }
+    }
+
+
+    // Should never get here
+    return NULL;
+}
+
+
 
 void v3_delete_mem_region(struct v3_vm_info * vm, struct v3_mem_region * reg) {
     int i = 0;
 
@@ -333,6 +420,19 @@ void v3_delete_mem_region(struct v3_vm_info * vm, struct v3_mem_region * reg) {
         return;
     }
 
+
+    v3_rb_erase(&(reg->tree_node), &(vm->mem_map.mem_regions));
+
+
+
+    // If the guest isn't running then there shouldn't be anything to invalidate.
+    // Page tables should __always__ be created on demand during execution
+    // NOTE: This is a sanity check, and can be removed if that assumption changes
+    if (vm->run_state != VM_RUNNING) {
+        V3_Free(reg);
+        return;
+    }
+
     for (i = 0; i < vm->num_cores; i++) {
         struct guest_info * info = &(vm->cores[i]);
 
@@ -366,8 +466,6 @@ void v3_delete_mem_region(struct v3_vm_info * vm, struct v3_mem_region * reg) {
         }
     }
 
-    v3_rb_erase(&(reg->tree_node), &(vm->mem_map.mem_regions));
-
     V3_Free(reg);
 
     // flush virtual page tables
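
/* ------------------------------------------------------------------------
 * [Editor's aside -- illustration, not part of the patch]
 * The rule that get_overlapping_region() above implements, reduced to a
 * single predicate: a [start, end) range may be backed by one region only
 * if it does not spill past that region's end, and a base-region match is
 * disqualified when a sub region begins inside the range. This is a model
 * of the logic, not the Palacios API; next_sub stands in for the result
 * of the get_next_mem_region() scan.
 * ------------------------------------------------------------------------ */

typedef unsigned long gpa_sketch_t;

struct reg_sketch {
    gpa_sketch_t start;
    gpa_sketch_t end;
    int          is_base;    /* 1 for the base region, 0 for a sub region */
};

/* Returns 1 if r alone can back [start, end), given the first sub region
 * at or after start (next_sub == NULL if there is none). */
static int range_in_single_region(struct reg_sketch * r,
                                  struct reg_sketch * next_sub,
                                  gpa_sketch_t start, gpa_sketch_t end) {
    if (r->end < end) {
        return 0;    /* range extends past the region */
    }

    if (!r->is_base) {
        return 1;    /* a sub region that contains the whole range */
    }

    /* base region: usable only if no sub region starts inside [start, end) */
    return (next_sub == NULL) || (next_sub->start >= end);
}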
@@ -375,6 +473,72 @@ void v3_delete_mem_region(struct v3_vm_info * vm, struct v3_mem_region * reg) {
 }
 
 
+// Determine if a given address can be handled by a large page of the requested size
+uint32_t v3_get_max_page_size(struct guest_info * core, addr_t page_addr, v3_cpu_mode_t mode) {
+    addr_t pg_start = 0;
+    addr_t pg_end = 0;
+    uint32_t page_size = PAGE_SIZE_4KB;
+    struct v3_mem_region * reg = NULL;
+
+    switch (mode) {
+        case PROTECTED:
+            if (core->use_large_pages == 1) {
+                pg_start = PAGE_ADDR_4MB(page_addr);
+                pg_end = (pg_start + PAGE_SIZE_4MB);
+
+                reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);
+
+                if ((reg) && ((reg->host_addr % PAGE_SIZE_4MB) == 0)) {
+                    page_size = PAGE_SIZE_4MB;
+                }
+            }
+            break;
+        case PROTECTED_PAE:
+            if (core->use_large_pages == 1) {
+                pg_start = PAGE_ADDR_2MB(page_addr);
+                pg_end = (pg_start + PAGE_SIZE_2MB);
+
+                reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);
+
+                if ((reg) && ((reg->host_addr % PAGE_SIZE_2MB) == 0)) {
+                    page_size = PAGE_SIZE_2MB;
+                }
+            }
+            break;
+        case LONG:
+        case LONG_32_COMPAT:
+        case LONG_16_COMPAT:
+            if (core->use_giant_pages == 1) {
+                pg_start = PAGE_ADDR_1GB(page_addr);
+                pg_end = (pg_start + PAGE_SIZE_1GB);
+
+                reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);
+
+                if ((reg) && ((reg->host_addr % PAGE_SIZE_1GB) == 0)) {
+                    page_size = PAGE_SIZE_1GB;
+                    break;
+                }
+            }
+
+            if (core->use_large_pages == 1) {
+                pg_start = PAGE_ADDR_2MB(page_addr);
+                pg_end = (pg_start + PAGE_SIZE_2MB);
+
+                reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);
+
+                if ((reg) && ((reg->host_addr % PAGE_SIZE_2MB) == 0)) {
+                    page_size = PAGE_SIZE_2MB;
+                }
+            }
+            break;
+        default:
+            PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
+            return -1;
+    }
+
+    return page_size;
+}
+
 
 void v3_print_mem_map(struct v3_vm_info * vm) {
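
/* ------------------------------------------------------------------------
 * [Editor's aside -- illustration, not part of the patch]
 * One plausible call site for the v3_get_max_page_size() routine added
 * above, based only on the signature and return values visible in this
 * diff. The handler name and control flow are hypothetical, and the
 * snippet assumes the Palacios vmm headers (struct guest_info,
 * v3_cpu_mode_t, v3_get_vm_cpu_mode(), PAGE_SIZE_4KB) are in scope, as
 * they are in vmm_mem.c.
 * ------------------------------------------------------------------------ */

static uint32_t pick_mapping_size_sketch(struct guest_info * core, addr_t fault_gpa) {
    v3_cpu_mode_t mode = v3_get_vm_cpu_mode(core);
    uint32_t pg_size = v3_get_max_page_size(core, fault_gpa, mode);

    /* v3_get_max_page_size() returns -1 (i.e. (uint32_t)-1) for an invalid
     * CPU mode, so a cautious caller falls back to 4KB mappings instead of
     * propagating that value into the page tables. */
    if (pg_size == (uint32_t)-1) {
        pg_size = PAGE_SIZE_4KB;
    }

    return pg_size;
}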