X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm_mem.c;h=7bf00199634c4578e93cceb42f59b4294dfa850d;hb=a4fd5bcc79e7cdf9a3bd879294566bff0666ced7;hp=d1d2b4371109cfbc4d72de1c2a5b7b6e08d3b4f9;hpb=16db015d4bcec7b3f9e25eb8dbb6796c4d85550a;p=palacios.git

diff --git a/palacios/src/palacios/vmm_mem.c b/palacios/src/palacios/vmm_mem.c
index d1d2b43..7bf0019 100644
--- a/palacios/src/palacios/vmm_mem.c
+++ b/palacios/src/palacios/vmm_mem.c
@@ -27,13 +27,16 @@
 #include 
 #include 
+#include 
+
+uint64_t v3_mem_block_size = V3_CONFIG_MEM_BLOCK_SIZE;
 
 
 struct v3_mem_region * v3_get_base_region(struct v3_vm_info * vm, addr_t gpa) {
     struct v3_mem_map * map = &(vm->mem_map);
-    uint32_t block_index = gpa / V3_CONFIG_MEM_BLOCK_SIZE;
+    uint32_t block_index = gpa / v3_mem_block_size;
 
-    if (gpa > (map->num_base_regions * V3_CONFIG_MEM_BLOCK_SIZE) ||
+    if ((gpa >= (map->num_base_regions * v3_mem_block_size)) ||
         (block_index >= map->num_base_regions)) {
         PrintError(vm, VCORE_NONE, "Guest Address Exceeds Base Memory Size (ga=0x%p), (limit=0x%p)\n",
                    (void *)gpa, (void *)vm->mem_size);
@@ -102,15 +105,45 @@ static int gpa_to_node_from_cfg(struct v3_vm_info * vm, addr_t gpa) {
     return -1;
 }
 
+//
+// This code parallels that in vmm_shadow_paging.c:v3_init_shdw_impl()
+// and vmm_config.c:determine_paging_mode.  Which paging mode will be
+// used is determined much later than the allocation of the guest
+// memory regions, so we need to decide here whether they must be
+// below 4 GB or not.
+static int will_use_shadow_paging(struct v3_vm_info *vm)
+{
+    v3_cfg_tree_t * pg_cfg = v3_cfg_subtree(vm->cfg_data->cfg, "paging");
+    char * pg_mode = v3_cfg_val(pg_cfg, "mode");
+
+    if (pg_mode == NULL) {
+        return 1; // did not ask, get shadow
+    } else {
+        if (strcasecmp(pg_mode, "nested") == 0) {
+            extern v3_cpu_arch_t v3_mach_type;
+            if ((v3_mach_type == V3_SVM_REV3_CPU) ||
+                (v3_mach_type == V3_VMX_EPT_CPU) ||
+                (v3_mach_type == V3_VMX_EPT_UG_CPU)) {
+                return 0; // ask for nested, get nested
+            } else {
+                return 1; // ask for nested, get shadow
+            }
+        } else if (strcasecmp(pg_mode, "shadow") == 0) {
+            return 1; // ask for shadow, get shadow
+        } else {
+            return 1; // ask for something else, get shadow
+        }
+    }
+}
 
 int v3_init_mem_map(struct v3_vm_info * vm) {
     struct v3_mem_map * map = &(vm->mem_map);
-    addr_t block_pages = V3_CONFIG_MEM_BLOCK_SIZE >> 12;
+    addr_t block_pages = v3_mem_block_size >> 12;
     int i = 0;
 
-    map->num_base_regions = (vm->mem_size / V3_CONFIG_MEM_BLOCK_SIZE) + \
-        ((vm->mem_size % V3_CONFIG_MEM_BLOCK_SIZE) > 0);
+    map->num_base_regions = (vm->mem_size / v3_mem_block_size) + \
+        ((vm->mem_size % v3_mem_block_size) > 0);
 
     map->mem_regions.rb_node = NULL;
 
@@ -128,10 +161,10 @@ int v3_init_mem_map(struct v3_vm_info * vm) {
     for (i = 0; i < map->num_base_regions; i++) {
         struct v3_mem_region * region = &(map->base_regions[i]);
         int node_id = -1;
-        
+
         // 2MB page alignment needed for 2MB hardware nested paging
-        region->guest_start = V3_CONFIG_MEM_BLOCK_SIZE * i;
-        region->guest_end = region->guest_start + V3_CONFIG_MEM_BLOCK_SIZE;
+        region->guest_start = v3_mem_block_size * i;
+        region->guest_end = region->guest_start + v3_mem_block_size;
 
         // We assume that the xml config was smart enough to align the layout to the block size
         // If they didn't we're going to ignore their settings
@@ -139,27 +172,31 @@ int v3_init_mem_map(struct v3_vm_info * vm) {
         node_id = gpa_to_node_from_cfg(vm, region->guest_start);
 
         V3_Print(vm, VCORE_NONE, "Allocating block %d on node %d\n", i, node_id);
on node %d\n", i, node_id); - - if (node_id != -1) { - region->host_addr = (addr_t)V3_AllocPagesNode(block_pages, node_id); - } else { - region->host_addr = (addr_t)V3_AllocPages(block_pages); - } + region->host_addr = (addr_t)V3_AllocPagesExtended(block_pages, + PAGE_SIZE_4KB, + node_id, + 0); // no constraints + if ((void *)region->host_addr == NULL) { PrintError(vm, VCORE_NONE, "Could not allocate guest memory\n"); return -1; } // Clear the memory... - memset(V3_VAddr((void *)region->host_addr), 0, V3_CONFIG_MEM_BLOCK_SIZE); + memset(V3_VAddr((void *)region->host_addr), 0, v3_mem_block_size); + + // Note assigned numa ID could be different than our request... + region->numa_id = v3_numa_hpa_to_node(region->host_addr); region->flags.read = 1; region->flags.write = 1; region->flags.exec = 1; region->flags.base = 1; region->flags.alloced = 1; + region->flags.limit32 = will_use_shadow_paging(vm); + region->unhandled = unhandled_err; } @@ -174,7 +211,7 @@ void v3_delete_mem_map(struct v3_vm_info * vm) { struct rb_node * node = v3_rb_first(&(map->mem_regions)); struct v3_mem_region * reg; struct rb_node * tmp_node = NULL; - addr_t block_pages = V3_CONFIG_MEM_BLOCK_SIZE >> 12; + addr_t block_pages = v3_mem_block_size >> 12; int i = 0; while (node) { @@ -315,13 +352,13 @@ int v3_insert_mem_region(struct v3_vm_info * vm, struct v3_mem_region * region) v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info); if (mem_mode == PHYSICAL_MEM) { - rc |= v3_invalidate_passthrough_addr_range(info, region->guest_start, region->guest_end-1); + rc |= v3_invalidate_passthrough_addr_range(info, region->guest_start, region->guest_end-1,NULL,NULL); } else { rc |= v3_invalidate_shadow_pts(info); } } else if (info->shdw_pg_mode == NESTED_PAGING) { - rc |= v3_invalidate_nested_addr_range(info, region->guest_start, region->guest_end-1); + rc |= v3_invalidate_nested_addr_range(info, region->guest_start, region->guest_end-1,NULL,NULL); } } @@ -453,7 +490,8 @@ static struct v3_mem_region * get_overlapping_region(struct v3_vm_info * vm, uin struct v3_mem_region * start_region = v3_get_mem_region(vm, core_id, start_gpa); if (start_region == NULL) { - PrintError(vm, VCORE_NONE, "Invalid memory region\n"); + PrintError(vm, VCORE_NONE, "No overlapping region for core=%d, start_gpa=%p\n", core_id, (void*)start_gpa); + v3_print_mem_map(vm); return NULL; } @@ -521,13 +559,13 @@ void v3_delete_mem_region(struct v3_vm_info * vm, struct v3_mem_region * reg) { v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info); if (mem_mode == PHYSICAL_MEM) { - rc |= v3_invalidate_passthrough_addr_range(info,reg->guest_start, reg->guest_end-1); + rc |= v3_invalidate_passthrough_addr_range(info,reg->guest_start, reg->guest_end-1,NULL,NULL); } else { rc |= v3_invalidate_shadow_pts(info); } } else if (info->shdw_pg_mode == NESTED_PAGING) { - rc |= v3_invalidate_nested_addr_range(info,reg->guest_start, reg->guest_end-1); + rc |= v3_invalidate_nested_addr_range(info,reg->guest_start, reg->guest_end-1,NULL,NULL); } } @@ -650,3 +688,20 @@ void v3_print_mem_map(struct v3_vm_info * vm) { } while ((node = v3_rb_next(node))); } + +void v3_init_mem() +{ + char *arg = v3_lookup_option("mem_block_size"); + + if (arg) { + v3_mem_block_size = atoi(arg); + V3_Print(VM_NONE,VCORE_NONE,"memory block size set to %llu bytes\n",v3_mem_block_size); + } else { + V3_Print(VM_NONE,VCORE_NONE,"default memory block size of %llu bytes is in use\n",v3_mem_block_size); + } +} + +void v3_deinit_mem() +{ + // currently nothing +}