From: Jack Lange
Date: Wed, 31 Jul 2013 21:56:38 +0000 (-0500)
Subject: Added non-contiguous memory region support.
X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=commitdiff_plain;h=16db015d4bcec7b3f9e25eb8dbb6796c4d85550a;p=palacios.git

Added non-contiguous memory region support.
---

diff --git a/Kconfig b/Kconfig
index 71b85eb..eebccc4 100644
--- a/Kconfig
+++ b/Kconfig
@@ -116,7 +116,13 @@ config QUIX86_DEBUG
 endchoice

 menu "Supported host OS features"
-
+
+config MEM_BLOCK_SIZE
+	int "Allocation size for underlying VM memory"
+	default 134217728
+	help
+	  This is the size in bytes of the underlying memory allocations used for the base memory regions.
+	  A good default value is 128MB (134217728 or 0x8000000 bytes).

 config ALIGNED_PG_ALLOC
 	bool "Host support for aligned page allocations"
diff --git a/linux_module/mm.c b/linux_module/mm.c
index 83c69ae..3edbd23 100644
--- a/linux_module/mm.c
+++ b/linux_module/mm.c
@@ -109,7 +109,7 @@ static uintptr_t alloc_contig_pgs(u64 num_pages, u32 alignment) {

 // alignment is in bytes
-uintptr_t alloc_palacios_pgs(u64 num_pages, u32 alignment) {
+uintptr_t alloc_palacios_pgs(u64 num_pages, u32 alignment, int node) {
     uintptr_t addr = 0;

     if (num_pages < OFFLINE_POOL_THRESHOLD) {
diff --git a/linux_module/mm.h b/linux_module/mm.h
index dfe5d55..21e9855 100644
--- a/linux_module/mm.h
+++ b/linux_module/mm.h
@@ -7,7 +7,7 @@

-uintptr_t alloc_palacios_pgs(u64 num_pages, u32 alignment);
+uintptr_t alloc_palacios_pgs(u64 num_pages, u32 alignment, int node_id);

 void free_palacios_pg(uintptr_t base_addr);
 void free_palacios_pgs(uintptr_t base_addr, u64 num_pages);
diff --git a/linux_module/palacios-stubs.c b/linux_module/palacios-stubs.c
index a7f1607..7ad4ad8 100644
--- a/linux_module/palacios-stubs.c
+++ b/linux_module/palacios-stubs.c
@@ -165,7 +165,7 @@ void palacios_print_scoped(void * vm, int vcore, const char *fmt, ...) {
  * Allocates a contiguous region of pages of the requested size.
  * Returns the physical address of the first page in the region.
  */
-void *palacios_allocate_pages(int num_pages, unsigned int alignment) {
+void *palacios_allocate_pages(int num_pages, unsigned int alignment, int node_id) {
     void * pg_addr = NULL;

     if (num_pages<=0) {
@@ -173,7 +173,7 @@
 	return NULL;
     }

-    pg_addr = (void *)alloc_palacios_pgs(num_pages, alignment);
+    pg_addr = (void *)alloc_palacios_pgs(num_pages, alignment, node_id);

     if (!pg_addr) {
 	ERROR("ALERT ALERT Page allocation has FAILED Warning\n");
diff --git a/linux_module/palacios.h b/linux_module/palacios.h
index 3562165..163b468 100644
--- a/linux_module/palacios.h
+++ b/linux_module/palacios.h
@@ -131,7 +131,7 @@ struct proc_dir_entry *palacios_get_procdir(void);
 // The idea is that everything uses the same stubs
 void palacios_print_scoped(void *vm, int vcore, const char *fmt, ...);
 #define palacios_print(...) palacios_print_scoped(0,-1, __VA_ARGS__)
-void *palacios_allocate_pages(int num_pages, unsigned int alignment);
+void *palacios_allocate_pages(int num_pages, unsigned int alignment, int node_id);
 void palacios_free_pages(void *page_addr, int num_pages);
 void *palacios_alloc(unsigned int size);
 void *palacios_alloc_extended(unsigned int size, unsigned int flags);
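
Note on the new host hook: the extra node_id parameter threads a NUMA node hint from the VMM down to the host allocator, with -1 meaning "any node". A minimal sketch of how a Linux host embedding might honor it; the function name and GFP flags are illustrative assumptions, not part of this commit, while alloc_pages()/alloc_pages_node() are the standard kernel APIs:

    #include <linux/mm.h>
    #include <linux/gfp.h>

    /* Sketch of a node-aware allocation stub (hypothetical, not from this commit).
     * Returns the host *physical* address of the first page, matching
     * palacios_allocate_pages()'s contract. 'alignment' is ignored here:
     * buddy allocations of order n are naturally aligned to 2^n pages,
     * which covers the 4KB/2MB cases. */
    static void *example_allocate_pages(int num_pages, unsigned int alignment, int node_id)
    {
        struct page *pgs;
        unsigned int order = get_order(num_pages * PAGE_SIZE);

        if (node_id == -1) {
            pgs = alloc_pages(GFP_KERNEL, order);               /* any node */
        } else {
            pgs = alloc_pages_node(node_id, GFP_KERNEL, order); /* pinned to one NUMA node */
        }

        return pgs ? (void *)(page_to_pfn(pgs) << PAGE_SHIFT) : NULL;
    }
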
diff --git a/palacios/include/palacios/vmm.h b/palacios/include/palacios/vmm.h
index 5379928..a8f321c 100644
--- a/palacios/include/palacios/vmm.h
+++ b/palacios/include/palacios/vmm.h
@@ -60,7 +60,7 @@ int v3_get_vcore(struct guest_info *);
 	extern struct v3_os_hooks * os_hooks;		\
 	void * ptr = 0;					\
 	if ((os_hooks) && (os_hooks)->allocate_pages) {	\
-	    ptr = (os_hooks)->allocate_pages(num_pages,PAGE_SIZE_4KB);	\
+	    ptr = (os_hooks)->allocate_pages(num_pages,PAGE_SIZE_4KB,-1);	\
 	}						\
 	ptr;						\
     })
@@ -71,12 +71,24 @@ int v3_get_vcore(struct guest_info *);
 	extern struct v3_os_hooks * os_hooks;		\
 	void * ptr = 0;					\
 	if ((os_hooks) && (os_hooks)->allocate_pages) {	\
-	    ptr = (os_hooks)->allocate_pages(num_pages,align);	\
+	    ptr = (os_hooks)->allocate_pages(num_pages,align,-1);	\
 	}						\
 	ptr;						\
     })

+#define V3_AllocPagesNode(num_pages, node_id)				\
+    ({									\
+	extern struct v3_os_hooks * os_hooks;				\
+	void * ptr = 0;							\
+	if ((os_hooks) && (os_hooks)->allocate_pages) {			\
+	    ptr = (os_hooks)->allocate_pages(num_pages, PAGE_SIZE_4KB, node_id); \
+	}								\
+	ptr;								\
+    })
+
+
+
 #define V3_FreePages(page, num_pages)			\
     do {						\
 	extern struct v3_os_hooks * os_hooks;		\
@@ -299,7 +311,7 @@ struct v3_os_hooks {
     void (*print)(void *vm, int vcore, const char * format, ...)
 	__attribute__ ((format (printf, 3, 4)));

-    void *(*allocate_pages)(int num_pages, unsigned int alignment);
+    void *(*allocate_pages)(int num_pages, unsigned int alignment, int node_id);
     void (*free_pages)(void * page, int num_pages);

     void *(*malloc)(unsigned int size);
@@ -356,7 +368,12 @@ typedef enum {V3_VCORE_CPU_UNKNOWN, V3_VCORE_CPU_REAL, V3_VCORE_CPU_PROTECTED, V
 typedef enum {V3_VCORE_MEM_STATE_UNKNOWN, V3_VCORE_MEM_STATE_SHADOW, V3_VCORE_MEM_STATE_NESTED} v3_vcore_mem_state_t;
 typedef enum {V3_VCORE_MEM_MODE_UNKNOWN, V3_VCORE_MEM_MODE_PHYSICAL, V3_VCORE_MEM_MODE_VIRTUAL} v3_vcore_mem_mode_t;

-struct v3_vcore_state {
+
+struct v3_vm_base_state {
+    v3_vm_state_t state;
+};
+
+struct v3_vm_vcore_state {
     v3_vcore_state_t state;
     v3_vcore_cpu_mode_t cpu_mode;
     v3_vcore_mem_state_t mem_state;
@@ -366,12 +383,20 @@ struct v3_vcore_state {
     unsigned long long num_exits;
 };

-struct v3_vm_state {
-    v3_vm_state_t state;
-    void * mem_base_paddr;
-    unsigned long long mem_size;
-    unsigned long num_vcores;
-    struct v3_vcore_state vcore[0];
+struct v3_vm_core_state {
+    unsigned long num_vcores;
+    struct v3_vm_vcore_state vcore[];
+};
+
+struct v3_vm_mem_region {
+    void *host_paddr;
+    unsigned long long size;
+};
+
+struct v3_vm_mem_state {
+    unsigned long long mem_size;
+    unsigned long num_regions;
+    struct v3_vm_mem_region region[];
 };

 char *v3_lookup_option(char *name);
@@ -396,7 +421,11 @@ int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu);

 int v3_free_vm(struct v3_vm_info * vm);

-int v3_get_state_vm(struct v3_vm_info *vm, struct v3_vm_state *out);
+int v3_get_state_vm(struct v3_vm_info *vm,
+		    struct v3_vm_base_state *base,
+		    struct v3_vm_core_state *core,
+		    struct v3_vm_mem_state *mem);
+

 int v3_deliver_irq(struct v3_vm_info * vm, struct v3_interrupt * intr);
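
Note on the struct changes: vcore[0] becomes a C99 flexible array member (vcore[]), and the memory state gains a parallel region[] array, so sizeof() on the enclosing structs no longer accounts for any array storage. A caller must size its buffers explicitly; a minimal sketch, with n and r standing for the desired capacities:

    /* Flexible array members contribute nothing to sizeof(), so the
     * trailing array space has to be added by hand. */
    size_t core_bytes = sizeof(struct v3_vm_core_state) + n * sizeof(struct v3_vm_vcore_state);
    size_t mem_bytes  = sizeof(struct v3_vm_mem_state)  + r * sizeof(struct v3_vm_mem_region);
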
diff --git a/palacios/include/palacios/vmm_mem.h b/palacios/include/palacios/vmm_mem.h
index 6974344..e4505db 100644
--- a/palacios/include/palacios/vmm_mem.h
+++ b/palacios/include/palacios/vmm_mem.h
@@ -71,16 +71,20 @@ struct v3_mem_region {

     void * priv_data;

-    int core_id;
+    int core_id;           // The virtual core this region is assigned to (-1 means all cores)
+    int numa_id;           // The NUMA node this region is allocated from

     struct rb_node tree_node; // This for memory regions mapped to the global map
 };


 struct v3_mem_map {
-    struct v3_mem_region base_region;

     struct rb_root mem_regions;
+
+    uint32_t num_base_regions;
+    struct v3_mem_region * base_regions;
+
 };
@@ -106,6 +110,7 @@ int v3_add_shadow_mem(struct v3_vm_info * vm, uint16_t core_id,

 struct v3_mem_region * v3_get_mem_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr);

+struct v3_mem_region * v3_get_base_region(struct v3_vm_info * vm, addr_t gpa);

 uint32_t v3_get_max_page_size(struct guest_info * core, addr_t fault_addr, v3_cpu_mode_t mode);
diff --git a/palacios/src/palacios/vmm.c b/palacios/src/palacios/vmm.c
index a162da5..f28266c 100644
--- a/palacios/src/palacios/vmm.c
+++ b/palacios/src/palacios/vmm.c
@@ -18,6 +18,7 @@
  */

 #include
+#include
 #include
 #include
 #include
@@ -273,7 +274,7 @@ static int start_core(void * p)

 int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {

-    uint32_t i;
+    uint32_t i,j;
     uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier
     uint32_t avail_cores = 0;
     int vcore_id = 0;
@@ -288,16 +289,18 @@ int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {

     // Do not run if any core is using shadow paging and we are out of 4 GB bounds
     for (i=0;i<vm->num_cores;i++) {
 	if (vm->cores[i].shdw_pg_mode == SHADOW_PAGING) {
-	    if ((vm->mem_map.base_region.host_addr + vm->mem_size ) >= 0x100000000ULL) {
-		PrintError(vm, VCORE_NONE, "Base memory region exceeds 4 GB boundary with shadow paging enabled on core %d.\n",i);
-		PrintError(vm, VCORE_NONE, "Any use of non-64 bit mode in the guest is likely to fail in this configuration.\n");
-		PrintError(vm, VCORE_NONE, "If you would like to proceed anyway, remove this check and recompile Palacios.\n");
-		PrintError(vm, VCORE_NONE, "Alternatively, change this VM to use nested paging.\n");
-		return -1;
+	    for (j=0;j<vm->mem_map.num_base_regions;j++) {
+		if ((vm->mem_map.base_regions[j].host_addr + V3_CONFIG_MEM_BLOCK_SIZE) >= 0x100000000ULL) {
+		    PrintError(vm, VCORE_NONE, "Base memory region %d exceeds 4 GB boundary with shadow paging enabled on core %d.\n",j, i);
+		    PrintError(vm, VCORE_NONE, "Any use of non-64 bit mode in the guest is likely to fail in this configuration.\n");
+		    PrintError(vm, VCORE_NONE, "If you would like to proceed anyway, remove this check and recompile Palacios.\n");
+		    PrintError(vm, VCORE_NONE, "Alternatively, change this VM to use nested paging.\n");
+		    return -1;
+		}
 	    }
 	}
     }
-
+
     /// CHECK IF WE ARE MULTICORE ENABLED....

     V3_Print(vm, VCORE_NONE, "V3 --  Starting VM (%u cores)\n", vm->num_cores);
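
A worked check of the new per-region bound, assuming the default 128MB block size (0x8000000 bytes): a base region with host_addr 0xF8000000 fails, since 0xF8000000 + 0x8000000 = 0x100000000 (the 4GB line), while one at 0xE0000000 passes (0xE0000000 + 0x8000000 = 0xE8000000). Because the check is now per block, a multi-gigabyte guest can still use shadow paging as long as every individual block lands below 4GB in host physical memory.
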
@@ -633,59 +636,68 @@ int v3_simulate_vm(struct v3_vm_info * vm, unsigned int msecs) {
 }

-int v3_get_state_vm(struct v3_vm_info *vm, struct v3_vm_state *s)
+int v3_get_state_vm(struct v3_vm_info *vm,
+		    struct v3_vm_base_state *base,
+		    struct v3_vm_core_state *core,
+		    struct v3_vm_mem_state *mem)
 {
-    uint32_t i;
-    uint32_t numcores = s->num_vcores > vm->num_cores ? vm->num_cores : s->num_vcores;
-
-    switch (vm->run_state) {
-    case VM_INVALID: s->state = V3_VM_INVALID; break;
-    case VM_RUNNING: s->state = V3_VM_RUNNING; break;
-    case VM_STOPPED: s->state = V3_VM_STOPPED; break;
-    case VM_PAUSED: s->state = V3_VM_PAUSED; break;
-    case VM_ERROR: s->state = V3_VM_ERROR; break;
-    case VM_SIMULATING: s->state = V3_VM_SIMULATING; break;
-    default: s->state = V3_VM_UNKNOWN; break;
-    }
+    uint32_t i;
+    uint32_t numcores = core->num_vcores > vm->num_cores ? vm->num_cores : core->num_vcores;
+    uint32_t numregions = mem->num_regions > vm->mem_map.num_base_regions ? vm->mem_map.num_base_regions : mem->num_regions;
+
+    switch (vm->run_state) {
+    case VM_INVALID: base->state = V3_VM_INVALID; break;
+    case VM_RUNNING: base->state = V3_VM_RUNNING; break;
+    case VM_STOPPED: base->state = V3_VM_STOPPED; break;
+    case VM_PAUSED: base->state = V3_VM_PAUSED; break;
+    case VM_ERROR: base->state = V3_VM_ERROR; break;
+    case VM_SIMULATING: base->state = V3_VM_SIMULATING; break;
+    default: base->state = V3_VM_UNKNOWN; break;
+    }
+
+    for (i=0;i<numcores;i++) {
+	switch (vm->cores[i].core_run_state) {
+	case CORE_INVALID: core->vcore[i].state = V3_VCORE_INVALID; break;
+	case CORE_RUNNING: core->vcore[i].state = V3_VCORE_RUNNING; break;
+	case CORE_STOPPED: core->vcore[i].state = V3_VCORE_STOPPED; break;
+	default: core->vcore[i].state = V3_VCORE_UNKNOWN; break;
+	}
+	switch (vm->cores[i].cpu_mode) {
+	case REAL: core->vcore[i].cpu_mode = V3_VCORE_CPU_REAL; break;
+	case PROTECTED: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED; break;
+	case PROTECTED_PAE: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED_PAE; break;
+	case LONG: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG; break;
+	case LONG_32_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_32_COMPAT; break;
+	case LONG_16_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_16_COMPAT; break;
+	default: core->vcore[i].cpu_mode = V3_VCORE_CPU_UNKNOWN; break;
+	}
+	switch (vm->cores[i].shdw_pg_mode) {
+	case SHADOW_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_SHADOW; break;
+	case NESTED_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_NESTED; break;
+	default: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_UNKNOWN; break;
+	}
+	switch (vm->cores[i].mem_mode) {
+	case PHYSICAL_MEM: core->vcore[i].mem_mode = V3_VCORE_MEM_MODE_PHYSICAL; break;
+	case VIRTUAL_MEM: core->vcore[i].mem_mode = V3_VCORE_MEM_MODE_VIRTUAL; break;
+	default: core->vcore[i].mem_mode = V3_VCORE_MEM_MODE_UNKNOWN; break;
+	}
+
+	core->vcore[i].pcore = vm->cores[i].pcpu_id;
+	core->vcore[i].last_rip = (void*)(vm->cores[i].rip);
+	core->vcore[i].num_exits = vm->cores[i].num_exits;
+    }
+
+    core->num_vcores = numcores;

-    s->mem_base_paddr = (void*)(vm->mem_map.base_region.host_addr);
-    s->mem_size = vm->mem_size;
-
-    s->num_vcores = numcores;
-
-    for (i=0;i<numcores;i++) {
-	switch (vm->cores[i].core_run_state) {
-	case CORE_INVALID: s->vcore[i].state = V3_VCORE_INVALID; break;
-	case CORE_RUNNING: s->vcore[i].state = V3_VCORE_RUNNING; break;
-	case CORE_STOPPED: s->vcore[i].state = V3_VCORE_STOPPED; break;
-	default: s->vcore[i].state = V3_VCORE_UNKNOWN; break;
-	}
-	switch (vm->cores[i].cpu_mode) {
-	case REAL: s->vcore[i].cpu_mode = V3_VCORE_CPU_REAL; break;
-	case PROTECTED: s->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED; break;
-	case PROTECTED_PAE: s->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED_PAE; break;
-	case LONG: s->vcore[i].cpu_mode = V3_VCORE_CPU_LONG; break;
-	case LONG_32_COMPAT: s->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_32_COMPAT; break;
-	case LONG_16_COMPAT: s->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_16_COMPAT; break;
-	default: s->vcore[i].cpu_mode = V3_VCORE_CPU_UNKNOWN; break;
-	}
-	switch (vm->cores[i].shdw_pg_mode) {
-	case SHADOW_PAGING: s->vcore[i].mem_state = V3_VCORE_MEM_STATE_SHADOW; break;
-	case NESTED_PAGING: s->vcore[i].mem_state = V3_VCORE_MEM_STATE_NESTED; break;
-	default: s->vcore[i].mem_state = V3_VCORE_MEM_STATE_UNKNOWN; break;
-	}
-	switch (vm->cores[i].mem_mode) {
-	case PHYSICAL_MEM: s->vcore[i].mem_mode = V3_VCORE_MEM_MODE_PHYSICAL; break;
-	case VIRTUAL_MEM: s->vcore[i].mem_mode = V3_VCORE_MEM_MODE_VIRTUAL; break;
-	default: s->vcore[i].mem_mode = V3_VCORE_MEM_MODE_UNKNOWN; break;
-	}
-
-	s->vcore[i].pcore = vm->cores[i].pcpu_id;
-	s->vcore[i].last_rip = (void*)(vm->cores[i].rip);
-	s->vcore[i].num_exits = vm->cores[i].num_exits;
-    }

+    for (i=0;i<numregions;i++) {
+	mem->region[i].host_paddr = (void*)(vm->mem_map.base_regions[i].host_addr);
+	mem->region[i].size = V3_CONFIG_MEM_BLOCK_SIZE;
+    }

-    return 0;
+    mem->mem_size = vm->mem_size;
+    mem->num_regions = numregions;
+
+    return 0;
 }
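
A sketch of the new calling convention, from a hypothetical host-side caller. Here num_vcores and num_regions are set by the caller as capacities; v3_get_state_vm() clamps them against the VM's actual counts and writes back the number of entries it filled. Everything except the v3_* API and the struct names is illustrative:

    struct v3_vm_base_state base;
    struct v3_vm_core_state * core;
    struct v3_vm_mem_state * mem;

    /* room for up to 32 vcores and 64 base regions (arbitrary caps) */
    core = palacios_alloc(sizeof(*core) + 32 * sizeof(struct v3_vm_vcore_state));
    mem  = palacios_alloc(sizeof(*mem) + 64 * sizeof(struct v3_vm_mem_region));

    if (core && mem) {
        core->num_vcores = 32;   /* in: capacity, out: entries filled */
        mem->num_regions = 64;   /* in: capacity, out: entries filled */

        if (v3_get_state_vm(vm, &base, core, mem) == 0) {
            /* base.state is the VM run state; mem->region[0..num_regions-1]
             * now holds one (host_paddr, size) pair per base block */
        }
    }
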
diff --git a/palacios/src/palacios/vmm_mem.c b/palacios/src/palacios/vmm_mem.c
index 29579e5..d1d2b43 100644
--- a/palacios/src/palacios/vmm_mem.c
+++ b/palacios/src/palacios/vmm_mem.c
@@ -29,14 +29,32 @@

+struct v3_mem_region * v3_get_base_region(struct v3_vm_info * vm, addr_t gpa) {
+    struct v3_mem_map * map = &(vm->mem_map);
+    uint32_t block_index = gpa / V3_CONFIG_MEM_BLOCK_SIZE;
+
+    if ((gpa > ((addr_t)map->num_base_regions * V3_CONFIG_MEM_BLOCK_SIZE)) ||
+	(block_index >= map->num_base_regions)) {
+	PrintError(vm, VCORE_NONE, "Guest Address Exceeds Base Memory Size (ga=0x%p), (limit=0x%p)\n",
+		   (void *)gpa, (void *)vm->mem_size);
+	v3_print_mem_map(vm);
+
+	return NULL;
+    }
+
+    return &(map->base_regions[block_index]);
+}

 static int mem_offset_hypercall(struct guest_info * info, uint_t hcall_id, void * private_data) {
+    /*
     PrintDebug(info->vm_info, info,"V3Vee: Memory offset hypercall (offset=%p)\n",
 	       (void *)(info->vm_info->mem_map.base_region.host_addr));

     info->vm_regs.rbx = info->vm_info->mem_map.base_region.host_addr;
-
-    return 0;
+    */
+    return -1;
 }

 static int unhandled_err(struct guest_info * core, addr_t guest_va, addr_t guest_pa,
@@ -52,43 +70,98 @@ static int unhandled_err(struct guest_info * core, addr_t guest_va, addr_t guest

     return -1;
 }

+static int gpa_to_node_from_cfg(struct v3_vm_info * vm, addr_t gpa) {
+    v3_cfg_tree_t * layout_cfg = v3_cfg_subtree(vm->cfg_data->cfg, "mem_layout");
+    v3_cfg_tree_t * region_desc = v3_cfg_subtree(layout_cfg, "region");
+
+    while (region_desc) {
+	char * start_addr_str = v3_cfg_val(region_desc, "start_addr");
+	char * end_addr_str = v3_cfg_val(region_desc, "end_addr");
+	char * node_id_str = v3_cfg_val(region_desc, "node");
+
+	addr_t start_addr = 0;
+	addr_t end_addr = 0;
+	int node_id = 0;
+
+	if ((!start_addr_str) || (!end_addr_str) || (!node_id_str)) {
+	    PrintError(vm, VCORE_NONE, "Invalid memory layout in configuration\n");
+	    return -1;
+	}
+
+	start_addr = atox(start_addr_str);
+	end_addr = atox(end_addr_str);
+	node_id = atoi(node_id_str);
+
+	if ((gpa >= start_addr) && (gpa < end_addr)) {
+	    return node_id;
+	}
+
+	region_desc = v3_cfg_next_branch(region_desc);
+    }
+
+    return -1;
+}
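
The base-region lookup above is constant-time index arithmetic rather than a range check against a single region. A worked example of the math, assuming the default 128MB block size and the names from the code above:

    uint32_t block_index = gpa / V3_CONFIG_MEM_BLOCK_SIZE;   /* gpa 0x18000000 -> block 3 */
    addr_t   offset      = gpa % V3_CONFIG_MEM_BLOCK_SIZE;   /* offset within that block  */
    addr_t   hpa         = map->base_regions[block_index].host_addr + offset;

    /* A 1GB (0x40000000-byte) guest therefore needs
     * ceil(0x40000000 / 0x8000000) = 8 base regions. */
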

 int v3_init_mem_map(struct v3_vm_info * vm) {
     struct v3_mem_map * map = &(vm->mem_map);
-    addr_t mem_pages = vm->mem_size >> 12;
-
-    memset(&(map->base_region), 0, sizeof(struct v3_mem_region));
+    addr_t block_pages = V3_CONFIG_MEM_BLOCK_SIZE >> 12;
+    int i = 0;

-    map->mem_regions.rb_node = NULL;
+    map->num_base_regions = (vm->mem_size / V3_CONFIG_MEM_BLOCK_SIZE) + \
+	((vm->mem_size % V3_CONFIG_MEM_BLOCK_SIZE) > 0);

-    // There is an underlying region that contains all of the guest memory
-    //    PrintDebug(info->vm_info, info, "Mapping %d pages of memory (%u bytes)\n", (int)mem_pages, (uint_t)info->mem_size);
-
-    // 2MB page alignment needed for 2MB hardware nested paging
-    map->base_region.guest_start = 0;
-    map->base_region.guest_end = mem_pages * PAGE_SIZE_4KB;
+    map->mem_regions.rb_node = NULL;

-#ifdef V3_CONFIG_ALIGNED_PG_ALLOC
-    map->base_region.host_addr = (addr_t)V3_AllocAlignedPages(mem_pages, vm->mem_align);
-#else
-    map->base_region.host_addr = (addr_t)V3_AllocPages(mem_pages);
-#endif
+    map->base_regions = V3_Malloc(sizeof(struct v3_mem_region) * map->num_base_regions);

-    if ((void*)map->base_region.host_addr == NULL) {
-	PrintError(vm, VCORE_NONE,"Could not allocate guest memory\n");
-	return -1;
+    if (map->base_regions == NULL) {
+	PrintError(vm, VCORE_NONE, "Could not allocate base region array\n");
+	return -1;
     }

-    // Clear the memory...
-    memset(V3_VAddr((void *)map->base_region.host_addr), 0, mem_pages * PAGE_SIZE_4KB);
+    memset(map->base_regions, 0, sizeof(struct v3_mem_region) * map->num_base_regions);
+
+    for (i = 0; i < map->num_base_regions; i++) {
+	struct v3_mem_region * region = &(map->base_regions[i]);
+	int node_id = -1;
+
+	// 2MB page alignment needed for 2MB hardware nested paging
+	region->guest_start = (addr_t)V3_CONFIG_MEM_BLOCK_SIZE * i;
+	region->guest_end = region->guest_start + V3_CONFIG_MEM_BLOCK_SIZE;
+
+	// We assume the xml config was smart enough to align the layout to the block size.
+	// If it wasn't, we ignore those settings and use whatever node the
+	// first byte of the block is assigned to.
+	node_id = gpa_to_node_from_cfg(vm, region->guest_start);
+
+	V3_Print(vm, VCORE_NONE, "Allocating block %d on node %d\n", i, node_id);
+
+	if (node_id != -1) {
+	    region->host_addr = (addr_t)V3_AllocPagesNode(block_pages, node_id);
+	} else {
+	    region->host_addr = (addr_t)V3_AllocPages(block_pages);
+	}
+
+	if ((void *)region->host_addr == NULL) {
+	    PrintError(vm, VCORE_NONE, "Could not allocate guest memory\n");
+	    return -1;
+	}
+
+	// Clear the memory...
+	memset(V3_VAddr((void *)region->host_addr), 0, V3_CONFIG_MEM_BLOCK_SIZE);

-    map->base_region.flags.read = 1;
-    map->base_region.flags.write = 1;
-    map->base_region.flags.exec = 1;
-    map->base_region.flags.base = 1;
-    map->base_region.flags.alloced = 1;
-
-    map->base_region.unhandled = unhandled_err;
+	region->flags.read = 1;
+	region->flags.write = 1;
+	region->flags.exec = 1;
+	region->flags.base = 1;
+	region->flags.alloced = 1;
+
+	region->unhandled = unhandled_err;
+    }

     v3_register_hypercall(vm, MEM_OFFSET_HCALL, mem_offset_hypercall, NULL);
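
For reference, a sketch of the <mem_layout> subtree that gpa_to_node_from_cfg() walks. The element and attribute names (mem_layout, region, start_addr, end_addr, node) come from the parsing code above; the values are illustrative, deliberately aligned to the default 128MB block size as the allocation loop assumes:

    <mem_layout>
        <region start_addr="0x0"        end_addr="0x10000000" node="0" />  <!-- blocks 0-1 on node 0 -->
        <region start_addr="0x10000000" end_addr="0x20000000" node="1" />  <!-- blocks 2-3 on node 1 -->
    </mem_layout>
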
@@ -97,11 +170,13 @@

 void v3_delete_mem_map(struct v3_vm_info * vm) {
-    struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
+    struct v3_mem_map * map = &(vm->mem_map);
+    struct rb_node * node = v3_rb_first(&(map->mem_regions));
     struct v3_mem_region * reg;
     struct rb_node * tmp_node = NULL;
-    addr_t mem_pages = vm->mem_size >> 12;
-
+    addr_t block_pages = V3_CONFIG_MEM_BLOCK_SIZE >> 12;
+    int i = 0;
+
     while (node) {
 	reg = rb_entry(node, struct v3_mem_region, tree_node);
 	tmp_node = node;
@@ -110,7 +185,13 @@ void v3_delete_mem_map(struct v3_vm_info * vm) {
 	v3_delete_mem_region(vm, reg);
     }

-    V3_FreePages((void *)(vm->mem_map.base_region.host_addr), mem_pages);
+    for (i = 0; i < map->num_base_regions; i++) {
+	struct v3_mem_region * region = &(map->base_regions[i]);
+	V3_FreePages((void *)(region->host_addr), block_pages);
+    }
+
+    V3_Free(map->base_regions);
 }
@@ -285,15 +366,7 @@ struct v3_mem_region * v3_get_mem_region(struct v3_vm_info * vm, uint16_t core_i

     // There is not registered region, so we check if its a valid address in the base region

-    if (guest_addr > vm->mem_map.base_region.guest_end) {
-	PrintError(vm, VCORE_NONE, "Guest Address Exceeds Base Memory Size (ga=0x%p), (limit=0x%p) (core=0x%x)\n",
-		   (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end, core_id);
-	v3_print_mem_map(vm);
-
-	return NULL;
-    }
-
-    return &(vm->mem_map.base_region);
+    return v3_get_base_region(vm, guest_addr);
 }
@@ -535,18 +608,25 @@ uint32_t v3_get_max_page_size(struct guest_info * core, addr_t page_addr, v3_cpu

 void v3_print_mem_map(struct v3_vm_info * vm) {
+    struct v3_mem_map * map = &(vm->mem_map);
     struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
-    struct v3_mem_region * reg = &(vm->mem_map.base_region);
+    struct v3_mem_region * reg = NULL;
     int i = 0;

     V3_Print(vm, VCORE_NONE, "Memory Layout (all cores):\n");

+    V3_Print(vm, VCORE_NONE, "Base Memory: (%d regions)\n", map->num_base_regions);
+
+    for (i = 0; i < map->num_base_regions; i++) {
+	reg = &(map->base_regions[i]);

-    V3_Print(vm, VCORE_NONE, "Base Region (all cores): 0x%p - 0x%p -> 0x%p\n",
-	     (void *)(reg->guest_start),
-	     (void *)(reg->guest_end - 1),
-	     (void *)(reg->host_addr));
+	V3_Print(vm, VCORE_NONE, "Base Region[%d] (all cores): 0x%p - 0x%p -> 0x%p\n",
+		 i,
+		 (void *)(reg->guest_start),
+		 (void *)(reg->guest_end - 1),
+		 (void *)(reg->host_addr));
+    }

     // If the memory map is empty, don't print it
     if (node == NULL) {