#include <interfaces/vmm_numa.h>
+#ifdef V3_CONFIG_SWAPPING
+#include <palacios/vmm_swapping.h>
+#endif
+
uint64_t v3_mem_block_size = V3_CONFIG_MEM_BLOCK_SIZE;
+
+
struct v3_mem_region * v3_get_base_region(struct v3_vm_info * vm, addr_t gpa) {
+
+ //PrintDebug(VM_NONE, VCORE_NONE, "get_base_region called");
struct v3_mem_map * map = &(vm->mem_map);
uint32_t block_index = gpa / v3_mem_block_size;
-
+    struct v3_mem_region * reg;
if ((gpa >= (map->num_base_regions * v3_mem_block_size)) ||
(block_index >= map->num_base_regions)) {
	PrintError(vm, VCORE_NONE, "Guest Address Exceeds Base Memory Size (ga=0x%p), (limit=0x%p)\n",
		   (void *)gpa, (void *)(map->num_base_regions * v3_mem_block_size));
return NULL;
}
+ reg = &(map->base_regions[block_index]);
- return &(map->base_regions[block_index]);
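+    // With swapping enabled, a base region may currently reside on disk;
+    // swap it in on first touch so that every caller sees a region backed
+    // by real host memory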
+#ifdef V3_CONFIG_SWAPPING
+    if (vm->swap_state.enable_swapping) {
+ if (reg->flags.swapped) {
+ if (v3_swap_in_region(vm,reg)) {
+ PrintError(vm, VCORE_NONE, "Unable to swap in region GPA=%p..%p!!!\n",(void*)reg->guest_start,(void*)reg->guest_end);
+ v3_print_mem_map(vm);
+ return NULL;
+ }
+ }
+ }
+ v3_touch_region(vm,reg);
+#endif
+
+ return reg;
}
+
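+/* Typical caller pattern (a sketch, not taken verbatim from a call site):
+ *
+ *   struct v3_mem_region * reg = v3_get_base_region(vm, gpa);
+ *   if (!reg) { return -1; }  // gpa out of range, or swap-in failed
+ *   addr_t hpa = reg->host_addr + (gpa - reg->guest_start);
+ */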
static int mem_offset_hypercall(struct guest_info * info, uint_t hcall_id, void * private_data) {
/*
PrintDebug(info->vm_info, info,"V3Vee: Memory offset hypercall (offset=%p)\n",
} else {
return 1; // ask for nested, get shadow
}
- } else if (strcasecmp(pg_mode, "shadow") != 0) {
+	    } else if (strcasecmp(pg_mode, "shadow") == 0) {
return 1; // ask for shadow, get shadow
} else {
return 1; // ask for something else, get shadow
}
}
+#define CEIL_DIV(x,y) (((x)/(y)) + !!((x)%(y)))
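+// e.g. CEIL_DIV(10, 4) == 3: any nonzero remainder costs one extra block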
+
int v3_init_mem_map(struct v3_vm_info * vm) {
struct v3_mem_map * map = &(vm->mem_map);
addr_t block_pages = v3_mem_block_size >> 12;
int i = 0;
+ uint64_t num_base_regions_host_mem;
+
+ map->num_base_regions = CEIL_DIV(vm->mem_size, v3_mem_block_size);
- map->num_base_regions = (vm->mem_size / v3_mem_block_size) + \
- ((vm->mem_size % v3_mem_block_size) > 0);
+    num_base_regions_host_mem = map->num_base_regions; // without swapping
+    PrintDebug(VM_NONE, VCORE_NONE, "v3_init_mem_map: num_base_regions: %llu\n",
+	       (uint64_t)map->num_base_regions);
map->mem_regions.rb_node = NULL;
- map->base_regions = V3_Malloc(sizeof(struct v3_mem_region) * map->num_base_regions);
+#ifdef V3_CONFIG_SWAPPING
+ if (vm->swap_state.enable_swapping) {
+ num_base_regions_host_mem = CEIL_DIV(vm->swap_state.host_mem_size, v3_mem_block_size);
+ }
+#endif
+ PrintDebug(VM_NONE, VCORE_NONE, "v3_init_mem_map: %llu base regions will be allocated of %llu base regions in guest\n",
+ (uint64_t)num_base_regions_host_mem, (uint64_t)map->num_base_regions);
+
+ map->base_regions = V3_VMalloc(sizeof(struct v3_mem_region) * map->num_base_regions);
if (map->base_regions == NULL) {
PrintError(vm, VCORE_NONE, "Could not allocate base region array\n");
return -1;
}
memset(map->base_regions, 0, sizeof(struct v3_mem_region) * map->num_base_regions);
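+    // zeroing the array also establishes the defaults the swapping code
+    // below relies on (e.g. flags.swapped == 0, host_addr == 0)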
-
for (i = 0; i < map->num_base_regions; i++) {
struct v3_mem_region * region = &(map->base_regions[i]);
int node_id = -1;
// 2MB page alignment needed for 2MB hardware nested paging
+	// If swapping is enabled, only the low-address regions are backed by
+	// host memory at initialization; the rest start out swapped to disk
region->guest_start = v3_mem_block_size * i;
region->guest_end = region->guest_start + v3_mem_block_size;
// and use whatever node the first byte of the block is assigned to
node_id = gpa_to_node_from_cfg(vm, region->guest_start);
- V3_Print(vm, VCORE_NONE, "Allocating block %d on node %d\n", i, node_id);
-
- region->host_addr = (addr_t)V3_AllocPagesExtended(block_pages,
- PAGE_SIZE_4KB,
- node_id,
- will_use_shadow_paging(vm) ?
- V3_ALLOC_PAGES_CONSTRAINT_4GB : 0 );
-
- if ((void *)region->host_addr == NULL) {
- PrintError(vm, VCORE_NONE, "Could not allocate guest memory\n");
- return -1;
- }
- // Clear the memory...
- memset(V3_VAddr((void *)region->host_addr), 0, v3_mem_block_size);
+ if (i < num_base_regions_host_mem) {
+	    // Regions below num_base_regions_host_mem are allocated in host memory
+ V3_Print(vm, VCORE_NONE, "Allocating block %d on node %d\n", i, node_id);
+
+#ifdef V3_CONFIG_SWAPPING
+	    // nothing extra to do here - the memset of the region array
+	    // above already zeroed the swapping flags/state
+#endif
+
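+	    // pg_filter_func / pg_filter_state generalize the old fixed
+	    // V3_ALLOC_PAGES_CONSTRAINT_4GB flag: the resource controller
+	    // may now veto individual host pages at allocation time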
+ region->host_addr = (addr_t)V3_AllocPagesExtended(block_pages,
+ PAGE_SIZE_4KB,
+ node_id,
+ vm->resource_control.pg_filter_func,
+ vm->resource_control.pg_filter_state);
+
+ if ((void *)region->host_addr == NULL) {
+ PrintError(vm, VCORE_NONE, "Could not allocate guest memory\n");
+ return -1;
+ }
+
+ // Clear the memory...
+ memset(V3_VAddr((void *)region->host_addr), 0, v3_mem_block_size);
+
+ } else {
+
+#ifdef V3_CONFIG_SWAPPING
+	    if (vm->swap_state.enable_swapping) {
+		// The regions beyond num_base_regions_host_mem start out
+		// swapped to disk
+		region->flags.swapped = 1;
+		region->host_addr = (addr_t)0;
+		// other flags / state were correctly set up when the region
+		// array was zeroed earlier
+ }
+#endif
+
+ }
+
// Note assigned numa ID could be different than our request...
+	// Also note that when swapping is used, the NUMA info of an
+	// unallocated (swapped-out) region reflects the node of address
+	// 0x0, since its host_addr is still 0 at this point
region->numa_id = v3_numa_hpa_to_node(region->host_addr);
region->flags.read = 1;
region->flags.alloced = 1;
region->flags.limit32 = will_use_shadow_paging(vm);
-
region->unhandled = unhandled_err;
}
for (i = 0; i < map->num_base_regions; i++) {
struct v3_mem_region * region = &(map->base_regions[i]);
+#ifdef V3_CONFIG_SWAPPING
+ if (vm->swap_state.enable_swapping) {
+ if (!region->flags.swapped) {
+ V3_FreePages((void *)(region->host_addr), block_pages);
+ } // otherwise this is not allocated space
+ }
+#else
V3_FreePages((void *)(region->host_addr), block_pages);
+#endif
}
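+    // the region array came from V3_VMalloc (it scales with guest memory
+    // size), so release it with the matching V3_VFree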
- V3_Free(map->base_regions);
-
+ V3_VFree(map->base_regions);
}
v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);
if (mem_mode == PHYSICAL_MEM) {
- rc |= v3_invalidate_passthrough_addr_range(info, region->guest_start, region->guest_end-1);
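+		// trailing NULL,NULL: decline the optional out-parameters the
+		// updated invalidation API adds (assumed to report the actual
+		// invalidated range, which is not needed here)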
+		rc |= v3_invalidate_passthrough_addr_range(info, region->guest_start, region->guest_end-1, NULL, NULL);
} else {
rc |= v3_invalidate_shadow_pts(info);
}
} else if (info->shdw_pg_mode == NESTED_PAGING) {
- rc |= v3_invalidate_nested_addr_range(info, region->guest_start, region->guest_end-1);
+	    rc |= v3_invalidate_nested_addr_range(info, region->guest_start, region->guest_end-1, NULL, NULL);
}
}
}
- if (parent->guest_start > guest_addr) {
+ if (!parent || parent->guest_start > guest_addr) {
return parent;
} else if (parent->guest_end < guest_addr) {
struct rb_node * node = &(parent->tree_node);
v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);
if (mem_mode == PHYSICAL_MEM) {
- rc |= v3_invalidate_passthrough_addr_range(info,reg->guest_start, reg->guest_end-1);
+		rc |= v3_invalidate_passthrough_addr_range(info, reg->guest_start, reg->guest_end-1, NULL, NULL);
} else {
rc |= v3_invalidate_shadow_pts(info);
}
} else if (info->shdw_pg_mode == NESTED_PAGING) {
- rc |= v3_invalidate_nested_addr_range(info,reg->guest_start, reg->guest_end-1);
+	    rc |= v3_invalidate_nested_addr_range(info, reg->guest_start, reg->guest_end-1, NULL, NULL);
}
}