}
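+
+/* Parse the "alignment" attribute of the VM's memory configuration.
+ * "2MB" and "4MB" (case-insensitive) select large-page alignments;
+ * anything else, including a missing attribute, falls back to 4KB.
+ */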
+static inline uint32_t get_alignment(char * align_str) {
+ if (align_str != NULL) {
+ if (strncasecmp(align_str, "2MB", strlen("2MB")) == 0) {
+ return PAGE_SIZE_2MB;
+ } else if (strncasecmp(align_str, "4MB", strlen("4MB")) == 0) {
+ return PAGE_SIZE_4MB;
+ }
+ }
+
+ // default is 4KB alignment
+ return PAGE_SIZE_4KB;
+}
int v3_init_mem_map(struct v3_vm_info * vm) {
struct v3_mem_map * map = &(vm->mem_map);
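+
+    // Read the optional "alignment" setting from the "memory" section of the VM config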
+ v3_cfg_tree_t * pg_cfg = v3_cfg_subtree(vm->cfg_data->cfg, "memory");
+ uint32_t alignment = get_alignment(v3_cfg_val(pg_cfg, "alignment"));
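+
+    // Number of 4KB pages backing the guest's physical memory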
addr_t mem_pages = vm->mem_size >> 12;
memset(&(map->base_region), 0, sizeof(struct v3_mem_region));
map->base_region.guest_start = 0;
map->base_region.guest_end = mem_pages * PAGE_SIZE_4KB;
+
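+    // Allocate the base memory region, honoring the alignment request when the
+    // host supports aligned allocations; otherwise warn and fall back to the
+    // default 4KB-aligned allocator.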
+#ifdef ALIGNED_PG_ALLOC
+ map->base_region.host_addr = (addr_t)V3_AllocAlignedPages(mem_pages, alignment);
+#else
+    if (alignment != PAGE_SIZE_4KB) {
+        PrintError("Aligned page allocations are not supported on this host (requested alignment=%u)\n", alignment);
+        PrintError("Ignoring alignment request\n");
+    }
map->base_region.host_addr = (addr_t)V3_AllocPages(mem_pages);
+#endif
map->base_region.flags.read = 1;
map->base_region.flags.write = 1;
-int v3_insert_mem_region(struct v3_vm_info * vm,
- struct v3_mem_region * region) {
+int v3_insert_mem_region(struct v3_vm_info * vm, struct v3_mem_region * region) {
struct v3_mem_region * ret;
int i = 0;
if (reg->core_id == V3_MEM_CORE_ANY) {
// found relevant region, it's available on all cores
return reg;
- } else if (core_id==reg->core_id) {
+ } else if (core_id == reg->core_id) {
// found relevant region, it's available on the indicated core
return reg;
} else if (core_id < reg->core_id) {
return NULL;
}
-
-
+
+ return &(vm->mem_map.base_region);
+}
+
+
+
+/* Search the "hooked" memory regions for the first region that ends after the
+ * given guest address. If the address lies beyond the base region, return NULL.
+ * Otherwise return the hooked region found, or the base region if no hooked
+ * region ends after the address.
+ */
+struct v3_mem_region * v3_get_next_mem_region( struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
+ struct rb_node * n = vm->mem_map.mem_regions.rb_node;
+ struct v3_mem_region * reg = NULL;
+
+    // Walk right in the tree while the address is at or past the current
+    // region's end address.
+ while (n) {
+ reg = rb_entry(n, struct v3_mem_region, tree_node);
+ if (guest_addr >= reg->guest_end) { // reg is [start,end)
+ n = n->rb_right;
+ } else {
+ if ((core_id == reg->core_id) || (reg->core_id == V3_MEM_CORE_ANY)) {
+ return reg;
+ } else {
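+                // reg ends after the address but belongs to a different core; keep searching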
+ n = n->rb_right;
+ }
+ }
+ }
+
+    // No hooked region ends after the address, so check whether it is a valid
+    // address within the base region
+
+ if (guest_addr >= vm->mem_map.base_region.guest_end) {
+ PrintError("%s: Guest Address Exceeds Base Memory Size (ga=%p), (limit=%p)\n",
+ __FUNCTION__, (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end);
+ v3_print_mem_map(vm);
+ return NULL;
+ }
+
return &(vm->mem_map.base_region);
}