}
+static inline uint32_t get_alignment(char * align_str) {
+ if (align_str != NULL) {
+ if (strncasecmp(align_str, "2MB", strlen("2MB")) == 0) {
+ return PAGE_SIZE_2MB;
+ } else if (strncasecmp(align_str, "4MB", strlen("4MB")) == 0) {
+ return PAGE_SIZE_4MB;
+ }
+ }
+
+ // default is 4KB alignment
+ return PAGE_SIZE_4KB;
+}
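
/* A minimal, self-contained sketch of how get_alignment() above behaves. The
 * PAGE_SIZE_* values here are assumptions chosen to match their names
 * (Palacios defines the real constants in its own headers), and the config
 * attribute is inferred from the v3_cfg_val(pg_cfg, "alignment") call below,
 * i.e. a guest config along the lines of <memory alignment="2MB" ...>.
 */
#include <stdio.h>
#include <string.h>    /* strlen() */
#include <strings.h>   /* strncasecmp() */
#include <stdint.h>

#define PAGE_SIZE_4KB 0x1000U
#define PAGE_SIZE_2MB 0x200000U
#define PAGE_SIZE_4MB 0x400000U

static uint32_t get_alignment(char * align_str) {   /* same logic as the patch */
    if (align_str != NULL) {
        if (strncasecmp(align_str, "2MB", strlen("2MB")) == 0) {
            return PAGE_SIZE_2MB;
        } else if (strncasecmp(align_str, "4MB", strlen("4MB")) == 0) {
            return PAGE_SIZE_4MB;
        }
    }
    return PAGE_SIZE_4KB;   /* default: 4KB alignment */
}

int main(void) {
    printf("%#x\n", get_alignment("2mb"));   /* 0x200000: strncasecmp is case-insensitive */
    printf("%#x\n", get_alignment(NULL));    /* 0x1000: missing attribute falls back to 4KB */
    return 0;
}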
int v3_init_mem_map(struct v3_vm_info * vm) {
struct v3_mem_map * map = &(vm->mem_map);
+ v3_cfg_tree_t * pg_cfg = v3_cfg_subtree(vm->cfg_data->cfg, "memory");
+ uint32_t alignment = get_alignment(v3_cfg_val(pg_cfg, "alignment"));
addr_t mem_pages = vm->mem_size >> 12;
memset(&(map->base_region), 0, sizeof(struct v3_mem_region));
map->base_region.guest_start = 0;
map->base_region.guest_end = mem_pages * PAGE_SIZE_4KB;
+
+#ifdef ALIGNED_PG_ALLOC
+ map->base_region.host_addr = (addr_t)V3_AllocAlignedPages(mem_pages, alignment);
+#else
+ if (alignment != PAGE_SIZE_4KB) {
+ PrintError("Aligned page allocations are not supported in this host (requested alignment=%d)\n", alignment);
+ PrintError("Ignoring alignment request\n");
+ }
map->base_region.host_addr = (addr_t)V3_AllocPages(mem_pages);
+#endif
map->base_region.flags.read = 1;
map->base_region.flags.write = 1;
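
/* V3_AllocAlignedPages() is supplied by the host embedding; the patch only
 * calls it. The helper below is a sketch of one classic way a host without
 * native aligned allocation could approximate it: over-allocate by the
 * alignment and round the base up. This is an illustrative assumption, not
 * Palacios host code; a real implementation must also record the raw pointer
 * so the padded allocation can be freed later.
 */
#include <stdint.h>
#include <stdlib.h>

#define SKETCH_PAGE_SIZE 0x1000UL

static void * alloc_aligned_pages_sketch(unsigned long num_pages, uint32_t alignment) {
    uintptr_t raw = (uintptr_t)malloc(num_pages * SKETCH_PAGE_SIZE + alignment);

    if (raw == 0) {
        return NULL;
    }

    /* Round up to the next multiple of alignment (assumed to be a power of two). */
    return (void *)((raw + alignment - 1) & ~((uintptr_t)alignment - 1));
}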
return tmp_region;
} else if (region->core_id < tmp_region->core_id) {
p = &(*p)->rb_left;
- } else {
+ } else {
p = &(*p)->rb_right;
}
}
-int v3_insert_mem_region(struct v3_vm_info * vm,
- struct v3_mem_region * region) {
+int v3_insert_mem_region(struct v3_vm_info * vm, struct v3_mem_region * region) {
struct v3_mem_region * ret;
int i = 0;
struct v3_mem_region * reg = NULL;
while (n) {
+
reg = rb_entry(n, struct v3_mem_region, tree_node);
if (guest_addr < reg->guest_start) {
n = n->rb_left;
} else if (guest_addr >= reg->guest_end) {
n = n->rb_right;
} else {
- if ((core_id == reg->core_id) ||
- (reg->core_id == V3_MEM_CORE_ANY)) {
- return reg;
- } else {
+ if (reg->core_id == V3_MEM_CORE_ANY) {
+ // found relevant region, it's available on all cores
+ return reg;
+ } else if (core_id == reg->core_id) {
+ // found relevant region, it's available on the indicated core
+ return reg;
+ } else if (core_id < reg->core_id) {
+ // the region's core id is too big, go left
+ n = n->rb_left;
+ } else if (core_id > reg->core_id) {
+ // the region's core id is too small, go right
+ n = n->rb_right;
+ } else {
+ PrintDebug("v3_get_mem_region: Impossible!\n");
+ return NULL;
}
}
}
// There is no registered region, so we check if it's a valid address in the base region
- if (guest_addr > vm->mem_map.base_region.guest_end) {
+ if (guest_addr >= vm->mem_map.base_region.guest_end) {
- PrintError("Guest Address Exceeds Base Memory Size (ga=%p), (limit=%p)\n",
- (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end);
+ PrintError("Guest Address Exceeds Base Memory Size (ga=0x%p), (limit=0x%p) (core=0x%x)\n",
+ (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end, core_id);
v3_print_mem_map(vm);
return NULL;
}
-
+
+ return &(vm->mem_map.base_region);
+}
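
/* The traversal above implies an ordering for the region tree: regions are
 * keyed by guest address range first and core id second, with V3_MEM_CORE_ANY
 * matching every core. The standalone comparator below restates that
 * invariant; it is an inference from the lookup code, not the insertion
 * routine itself, and the types are minimal stand-ins for v3_mem_region.
 */
#include <stdint.h>

typedef uintptr_t sketch_addr_t;
#define SKETCH_CORE_ANY ((uint16_t)0xffff)   /* assumed V3_MEM_CORE_ANY sentinel */

struct sketch_region {
    sketch_addr_t guest_start;
    sketch_addr_t guest_end;    /* exclusive: region covers [start, end) */
    uint16_t      core_id;
};

/* < 0: search left subtree; > 0: search right subtree; 0: region matches. */
static int region_cmp_sketch(sketch_addr_t guest_addr, uint16_t core_id,
                             const struct sketch_region * reg) {
    if (guest_addr < reg->guest_start)   return -1;
    if (guest_addr >= reg->guest_end)    return  1;
    if (reg->core_id == SKETCH_CORE_ANY) return  0;   /* shared across all cores */
    if (core_id < reg->core_id)          return -1;
    if (core_id > reg->core_id)          return  1;
    return 0;                                         /* per-core exact match */
}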
+
+
+
+/* Search the "hooked" memory regions for the first region that ends after the
+ * given address. If the address lies beyond the base region, return NULL.
+ * Otherwise return the first matching region, or the base region if no hooked
+ * region ends after the given address.
+ */
+struct v3_mem_region * v3_get_next_mem_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
+ struct rb_node * n = vm->mem_map.mem_regions.rb_node;
+ struct v3_mem_region * reg = NULL;
+
+ // Keep going right in the tree while the address is at or beyond the current
+ // region's end address (regions cover [start, end), so guest_end is exclusive).
+ while (n) {
+ reg = rb_entry(n, struct v3_mem_region, tree_node);
+ if (guest_addr >= reg->guest_end) { // reg is [start,end)
+ n = n->rb_right;
+ } else {
+ if ((core_id == reg->core_id) || (reg->core_id == V3_MEM_CORE_ANY)) {
+ return reg;
+ } else {
+ n = n->rb_right;
+ }
+ }
+ }
+
+ // There is no registered region, so we check if it's a valid address in the base region
+
+ if (guest_addr >= vm->mem_map.base_region.guest_end) {
+ PrintError("%s: Guest Address Exceeds Base Memory Size (ga=%p), (limit=%p)\n",
+ __FUNCTION__, (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end);
+ v3_print_mem_map(vm);
+ return NULL;
+ }
+
return &(vm->mem_map.base_region);
}
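
/* A sketch of the kind of query v3_get_next_mem_region() enables; the
 * large-page use case is an assumption about intent, and the code relies on
 * the declarations above rather than being standalone. Before backing a
 * guest 2MB frame with one large page, a caller could check whether any
 * hooked region intersects [frame_start, frame_end).
 */
static int frame_is_unhooked_sketch(struct v3_vm_info * vm, uint16_t core_id,
                                    addr_t frame_start, addr_t frame_end) {
    struct v3_mem_region * reg = v3_get_next_mem_region(vm, core_id, frame_start);

    if (reg == NULL) {
        return 0;   /* address beyond base memory: not mappable at all */
    }

    if (reg == &(vm->mem_map.base_region)) {
        return 1;   /* no hooked region ends after frame_start */
    }

    /* A hooked region ends inside or after the frame; it overlaps the frame
     * unless it begins at or beyond the frame's end. */
    return (reg->guest_start >= frame_end);
}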
-
-addr_t v3_get_shadow_addr(struct v3_mem_region * reg, uint16_t core_id, addr_t guest_addr) {
- if (reg && (reg->flags.alloced == 1)) {
- return (guest_addr - reg->guest_start) + reg->host_addr;
- } else {
- // PrintError("MEM Region Invalid\n");
- return 0;
- }
-
-}
-
-
-
void v3_print_mem_map(struct v3_vm_info * vm) {
struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
struct v3_mem_region * reg = &(vm->mem_map.base_region);
int i = 0;
- V3_Print("Memory Layout:\n");
+ V3_Print("Memory Layout (all cores):\n");
- V3_Print("Base Region: 0x%p - 0x%p -> 0x%p\n",
+ V3_Print("Base Region (all cores): 0x%p - 0x%p -> 0x%p\n",
(void *)(reg->guest_start),
(void *)(reg->guest_end - 1),
(void *)(reg->host_addr));
- V3_Print("\t(flags=%x) (unhandled = 0x%p)\n",
- reg->flags.value,
- reg->unhandled);
+ V3_Print("\t(flags=0x%x) (core=0x%x) (unhandled = 0x%p)\n",
+ reg->flags.value,
+ reg->core_id,
+ reg->unhandled);
i++;
} while ((node = v3_rb_next(node)));