}
static int unhandled_err(struct guest_info * core, addr_t guest_va, addr_t guest_pa,
- struct v3_shadow_region * reg, pf_error_t access_info) {
+ struct v3_mem_region * reg, pf_error_t access_info) {
PrintError("Unhandled memory access error\n");
struct v3_mem_map * map = &(vm->mem_map);
addr_t mem_pages = vm->mem_size >> 12;
- memset(&(map->base_region), 0, sizeof(struct v3_shadow_region));
+ memset(&(map->base_region), 0, sizeof(struct v3_mem_region));
- map->shdw_regions.rb_node = NULL;
+ map->mem_regions.rb_node = NULL;
// There is an underlying region that contains all of the guest memory
// PrintDebug("Mapping %d pages of memory (%u bytes)\n", (int)mem_pages, (uint_t)info->mem_size);
+ // 2MB page alignment needed for 2MB hardware nested paging
map->base_region.guest_start = 0;
map->base_region.guest_end = mem_pages * PAGE_SIZE_4KB;
- map->base_region.host_addr = (addr_t)V3_AllocPages(mem_pages);
+ map->base_region.host_addr = (addr_t)V3_AllocAlignedPages(mem_pages, PAGE_SIZE_2MB);
map->base_region.flags.read = 1;
map->base_region.flags.write = 1;
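
A note on the alignment change above: a 2MB nested page table entry can only be used when the host physical backing shares the guest's offset within the 2MB frame, which for the identity-offset base region reduces to 2MB alignment of the host allocation. A minimal sketch of that invariant, assuming Palacios' addr_t and PAGE_SIZE_2MB as seen above (can_map_2mb is a hypothetical helper, not Palacios API):

/* Hedged sketch: why V3_AllocAlignedPages(mem_pages, PAGE_SIZE_2MB) matters.
 * A 2MB NPT/EPT entry maps a whole 2MB frame, so guest and host addresses
 * must agree in their low 21 bits for the mapping to be usable. */
static int can_map_2mb(addr_t guest_pa, addr_t host_pa) {
    return ((guest_pa & (PAGE_SIZE_2MB - 1)) == (host_pa & (PAGE_SIZE_2MB - 1)));
}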
void v3_delete_mem_map(struct v3_vm_info * vm) {
- struct rb_node * node = v3_rb_first(&(vm->mem_map.shdw_regions));
- struct v3_shadow_region * reg;
+ struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
+ struct v3_mem_region * reg;
struct rb_node * tmp_node = NULL;
while (node) {
- reg = rb_entry(node, struct v3_shadow_region, tree_node);
+ reg = rb_entry(node, struct v3_mem_region, tree_node);
tmp_node = node;
node = v3_rb_next(node);
- v3_delete_shadow_region(vm, reg);
+ v3_delete_mem_region(vm, reg);
}
V3_FreePage((void *)(vm->mem_map.base_region.host_addr));
}
-struct v3_shadow_region * v3_create_mem_region(struct v3_vm_info * vm, uint16_t core_id,
+struct v3_mem_region * v3_create_mem_region(struct v3_vm_info * vm, uint16_t core_id,
addr_t guest_addr_start, addr_t guest_addr_end) {
- struct v3_shadow_region * entry = (struct v3_shadow_region *)V3_Malloc(sizeof(struct v3_shadow_region));
- memset(entry, 0, sizeof(struct v3_shadow_region));
+ struct v3_mem_region * entry = (struct v3_mem_region *)V3_Malloc(sizeof(struct v3_mem_region));
+ memset(entry, 0, sizeof(struct v3_mem_region));
entry->guest_start = guest_addr_start;
entry->guest_end = guest_addr_end;
addr_t guest_addr_end,
addr_t host_addr)
{
- struct v3_shadow_region * entry = NULL;
+ struct v3_mem_region * entry = NULL;
entry = v3_create_mem_region(vm, core_id,
guest_addr_start,
entry->flags.exec = 1;
entry->flags.alloced = 1;
- if (v3_insert_shadow_region(vm, entry) == -1) {
+ if (v3_insert_mem_region(vm, entry) == -1) {
V3_Free(entry);
return -1;
}
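
Taken together, these helpers imply the following registration flow; a hedged usage sketch (the region bounds and page count are illustrative only, the identifiers are the ones appearing in this patch):

/* Hedged sketch: registering a hooked region with the create/insert pair. */
struct v3_mem_region * reg = v3_create_mem_region(vm, V3_MEM_CORE_ANY,
                                                  0xa0000, 0xc0000);
reg->host_addr = (addr_t)V3_AllocPages((0xc0000 - 0xa0000) / PAGE_SIZE_4KB);
reg->flags.read = 1;
reg->flags.write = 1;
reg->flags.exec = 1;
reg->flags.alloced = 1;

if (v3_insert_mem_region(vm, reg) == -1) {
    V3_Free(reg);   /* overlaps an existing region on this core */
}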
static inline
-struct v3_shadow_region * __insert_shadow_region(struct v3_vm_info * vm,
- struct v3_shadow_region * region) {
- struct rb_node ** p = &(vm->mem_map.shdw_regions.rb_node);
+struct v3_mem_region * __insert_mem_region(struct v3_vm_info * vm,
+ struct v3_mem_region * region) {
+ struct rb_node ** p = &(vm->mem_map.mem_regions.rb_node);
struct rb_node * parent = NULL;
- struct v3_shadow_region * tmp_region;
+ struct v3_mem_region * tmp_region;
while (*p) {
parent = *p;
- tmp_region = rb_entry(parent, struct v3_shadow_region, tree_node);
+ tmp_region = rb_entry(parent, struct v3_mem_region, tree_node);
if (region->guest_end <= tmp_region->guest_start) {
p = &(*p)->rb_left;
} else if (region->guest_start >= tmp_region->guest_end) {
p = &(*p)->rb_right;
} else if (region->core_id == tmp_region->core_id) {
// overlapping range on the same core: report the collision
return tmp_region;
} else if (region->core_id < tmp_region->core_id) {
p = &(*p)->rb_left;
} else {
p = &(*p)->rb_right;
}
}
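
The descent above effectively orders regions by guest address range first and core_id second; expressed as a standalone comparator, the intent is roughly the following (a sketch, not part of the patch):

/* Hedged sketch: the ordering implied by __insert_mem_region's walk. */
static int region_cmp(struct v3_mem_region * a, struct v3_mem_region * b) {
    if (a->guest_end <= b->guest_start) return -1;  /* a entirely below b */
    if (a->guest_start >= b->guest_end) return 1;   /* a entirely above b */
    if (a->core_id < b->core_id) return -1;         /* same range, lower core */
    if (a->core_id > b->core_id) return 1;          /* same range, higher core */
    return 0;  /* overlapping range on the same core: insertion collision */
}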
-int v3_insert_shadow_region(struct v3_vm_info * vm,
- struct v3_shadow_region * region) {
- struct v3_shadow_region * ret;
+int v3_insert_mem_region(struct v3_vm_info * vm, struct v3_mem_region * region) {
+ struct v3_mem_region * ret;
int i = 0;
- if ((ret = __insert_shadow_region(vm, region))) {
+ if ((ret = __insert_mem_region(vm, region))) {
return -1;
}
- v3_rb_insert_color(&(region->tree_node), &(vm->mem_map.shdw_regions));
+ v3_rb_insert_color(&(region->tree_node), &(vm->mem_map.mem_regions));
-struct v3_shadow_region * v3_get_shadow_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
- struct rb_node * n = vm->mem_map.shdw_regions.rb_node;
- struct v3_shadow_region * reg = NULL;
+struct v3_mem_region * v3_get_mem_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
+ struct rb_node * n = vm->mem_map.mem_regions.rb_node;
+ struct v3_mem_region * reg = NULL;
while (n) {
- reg = rb_entry(n, struct v3_shadow_region, tree_node);
+
+ reg = rb_entry(n, struct v3_mem_region, tree_node);
if (guest_addr < reg->guest_start) {
n = n->rb_left;
} else if (guest_addr >= reg->guest_end) {
n = n->rb_right;
} else {
- if ((core_id == reg->core_id) ||
- (reg->core_id == V3_MEM_CORE_ANY)) {
- return reg;
- } else {
+ if (reg->core_id == V3_MEM_CORE_ANY) {
+ // found relevant region, it's available on all cores
+ return reg;
+ } else if (core_id == reg->core_id) {
+ // found relevant region, it's available on the indicated core
+ return reg;
+ } else if (core_id < reg->core_id) {
+ // go left, core too big
+ n = n->rb_left;
+ } else if (core_id > reg->core_id) {
+ // go right, core too small
n = n->rb_right;
+ } else {
+ PrintDebug("v3_get_mem_region: Impossible!\n");
+ return NULL;
}
}
}
// There is no registered region, so we check if it's a valid address in the base region
if (guest_addr > vm->mem_map.base_region.guest_end) {
- PrintError("Guest Address Exceeds Base Memory Size (ga=%p), (limit=%p)\n",
- (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end);
+ PrintError("Guest Address Exceeds Base Memory Size (ga=0x%p), (limit=0x%p) (core=0x%x)\n",
+ (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end, core_id);
v3_print_mem_map(vm);
return NULL;
}
-
+
return &(vm->mem_map.base_region);
}
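
For reference, a page fault path would consume v3_get_mem_region roughly as follows; a hedged sketch (gpa_to_hpa is a hypothetical helper, and the translation arithmetic mirrors the v3_get_shadow_addr code removed further down):

/* Hedged sketch: GPA -> HPA translation through the region tree. */
static addr_t gpa_to_hpa(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_pa) {
    struct v3_mem_region * reg = v3_get_mem_region(vm, core_id, guest_pa);

    if ((reg == NULL) || (reg->flags.alloced == 0)) {
        return 0;  /* invalid address, or region not backed by host memory */
    }

    return (guest_pa - reg->guest_start) + reg->host_addr;
}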
+/* Search the "hooked" memory regions for a region that ends after the given address. If the
+ * address is invalid, return NULL. Else, return the first region found or the base region if no
+ * region ends after the given address.
+ */
+struct v3_mem_region * v3_get_next_mem_region( struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
+ struct rb_node * n = vm->mem_map.mem_regions.rb_node;
+ struct v3_mem_region * reg = NULL;
+
+ // Keep going to the right in the tree while the address is greater than the current region's
+ // end address.
+ while (n) {
+ reg = rb_entry(n, struct v3_mem_region, tree_node);
+ if (guest_addr >= reg->guest_end) { // reg is [start,end)
+ n = n->rb_right;
+ } else {
+ if ((core_id == reg->core_id) || (reg->core_id == V3_MEM_CORE_ANY)) {
+ return reg;
+ } else {
+ n = n->rb_right;
+ }
+ }
+ }
+
+ // There is no registered region, so we check if it's a valid address in the base region
+
+ if (guest_addr >= vm->mem_map.base_region.guest_end) {
+ PrintError("%s: Guest Address Exceeds Base Memory Size (ga=%p), (limit=%p)\n",
+ __FUNCTION__, (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end);
+ v3_print_mem_map(vm);
+ return NULL;
+ }
+
+ return &(vm->mem_map.base_region);
+}
+
+
+/* Search the "hooked" memory regions for a region that ends after the given address. If the
+ * address is invalid, return NULL. Else, return the first region found or the base region if no
+ * region ends after the given address.
+ */
+struct v3_mem_region * v3_get_next_mem_region( struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
+ struct rb_node * n = vm->mem_map.mem_regions.rb_node;
+ struct v3_mem_region * reg = NULL;
+ // Keep going to the right in the tree while the address is greater than the current region's
+ // end address.
+ while (n) {
+ reg = rb_entry(n, struct v3_mem_region, tree_node);
+ if (guest_addr >= reg->guest_end) { // reg is [start,end)
+ n = n->rb_right;
+ } else {
+ // PAD this may be buggy since there is no guarantees that
+ // the cores are in order
+ if ((core_id == reg->core_id) || (reg->core_id == V3_MEM_CORE_ANY)) {
+ return reg;
+ } else {
+ n = n->rb_right;
+ }
+ }
+ }
+
+ // There is no registered region, so we check if it's a valid address in the base region
+
+ if (guest_addr >= vm->mem_map.base_region.guest_end) {
+ PrintError("%s: Guest Address Exceeds Base Memory Size (ga=%p), (limit=%p)\n",
+ __FUNCTION__, (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end);
+ v3_print_mem_map(vm);
+ return NULL;
+ }
+
+ return &(vm->mem_map.base_region);
+}
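
One intended consumer of v3_get_next_mem_region is large-page construction: before mapping a big contiguous range, the caller needs to know where the next hooked region begins so the mapping can stop short of it. A hedged sketch (clamp_mapping_end is a hypothetical name):

/* Hedged sketch: clamp a candidate mapping so it ends before the next
 * hooked region that applies to this core. */
static addr_t clamp_mapping_end(struct v3_vm_info * vm, uint16_t core_id,
                                addr_t start, addr_t end) {
    struct v3_mem_region * next = v3_get_next_mem_region(vm, core_id, start);

    if ((next != NULL) && (next->guest_start > start) && (next->guest_start < end)) {
        return next->guest_start;
    }

    return end;
}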
+
+
+
-void v3_delete_shadow_region(struct v3_vm_info * vm, struct v3_shadow_region * reg) {
+void v3_delete_mem_region(struct v3_vm_info * vm, struct v3_mem_region * reg) {
int i = 0;
if (reg == NULL) {
return;
}
}
- v3_rb_erase(&(reg->tree_node), &(vm->mem_map.shdw_regions));
+ v3_rb_erase(&(reg->tree_node), &(vm->mem_map.mem_regions));
V3_Free(reg);
-
-addr_t v3_get_shadow_addr(struct v3_shadow_region * reg, uint16_t core_id, addr_t guest_addr) {
- if (reg && (reg->flags.alloced == 1)) {
- return (guest_addr - reg->guest_start) + reg->host_addr;
- } else {
- // PrintError("MEM Region Invalid\n");
- return 0;
- }
-
-}
-
-
-
void v3_print_mem_map(struct v3_vm_info * vm) {
- struct rb_node * node = v3_rb_first(&(vm->mem_map.shdw_regions));
- struct v3_shadow_region * reg = &(vm->mem_map.base_region);
+ struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
+ struct v3_mem_region * reg = &(vm->mem_map.base_region);
int i = 0;
- V3_Print("Memory Layout:\n");
+ V3_Print("Memory Layout (all cores):\n");
- V3_Print("Base Region: 0x%p - 0x%p -> 0x%p\n",
+ V3_Print("Base Region (all cores): 0x%p - 0x%p -> 0x%p\n",
(void *)(reg->guest_start),
(void *)(reg->guest_end - 1),
(void *)(reg->host_addr));
}
do {
- reg = rb_entry(node, struct v3_shadow_region, tree_node);
+ reg = rb_entry(node, struct v3_mem_region, tree_node);
V3_Print("%d: 0x%p - 0x%p -> 0x%p\n", i,
(void *)(reg->guest_start),
(void *)(reg->guest_end - 1),
(void *)(reg->host_addr));
- V3_Print("\t(flags=%x) (unhandled = 0x%p)\n",
- reg->flags.value,
- reg->unhandled);
+ V3_Print("\t(flags=0x%x) (core=0x%x) (unhandled = 0x%p)\n",
+ reg->flags.value,
+ reg->core_id,
+ reg->unhandled);
i++;
} while ((node = v3_rb_next(node)));
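
Given the format strings above, the printed map for a VM with one hooked region would look roughly like the following (addresses, flag bits, and the core value are illustrative only):

Memory Layout (all cores):
Base Region (all cores): 0x0000000000000000 - 0x000000000fffffff -> 0x0000000120000000
0: 0x00000000000a0000 - 0x00000000000bffff -> 0x0000000130000000
    (flags=0x7) (core=0x0) (unhandled = 0x0000000000000000)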