return -1;
}
-
-static inline uint32_t get_alignment(char * align_str) {
- if (align_str != NULL) {
- if (strncasecmp(align_str, "2MB", strlen("2MB")) == 0) {
- return PAGE_SIZE_2MB;
- } else if (strncasecmp(align_str, "4MB", strlen("4MB")) == 0) {
- return PAGE_SIZE_4MB;
- }
- }
-
- // default is 4KB alignment
- return PAGE_SIZE_4KB;
-}
-
int v3_init_mem_map(struct v3_vm_info * vm) {
struct v3_mem_map * map = &(vm->mem_map);
- v3_cfg_tree_t * pg_cfg = v3_cfg_subtree(vm->cfg_data->cfg, "memory");
- uint32_t alignment = get_alignment(v3_cfg_val(pg_cfg, "alignment"));
addr_t mem_pages = vm->mem_size >> 12;
memset(&(map->base_region), 0, sizeof(struct v3_mem_region));
map->mem_regions.rb_node = NULL;
-
// There is an underlying region that contains all of the guest memory
// PrintDebug("Mapping %d pages of memory (%u bytes)\n", (int)mem_pages, (uint_t)info->mem_size);
+ // Large-page (e.g. 2MB) alignment of the base region, taken from vm->mem_align, is needed for large-page hardware nested paging
map->base_region.guest_start = 0;
map->base_region.guest_end = mem_pages * PAGE_SIZE_4KB;
-#ifdef ALIGNED_PG_ALLOC
- map->base_region.host_addr = (addr_t)V3_AllocAlignedPages(mem_pages, alignment);
+#ifdef CONFIG_ALIGNED_PG_ALLOC
+ map->base_region.host_addr = (addr_t)V3_AllocAlignedPages(mem_pages, vm->mem_align);
#else
- if (alignment != PAGE_SIZE_4KB) {
- PrintError("Aligned page allocations are not supported in this host (requested alignment=%d)\n", alignment);
- PrintError("Ignoring alignment request\n");
- }
map->base_region.host_addr = (addr_t)V3_AllocPages(mem_pages);
#endif
struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
struct v3_mem_region * reg;
struct rb_node * tmp_node = NULL;
+ addr_t mem_pages = vm->mem_size >> 12;
while (node) {
reg = rb_entry(node, struct v3_mem_region, tree_node);
v3_delete_mem_region(vm, reg);
}
- V3_FreePage((void *)(vm->mem_map.base_region.host_addr));
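+ // The base region was allocated as a single block of mem_pages pages, so the
+ // matching page count must be passed back to the host when freeing it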
+ V3_FreePages((void *)(vm->mem_map.base_region.host_addr), mem_pages);
}
entry->host_addr = host_addr;
-
entry->flags.read = 1;
entry->flags.write = 1;
entry->flags.exec = 1;
static inline
struct v3_mem_region * __insert_mem_region(struct v3_vm_info * vm,
- struct v3_mem_region * region) {
+ struct v3_mem_region * region) {
struct rb_node ** p = &(vm->mem_map.mem_regions.rb_node);
struct rb_node * parent = NULL;
struct v3_mem_region * tmp_region;
-/* Given an address, find the successor region. If the address is within a region, return that
- * region. Input is an address, because the address may not have a region associated with it.
- *
- * Returns a region following or touching the given address. If address is invalid, NULL is
- * returned, else the base region is returned if no region exists at or after the given address.
+/* This returns the memory region for a given guest address.
+ * If the address falls inside a sub region, that region is returned.
+ * Otherwise the next sub region (in guest address order) is returned,
+ * or NULL if no such region exists. Only regions visible to the given
+ * core (a matching core_id or V3_MEM_CORE_ANY) are considered.
*/
-struct v3_mem_region * v3_get_next_mem_region( struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
- struct rb_node * current_n = vm->mem_map.mem_regions.rb_node;
- struct rb_node * successor_n = NULL; /* left-most node greater than guest_addr */
- struct v3_mem_region * current_r = NULL;
-
- /* current_n tries to find the region containing guest_addr, going right when smaller and left when
- * greater. Each time current_n becomes greater than guest_addr, update successor <- current_n.
- * current_n becomes successively closer to guest_addr than the previous time it was greater
- * than guest_addr.
- */
-
- /* | is address, ---- is region, + is intersection */
- while (current_n) {
- current_r = rb_entry(current_n, struct v3_mem_region, tree_node);
- if (current_r->guest_start > guest_addr) { /* | ---- */
- successor_n = current_n;
- current_n = current_n->rb_left;
+static struct v3_mem_region * get_next_mem_region( struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
+ struct rb_node * n = vm->mem_map.mem_regions.rb_node;
+ struct v3_mem_region * reg = NULL;
+ struct v3_mem_region * parent = NULL;
+
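+ // Walk the tree keyed primarily by guest address and secondarily by core_id,
+ // remembering the last region visible to this core for the successor search below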
+ while (n) {
+
+ reg = rb_entry(n, struct v3_mem_region, tree_node);
+
+ if (guest_addr < reg->guest_start) {
+ n = n->rb_left;
+ } else if (guest_addr >= reg->guest_end) {
+ n = n->rb_right;
} else {
- if (current_r->guest_end > guest_addr) {
- return current_r; /* +--- or --+- */
+ if (reg->core_id == V3_MEM_CORE_ANY) {
+ // found relevant region, it's available on all cores
+ return reg;
+ } else if (core_id == reg->core_id) {
+ // found relevant region, it's available on the indicated core
+ return reg;
+ } else if (core_id < reg->core_id) {
+ // go left, this region's core_id is too big
+ n = n->rb_left;
+ } else if (core_id > reg->core_id) {
+ // go right, this region's core_id is too small
+ n = n->rb_right;
+ } else {
+ // unreachable: the three core_id comparisons above are exhaustive
+ PrintError("get_next_mem_region: Impossible!\n");
+ return NULL;
}
- current_n = current_n->rb_right; /* ---- | */
+ }
+
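+ // Track the most recently visited region that this core can see; if the walk
+ // ends without a direct hit, it is the starting point for finding the successor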
+ if ((reg->core_id == core_id) || (reg->core_id == V3_MEM_CORE_ANY)) {
+ parent = reg;
}
}
- /* Address does not have its own region. Check if it's a valid address in the base region */
- if (guest_addr >= vm->mem_map.base_region.guest_end) {
- PrintError("%s: Guest Address Exceeds Base Memory Size (ga=%p), (limit=%p)\n",
- __FUNCTION__, (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end);
- v3_print_mem_map(vm);
- return NULL;
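+ // The walk found no region containing guest_addr; locate the successor starting from "parent"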
+ if (parent == NULL) {
+ // No region visible to this core was encountered during the walk
+ return NULL;
+ }
+
+ if (parent->guest_start > guest_addr) {
+ return parent;
+ } else if (parent->guest_end < guest_addr) {
+ struct rb_node * node = &(parent->tree_node);
+
+ while ((node = v3_rb_next(node)) != NULL) {
+ struct v3_mem_region * next_reg = rb_entry(node, struct v3_mem_region, tree_node);
+
+ if ((next_reg->core_id == V3_MEM_CORE_ANY) ||
+ (next_reg->core_id == core_id)) {
+
+ // This check is not strictly necessary, but it makes it clearer
+ if (next_reg->guest_start > guest_addr) {
+ return next_reg;
+ }
+ }
+ }
}
- return &(vm->mem_map.base_region);
+ return NULL;
}
+/* Given a range of guest physical memory, check whether it lies entirely within a
+ * single region. If it does, that region is returned; it can be either the base
+ * region or a sub region. If the range spans multiple regions, NULL is returned.
+ */
+static struct v3_mem_region * get_overlapping_region(struct v3_vm_info * vm, uint16_t core_id,
+ addr_t start_gpa, addr_t end_gpa) {
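+ // Resolve the start of the range to the region containing it (a sub region or the base region)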
+ struct v3_mem_region * start_region = v3_get_mem_region(vm, core_id, start_gpa);
+
+ if (start_region == NULL) {
+ PrintError("Invalid memory region\n");
+ return NULL;
+ }
+
+
+ if (start_region->guest_end < end_gpa) {
+ // The range extends past the end of the region containing start_gpa,
+ // so it necessarily spans more than one region
+ return NULL;
+ } else if (start_region->flags.base == 0) {
+ // The range is fully contained within this sub region
+ return start_region;
+ } else {
+ // Base region, now we have to scan forward for the next sub region
+ struct v3_mem_region * next_reg = get_next_mem_region(vm, core_id, start_gpa);
+
+ if (next_reg == NULL) {
+ // no sub regions after start_addr, base region is ok
+ return start_region;
+ } else if (next_reg->guest_start >= end_gpa) {
+ // Next sub region begins outside range
+ return start_region;
+ } else {
+ return NULL;
+ }
+ }
+
+
+ // Should never get here
+ return NULL;
+}
+
+
void v3_delete_mem_region(struct v3_vm_info * vm, struct v3_mem_region * reg) {
int i = 0;
return;
}
+
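+ // Remove the region from the lookup tree before invalidating any translations that reference it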
+ v3_rb_erase(&(reg->tree_node), &(vm->mem_map.mem_regions));
+
+
+
+ // If the guest isn't running then there shouldn't be anything to invalidate.
+ // Page tables should __always__ be created on demand during execution
+ // NOTE: This is a sanity check, and can be removed if that assumption changes
+ if (vm->run_state != VM_RUNNING) {
+ V3_Free(reg);
+ return;
+ }
+
for (i = 0; i < vm->num_cores; i++) {
struct guest_info * info = &(vm->cores[i]);
}
}
- v3_rb_erase(&(reg->tree_node), &(vm->mem_map.mem_regions));
-
V3_Free(reg);
// flush virtual page tables
}
+// Determine the largest page size that can back a given guest address in the given
+// paging mode. A large page is only usable if the entire page-sized range lies within
+// a single region whose host backing is aligned to that page size.
+uint32_t v3_get_max_page_size(struct guest_info * core, addr_t page_addr, v3_cpu_mode_t mode) {
+ addr_t pg_start = 0;
+ addr_t pg_end = 0;
+ uint32_t page_size = PAGE_SIZE_4KB;
+ struct v3_mem_region * reg = NULL;
+
+ switch (mode) {
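+ // Large page sizes depend on the paging mode: 4MB (PSE) in non-PAE protected mode,
+ // 2MB in PAE mode, and 2MB or 1GB in long mode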
+ case PROTECTED:
+ if (core->use_large_pages == 1) {
+ pg_start = PAGE_ADDR_4MB(page_addr);
+ pg_end = (pg_start + PAGE_SIZE_4MB);
+
+ reg = get_overlapping_region(core->vm_info, core->cpu_id, pg_start, pg_end);
+
+ if ((reg) && ((reg->host_addr % PAGE_SIZE_4MB) == 0)) {
+ page_size = PAGE_SIZE_4MB;
+ }
+ }
+ break;
+ case PROTECTED_PAE:
+ if (core->use_large_pages == 1) {
+ pg_start = PAGE_ADDR_2MB(page_addr);
+ pg_end = (pg_start + PAGE_SIZE_2MB);
+
+ reg = get_overlapping_region(core->vm_info, core->cpu_id, pg_start, pg_end);
+
+ if ((reg) && ((reg->host_addr % PAGE_SIZE_2MB) == 0)) {
+ page_size = PAGE_SIZE_2MB;
+ }
+ }
+ break;
+ case LONG:
+ case LONG_32_COMPAT:
+ case LONG_16_COMPAT:
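+ // In long mode, try a 1GB page first and fall back to 2MB if a 1GB page cannot be used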
+ if (core->use_giant_pages == 1) {
+ pg_start = PAGE_ADDR_1GB(page_addr);
+ pg_end = (pg_start + PAGE_SIZE_1GB);
+
+ reg = get_overlapping_region(core->vm_info, core->cpu_id, pg_start, pg_end);
+
+ if ((reg) && ((reg->host_addr % PAGE_SIZE_1GB) == 0)) {
+ page_size = PAGE_SIZE_1GB;
+ break;
+ }
+ }
+
+ if (core->use_large_pages == 1) {
+ pg_start = PAGE_ADDR_2MB(page_addr);
+ pg_end = (pg_start + PAGE_SIZE_2MB);
+
+ reg = get_overlapping_region(core->vm_info, core->cpu_id, pg_start, pg_end);
+
+ if ((reg) && ((reg->host_addr % PAGE_SIZE_2MB) == 0)) {
+ page_size = PAGE_SIZE_2MB;
+ }
+ }
+ break;
+ default:
+ PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
+ return -1;
+ }
+
+ return page_size;
+}
+
void v3_print_mem_map(struct v3_vm_info * vm) {