static int unhandled_err(struct guest_info * core, addr_t guest_va, addr_t guest_pa,
struct v3_mem_region * reg, pf_error_t access_info) {
- PrintError("Unhandled memory access error\n");
+ PrintError("Unhandled memory access error (gpa=%p, gva=%p, error_code=%d)\n",
+ (void *)guest_pa, (void *)guest_va, *(uint32_t *)&access_info);
v3_print_mem_map(core->vm_info);
map->base_region.guest_start = 0;
map->base_region.guest_end = mem_pages * PAGE_SIZE_4KB;
-#ifdef CONFIG_ALIGNED_PG_ALLOC
+#ifdef V3_CONFIG_ALIGNED_PG_ALLOC
map->base_region.host_addr = (addr_t)V3_AllocAlignedPages(mem_pages, vm->mem_align);
#else
map->base_region.host_addr = (addr_t)V3_AllocPages(mem_pages);
#endif
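+
+ // Hedged addition, not part of the original patch: a sketch of an allocation
+ // guard, assuming the allocator returns NULL on failure and that this
+ // enclosing init routine returns int (-1 on error).
+ if ((void *)map->base_region.host_addr == NULL) {
+ PrintError("Could not allocate guest memory\n");
+ return -1;
+ }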
+ // Clear the memory...
+ memset(V3_VAddr((void *)map->base_region.host_addr), 0, mem_pages * PAGE_SIZE_4KB);
+
map->base_region.flags.read = 1;
map->base_region.flags.write = 1;
map->base_region.flags.exec = 1;
struct v3_mem_region * v3_create_mem_region(struct v3_vm_info * vm, uint16_t core_id,
addr_t guest_addr_start, addr_t guest_addr_end) {
-
- struct v3_mem_region * entry = (struct v3_mem_region *)V3_Malloc(sizeof(struct v3_mem_region));
+ struct v3_mem_region * entry = NULL;
+
+ if (guest_addr_start >= guest_addr_end) {
+ PrintError("Region start is after region end\n");
+ return NULL;
+ }
+
+ entry = (struct v3_mem_region *)V3_Malloc(sizeof(struct v3_mem_region));
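+
+ // Hedged addition, not part of the original patch: assumes V3_Malloc()
+ // returns NULL on failure; guard it before the memset below dereferences entry.
+ if (entry == NULL) {
+ PrintError("Could not allocate memory region\n");
+ return NULL;
+ }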
memset(entry, 0, sizeof(struct v3_mem_region));
entry->guest_start = guest_addr_start;
struct v3_mem_region * reg = NULL;
struct v3_mem_region * parent = NULL;
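+
+ // Defensive early-out: an empty region tree has nothing to search
+ // (the while loop below would also terminate immediately).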
+ if (n == NULL) {
+ return NULL;
+ }
+
while (n) {
reg = rb_entry(n, struct v3_mem_region, tree_node);
return;
}
+
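+ // Unhook the region from the map first, so nothing can look it up while
+ // page tables are invalidated below.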
+ v3_rb_erase(&(reg->tree_node), &(vm->mem_map.mem_regions));
+
+ // If the guest isn't running then there shouldn't be anything to invalidate.
+ // Page tables should __always__ be created on demand during execution
+ // NOTE: This is a sanity check, and can be removed if that assumption changes
+ if (vm->run_state != VM_RUNNING) {
+ V3_Free(reg);
+ return;
+ }
+
for (i = 0; i < vm->num_cores; i++) {
struct guest_info * info = &(vm->cores[i]);
}
}
- v3_rb_erase(&(reg->tree_node), &(vm->mem_map.mem_regions));
-
V3_Free(reg);
// flush virtual page tables
pg_start = PAGE_ADDR_4MB(page_addr);
pg_end = (pg_start + PAGE_SIZE_4MB);
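+ // vcpu_id is the index of this virtual core; per-core memory regions are
+ // keyed by it.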
- reg = get_overlapping_region(core->vm_info, core->cpu_id, pg_start, pg_end);
+ reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);
if ((reg) && ((reg->host_addr % PAGE_SIZE_4MB) == 0)) {
page_size = PAGE_SIZE_4MB;
pg_start = PAGE_ADDR_2MB(page_addr);
pg_end = (pg_start + PAGE_SIZE_2MB);
- reg = get_overlapping_region(core->vm_info, core->cpu_id, pg_start, pg_end);
+ reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);
if ((reg) && ((reg->host_addr % PAGE_SIZE_2MB) == 0)) {
page_size = PAGE_SIZE_2MB;
pg_start = PAGE_ADDR_1GB(page_addr);
pg_end = (pg_start + PAGE_SIZE_1GB);
- reg = get_overlapping_region(core->vm_info, core->cpu_id, pg_start, pg_end);
+ reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);
if ((reg) && ((reg->host_addr % PAGE_SIZE_1GB) == 0)) {
page_size = PAGE_SIZE_1GB;
pg_start = PAGE_ADDR_2MB(page_addr);
pg_end = (pg_start + PAGE_SIZE_2MB);
- reg = get_overlapping_region(core->vm_info, core->cpu_id, pg_start, pg_end);
+ reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);
if ((reg) && ((reg->host_addr % PAGE_SIZE_2MB) == 0)) {
page_size = PAGE_SIZE_2MB;