struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
struct v3_mem_region * reg;
struct rb_node * tmp_node = NULL;
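+ // vm->mem_size is in bytes; shifting right by 12 gives the number of 4KB pages backing the base region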
+ addr_t mem_pages = vm->mem_size >> 12;
while (node) {
reg = rb_entry(node, struct v3_mem_region, tree_node);
node = v3_rb_next(node); // advance first: deleting the region frees its enclosing tree node
v3_delete_mem_region(vm, reg);
}
- V3_FreePage((void *)(vm->mem_map.base_region.host_addr));
+ V3_FreePages((void *)(vm->mem_map.base_region.host_addr), mem_pages);
}

void v3_delete_mem_region(struct v3_vm_info * vm, struct v3_mem_region * reg) {
if (reg == NULL) {
return;
}
+
+ v3_rb_erase(&(reg->tree_node), &(vm->mem_map.mem_regions));
+
+ V3_Free(reg);
+
+
+ // If the guest isn't running then there shouldn't be anything to invalidate.
+ // Page tables should __always__ be created on demand during execution
+ // NOTE: This is a sanity check, and can be removed if that assumption changes
+ if (vm->run_state != VM_RUNNING) {
+ return;
+ }
+
for (i = 0; i < vm->num_cores; i++) {
struct guest_info * info = &(vm->cores[i]);
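// flush this core's virtual page tables for the deleted region (see the sketch below)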
}
}
- v3_rb_erase(&(reg->tree_node), &(vm->mem_map.mem_regions));
-
- V3_Free(reg);
-
// flush virtual page tables
// 3 cases shadow, shadow passthrough, and nested
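
The per-core flush body is not shown in this hunk. A minimal sketch of the three cases, written as a hypothetical helper (flush_core_for_deleted_region) so the deleted region's bounds can be captured before the struct is freed, and assuming the invalidation calls v3_invalidate_passthrough_addr(), v3_invalidate_shadow_pts(), v3_invalidate_nested_addr(), and v3_get_vm_mem_mode() from the surrounding paging code:

/* Illustrative sketch: flush one core's virtual page tables for a deleted
 * guest-physical range [guest_start, guest_end). Helper name is hypothetical. */
static void flush_core_for_deleted_region(struct guest_info * info,
                                          addr_t guest_start, addr_t guest_end) {
    addr_t cur_addr;

    if (info->shdw_pg_mode == SHADOW_PAGING) {
        if (v3_get_vm_mem_mode(info) == PHYSICAL_MEM) {
            // shadow passthrough: invalidate each 4KB page of the deleted range
            for (cur_addr = guest_start; cur_addr < guest_end; cur_addr += PAGE_SIZE_4KB) {
                v3_invalidate_passthrough_addr(info, cur_addr);
            }
        } else {
            // shadow paging: drop the shadow page tables; they are rebuilt on demand
            v3_invalidate_shadow_pts(info);
        }
    } else if (info->shdw_pg_mode == NESTED_PAGING) {
        // nested paging: invalidate the nested mappings covering the deleted range
        for (cur_addr = guest_start; cur_addr < guest_end; cur_addr += PAGE_SIZE_4KB) {
            v3_invalidate_nested_addr(info, cur_addr);
        }
    }
}

Each core is handled independently, so such a helper would be called from the per-core loop above with the bounds saved from reg before V3_Free().
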
addr_t pg_end = 0;
uint32_t page_size = PAGE_SIZE_4KB;
struct v3_mem_region * reg = NULL;
-
-
- PrintError("Getting max page size for addr %p\n", (void *)page_addr);
switch (mode) {
case PROTECTED:
return -1;
}
-
- PrintError("Returning PAGE size = %d\n", page_size);
return page_size;
}
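
The per-mode case bodies are not shown above; the usual pattern is to start from PAGE_SIZE_4KB and report a larger size only when the whole large-page frame is backed by a single, suitably aligned memory region. A sketch of what the PROTECTED case might look like, assuming a core->use_large_pages flag, a core->vcpu_id field, the v3_get_mem_region() lookup, and the PAGE_ADDR_4MB()/PAGE_SIZE_4MB macros:

/* Illustrative sketch (hypothetical helper): decide whether a 4MB page can back
 * page_addr in protected mode, falling back to 4KB otherwise. */
static uint32_t max_page_size_protected(struct guest_info * core, addr_t page_addr) {
    uint32_t page_size = PAGE_SIZE_4KB;

    if (core->use_large_pages) {
        addr_t pg_start = PAGE_ADDR_4MB(page_addr);   // base of the 4MB frame holding page_addr
        addr_t pg_end = pg_start + PAGE_SIZE_4MB;
        struct v3_mem_region * reg = v3_get_mem_region(core->vm_info, core->vcpu_id, pg_start);

        // use a 4MB page only if the whole frame lies inside one region whose
        // host backing is itself 4MB aligned
        if ((reg != NULL) && (reg->guest_end >= pg_end) &&
            ((reg->host_addr % PAGE_SIZE_4MB) == 0)) {
            page_size = PAGE_SIZE_4MB;
        }
    }

    return page_size;
}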