#include <palacios/vmm_mem.h>
#include <palacios/vmm.h>
#include <palacios/vmm_util.h>
-#include <palacios/vmm_decoder.h>
+#include <palacios/vmm_emulator.h>
+#include <palacios/vm_guest.h>
+#include <palacios/vmm_shadow_paging.h>
+#include <palacios/vmm_direct_paging.h>
-void init_shadow_region(struct shadow_region * entry,
- addr_t guest_addr_start,
- addr_t guest_addr_end,
- guest_region_type_t guest_region_type,
- host_region_type_t host_region_type)
-{
- entry->guest_type = guest_region_type;
- entry->guest_start = guest_addr_start;
- entry->guest_end = guest_addr_end;
- entry->host_type = host_region_type;
- entry->host_addr = 0;
- entry->next=entry->prev = NULL;
-}
-int add_shadow_region_passthrough( struct guest_info * guest_info,
- addr_t guest_addr_start,
- addr_t guest_addr_end,
- addr_t host_addr)
-{
- struct shadow_region * entry = (struct shadow_region *)V3_Malloc(sizeof(struct shadow_region));
- init_shadow_region(entry, guest_addr_start, guest_addr_end,
- GUEST_REGION_PHYSICAL_MEMORY, HOST_REGION_PHYSICAL_MEMORY);
- entry->host_addr = host_addr;
+static int mem_offset_hypercall(struct guest_info * info, uint_t hcall_id, void * private_data) {
+ PrintDebug("V3Vee: Memory offset hypercall (offset=%p)\n",
+ (void *)(info->vm_info->mem_map.base_region.host_addr));
- return add_shadow_region(&(guest_info->mem_map), entry);
-}
+ info->vm_regs.rbx = info->vm_info->mem_map.base_region.host_addr;
-int hook_guest_mem(struct guest_info * info, addr_t guest_addr_start, addr_t guest_addr_end,
- int (*read)(addr_t guest_addr, void * dst, uint_t length, void * priv_data),
- int (*write)(addr_t guest_addr, void * src, uint_t length, void * priv_data),
- void * priv_data) {
-
- struct shadow_region * entry = (struct shadow_region *)V3_Malloc(sizeof(struct shadow_region));
- struct vmm_mem_hook * hook = (struct vmm_mem_hook *)V3_Malloc(sizeof(struct vmm_mem_hook));
-
- memset(hook, 0, sizeof(struct vmm_mem_hook));
+ return 0;
+}
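+
+/* Guest-side usage sketch. The ABI here is an assumption based on the
+ * handler above and Palacios hypercall conventions (id in RAX, result
+ * read back from RBX):
+ *
+ *   mov $MEM_OFFSET_HCALL, %rax
+ *   vmmcall                      // vmcall on Intel VMX
+ *   // %rbx now holds base_region.host_addr
+ */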
- hook->read = read;
- hook->write = write;
- hook->region = entry;
- hook->priv_data = priv_data;
+static int unhandled_err(struct guest_info * core, addr_t guest_va, addr_t guest_pa,
+ struct v3_mem_region * reg, pf_error_t access_info) {
+ PrintError("Unhandled memory access error\n");
- init_shadow_region(entry, guest_addr_start, guest_addr_end,
- GUEST_REGION_PHYSICAL_MEMORY, HOST_REGION_HOOK);
+ v3_print_mem_map(core->vm_info);
- entry->host_addr = (addr_t)hook;
+ v3_print_guest_state(core);
- return add_shadow_region(&(info->mem_map), entry);
+ return -1;
}
+int v3_init_mem_map(struct v3_vm_info * vm) {
+ struct v3_mem_map * map = &(vm->mem_map);
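+ // mem_size is in bytes; shifting right by 12 converts it to a count of 4KB pages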
+ addr_t mem_pages = vm->mem_size >> 12;
-struct vmm_mem_hook * get_mem_hook(struct guest_info * info, addr_t guest_addr) {
- struct shadow_region * region = get_shadow_region_by_addr(&(info->mem_map), guest_addr);
-
- if (region == NULL) {
- PrintDebug("Could not find shadow region for addr: %p\n", (void *)guest_addr);
- return NULL;
- }
+ memset(&(map->base_region), 0, sizeof(struct v3_mem_region));
- return (struct vmm_mem_hook *)(region->host_addr);
-}
+ map->mem_regions.rb_node = NULL;
+ // There is an underlying region that contains all of the guest memory
+ // PrintDebug("Mapping %d pages of memory (%u bytes)\n", (int)mem_pages, (uint_t)info->mem_size);
-/* mem_addr is the guest physical memory address */
-static int mem_hook_dispatch(struct guest_info * info,
- addr_t fault_gva, addr_t fault_gpa,
- pf_error_t access_info, struct vmm_mem_hook * hook)
-{
-
- // emulate and then dispatch
- // or dispatch and emulate
+ map->base_region.guest_start = 0;
+ map->base_region.guest_end = mem_pages * PAGE_SIZE_4KB;
+
+ // 2MB alignment of the backing allocation is needed for 2MB hardware nested paging
+#ifdef CONFIG_ALIGNED_PG_ALLOC
+ map->base_region.host_addr = (addr_t)V3_AllocAlignedPages(mem_pages, vm->mem_align);
+#else
+ map->base_region.host_addr = (addr_t)V3_AllocPages(mem_pages);
+#endif
- if (access_info.write == 1) {
- if (v3_emulate_memory_write(info, fault_gva, hook->write, fault_gpa, hook->priv_data) == -1) {
- PrintError("Memory write emulation failed\n");
- return -1;
- }
+ map->base_region.flags.read = 1;
+ map->base_region.flags.write = 1;
+ map->base_region.flags.exec = 1;
+ map->base_region.flags.base = 1;
+ map->base_region.flags.alloced = 1;
- } else {
- if (v3_emulate_memory_read(info, fault_gva, hook->read, fault_gpa, hook->priv_data) == -1) {
- PrintError("Memory read emulation failed\n");
- return -1;
- }
- }
-
- return 0;
-}
+ map->base_region.unhandled = unhandled_err;
+ if ((void *)map->base_region.host_addr == NULL) {
+ PrintError("Could not allocate Guest memory\n");
+ return -1;
+ }
+
+ //memset(V3_VAddr((void *)map->base_region.host_addr), 0xffffffff, map->base_region.guest_end);
-int handle_special_page_fault(struct guest_info * info,
- addr_t fault_gva, addr_t fault_gpa,
- pf_error_t access_info)
-{
- struct shadow_region * reg = get_shadow_region_by_addr(&(info->mem_map), fault_gpa);
-
- switch (reg->host_type) {
- case HOST_REGION_HOOK:
- return mem_hook_dispatch(info, fault_gva, fault_gpa, access_info, (struct vmm_mem_hook *)(reg->host_addr));
- default:
- return -1;
- }
-
- return 0;
+ v3_register_hypercall(vm, MEM_OFFSET_HCALL, mem_offset_hypercall, NULL);
+ return 0;
}
+void v3_delete_mem_map(struct v3_vm_info * vm) {
+ struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
+ struct v3_mem_region * reg;
+ addr_t mem_pages = vm->mem_size >> 12;
+
+ while (node) {
+ reg = rb_entry(node, struct v3_mem_region, tree_node);
+ node = v3_rb_next(node);
-void init_shadow_map(struct guest_info * info) {
- struct shadow_map * map = &(info->mem_map);
-
- map->num_regions = 0;
+ v3_delete_mem_region(vm, reg);
+ }
- map->head = NULL;
+ V3_FreePages((void *)(vm->mem_map.base_region.host_addr), mem_pages);
}
-void free_shadow_map(struct shadow_map * map) {
- struct shadow_region * cursor = map->head;
- struct shadow_region * tmp = NULL;
+struct v3_mem_region * v3_create_mem_region(struct v3_vm_info * vm, uint16_t core_id,
+ addr_t guest_addr_start, addr_t guest_addr_end) {
+
+ struct v3_mem_region * entry = (struct v3_mem_region *)V3_Malloc(sizeof(struct v3_mem_region));
+ memset(entry, 0, sizeof(struct v3_mem_region));
- while(cursor) {
- tmp = cursor;
- cursor = cursor->next;
- V3_Free(tmp);
- }
+ entry->guest_start = guest_addr_start;
+ entry->guest_end = guest_addr_end;
+ entry->core_id = core_id;
+ entry->unhandled = unhandled_err;
- V3_Free(map);
+ return entry;
}
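+
+/* Note: v3_create_mem_region() fills in only the guest range, the core
+ * binding, and the default unhandled callback. The caller is expected to
+ * set host_addr and flags and then call v3_insert_mem_region(), as
+ * v3_add_shadow_mem() does below.
+ */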
-int add_shadow_region(struct shadow_map * map,
- struct shadow_region * region)
+int v3_add_shadow_mem( struct v3_vm_info * vm, uint16_t core_id,
+ addr_t guest_addr_start,
+ addr_t guest_addr_end,
+ addr_t host_addr)
{
- struct shadow_region * cursor = map->head;
+ struct v3_mem_region * entry = NULL;
- PrintDebug("Adding Shadow Region: (0x%p-0x%p)\n",
- (void *)region->guest_start, (void *)region->guest_end);
+ entry = v3_create_mem_region(vm, core_id,
+ guest_addr_start,
+ guest_addr_end);
- if ((!cursor) || (cursor->guest_start >= region->guest_end)) {
- region->prev = NULL;
- region->next = cursor;
- map->num_regions++;
- map->head = region;
- return 0;
- }
+ entry->host_addr = host_addr;
- while (cursor) {
- // Check if it overlaps with the current cursor
- if ((cursor->guest_end > region->guest_start) && (cursor->guest_start < region->guest_start)) {
- // overlaps not allowed
- return -1;
- }
-
- if (!(cursor->next)) {
- // add to the end of the list
- cursor->next = region;
- region->prev = cursor;
- region->next = NULL;
- map->num_regions++;
- return 0;
- } else if (cursor->next->guest_start >= region->guest_end) {
- // add here
- region->next = cursor->next;
- region->prev = cursor;
-
- cursor->next->prev = region;
- cursor->next = region;
-
- map->num_regions++;
-
- return 0;
- } else if (cursor->next->guest_end <= region->guest_start) {
- cursor = cursor->next;
- } else {
- // This cannot happen!
- // we should panic here
- return -1;
- }
- }
-
- // This cannot happen
- // We should panic here
- return -1;
-}
+ entry->flags.read = 1;
+ entry->flags.write = 1;
+ entry->flags.exec = 1;
+ entry->flags.alloced = 1;
+ if (v3_insert_mem_region(vm, entry) == -1) {
+ V3_Free(entry);
+ return -1;
+ }
-int delete_shadow_region(struct shadow_map * map,
- addr_t guest_start,
- addr_t guest_end) {
- return -1;
+ return 0;
}
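+
+/* Usage sketch (hypothetical addresses): back the 128KB VGA window
+ * 0xA0000-0xC0000 (32 pages of 4KB) with freshly allocated host pages,
+ * visible on every core.
+ *
+ *   addr_t host_pa = (addr_t)V3_AllocPages(32);
+ *
+ *   if (v3_add_shadow_mem(vm, V3_MEM_CORE_ANY, 0xA0000, 0xC0000, host_pa) == -1) {
+ *       PrintError("Could not add VGA region\n");
+ *   }
+ */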
-struct shadow_region *get_shadow_region_by_index(struct shadow_map * map,
- uint_t index) {
- struct shadow_region * reg = map->head;
- uint_t i = 0;
-
- while (reg) {
- if (i == index) {
- return reg;
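+/* Insert a region into the rb-tree, ordered first by guest address range
+ * and then by core_id. Returns NULL on success; on a conflict it returns
+ * the region already in the tree, which the caller treats as a failure.
+ */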
+static inline
+struct v3_mem_region * __insert_mem_region(struct v3_vm_info * vm,
+ struct v3_mem_region * region) {
+ struct rb_node ** p = &(vm->mem_map.mem_regions.rb_node);
+ struct rb_node * parent = NULL;
+ struct v3_mem_region * tmp_region;
+
+ while (*p) {
+ parent = *p;
+ tmp_region = rb_entry(parent, struct v3_mem_region, tree_node);
+
+ if (region->guest_end <= tmp_region->guest_start) {
+ p = &(*p)->rb_left;
+ } else if (region->guest_start >= tmp_region->guest_end) {
+ p = &(*p)->rb_right;
+ } else {
+ if ((region->guest_end != tmp_region->guest_end) ||
+ (region->guest_start != tmp_region->guest_start)) {
+ PrintError("Trying to map a partial overlapped core specific page...\n");
+ return tmp_region; // This is ugly...
+ } else if (region->core_id == tmp_region->core_id) {
+ return tmp_region;
+ } else if (region->core_id < tmp_region->core_id) {
+ p = &(*p)->rb_left;
+ } else {
+ p = &(*p)->rb_right;
+ }
+ }
}
- reg = reg->next;
- i++;
- }
- return NULL;
-}
-
-struct shadow_region * get_shadow_region_by_addr(struct shadow_map * map,
- addr_t addr) {
- struct shadow_region * reg = map->head;
-
- while (reg) {
- if ((reg->guest_start <= addr) && (reg->guest_end > addr)) {
- return reg;
- } else if (reg->guest_start > addr) {
- return NULL;
- } else {
- reg = reg->next;
- }
- }
- return NULL;
+ rb_link_node(&(region->tree_node), parent, p);
+
+ return NULL;
}
-host_region_type_t get_shadow_addr_type(struct guest_info * info, addr_t guest_addr) {
- struct shadow_region * reg = get_shadow_region_by_addr(&(info->mem_map), guest_addr);
- if (!reg) {
- return HOST_REGION_INVALID;
- } else {
- return reg->host_type;
- }
-}
+int v3_insert_mem_region(struct v3_vm_info * vm, struct v3_mem_region * region) {
+ int i = 0;
-addr_t get_shadow_addr(struct guest_info * info, addr_t guest_addr) {
- struct shadow_region * reg = get_shadow_region_by_addr(&(info->mem_map), guest_addr);
+ if (__insert_mem_region(vm, region) != NULL) {
+ return -1;
+ }
+
+ v3_rb_insert_color(&(region->tree_node), &(vm->mem_map.mem_regions));
+
+ for (i = 0; i < vm->num_cores; i++) {
+ struct guest_info * info = &(vm->cores[i]);
+
+ // Flush the virtual page tables
+ // 3 cases: shadow, shadow passthrough, and nested
+
+ if (info->shdw_pg_mode == SHADOW_PAGING) {
+ v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);
+
+ if (mem_mode == PHYSICAL_MEM) {
+ addr_t cur_addr;
+
+ for (cur_addr = region->guest_start;
+ cur_addr < region->guest_end;
+ cur_addr += PAGE_SIZE_4KB) {
+ v3_invalidate_passthrough_addr(info, cur_addr);
+ }
+ } else {
+ v3_invalidate_shadow_pts(info);
+ }
+
+ } else if (info->shdw_pg_mode == NESTED_PAGING) {
+ addr_t cur_addr;
+
+ for (cur_addr = region->guest_start;
+ cur_addr < region->guest_end;
+ cur_addr += PAGE_SIZE_4KB) {
+
+ v3_invalidate_nested_addr(info, cur_addr);
+ }
+ }
+ }
- if (!reg) {
return 0;
- } else {
- return (guest_addr - reg->guest_start) + reg->host_addr;
- }
}
-
-
-host_region_type_t lookup_shadow_map_addr(struct shadow_map * map, addr_t guest_addr, addr_t * host_addr) {
- struct shadow_region * reg = get_shadow_region_by_addr(map, guest_addr);
-
- if (!reg) {
- // No mapping exists
- return HOST_REGION_INVALID;
- } else {
- switch (reg->host_type) {
- case HOST_REGION_PHYSICAL_MEMORY:
- *host_addr = (guest_addr - reg->guest_start) + reg->host_addr;
- return reg->host_type;
- case HOST_REGION_MEMORY_MAPPED_DEVICE:
- case HOST_REGION_UNALLOCATED:
- // ...
- default:
- *host_addr = 0;
- return reg->host_type;
+
+
+
+
+struct v3_mem_region * v3_get_mem_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
+ struct rb_node * n = vm->mem_map.mem_regions.rb_node;
+ struct v3_mem_region * reg = NULL;
+
+ while (n) {
+
+ reg = rb_entry(n, struct v3_mem_region, tree_node);
+
+ if (guest_addr < reg->guest_start) {
+ n = n->rb_left;
+ } else if (guest_addr >= reg->guest_end) {
+ n = n->rb_right;
+ } else {
+ if (reg->core_id == V3_MEM_CORE_ANY) {
+ // found relevant region, it's available on all cores
+ return reg;
+ } else if (core_id == reg->core_id) {
+ // found relevant region, it's available on the indicated core
+ return reg;
+ } else if (core_id < reg->core_id) {
+ // go left, core too big
+ n = n->rb_left;
+ } else if (core_id > reg->core_id) {
+ // go right, core too small
+ n = n->rb_right;
+ } else {
+ PrintDebug("v3_get_mem_region: Impossible!\n");
+ return NULL;
+ }
+ }
}
- }
-}
-void print_shadow_map(struct shadow_map * map) {
- struct shadow_region * cur = map->head;
- int i = 0;
+ // No region is registered here, so check whether the address is valid in the base region
- PrintDebug("Memory Layout (regions: %d) \n", map->num_regions);
+ if (guest_addr >= vm->mem_map.base_region.guest_end) {
+ PrintError("Guest Address Exceeds Base Memory Size (ga=0x%p) (limit=0x%p) (core=0x%x)\n",
+ (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end, core_id);
+ v3_print_mem_map(vm);
- while (cur) {
- PrintDebug("%d: 0x%p - 0x%p (%s) -> ", i,
- (void *)cur->guest_start, (void *)(cur->guest_end - 1),
- cur->guest_type == GUEST_REGION_PHYSICAL_MEMORY ? "GUEST_REGION_PHYSICAL_MEMORY" :
- cur->guest_type == GUEST_REGION_NOTHING ? "GUEST_REGION_NOTHING" :
- cur->guest_type == GUEST_REGION_MEMORY_MAPPED_DEVICE ? "GUEST_REGION_MEMORY_MAPPED_DEVICE" :
- "UNKNOWN");
- if (cur->host_type == HOST_REGION_PHYSICAL_MEMORY ||
- cur->host_type == HOST_REGION_UNALLOCATED ||
- cur->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) {
- PrintDebug("0x%p", (void *)(cur->host_addr));
+ return NULL;
}
- PrintDebug("(%s)\n",
- cur->host_type == HOST_REGION_PHYSICAL_MEMORY ? "HOST_REGION_PHYSICAL_MEMORY" :
- cur->host_type == HOST_REGION_UNALLOCATED ? "HOST_REGION_UNALLOACTED" :
- cur->host_type == HOST_REGION_HOOK ? "HOST_REGION_HOOK" :
- cur->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE ? "HOST_REGION_MEMORY_MAPPED_DEVICE" :
- cur->host_type == HOST_REGION_REMOTE ? "HOST_REGION_REMOTE" :
- cur->host_type == HOST_REGION_SWAPPED ? "HOST_REGION_SWAPPED" :
- "UNKNOWN");
- cur = cur->next;
- i++;
- }
-}
-
-
-
+ return &(vm->mem_map.base_region);
+}
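+
+/* Lookup sketch: translating a GPA to an HPA through whatever region backs
+ * it. The offset arithmetic assumes a region maps its guest range
+ * contiguously onto host memory, as the base region does.
+ *
+ *   struct v3_mem_region * reg = v3_get_mem_region(vm, core_id, gpa);
+ *   addr_t hpa = (gpa - reg->guest_start) + reg->host_addr;
+ */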
+/* This returns the next memory region based on a given address.
+ * If the address falls inside a sub region, that region is returned.
+ * If the address falls outside any sub region, the next sub region is returned.
+ * NOTE that we have to be careful about core_ids here (see the example
+ * following the function).
+ */
+static struct v3_mem_region * get_next_mem_region( struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
+ struct rb_node * n = vm->mem_map.mem_regions.rb_node;
+ struct v3_mem_region * reg = NULL;
+ struct v3_mem_region * parent = NULL;
+
+ while (n) {
+
+ reg = rb_entry(n, struct v3_mem_region, tree_node);
+
+ if (guest_addr < reg->guest_start) {
+ n = n->rb_left;
+ } else if (guest_addr >= reg->guest_end) {
+ n = n->rb_right;
+ } else {
+ if (reg->core_id == V3_MEM_CORE_ANY) {
+ // found relevant region, it's available on all cores
+ return reg;
+ } else if (core_id == reg->core_id) {
+ // found relevant region, it's available on the indicated core
+ return reg;
+ } else if (core_id < reg->core_id) {
+ // go left, core too big
+ n = n->rb_left;
+ } else if (core_id > reg->core_id) {
+ // go right, core too small
+ n = n->rb_right;
+ } else {
+ PrintError("v3_get_mem_region: Impossible!\n");
+ return NULL;
+ }
+ }
+
+ if ((reg->core_id == core_id) || (reg->core_id == V3_MEM_CORE_ANY)) {
+ parent = reg;
+ }
+ }
+ if (parent == NULL) {
+ // No region bound to this core (or to all cores) was seen on the search path
+ return NULL;
+ }
+
+ if (parent->guest_start > guest_addr) {
+ return parent;
+ } else if (parent->guest_end <= guest_addr) {
+ struct rb_node * node = &(parent->tree_node);
-#ifdef VMM_MEM_TEST
+ while ((node = v3_rb_next(node)) != NULL) {
+ struct v3_mem_region * next_reg = rb_entry(node, struct v3_mem_region, tree_node);
+ if ((next_reg->core_id == V3_MEM_CORE_ANY) ||
+ (next_reg->core_id == core_id)) {
-#include <stdlib.h>
-#include <stdio.h>
-#include <stdarg.h>
+ // This check is not strictly necessary, but it makes it clearer
+ if (next_reg->guest_start > guest_addr) {
+ return next_reg;
+ }
+ }
+ }
+ }
+ return NULL;
+}
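+
+/* Example: with sub regions [0x1000,0x2000) and [0x8000,0x9000) registered
+ * for this core, get_next_mem_region(vm, core, 0x3000) returns the
+ * [0x8000,0x9000) region, while 0x1800 falls inside the first region and
+ * returns that region directly.
+ */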
-struct vmm_os_hooks * os_hooks;
+/* Given a guest address range, check whether any registered regions overlap it.
+ * The range must lie within a single region: if it does, that region is
+ * returned, and it can be either the base region or a sub region.
+ * If there are multiple regions in the range, NULL is returned
+ * (see the example following the function).
+ */
+static struct v3_mem_region * get_overlapping_region(struct v3_vm_info * vm, uint16_t core_id,
+ addr_t start_gpa, addr_t end_gpa) {
+ struct v3_mem_region * start_region = v3_get_mem_region(vm, core_id, start_gpa);
-void * TestMalloc(uint_t size) {
- return malloc(size);
-}
+ if (start_region == NULL) {
+ PrintError("Invalid memory region\n");
+ return NULL;
+ }
-void * TestAllocatePages(int size) {
- return malloc(4096 * size);
-}
+ if (start_region->guest_end < end_gpa) {
+ // Region ends before range
+ return NULL;
+ } else if (start_region->flags.base == 0) {
+ // sub region overlaps range
+ return start_region;
+ } else {
+ // Base region, now we have to scan forward for the next sub region
+ struct v3_mem_region * next_reg = get_next_mem_region(vm, core_id, start_gpa);
+
+ if (next_reg == NULL) {
+ // no sub regions after start_gpa, base region is ok
+ return start_region;
+ } else if (next_reg->guest_start >= end_gpa) {
+ // Next sub region begins outside range
+ return start_region;
+ } else {
+ return NULL;
+ }
+ }
-void TestPrint(const char * fmt, ...) {
- va_list args;
- va_start(args, fmt);
- vprintf(fmt, args);
- va_end(args);
+ // Should never get here
+ return NULL;
}
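+
+/* Example: a request for the 2MB window [0x200000,0x400000) resolves to a
+ * single region only if no sub region cuts into it; if one does, NULL is
+ * returned and v3_get_max_page_size() below falls back to 4KB pages.
+ */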
-int mem_list_add_test_1( vmm_mem_list_t * list) {
- uint_t offset = 0;
- PrintDebug("\n\nTesting Memory List\n");
- init_mem_list(list);
- offset = PAGE_SIZE * 6;
- PrintDebug("Adding 0x%x - 0x%x\n", offset, offset + (PAGE_SIZE * 10));
- add_mem_list_pages(list, offset, 10);
- print_mem_list(list);
+void v3_delete_mem_region(struct v3_vm_info * vm, struct v3_mem_region * reg) {
+ int i = 0;
+ if (reg == NULL) {
+ return;
+ }
- offset = 0;
- PrintDebug("Adding 0x%x - 0x%x\n", offset, offset + PAGE_SIZE * 4);
- add_mem_list_pages(list, offset, 4);
- print_mem_list(list);
- offset = PAGE_SIZE * 20;
- PrintDebug("Adding 0x%x - 0x%x\n", offset, offset + (PAGE_SIZE * 1));
- add_mem_list_pages(list, offset, 1);
- print_mem_list(list);
+ v3_rb_erase(&(reg->tree_node), &(vm->mem_map.mem_regions));
- offset = PAGE_SIZE * 21;
- PrintDebug("Adding 0x%x - 0x%x\n", offset, offset + (PAGE_SIZE * 3));
- add_mem_list_pages(list, offset, 3);
- print_mem_list(list);
- offset = PAGE_SIZE * 10;
- PrintDebug("Adding 0x%x - 0x%x\n", offset, offset + (PAGE_SIZE * 30));
- add_mem_list_pages(list, offset, 30);
- print_mem_list(list);
+ // If the guest isn't running then there shouldn't be anything to invalidate.
+ // Page tables should __always__ be created on demand during execution
+ // NOTE: This is a sanity check, and can be removed if that assumption changes
+ if (vm->run_state != VM_RUNNING) {
+ V3_Free(reg);
+ return;
+ }
+ for (i = 0; i < vm->num_cores; i++) {
+ struct guest_info * info = &(vm->cores[i]);
+
+ // Flush the virtual page tables
+ // 3 cases: shadow, shadow passthrough, and nested
+
+ if (info->shdw_pg_mode == SHADOW_PAGING) {
+ v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);
+
+ if (mem_mode == PHYSICAL_MEM) {
+ addr_t cur_addr;
+
+ for (cur_addr = reg->guest_start;
+ cur_addr < reg->guest_end;
+ cur_addr += PAGE_SIZE_4KB) {
+ v3_invalidate_passthrough_addr(info, cur_addr);
+ }
+ } else {
+ v3_invalidate_shadow_pts(info);
+ }
+
+ } else if (info->shdw_pg_mode == NESTED_PAGING) {
+ addr_t cur_addr;
+
+ for (cur_addr = reg->guest_start;
+ cur_addr < reg->guest_end;
+ cur_addr += PAGE_SIZE_4KB) {
+
+ v3_invalidate_nested_addr(info, cur_addr);
+ }
+ }
+ }
- offset = PAGE_SIZE * 5;
- PrintDebug("Adding 0x%x - 0x%x\n", offset, offset + (PAGE_SIZE * 1));
- add_mem_list_pages(list, offset, 1);
- print_mem_list(list);
+ V3_Free(reg);
-
- return 0;
}
+// Determine the largest page size that can back a given address in the given CPU mode
+uint32_t v3_get_max_page_size(struct guest_info * core, addr_t page_addr, v3_cpu_mode_t mode) {
+ addr_t pg_start = 0;
+ addr_t pg_end = 0;
+ uint32_t page_size = PAGE_SIZE_4KB;
+ struct v3_mem_region * reg = NULL;
+
+ switch (mode) {
+ case PROTECTED:
+ if (core->use_large_pages == 1) {
+ pg_start = PAGE_ADDR_4MB(page_addr);
+ pg_end = (pg_start + PAGE_SIZE_4MB);
+
+ reg = get_overlapping_region(core->vm_info, core->cpu_id, pg_start, pg_end);
+
+ if ((reg) && ((reg->host_addr % PAGE_SIZE_4MB) == 0)) {
+ page_size = PAGE_SIZE_4MB;
+ }
+ }
+ break;
+ case PROTECTED_PAE:
+ if (core->use_large_pages == 1) {
+ pg_start = PAGE_ADDR_2MB(page_addr);
+ pg_end = (pg_start + PAGE_SIZE_2MB);
+
+ reg = get_overlapping_region(core->vm_info, core->cpu_id, pg_start, pg_end);
+
+ if ((reg) && ((reg->host_addr % PAGE_SIZE_2MB) == 0)) {
+ page_size = PAGE_SIZE_2MB;
+ }
+ }
+ break;
+ case LONG:
+ case LONG_32_COMPAT:
+ case LONG_16_COMPAT:
+ if (core->use_giant_pages == 1) {
+ pg_start = PAGE_ADDR_1GB(page_addr);
+ pg_end = (pg_start + PAGE_SIZE_1GB);
+
+ reg = get_overlapping_region(core->vm_info, core->cpu_id, pg_start, pg_end);
+
+ if ((reg) && ((reg->host_addr % PAGE_SIZE_1GB) == 0)) {
+ page_size = PAGE_SIZE_1GB;
+ break;
+ }
+ }
+
+ if (core->use_large_pages == 1) {
+ pg_start = PAGE_ADDR_2MB(page_addr);
+ pg_end = (pg_start + PAGE_SIZE_2MB);
+
+ reg = get_overlapping_region(core->vm_info, core->cpu_id, pg_start, pg_end);
+
+ if ((reg) && ((reg->host_addr % PAGE_SIZE_2MB) == 0)) {
+ page_size = PAGE_SIZE_2MB;
+ }
+ }
+ break;
+ default:
+ PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
+ return -1;
+ }
-int mem_layout_add_test_1(vmm_mem_layout_t * layout) {
-
-
- uint_t start = 0;
- uint_t end = 0;
-
- PrintDebug("\n\nTesting Memory Layout\n");
-
- init_mem_layout(layout);
-
- start = 0x6000;
- end = 0x10000;;
- PrintDebug("Adding 0x%x - 0x%x\n", start, end);
- add_guest_mem_range(layout, start, end);
- print_mem_layout(layout);
-
-
- start = 0x1000;
- end = 0x3000;
- PrintDebug("Adding 0x%x - 0x%x\n", start, end);
- add_guest_mem_range(layout, start, end);
- print_mem_layout(layout);
-
- start = 0x2000;
- end = 0x6000;
- PrintDebug("Adding 0x%x - 0x%x\n", start, end);
- add_guest_mem_range(layout, start, end);
- print_mem_layout(layout);
-
- start = 0x4000;
- end = 0x5000;
- PrintDebug("Adding 0x%x - 0x%x\n", start, end);
- add_guest_mem_range(layout, start, end);
- print_mem_layout(layout);
-
-
- start = 0x5000;
- end = 0x7000;
- PrintDebug("Adding 0x%x - 0x%x\n", start, end);
- add_guest_mem_range(layout, start, end);
- print_mem_layout(layout);
-
-
-
-
- return 0;
+ return page_size;
}
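+
+/* Worked example (PROTECTED_PAE with use_large_pages set): for page_addr
+ * 0x12345678, PAGE_ADDR_2MB() gives pg_start = 0x12200000, so pg_end =
+ * 0x12400000. A 2MB page is used only if that whole window lies within one
+ * region and the region's host_addr is 2MB-aligned; otherwise the function
+ * returns PAGE_SIZE_4KB.
+ */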
-int main(int argc, char ** argv) {
- struct vmm_os_hooks dummy_hooks;
- os_hooks = &dummy_hooks;
+void v3_print_mem_map(struct v3_vm_info * vm) {
+ struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
+ struct v3_mem_region * reg = &(vm->mem_map.base_region);
+ int i = 0;
- vmm_mem_layout_t layout;
- vmm_mem_list_t list;
+ V3_Print("Memory Layout (all cores):\n");
+
- os_hooks->malloc = &TestMalloc;
- os_hooks->free = &free;
- os_hooks->print_debug = &TestPrint;
- os_hooks->allocate_pages = &TestAllocatePages;
+ V3_Print("Base Region (all cores): 0x%p - 0x%p -> 0x%p\n",
+ (void *)(reg->guest_start),
+ (void *)(reg->guest_end - 1),
+ (void *)(reg->host_addr));
+
+ // If the memory map is empty, don't print it
+ if (node == NULL) {
+ return;
+ }
+ do {
+ reg = rb_entry(node, struct v3_mem_region, tree_node);
- printf("mem_list_add_test_1: %d\n", mem_list_add_test_1(&list));
- printf("layout_add_test_1: %d\n", mem_layout_add_test_1(&layout));
+ V3_Print("%d: 0x%p - 0x%p -> 0x%p\n", i,
+ (void *)(reg->guest_start),
+ (void *)(reg->guest_end - 1),
+ (void *)(reg->host_addr));
- return 0;
+ V3_Print("\t(flags=0x%x) (core=0x%x) (unhandled = 0x%p)\n",
+ reg->flags.value,
+ reg->core_id,
+ reg->unhandled);
+
+ i++;
+ } while ((node = v3_rb_next(node)));
}
-#endif
-
-
-
-
-