/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
#include <palacios/vmm_mem.h>
#include <palacios/vmm.h>
#include <palacios/vmm_util.h>
#include <palacios/vmm_emulator.h>
#include <palacios/vm_guest.h>

#include <palacios/vmm_shadow_paging.h>
#include <palacios/vmm_direct_paging.h>

static int mem_offset_hypercall(struct guest_info * info, uint_t hcall_id, void * private_data) {
    PrintDebug("V3Vee: Memory offset hypercall (offset=%p)\n",
	       (void *)(info->vm_info->mem_map.base_region.host_addr));

    // Hand the host physical base address of guest memory back in RBX
    info->vm_regs.rbx = info->vm_info->mem_map.base_region.host_addr;

    return 0;
}
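
/*
 * Illustrative sketch (not part of the original file): how a guest might
 * invoke this hypercall. Palacios dispatches on the hypercall id passed in
 * RAX, and this handler returns the base-region offset in RBX. The guest
 * must use the hypercall instruction matching the platform (vmmcall on AMD
 * SVM, vmcall on Intel VMX); AMD is assumed here, and MEM_OFFSET_HCALL
 * stands for the same numeric id the Palacios headers define.
 *
 *   static inline unsigned long long guest_get_mem_offset(void) {
 *       unsigned long long offset;
 *
 *       asm volatile ("vmmcall"
 *                     : "=b" (offset)
 *                     : "a" ((unsigned long long)MEM_OFFSET_HCALL));
 *
 *       return offset;
 *   }
 */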

static int unhandled_err(struct guest_info * core, addr_t guest_va, addr_t guest_pa,
			 struct v3_mem_region * reg, pf_error_t access_info) {

    PrintError("Unhandled memory access error\n");

    v3_print_mem_map(core->vm_info);

    v3_print_guest_state(core);

    return -1;
}

static inline uint32_t get_alignment(char * align_str) {
    if (align_str != NULL) {
	if (strncasecmp(align_str, "2MB", strlen("2MB")) == 0) {
	    return PAGE_SIZE_2MB;
	} else if (strncasecmp(align_str, "4MB", strlen("4MB")) == 0) {
	    return PAGE_SIZE_4MB;
	}
    }

    // default is 4KB alignment
    return PAGE_SIZE_4KB;
}
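
/*
 * Usage note (illustrative, not part of the original file): the alignment
 * string is read from the "alignment" property of the <memory> element in
 * the VM's XML configuration, e.g. something along the lines of:
 *
 *   <memory alignment="2MB">512</memory>
 *
 * Any value other than "2MB" or "4MB" (matched case-insensitively) falls
 * back to the default 4KB alignment.
 */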

int v3_init_mem_map(struct v3_vm_info * vm) {
    struct v3_mem_map * map = &(vm->mem_map);
    v3_cfg_tree_t * pg_cfg = v3_cfg_subtree(vm->cfg_data->cfg, "memory");
    uint32_t alignment = get_alignment(v3_cfg_val(pg_cfg, "alignment"));
    addr_t mem_pages = vm->mem_size >> 12;

    memset(&(map->base_region), 0, sizeof(struct v3_mem_region));

    map->mem_regions.rb_node = NULL;

    // There is an underlying region that contains all of the guest memory
    // PrintDebug("Mapping %d pages of memory (%u bytes)\n", (int)mem_pages, (uint_t)info->mem_size);

    map->base_region.guest_start = 0;
    map->base_region.guest_end = mem_pages * PAGE_SIZE_4KB;

#ifdef ALIGNED_PG_ALLOC
    map->base_region.host_addr = (addr_t)V3_AllocAlignedPages(mem_pages, alignment);
#else
    if (alignment != PAGE_SIZE_4KB) {
	PrintError("Aligned page allocations are not supported on this host (requested alignment=%d)\n", alignment);
	PrintError("Ignoring alignment request\n");
    }

    map->base_region.host_addr = (addr_t)V3_AllocPages(mem_pages);
#endif

    map->base_region.flags.read = 1;
    map->base_region.flags.write = 1;
    map->base_region.flags.exec = 1;
    map->base_region.flags.base = 1;
    map->base_region.flags.alloced = 1;

    map->base_region.unhandled = unhandled_err;

    if ((void *)map->base_region.host_addr == NULL) {
	PrintError("Could not allocate guest memory\n");
	return -1;
    }

    //memset(V3_VAddr((void *)map->base_region.host_addr), 0xffffffff, map->base_region.guest_end);

    v3_register_hypercall(vm, MEM_OFFSET_HCALL, mem_offset_hypercall, NULL);

    return 0;
}
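
/*
 * Note (illustrative, not part of the original file): because the base
 * region is a single contiguous host allocation backing guest physical
 * address 0 upward, a guest physical address that falls in the base region
 * translates with one offset computation:
 *
 *   addr_t host_pa = vm->mem_map.base_region.host_addr + guest_pa;
 *
 * Regions inserted later into the rb-tree override this mapping for their
 * own guest address ranges.
 */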

void v3_delete_mem_map(struct v3_vm_info * vm) {
    struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
    struct v3_mem_region * reg;
    struct rb_node * tmp_node = NULL;

    while (node) {
	reg = rb_entry(node, struct v3_mem_region, tree_node);
	tmp_node = node;
	// Advance before deleting, since deletion erases the node from the tree
	node = v3_rb_next(node);

	v3_delete_mem_region(vm, reg);
    }

    V3_FreePage((void *)(vm->mem_map.base_region.host_addr));
}

struct v3_mem_region * v3_create_mem_region(struct v3_vm_info * vm, uint16_t core_id,
					    addr_t guest_addr_start, addr_t guest_addr_end) {

    struct v3_mem_region * entry = (struct v3_mem_region *)V3_Malloc(sizeof(struct v3_mem_region));
    memset(entry, 0, sizeof(struct v3_mem_region));

    entry->guest_start = guest_addr_start;
    entry->guest_end = guest_addr_end;
    entry->core_id = core_id;
    entry->unhandled = unhandled_err;

    return entry;
}
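
/*
 * Note (added for clarity): v3_create_mem_region() only fills in the guest
 * address range, the core binding, and the default unhandled-error callback.
 * The caller is expected to set host_addr and the access flags, then call
 * v3_insert_mem_region(); v3_add_shadow_mem() below shows the pattern.
 */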

int v3_add_shadow_mem( struct v3_vm_info * vm, uint16_t core_id,
		       addr_t guest_addr_start,
		       addr_t guest_addr_end,
		       addr_t host_addr)
{
    struct v3_mem_region * entry = NULL;

    entry = v3_create_mem_region(vm, core_id,
				 guest_addr_start,
				 guest_addr_end);

    entry->host_addr = host_addr;

    entry->flags.read = 1;
    entry->flags.write = 1;
    entry->flags.exec = 1;
    entry->flags.alloced = 1;

    if (v3_insert_mem_region(vm, entry) == -1) {
	V3_Free(entry);
	return -1;
    }

    return 0;
}
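
/*
 * Usage sketch (illustrative, not part of the original file): back a single
 * 4KB guest physical page with a freshly allocated host page, visible to
 * all cores. The guest physical range is an arbitrary example.
 *
 *   addr_t hpa = (addr_t)V3_AllocPages(1);
 *
 *   if (v3_add_shadow_mem(vm, V3_MEM_CORE_ANY, 0xa0000, 0xa1000, hpa) == -1) {
 *       PrintError("Failed to add shadow region\n");
 *   }
 */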

static inline
struct v3_mem_region * __insert_mem_region(struct v3_vm_info * vm,
					   struct v3_mem_region * region) {
    struct rb_node ** p = &(vm->mem_map.mem_regions.rb_node);
    struct rb_node * parent = NULL;
    struct v3_mem_region * tmp_region;

    while (*p) {
	parent = *p;
	tmp_region = rb_entry(parent, struct v3_mem_region, tree_node);

	if (region->guest_end <= tmp_region->guest_start) {
	    p = &(*p)->rb_left;
	} else if (region->guest_start >= tmp_region->guest_end) {
	    p = &(*p)->rb_right;
	} else {
	    // The ranges overlap: only an exact match on a different core is allowed
	    if ((region->guest_end != tmp_region->guest_end) ||
		(region->guest_start != tmp_region->guest_start)) {
		PrintError("Trying to map a partially overlapping core specific page...\n");
		return tmp_region; // This is ugly...
	    } else if (region->core_id == tmp_region->core_id) {
		return tmp_region;
	    } else if (region->core_id < tmp_region->core_id) {
		p = &(*p)->rb_left;
	    } else {
		p = &(*p)->rb_right;
	    }
	}
    }

    rb_link_node(&(region->tree_node), parent, p);

    return NULL;
}
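
/*
 * Note (added for clarity): the tree is ordered primarily by guest physical
 * range and secondarily by core_id, so per-core regions with identical
 * bounds can coexist. __insert_mem_region() returns NULL on success after
 * linking the node, or the conflicting region on overlap; the caller still
 * has to rebalance via v3_rb_insert_color(), as v3_insert_mem_region() does.
 */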

int v3_insert_mem_region(struct v3_vm_info * vm, struct v3_mem_region * region) {
    struct v3_mem_region * ret;
    int i = 0;

    if ((ret = __insert_mem_region(vm, region))) {
	return -1;
    }

    v3_rb_insert_color(&(region->tree_node), &(vm->mem_map.mem_regions));

    for (i = 0; i < vm->num_cores; i++) {
	struct guest_info * info = &(vm->cores[i]);

	// flush virtual page tables
	// 3 cases: shadow, shadow passthrough, and nested

	if (info->shdw_pg_mode == SHADOW_PAGING) {
	    v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);

	    if (mem_mode == PHYSICAL_MEM) {
		addr_t cur_addr;

		for (cur_addr = region->guest_start;
		     cur_addr < region->guest_end;
		     cur_addr += PAGE_SIZE_4KB) {
		    v3_invalidate_passthrough_addr(info, cur_addr);
		}
	    } else {
		v3_invalidate_shadow_pts(info);
	    }

	} else if (info->shdw_pg_mode == NESTED_PAGING) {
	    addr_t cur_addr;

	    for (cur_addr = region->guest_start;
		 cur_addr < region->guest_end;
		 cur_addr += PAGE_SIZE_4KB) {

		v3_invalidate_nested_addr(info, cur_addr);
	    }
	}
    }

    return 0;
}
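
/*
 * Note (added for clarity): the per-core flush above is needed because each
 * core may cache stale GPA->HPA translations for the new region. Shadow
 * passthrough and nested paging invalidate the affected range page by page,
 * while full shadow paging simply rebuilds its shadow page tables.
 * v3_delete_mem_region() below repeats the same invalidation logic.
 */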

struct v3_mem_region * v3_get_mem_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
    struct rb_node * n = vm->mem_map.mem_regions.rb_node;
    struct v3_mem_region * reg = NULL;

    while (n) {

	reg = rb_entry(n, struct v3_mem_region, tree_node);

	if (guest_addr < reg->guest_start) {
	    n = n->rb_left;
	} else if (guest_addr >= reg->guest_end) {
	    n = n->rb_right;
	} else {
	    if (reg->core_id == V3_MEM_CORE_ANY) {
		// found relevant region, it's available on all cores
		return reg;
	    } else if (core_id == reg->core_id) {
		// found relevant region, it's available on the indicated core
		return reg;
	    } else if (core_id < reg->core_id) {
		// go left, the region's core id is too large
		n = n->rb_left;
	    } else if (core_id > reg->core_id) {
		// go right, the region's core id is too small
		n = n->rb_right;
	    } else {
		PrintDebug("v3_get_mem_region: Impossible!\n");
		return NULL;
	    }
	}
    }

    // There is no registered region, so we check whether it's a valid address in the base region

    if (guest_addr >= vm->mem_map.base_region.guest_end) { // guest_end is exclusive
	PrintError("Guest Address Exceeds Base Memory Size (ga=0x%p), (limit=0x%p) (core=0x%x)\n",
		   (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end, core_id);
	v3_print_mem_map(vm);

	return NULL;
    }

    return &(vm->mem_map.base_region);
}
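
/*
 * Usage sketch (illustrative, not part of the original file): resolving a
 * guest physical address to a host physical address through the region map.
 * This mirrors what Palacios' guest-to-host translation helpers do, under
 * the assumption that a region maps [guest_start, guest_end) contiguously
 * starting at host_addr:
 *
 *   struct v3_mem_region * reg = v3_get_mem_region(vm, core_id, guest_pa);
 *
 *   if ((reg != NULL) && (reg->flags.alloced)) {
 *       addr_t host_pa = reg->host_addr + (guest_pa - reg->guest_start);
 *       // ... use host_pa ...
 *   }
 */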

/* Search the "hooked" memory regions for a region that ends after the given address. If the
 * address is invalid, return NULL. Else, return the first region found or the base region if no
 * region ends after the given address.
 */
struct v3_mem_region * v3_get_next_mem_region( struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
    struct rb_node * n = vm->mem_map.mem_regions.rb_node;
    struct v3_mem_region * reg = NULL;
    struct v3_mem_region * match = NULL;

    // Keep going to the right in the tree while the address is greater than the current region's
    // end address; otherwise remember the region as a candidate and look for an earlier one on the left.
    while (n) {

	reg = rb_entry(n, struct v3_mem_region, tree_node);

	if (guest_addr >= reg->guest_end) { // reg is [start,end)
	    n = n->rb_right;
	} else {
	    if ((core_id == reg->core_id) || (reg->core_id == V3_MEM_CORE_ANY)) {
		match = reg;
	    }
	    n = n->rb_left;
	}
    }

    if (match != NULL) {
	return match;
    }

    // There is no registered region, so we check if it's a valid address in the base region

    if (guest_addr >= vm->mem_map.base_region.guest_end) {
	PrintError("%s: Guest Address Exceeds Base Memory Size (ga=%p), (limit=%p)\n",
		   __FUNCTION__, (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end);
	v3_print_mem_map(vm);

	return NULL;
    }

    return &(vm->mem_map.base_region);
}

void v3_delete_mem_region(struct v3_vm_info * vm, struct v3_mem_region * reg) {
    int i = 0;

    if (reg == NULL) {
	return;
    }

    for (i = 0; i < vm->num_cores; i++) {
	struct guest_info * info = &(vm->cores[i]);

	// flush virtual page tables
	// 3 cases: shadow, shadow passthrough, and nested

	if (info->shdw_pg_mode == SHADOW_PAGING) {
	    v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);

	    if (mem_mode == PHYSICAL_MEM) {
		addr_t cur_addr;

		for (cur_addr = reg->guest_start;
		     cur_addr < reg->guest_end;
		     cur_addr += PAGE_SIZE_4KB) {
		    v3_invalidate_passthrough_addr(info, cur_addr);
		}
	    } else {
		v3_invalidate_shadow_pts(info);
	    }

	} else if (info->shdw_pg_mode == NESTED_PAGING) {
	    addr_t cur_addr;

	    for (cur_addr = reg->guest_start;
		 cur_addr < reg->guest_end;
		 cur_addr += PAGE_SIZE_4KB) {

		v3_invalidate_nested_addr(info, cur_addr);
	    }
	}
    }

    v3_rb_erase(&(reg->tree_node), &(vm->mem_map.mem_regions));

    V3_Free(reg);
}

void v3_print_mem_map(struct v3_vm_info * vm) {
    struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
    struct v3_mem_region * reg = &(vm->mem_map.base_region);
    int i = 0;

    V3_Print("Memory Layout (all cores):\n");

    V3_Print("Base Region (all cores): 0x%p - 0x%p -> 0x%p\n",
	     (void *)(reg->guest_start),
	     (void *)(reg->guest_end - 1),
	     (void *)(reg->host_addr));

    // If the memory map is empty, don't print it
    if (node == NULL) {
	return;
    }

    do {
	reg = rb_entry(node, struct v3_mem_region, tree_node);

	V3_Print("%d: 0x%p - 0x%p -> 0x%p\n", i,
		 (void *)(reg->guest_start),
		 (void *)(reg->guest_end - 1),
		 (void *)(reg->host_addr));

	V3_Print("\t(flags=0x%x) (core=0x%x) (unhandled = 0x%p)\n",
		 reg->flags.value,
		 reg->core_id,
		 (void *)(reg->unhandled));

	i++;
    } while ((node = v3_rb_next(node)));
}
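
/*
 * Example output (illustrative; addresses and flag values are made up, and
 * 0xffff stands in for a region registered for all cores):
 *
 *   Memory Layout (all cores):
 *   Base Region (all cores): 0x0 - 0x1fffffff -> 0x100000000
 *   0: 0xa0000 - 0xa0fff -> 0x234560000
 *       (flags=0x17) (core=0xffff) (unhandled = 0x...)
 */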