/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
#include <palacios/vmm_mem.h>
#include <palacios/vmm.h>
#include <palacios/vmm_util.h>
#include <palacios/vmm_emulator.h>
#include <palacios/vm_guest.h>

#include <palacios/vmm_shadow_paging.h>
#include <palacios/vmm_direct_paging.h>
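
/* Guest physical memory is managed as a single contiguous "base region"
 * backing all of guest RAM, plus an rb-tree of registered regions
 * (vm->mem_map.mem_regions) ordered by guest address range and, for exactly
 * overlapping ranges, by core id. Address lookups fall through to the base
 * region when no registered region matches.
 */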
static int mem_offset_hypercall(struct guest_info * info, uint_t hcall_id, void * private_data) {
    PrintDebug("V3Vee: Memory offset hypercall (offset=%p)\n",
	       (void *)(info->vm_info->mem_map.base_region.host_addr));

    // pass the base region's host address back to the guest in RBX
    info->vm_regs.rbx = info->vm_info->mem_map.base_region.host_addr;

    return 0;
}
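
/* Guest-side usage sketch (illustrative only, not part of this file): the
 * guest raises the hypercall and reads the base region's host address back
 * from RBX. The invocation ABI assumed below (id in RAX, raised via VMMCALL)
 * is an assumption; the real dispatch path lives in the hypercall code.
 *
 *   unsigned long get_mem_offset(void) {        // hypothetical helper
 *       unsigned long offset;
 *       asm volatile ("vmmcall" : "=b"(offset) : "a"(MEM_OFFSET_HCALL));
 *       return offset;
 *   }
 */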
static int unhandled_err(struct guest_info * core, addr_t guest_va, addr_t guest_pa,
			 struct v3_mem_region * reg, pf_error_t access_info) {

    PrintError("Unhandled memory access error\n");

    v3_print_mem_map(core->vm_info);

    v3_print_guest_state(core);

    return -1;
}
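
/* Set up the VM's memory map: allocate one contiguous block of host pages
 * backing all of guest physical memory and describe it as the base region.
 */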
int v3_init_mem_map(struct v3_vm_info * vm) {
    struct v3_mem_map * map = &(vm->mem_map);
    addr_t mem_pages = vm->mem_size >> 12;

    memset(&(map->base_region), 0, sizeof(struct v3_mem_region));

    map->mem_regions.rb_node = NULL;

    // There is an underlying region that contains all of the guest memory
    // PrintDebug("Mapping %d pages of memory (%u bytes)\n", (int)mem_pages, (uint_t)vm->mem_size);

    map->base_region.guest_start = 0;
    map->base_region.guest_end = mem_pages * PAGE_SIZE_4KB;
#ifdef CONFIG_ALIGNED_PG_ALLOC
    map->base_region.host_addr = (addr_t)V3_AllocAlignedPages(mem_pages, vm->mem_align);
#else
    map->base_region.host_addr = (addr_t)V3_AllocPages(mem_pages);
#endif
    map->base_region.flags.read = 1;
    map->base_region.flags.write = 1;
    map->base_region.flags.exec = 1;
    map->base_region.flags.base = 1;
    map->base_region.flags.alloced = 1;

    map->base_region.unhandled = unhandled_err;
    if ((void *)map->base_region.host_addr == NULL) {
	PrintError("Could not allocate Guest memory\n");
	return -1;
    }

    //memset(V3_VAddr((void *)map->base_region.host_addr), 0xffffffff, map->base_region.guest_end);

    v3_register_hypercall(vm, MEM_OFFSET_HCALL, mem_offset_hypercall, NULL);

    return 0;
}
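
/* Tear down the memory map: delete every registered region (which also
 * flushes any translations cached for it), then free the base region's
 * host memory.
 */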
void v3_delete_mem_map(struct v3_vm_info * vm) {
    struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
    struct v3_mem_region * reg;

    // advance past each node before deleting it erases the node from the tree
    while (node) {
	reg = rb_entry(node, struct v3_mem_region, tree_node);

	node = v3_rb_next(node);

	v3_delete_mem_region(vm, reg);
    }

    V3_FreePage((void *)(vm->mem_map.base_region.host_addr));
}
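
/* Allocate and zero a region descriptor for [guest_addr_start, guest_addr_end).
 * The region is not linked into the map here; callers fill in host_addr and
 * flags and then hand the region to v3_insert_mem_region().
 */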
struct v3_mem_region * v3_create_mem_region(struct v3_vm_info * vm, uint16_t core_id,
					    addr_t guest_addr_start, addr_t guest_addr_end) {

    struct v3_mem_region * entry = (struct v3_mem_region *)V3_Malloc(sizeof(struct v3_mem_region));
    memset(entry, 0, sizeof(struct v3_mem_region));

    entry->guest_start = guest_addr_start;
    entry->guest_end = guest_addr_end;
    entry->core_id = core_id;
    entry->unhandled = unhandled_err;

    return entry;
}
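
/* Convenience wrapper: create a fully permissioned, already-backed region
 * and insert it. Usage sketch (the addresses and host page are made up for
 * the example):
 *
 *   addr_t host_pa = (addr_t)V3_AllocPages(1);
 *   if (v3_add_shadow_mem(vm, V3_MEM_CORE_ANY, 0xA0000, 0xA1000, host_pa) == -1) {
 *       PrintError("region overlaps an existing mapping\n");
 *   }
 */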
int v3_add_shadow_mem(struct v3_vm_info * vm, uint16_t core_id,
		      addr_t guest_addr_start,
		      addr_t guest_addr_end,
		      addr_t host_addr) {
    struct v3_mem_region * entry = NULL;

    entry = v3_create_mem_region(vm, core_id,
				 guest_addr_start, guest_addr_end);
    entry->host_addr = host_addr;

    entry->flags.read = 1;
    entry->flags.write = 1;
    entry->flags.exec = 1;
    entry->flags.alloced = 1;

    if (v3_insert_mem_region(vm, entry) == -1) {
	V3_Free(entry);
	return -1;
    }

    return 0;
}
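
/* Walk the rb-tree to find the insertion point for a new region. Regions are
 * ordered first by guest address range; exactly matching ranges are ordered
 * by core id. Returns NULL on success (node linked but not yet recolored),
 * or the conflicting region on overlap.
 */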
static struct v3_mem_region * __insert_mem_region(struct v3_vm_info * vm,
						  struct v3_mem_region * region) {
    struct rb_node ** p = &(vm->mem_map.mem_regions.rb_node);
    struct rb_node * parent = NULL;
    struct v3_mem_region * tmp_region;

    while (*p) {
	parent = *p;
	tmp_region = rb_entry(parent, struct v3_mem_region, tree_node);

	if (region->guest_end <= tmp_region->guest_start) {
	    p = &(*p)->rb_left;
	} else if (region->guest_start >= tmp_region->guest_end) {
	    p = &(*p)->rb_right;
	} else {
	    if ((region->guest_end != tmp_region->guest_end) ||
		(region->guest_start != tmp_region->guest_start)) {
		PrintError("Trying to map a partially overlapping core specific page...\n");
		return tmp_region; // This is ugly...
	    } else if (region->core_id == tmp_region->core_id) {
		return tmp_region;
	    } else if (region->core_id < tmp_region->core_id) {
		p = &(*p)->rb_left;
	    } else {
		p = &(*p)->rb_right;
	    }
	}
    }

    rb_link_node(&(region->tree_node), parent, p);

    return NULL;
}
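
/* Insert a region and rebalance the tree, then invalidate any translations
 * each core may have cached for the affected range so the new mapping takes
 * effect: passthrough and nested page tables are invalidated page by page,
 * while full shadow page tables are simply dropped.
 */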
int v3_insert_mem_region(struct v3_vm_info * vm, struct v3_mem_region * region) {
    struct v3_mem_region * ret;
    int i = 0;

    if ((ret = __insert_mem_region(vm, region))) {
	return -1;
    }

    v3_rb_insert_color(&(region->tree_node), &(vm->mem_map.mem_regions));

    for (i = 0; i < vm->num_cores; i++) {
	struct guest_info * info = &(vm->cores[i]);

	// flush virtual page tables
	// 3 cases: shadow, shadow passthrough, and nested
	if (info->shdw_pg_mode == SHADOW_PAGING) {
	    v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);

	    if (mem_mode == PHYSICAL_MEM) {
		addr_t cur_addr;

		for (cur_addr = region->guest_start;
		     cur_addr < region->guest_end;
		     cur_addr += PAGE_SIZE_4KB) {
		    v3_invalidate_passthrough_addr(info, cur_addr);
		}
	    } else {
		v3_invalidate_shadow_pts(info);
	    }
	} else if (info->shdw_pg_mode == NESTED_PAGING) {
	    addr_t cur_addr;

	    for (cur_addr = region->guest_start;
		 cur_addr < region->guest_end;
		 cur_addr += PAGE_SIZE_4KB) {
		v3_invalidate_nested_addr(info, cur_addr);
	    }
	}
    }

    return 0;
}
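
/* Look up the region containing guest_addr for the given core. Per-core
 * regions match only their own core; V3_MEM_CORE_ANY regions match every
 * core. Unhooked addresses fall through to the base region; NULL is
 * returned only when the address lies beyond guest memory entirely.
 */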
struct v3_mem_region * v3_get_mem_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
    struct rb_node * n = vm->mem_map.mem_regions.rb_node;
    struct v3_mem_region * reg = NULL;

    while (n) {
	reg = rb_entry(n, struct v3_mem_region, tree_node);

	if (guest_addr < reg->guest_start) {
	    n = n->rb_left;
	} else if (guest_addr >= reg->guest_end) {
	    n = n->rb_right;
	} else {
	    if (reg->core_id == V3_MEM_CORE_ANY) {
		// found relevant region, it's available on all cores
		return reg;
	    } else if (core_id == reg->core_id) {
		// found relevant region, it's available on the indicated core
		return reg;
	    } else if (core_id < reg->core_id) {
		// go left, core too big
		n = n->rb_left;
	    } else if (core_id > reg->core_id) {
		// go right, core too small
		n = n->rb_right;
	    } else {
		PrintDebug("v3_get_mem_region: Impossible!\n");
		return NULL;
	    }
	}
    }

    // There is no registered region, so we check if it's a valid address in the base region
    // (guest_end is exclusive, so an address equal to it is out of range)
    if (guest_addr >= vm->mem_map.base_region.guest_end) {
	PrintError("Guest Address Exceeds Base Memory Size (ga=0x%p), (limit=0x%p) (core=0x%x)\n",
		   (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end, core_id);
	v3_print_mem_map(vm);

	return NULL;
    }

    return &(vm->mem_map.base_region);
}
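
/* Usage sketch: translating a guest physical address to a host physical
 * address through a looked-up region (vm, core_id, and gpa are assumed to be
 * in scope; host_addr is only meaningful for regions with flags.alloced set):
 *
 *   struct v3_mem_region * reg = v3_get_mem_region(vm, core_id, gpa);
 *   if (reg != NULL) {
 *       addr_t hpa = reg->host_addr + (gpa - reg->guest_start);
 *   }
 */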
/* Search the "hooked" memory regions for a region that ends after the given address. If the
 * address is invalid, return NULL. Else, return the first region found or the base region if no
 * region ends after the given address.
 */
struct v3_mem_region * v3_get_next_mem_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
    struct rb_node * n = vm->mem_map.mem_regions.rb_node;
    struct v3_mem_region * reg = NULL;

    // Keep going to the right in the tree while the address is greater than the current region's
    // end address
    while (n) {
	reg = rb_entry(n, struct v3_mem_region, tree_node);
	if (guest_addr >= reg->guest_end) { // reg is [start,end)
	    n = n->rb_right;
	} else {
	    if ((core_id == reg->core_id) || (reg->core_id == V3_MEM_CORE_ANY)) {
		return reg;
	    }
	    n = n->rb_right;
	}
    }

    // There is no registered region, so we check if it's a valid address in the base region
    if (guest_addr >= vm->mem_map.base_region.guest_end) {
	PrintError("%s: Guest Address Exceeds Base Memory Size (ga=%p), (limit=%p)\n",
		   __FUNCTION__, (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end);
	v3_print_mem_map(vm);
	return NULL;
    }

    return &(vm->mem_map.base_region);
}
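
/* Remove a region from the map and free it. Translations cached for the
 * range are invalidated on every core before the erase, so no core is left
 * resolving addresses through the stale mapping.
 */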
void v3_delete_mem_region(struct v3_vm_info * vm, struct v3_mem_region * reg) {
    int i = 0;

    for (i = 0; i < vm->num_cores; i++) {
	struct guest_info * info = &(vm->cores[i]);

	// flush virtual page tables
	// 3 cases: shadow, shadow passthrough, and nested
	if (info->shdw_pg_mode == SHADOW_PAGING) {
	    v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);

	    if (mem_mode == PHYSICAL_MEM) {
		addr_t cur_addr;

		for (cur_addr = reg->guest_start;
		     cur_addr < reg->guest_end;
		     cur_addr += PAGE_SIZE_4KB) {
		    v3_invalidate_passthrough_addr(info, cur_addr);
		}
	    } else {
		v3_invalidate_shadow_pts(info);
	    }
	} else if (info->shdw_pg_mode == NESTED_PAGING) {
	    addr_t cur_addr;

	    for (cur_addr = reg->guest_start;
		 cur_addr < reg->guest_end;
		 cur_addr += PAGE_SIZE_4KB) {
		v3_invalidate_nested_addr(info, cur_addr);
	    }
	}
    }

    v3_rb_erase(&(reg->tree_node), &(vm->mem_map.mem_regions));

    V3_Free(reg);
}
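/* Dump the memory layout for debugging: the base region first, then each
 * registered region in guest-address order.
 */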
void v3_print_mem_map(struct v3_vm_info * vm) {
    struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
    struct v3_mem_region * reg = &(vm->mem_map.base_region);
    int i = 0;

    V3_Print("Memory Layout (all cores):\n");

    V3_Print("Base Region (all cores):  0x%p - 0x%p -> 0x%p\n",
	     (void *)(reg->guest_start),
	     (void *)(reg->guest_end - 1),
	     (void *)(reg->host_addr));

    // If the memory map is empty, don't print it
    if (node == NULL) {
	return;
    }

    do {
	reg = rb_entry(node, struct v3_mem_region, tree_node);

	V3_Print("%d:  0x%p - 0x%p -> 0x%p\n", i,
		 (void *)(reg->guest_start),
		 (void *)(reg->guest_end - 1),
		 (void *)(reg->host_addr));

	V3_Print("\t(flags=0x%x) (core=0x%x) (unhandled = 0x%p)\n",
		 reg->flags.value,
		 reg->core_id,
		 reg->unhandled);

	i++;
    } while ((node = v3_rb_next(node)));
}
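
/* Example output (all values illustrative, not from a real run):
 *
 *   Memory Layout (all cores):
 *   Base Region (all cores):  0x0 - 0x7ffffff -> 0x100000000
 *   0:  0xa0000 - 0xa0fff -> 0x180000000
 *       (flags=0xf) (core=0xffff) (unhandled = 0x0)
 *
 * Note the guest ranges are printed with an inclusive end (guest_end - 1).
 */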