/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
#include <palacios/vmm_mem.h>
#include <palacios/vmm.h>
#include <palacios/vmm_util.h>
#include <palacios/vmm_emulator.h>
#include <palacios/vm_guest.h>

#include <palacios/vmm_shadow_paging.h>
#include <palacios/vmm_direct_paging.h>
static int mem_offset_hypercall(struct guest_info * info, uint_t hcall_id, void * private_data) {
    PrintDebug("V3Vee: Memory offset hypercall (offset=%p)\n",
               (void *)(info->vm_info->mem_map.base_region.host_addr));

    info->vm_regs.rbx = info->vm_info->mem_map.base_region.host_addr;

    return 0;
}
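/* Note (illustrative, not part of the original source): the value returned in RBX
 * is the host physical address that backs guest physical address 0, i.e. the base
 * region set up in v3_init_mem_map() below. Because guest memory is laid out as a
 * single contiguous base region, a cooperating guest can locate any of its own
 * pages in host memory as:
 *
 *     host_pa = offset_from_hypercall + guest_pa
 */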
static int unhandled_err(struct guest_info * core, addr_t guest_va, addr_t guest_pa,
                         struct v3_mem_region * reg, pf_error_t access_info) {

    PrintError("Unhandled memory access error\n");

    v3_print_mem_map(core->vm_info);

    v3_print_guest_state(core);

    return -1;
}
int v3_init_mem_map(struct v3_vm_info * vm) {
    struct v3_mem_map * map = &(vm->mem_map);
    addr_t mem_pages = vm->mem_size >> 12;

    memset(&(map->base_region), 0, sizeof(struct v3_mem_region));

    map->mem_regions.rb_node = NULL;

    // There is an underlying region that contains all of the guest memory
    // PrintDebug("Mapping %d pages of memory (%u bytes)\n", (int)mem_pages, (uint_t)vm->mem_size);

    // 2MB page alignment needed for 2MB hardware nested paging
    map->base_region.guest_start = 0;
    map->base_region.guest_end = mem_pages * PAGE_SIZE_4KB;

#ifdef CONFIG_ALIGNED_PG_ALLOC
    map->base_region.host_addr = (addr_t)V3_AllocAlignedPages(mem_pages, vm->mem_align);
#else
    map->base_region.host_addr = (addr_t)V3_AllocPages(mem_pages);
#endif

    map->base_region.flags.read = 1;
    map->base_region.flags.write = 1;
    map->base_region.flags.exec = 1;
    map->base_region.flags.base = 1;
    map->base_region.flags.alloced = 1;

    map->base_region.unhandled = unhandled_err;

    if ((void *)map->base_region.host_addr == NULL) {
        PrintError("Could not allocate Guest memory\n");
        return -1;
    }

    //memset(V3_VAddr((void *)map->base_region.host_addr), 0xffffffff, map->base_region.guest_end);

    v3_register_hypercall(vm, MEM_OFFSET_HCALL, mem_offset_hypercall, NULL);

    return 0;
}
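/* Illustrative sketch (not part of the original source): why the aligned
 * allocation above matters. When CONFIG_ALIGNED_PG_ALLOC is enabled, host_addr is
 * aligned to vm->mem_align (2MB for 2MB hardware nested paging), so a 2MB-aligned
 * guest physical page also lands on a 2MB-aligned host physical page and can be
 * backed by a single large nested page table entry. The helper name below is
 * hypothetical.
 */
#if 0
static int example_guest_page_is_2mb_mappable(struct v3_vm_info * vm, addr_t guest_pa) {
    // Guest memory is mapped linearly starting at the base region's host_addr
    addr_t host_pa = vm->mem_map.base_region.host_addr + guest_pa;

    // A 2MB nested mapping needs both the guest page and the backing host page
    // to be 2MB aligned
    return ((PAGE_OFFSET_2MB(guest_pa) == 0) && (PAGE_OFFSET_2MB(host_pa) == 0));
}
#endif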
void v3_delete_mem_map(struct v3_vm_info * vm) {
    struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
    struct v3_mem_region * reg;
    struct rb_node * tmp_node = NULL;

    // Delete every registered region; grab the successor before the node goes away
    while (node) {
        reg = rb_entry(node, struct v3_mem_region, tree_node);
        tmp_node = node;
        node = v3_rb_next(node);

        v3_delete_mem_region(vm, reg);
    }

    V3_FreePage((void *)(vm->mem_map.base_region.host_addr));
}
struct v3_mem_region * v3_create_mem_region(struct v3_vm_info * vm, uint16_t core_id,
                                            addr_t guest_addr_start, addr_t guest_addr_end) {

    struct v3_mem_region * entry = (struct v3_mem_region *)V3_Malloc(sizeof(struct v3_mem_region));
    memset(entry, 0, sizeof(struct v3_mem_region));

    entry->guest_start = guest_addr_start;
    entry->guest_end = guest_addr_end;
    entry->core_id = core_id;
    entry->unhandled = unhandled_err;

    return entry;
}
int v3_add_shadow_mem( struct v3_vm_info * vm, uint16_t core_id,
                       addr_t guest_addr_start,
                       addr_t guest_addr_end,
                       addr_t host_addr)
{
    struct v3_mem_region * entry = NULL;

    entry = v3_create_mem_region(vm, core_id,
                                 guest_addr_start,
                                 guest_addr_end);

    entry->host_addr = host_addr;

    entry->flags.read = 1;
    entry->flags.write = 1;
    entry->flags.exec = 1;
    entry->flags.alloced = 1;

    if (v3_insert_mem_region(vm, entry) == -1) {
        V3_Free(entry);
        return -1;
    }

    return 0;
}
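/* Illustrative sketch (not part of the original source): backing a guest physical
 * range with freshly allocated host pages via v3_add_shadow_mem(). The guest
 * address range, page count, and helper name are hypothetical.
 */
#if 0
static int example_add_backed_region(struct v3_vm_info * vm) {
    addr_t host_pages = (addr_t)V3_AllocPages(16); // 16 x 4KB of host memory

    if ((void *)host_pages == NULL) {
        return -1;
    }

    // Map guest physical [0xfed00000, 0xfed10000) onto the new host pages for all cores
    if (v3_add_shadow_mem(vm, V3_MEM_CORE_ANY, 0xfed00000, 0xfed10000, host_pages) == -1) {
        return -1;
    }

    return 0;
}
#endif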
struct v3_mem_region * __insert_mem_region(struct v3_vm_info * vm,
                                           struct v3_mem_region * region) {
    struct rb_node ** p = &(vm->mem_map.mem_regions.rb_node);
    struct rb_node * parent = NULL;
    struct v3_mem_region * tmp_region;

    // Standard red-black tree insertion: walk down to the insertion point
    while (*p) {
        parent = *p;
        tmp_region = rb_entry(parent, struct v3_mem_region, tree_node);

        if (region->guest_end <= tmp_region->guest_start) {
            p = &(*p)->rb_left;
        } else if (region->guest_start >= tmp_region->guest_end) {
            p = &(*p)->rb_right;
        } else {
            // The new region overlaps an existing one
            if ((region->guest_end != tmp_region->guest_end) ||
                (region->guest_start != tmp_region->guest_start)) {
                PrintError("Trying to map a partially overlapping core specific page...\n");
                return tmp_region; // This is ugly...
            } else if (region->core_id == tmp_region->core_id) {
                return tmp_region;
            } else if (region->core_id < tmp_region->core_id) {
                p = &(*p)->rb_left;
            } else {
                p = &(*p)->rb_right;
            }
        }
    }

    rb_link_node(&(region->tree_node), parent, p);

    return NULL;
}
int v3_insert_mem_region(struct v3_vm_info * vm, struct v3_mem_region * region) {
    struct v3_mem_region * ret;
    int i = 0;

    if ((ret = __insert_mem_region(vm, region))) {
        return -1;
    }

    v3_rb_insert_color(&(region->tree_node), &(vm->mem_map.mem_regions));

    for (i = 0; i < vm->num_cores; i++) {
        struct guest_info * info = &(vm->cores[i]);

        // flush virtual page tables
        // 3 cases: shadow, shadow passthrough, and nested
        if (info->shdw_pg_mode == SHADOW_PAGING) {
            v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);

            if (mem_mode == PHYSICAL_MEM) {
                addr_t cur_addr;

                for (cur_addr = region->guest_start;
                     cur_addr < region->guest_end;
                     cur_addr += PAGE_SIZE_4KB) {
                    v3_invalidate_passthrough_addr(info, cur_addr);
                }
            } else {
                v3_invalidate_shadow_pts(info);
            }
        } else if (info->shdw_pg_mode == NESTED_PAGING) {
            addr_t cur_addr;

            for (cur_addr = region->guest_start;
                 cur_addr < region->guest_end;
                 cur_addr += PAGE_SIZE_4KB) {

                v3_invalidate_nested_addr(info, cur_addr);
            }
        }
    }

    return 0;
}
struct v3_mem_region * v3_get_mem_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
    struct rb_node * n = vm->mem_map.mem_regions.rb_node;
    struct v3_mem_region * reg = NULL;

    while (n) {
        reg = rb_entry(n, struct v3_mem_region, tree_node);

        if (guest_addr < reg->guest_start) {
            n = n->rb_left;
        } else if (guest_addr >= reg->guest_end) {
            n = n->rb_right;
        } else {
            if (reg->core_id == V3_MEM_CORE_ANY) {
                // found relevant region, it's available on all cores
                return reg;
            } else if (core_id == reg->core_id) {
                // found relevant region, it's available on the indicated core
                return reg;
            } else if (core_id < reg->core_id) {
                // go left, core too big
                n = n->rb_left;
            } else if (core_id > reg->core_id) {
                // go right, core too small
                n = n->rb_right;
            } else {
                PrintDebug("v3_get_mem_region: Impossible!\n");
                break;
            }
        }
    }

    // There is no registered region, so we check whether it is a valid address in the base region
    if (guest_addr >= vm->mem_map.base_region.guest_end) {
        PrintError("Guest Address Exceeds Base Memory Size (ga=0x%p), (limit=0x%p) (core=0x%x)\n",
                   (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end, core_id);
        v3_print_mem_map(vm);
        return NULL;
    }

    return &(vm->mem_map.base_region);
}
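/* Illustrative sketch (not part of the original source): translating a guest
 * physical address to the host physical address that backs it, using the region
 * lookup above. Regions map guest addresses linearly onto host_addr, mirroring
 * the base region layout in v3_init_mem_map(). The helper name is hypothetical.
 */
#if 0
static addr_t example_gpa_to_hpa(struct guest_info * core, addr_t guest_pa) {
    struct v3_mem_region * reg = v3_get_mem_region(core->vm_info, core->cpu_id, guest_pa);

    if ((reg == NULL) || (reg->flags.alloced == 0)) {
        return 0; // no backing host memory for this address
    }

    return reg->host_addr + (guest_pa - reg->guest_start);
}
#endif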
/* Given an address, find the successor region. If the address is within a region, return that
 * region. Input is an address, because the address may not have a region associated with it.
 *
 * Returns a region following or touching the given address. If the address is invalid, NULL is
 * returned; otherwise the base region is returned if no region exists at or after the given address.
 */
struct v3_mem_region * v3_get_next_mem_region( struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
    struct rb_node * current_n = vm->mem_map.mem_regions.rb_node;
    struct rb_node * successor_n = NULL; /* left-most node greater than guest_addr */
    struct v3_mem_region * current_r = NULL;

    /* current_n tries to find the region containing guest_addr, going right when smaller and left when
     * greater. Each time current_n becomes greater than guest_addr, update successor <- current_n.
     * current_n becomes successively closer to guest_addr than the previous time it was greater
     * than guest_addr.
     */

    /* | is address, ---- is region, + is intersection */
    while (current_n) {
        current_r = rb_entry(current_n, struct v3_mem_region, tree_node);

        if (current_r->guest_start > guest_addr) { /* | ---- */
            successor_n = current_n;
            current_n = current_n->rb_left;
        } else {
            if (current_r->guest_end > guest_addr) {
                return current_r; /* +--- or --+- */
            }
            current_n = current_n->rb_right; /* ---- | */
        }
    }

    /* Address does not have its own region. Check if it's a valid address in the base region */
    if (guest_addr >= vm->mem_map.base_region.guest_end) {
        PrintError("%s: Guest Address Exceeds Base Memory Size (ga=%p), (limit=%p)\n",
                   __FUNCTION__, (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end);
        v3_print_mem_map(vm);
        return NULL;
    }

    /* Return the left-most region starting past the address, if any; otherwise the base region */
    if (successor_n) {
        return rb_entry(successor_n, struct v3_mem_region, tree_node);
    }

    return &(vm->mem_map.base_region);
}
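/* Worked example (illustrative, not part of the original source): suppose the only
 * hooked region is [0xA0000, 0xC0000) and the base region covers 256MB of guest RAM.
 * Then:
 *
 *   v3_get_next_mem_region(vm, core, 0xB0000)    -> the [0xA0000, 0xC0000) region (address lies inside it)
 *   v3_get_next_mem_region(vm, core, 0x90000)    -> the [0xA0000, 0xC0000) region (first region past the address)
 *   v3_get_next_mem_region(vm, core, 0xD0000)    -> the base region (no region at or after the address)
 *   v3_get_next_mem_region(vm, core, 0x20000000) -> NULL (address is beyond the 256MB base region)
 */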
void v3_delete_mem_region(struct v3_vm_info * vm, struct v3_mem_region * reg) {
    int i = 0;

    if (reg == NULL) {
        return;
    }

    for (i = 0; i < vm->num_cores; i++) {
        struct guest_info * info = &(vm->cores[i]);

        // flush virtual page tables
        // 3 cases: shadow, shadow passthrough, and nested
        if (info->shdw_pg_mode == SHADOW_PAGING) {
            v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);

            if (mem_mode == PHYSICAL_MEM) {
                addr_t cur_addr;

                for (cur_addr = reg->guest_start;
                     cur_addr < reg->guest_end;
                     cur_addr += PAGE_SIZE_4KB) {
                    v3_invalidate_passthrough_addr(info, cur_addr);
                }
            } else {
                v3_invalidate_shadow_pts(info);
            }
        } else if (info->shdw_pg_mode == NESTED_PAGING) {
            addr_t cur_addr;

            for (cur_addr = reg->guest_start;
                 cur_addr < reg->guest_end;
                 cur_addr += PAGE_SIZE_4KB) {

                v3_invalidate_nested_addr(info, cur_addr);
            }
        }
    }

    v3_rb_erase(&(reg->tree_node), &(vm->mem_map.mem_regions));

    V3_Free(reg);

    // flush virtual page tables
    // 3 cases: shadow, shadow passthrough, and nested
}
// Determine if a given address can be handled by a large page of the requested size
uint32_t v3_get_max_page_size(struct guest_info * core, addr_t fault_addr, uint32_t req_size) {
    addr_t pg_start = 0UL, pg_end = 0UL; // large page containing the faulting address
    struct v3_mem_region * pg_next_reg = NULL; // next immediate mem reg after page start addr
    uint32_t page_size = PAGE_SIZE_4KB;

    /* If the guest has been configured for large pages, then we must check for hooked regions of
     * memory which may overlap with the large page containing the faulting address (due to
     * potentially differing access policies in place for e.g. i/o devices and APIC). A large page
     * can be used if a) no region overlaps the page [or b) a region does overlap but fully contains
     * the page]. The [bracketed] text pertains to the #if 0'd code below, state D. TODO modify this
     * note if someone decides to enable this optimization. It can be tested with the SeaStar
     * mapping.
     *
     * Examples: (CAPS regions are returned by v3_get_next_mem_region; state A returns the base reg)
     *
     * |region|    |region|                               2MiB mapped (state A)
     *                   |reg|             |REG|          2MiB mapped (state B)
     *   |region|   |reg|    |REG|  |region|  |reg|       4KiB mapped (state C)
     *      |reg|  |reg|   |--REGION---|                  [2MiB mapped (state D)]
     * |--------------------------------------------|     RAM
     *
     * |----|----|----|----|----|page|----|----|----|     2MB pages
     *                          >>>>>>>>>>>>>>>>>>>>      search space
     */

    // guest page maps to a host page + offset (so when we shift, it aligns with a host page)
    switch (req_size) {
        case PAGE_SIZE_4KB:
            return PAGE_SIZE_4KB;
        case PAGE_SIZE_2MB:
            pg_start = PAGE_ADDR_2MB(fault_addr);
            pg_end = (pg_start + PAGE_SIZE_2MB);
            break;
        case PAGE_SIZE_4MB:
            pg_start = PAGE_ADDR_4MB(fault_addr);
            pg_end = (pg_start + PAGE_SIZE_4MB);
            break;
        case PAGE_SIZE_1GB:
            pg_start = PAGE_ADDR_1GB(fault_addr);
            pg_end = (pg_start + PAGE_SIZE_1GB);
            break;
        default:
            PrintError("Invalid large page size requested.\n");
            return PAGE_SIZE_4KB;
    }

    //PrintDebug("%s: page [%p,%p) contains address\n", __FUNCTION__, (void *)pg_start, (void *)pg_end);

    pg_next_reg = v3_get_next_mem_region(core->vm_info, core->cpu_id, pg_start);

    if (pg_next_reg == NULL) {
        PrintError("%s: Error: address not in base region, %p\n", __FUNCTION__, (void *)fault_addr);
        return PAGE_SIZE_4KB;
    }

    if (pg_next_reg->flags.base == 1) {
        page_size = req_size; // State A
        //PrintDebug("%s: base region [%p,%p) contains page.\n", __FUNCTION__,
        //           (void *)pg_next_reg->guest_start, (void *)pg_next_reg->guest_end);
    } else {
#if 0   // State B/C and D optimization
        if ((pg_next_reg->guest_end >= pg_end) &&
            ((pg_next_reg->guest_start >= pg_end) || (pg_next_reg->guest_start <= pg_start))) {
            page_size = req_size;
        }

        PrintDebug("%s: region [%p,%p) %s partially overlap with page\n", __FUNCTION__,
                   (void *)pg_next_reg->guest_start, (void *)pg_next_reg->guest_end,
                   (page_size == req_size) ? "does not" : "does");
#else   // State B/C
        if (pg_next_reg->guest_start >= pg_end) {
            page_size = req_size;
        }

        PrintDebug("%s: region [%p,%p) %s overlap with page\n", __FUNCTION__,
                   (void *)pg_next_reg->guest_start, (void *)pg_next_reg->guest_end,
                   (page_size == req_size) ? "does not" : "does");
#endif
    }

    return page_size;
}
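/* Illustrative sketch (not part of the original source): how a nested page fault
 * handler might combine v3_get_max_page_size() above with v3_compute_page_alignment()
 * below, first checking that the 2MB page around the fault is free of hooked regions,
 * then checking that the backing host page is aligned well enough to actually use the
 * large mapping. The helper name is hypothetical.
 */
#if 0
static uint32_t example_pick_page_size(struct guest_info * core, addr_t fault_addr) {
    uint32_t pg_size = v3_get_max_page_size(core, fault_addr, PAGE_SIZE_2MB);

    if (pg_size == PAGE_SIZE_2MB) {
        struct v3_mem_region * reg = v3_get_mem_region(core->vm_info, core->cpu_id, fault_addr);
        addr_t host_pg = reg->host_addr + (PAGE_ADDR_2MB(fault_addr) - reg->guest_start);

        // The host page backing the 2MB guest page must itself be at least 2MB aligned
        if (v3_compute_page_alignment(host_pg) < PAGE_SIZE_2MB) {
            pg_size = PAGE_SIZE_4KB;
        }
    }

    return pg_size;
}
#endif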
// For an address on a page boundary, compute the actual alignment
// of the physical page it maps to
uint32_t v3_compute_page_alignment(addr_t page_addr)
{
    if (PAGE_OFFSET_1GB(page_addr) == 0) {
        return PAGE_SIZE_1GB;
    } else if (PAGE_OFFSET_4MB(page_addr) == 0) {
        return PAGE_SIZE_4MB;
    } else if (PAGE_OFFSET_2MB(page_addr) == 0) {
        return PAGE_SIZE_2MB;
    } else if (PAGE_OFFSET_4KB(page_addr) == 0) {
        return PAGE_SIZE_4KB;
    }

    PrintError("Non-page aligned address passed to %s.\n", __FUNCTION__);
    return 0;
}
void v3_print_mem_map(struct v3_vm_info * vm) {
    struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
    struct v3_mem_region * reg = &(vm->mem_map.base_region);
    int i = 0;

    V3_Print("Memory Layout (all cores):\n");

    V3_Print("Base Region (all cores):  0x%p - 0x%p -> 0x%p\n",
             (void *)(reg->guest_start),
             (void *)(reg->guest_end - 1),
             (void *)(reg->host_addr));

    // If the memory map is empty, don't print it
    if (node == NULL) {
        return;
    }

    do {
        reg = rb_entry(node, struct v3_mem_region, tree_node);

        V3_Print("%d:  0x%p - 0x%p -> 0x%p\n", i,
                 (void *)(reg->guest_start),
                 (void *)(reg->guest_end - 1),
                 (void *)(reg->host_addr));

        V3_Print("\t(flags=0x%x) (core=0x%x) (unhandled = 0x%p)\n",
                 reg->flags.value,
                 reg->core_id,
                 reg->unhandled);

        i++;
    } while ((node = v3_rb_next(node)));
}