/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
#include <palacios/vmm_mem.h>
#include <palacios/vmm.h>
#include <palacios/vmm_util.h>
#include <palacios/vmm_emulator.h>
#include <palacios/vm_guest.h>
#include <palacios/vmm_debug.h>

#include <palacios/vmm_shadow_paging.h>
#include <palacios/vmm_direct_paging.h>

#include <interfaces/vmm_numa.h>

#ifdef V3_CONFIG_SWAPPING
#include <palacios/vmm_swapping.h>
#endif
uint64_t v3_mem_block_size = V3_CONFIG_MEM_BLOCK_SIZE;
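
/* Guest physical memory is carved into an array of fixed-size base
 * regions, each v3_mem_block_size bytes.  v3_get_base_region() maps a
 * guest physical address to its base region by simple division: with an
 * (illustrative) 128 MB block size, gpa 0x18000000 (384 MB) falls in
 * block index 0x18000000 / 0x8000000 = 3.
 */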
struct v3_mem_region * v3_get_base_region(struct v3_vm_info * vm, addr_t gpa) {

    //PrintDebug(VM_NONE, VCORE_NONE, "get_base_region called");
    struct v3_mem_map * map = &(vm->mem_map);
    uint32_t block_index = gpa / v3_mem_block_size;
    struct v3_mem_region * reg;

    if ((gpa >= (map->num_base_regions * v3_mem_block_size)) ||
        (block_index >= map->num_base_regions)) {
        PrintError(vm, VCORE_NONE, "Guest Address Exceeds Base Memory Size (ga=0x%p), (limit=0x%p)\n",
                   (void *)gpa, (void *)vm->mem_size);
        return NULL;
    }

    reg = &(map->base_regions[block_index]);

#ifdef V3_CONFIG_SWAPPING
    if (vm->swap_state.enable_swapping) {
        if (reg->flags.swapped) {
            if (v3_swap_in_region(vm, reg)) {
                PrintError(vm, VCORE_NONE, "Unable to swap in region GPA=%p..%p!!!\n",
                           (void *)reg->guest_start, (void *)reg->guest_end);
                return NULL;
            }
        }
    }
    v3_touch_region(vm, reg);
#endif

    return reg;
}
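
/* Usage sketch for v3_get_base_region (variable names are illustrative):
 *
 *   struct v3_mem_region * reg = v3_get_base_region(vm, gpa);
 *   if (reg) {
 *       // host physical address backing the guest physical address
 *       addr_t hpa = reg->host_addr + (gpa - reg->guest_start);
 *   }
 */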
static int mem_offset_hypercall(struct guest_info * info, uint_t hcall_id, void * private_data) {
    // With base memory split across multiple regions there is no longer
    // a single host-address offset to report, so this hypercall now fails.
    /*
    PrintDebug(info->vm_info, info, "V3Vee: Memory offset hypercall (offset=%p)\n",
               (void *)(info->vm_info->mem_map.base_region.host_addr));

    info->vm_regs.rbx = info->vm_info->mem_map.base_region.host_addr;
    */
    return -1;
}
static int unhandled_err(struct guest_info * core, addr_t guest_va, addr_t guest_pa,
                         struct v3_mem_region * reg, pf_error_t access_info) {

    PrintError(core->vm_info, core, "Unhandled memory access error (gpa=%p, gva=%p, error_code=%d)\n",
               (void *)guest_pa, (void *)guest_va, *(uint32_t *)&access_info);

    v3_print_mem_map(core->vm_info);

    v3_print_guest_state(core);

    return -1;
}
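
/* Looks up the NUMA node for a guest physical address in the VM's XML
 * configuration.  The expected layout has this shape (values are
 * illustrative; all three attributes are required):
 *
 *   <mem_layout>
 *       <region start_addr="0x0" end_addr="0x40000000" node="0"/>
 *       <region start_addr="0x40000000" end_addr="0x80000000" node="1"/>
 *   </mem_layout>
 *
 * Returns the node id, or -1 if the layout is missing or malformed.
 */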
static int gpa_to_node_from_cfg(struct v3_vm_info * vm, addr_t gpa) {
    v3_cfg_tree_t * layout_cfg = v3_cfg_subtree(vm->cfg_data->cfg, "mem_layout");
    v3_cfg_tree_t * region_desc = v3_cfg_subtree(layout_cfg, "region");

    while (region_desc) {
        char * start_addr_str = v3_cfg_val(region_desc, "start_addr");
        char * end_addr_str = v3_cfg_val(region_desc, "end_addr");
        char * node_id_str = v3_cfg_val(region_desc, "node");

        addr_t start_addr = 0;
        addr_t end_addr = 0;
        int node_id = 0;

        if ((!start_addr_str) || (!end_addr_str) || (!node_id_str)) {
            PrintError(vm, VCORE_NONE, "Invalid memory layout in configuration\n");
            return -1;
        }

        start_addr = atox(start_addr_str);
        end_addr = atox(end_addr_str);
        node_id = atoi(node_id_str);

        if ((gpa >= start_addr) && (gpa < end_addr)) {
            return node_id;
        }

        region_desc = v3_cfg_next_branch(region_desc);
    }

    return -1;
}
// This code parallels that in vmm_shadow_paging.c:v3_init_shdw_impl()
// and vmm_config.c:determine_paging_mode.  Which paging mode will be
// used is decided much later than the allocation of the guest memory
// regions, so we have to predict it here to decide whether the regions
// need to be below 4 GB or not.
static int will_use_shadow_paging(struct v3_vm_info * vm)
{
    v3_cfg_tree_t * pg_cfg = v3_cfg_subtree(vm->cfg_data->cfg, "paging");
    char * pg_mode = v3_cfg_val(pg_cfg, "mode");

    if (pg_mode == NULL) {
        return 1; // did not ask, get shadow
    }

    if (strcasecmp(pg_mode, "nested") == 0) {
        extern v3_cpu_arch_t v3_mach_type;
        if ((v3_mach_type == V3_SVM_REV3_CPU) ||
            (v3_mach_type == V3_VMX_EPT_CPU) ||
            (v3_mach_type == V3_VMX_EPT_UG_CPU)) {
            return 0; // asked for nested, get nested
        } else {
            return 1; // asked for nested, get shadow
        }
    } else if (strcasecmp(pg_mode, "shadow") != 0) {
        return 1; // asked for something else, get shadow
    } else {
        return 1; // asked for shadow, get shadow
    }
}
#define CEIL_DIV(x,y) (((x)/(y)) + !!((x)%(y)))
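
// Integer division rounded up, e.g. CEIL_DIV(5,2) == 3 while
// CEIL_DIV(4,2) == 2; used to count how many fixed-size blocks are
// needed to cover the guest's memory.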
int v3_init_mem_map(struct v3_vm_info * vm) {
    struct v3_mem_map * map = &(vm->mem_map);
    addr_t block_pages = v3_mem_block_size >> 12;
    int i = 0;
    uint64_t num_base_regions_host_mem;

    map->num_base_regions = CEIL_DIV(vm->mem_size, v3_mem_block_size);

    num_base_regions_host_mem = map->num_base_regions; // without swapping

    PrintDebug(VM_NONE, VCORE_NONE, "v3_init_mem_map: num_base_regions:%d\n", map->num_base_regions);

    map->mem_regions.rb_node = NULL;

#ifdef V3_CONFIG_SWAPPING
    if (vm->swap_state.enable_swapping) {
        num_base_regions_host_mem = CEIL_DIV(vm->swap_state.host_mem_size, v3_mem_block_size);
    }
#endif

    PrintDebug(VM_NONE, VCORE_NONE, "v3_init_mem_map: %llu of %llu guest base regions will be allocated in host memory\n",
               (uint64_t)num_base_regions_host_mem, (uint64_t)map->num_base_regions);

    map->base_regions = V3_VMalloc(sizeof(struct v3_mem_region) * map->num_base_regions);
    if (map->base_regions == NULL) {
        PrintError(vm, VCORE_NONE, "Could not allocate base region array\n");
        return -1;
    }

    memset(map->base_regions, 0, sizeof(struct v3_mem_region) * map->num_base_regions);

    for (i = 0; i < map->num_base_regions; i++) {
        struct v3_mem_region * region = &(map->base_regions[i]);
        int node_id = -1;

        // 2MB page alignment needed for 2MB hardware nested paging
        // If swapping is enabled, the host memory will be allocated to low address regions at initialization
        region->guest_start = v3_mem_block_size * i;
        region->guest_end = region->guest_start + v3_mem_block_size;

        // We assume that the xml config was smart enough to align the layout to the block size.
        // If it didn't, we're going to ignore its settings
        // and use whatever node the first byte of the block is assigned to.
        node_id = gpa_to_node_from_cfg(vm, region->guest_start);

        if (i < num_base_regions_host_mem) {
            // The regions within num_base_regions_host_mem are allocated in host memory
            V3_Print(vm, VCORE_NONE, "Allocating block %d on node %d\n", i, node_id);

#ifdef V3_CONFIG_SWAPPING
            // nothing to do - memset will have done it.
#endif
            region->host_addr = (addr_t)V3_AllocPagesExtended(block_pages,
                                                              PAGE_SIZE_4KB,
                                                              node_id,
                                                              vm->resource_control.pg_filter_func,
                                                              vm->resource_control.pg_filter_state);

            if ((void *)region->host_addr == NULL) {
                PrintError(vm, VCORE_NONE, "Could not allocate guest memory\n");
                return -1;
            }

            // Clear the memory...
            memset(V3_VAddr((void *)region->host_addr), 0, v3_mem_block_size);

        } else {

#ifdef V3_CONFIG_SWAPPING
            if (vm->swap_state.enable_swapping) {
                // The regions beyond num_base_regions_host_mem start out swapped to disk
                region->flags.swapped = 1;
                region->host_addr = (addr_t)0;
                // other flags / state correctly set up by zeroing the region earlier
            }
#endif

        }

        // Note the assigned numa ID could be different from our request...
        // Also note that when swapping is used, the numa info will
        // reflect the numa id of address 0x0 for unallocated regions
        region->numa_id = v3_numa_hpa_to_node(region->host_addr);

        region->flags.read = 1;
        region->flags.write = 1;
        region->flags.exec = 1;
        region->flags.base = 1;
        region->flags.alloced = 1;
        region->flags.limit32 = will_use_shadow_paging(vm);

        region->unhandled = unhandled_err;
    }

    v3_register_hypercall(vm, MEM_OFFSET_HCALL, mem_offset_hypercall, NULL);

    return 0;
}
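
/* Tears down the memory map: deletes every registered sub region from
 * the red-black tree, frees the host pages backing each base region
 * (skipping regions currently swapped out to disk), and releases the
 * base region array itself.
 */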
void v3_delete_mem_map(struct v3_vm_info * vm) {
    struct v3_mem_map * map = &(vm->mem_map);
    struct rb_node * node = v3_rb_first(&(map->mem_regions));
    struct v3_mem_region * reg;
    struct rb_node * tmp_node = NULL;
    addr_t block_pages = v3_mem_block_size >> 12;
    int i = 0;

    while (node) {
        reg = rb_entry(node, struct v3_mem_region, tree_node);
        tmp_node = node;
        node = v3_rb_next(node);

        v3_delete_mem_region(vm, reg);
    }

    for (i = 0; i < map->num_base_regions; i++) {
        struct v3_mem_region * region = &(map->base_regions[i]);
#ifdef V3_CONFIG_SWAPPING
        if (vm->swap_state.enable_swapping) {
            if (!region->flags.swapped) {
                V3_FreePages((void *)(region->host_addr), block_pages);
            } // otherwise this is not allocated space
        } else {
            // swapping compiled in but disabled: every region was allocated
            V3_FreePages((void *)(region->host_addr), block_pages);
        }
#else
        V3_FreePages((void *)(region->host_addr), block_pages);
#endif
    }

    V3_VFree(map->base_regions);
}
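
/* Allocates and initializes a zeroed memory region covering
 * [guest_addr_start, guest_addr_end); the caller still has to set
 * host_addr and the flags, and insert the region into the map.
 */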
struct v3_mem_region * v3_create_mem_region(struct v3_vm_info * vm, uint16_t core_id,
                                            addr_t guest_addr_start, addr_t guest_addr_end) {
    struct v3_mem_region * entry = NULL;

    if (guest_addr_start >= guest_addr_end) {
        PrintError(vm, VCORE_NONE, "Region start is after region end\n");
        return NULL;
    }

    entry = (struct v3_mem_region *)V3_Malloc(sizeof(struct v3_mem_region));

    if (entry == NULL) {
        PrintError(vm, VCORE_NONE, "Cannot allocate in creating a memory region\n");
        return NULL;
    }

    memset(entry, 0, sizeof(struct v3_mem_region));

    entry->guest_start = guest_addr_start;
    entry->guest_end = guest_addr_end;
    entry->core_id = core_id;
    entry->unhandled = unhandled_err;

    return entry;
}
int v3_add_shadow_mem(struct v3_vm_info * vm, uint16_t core_id,
                      addr_t guest_addr_start,
                      addr_t guest_addr_end,
                      addr_t host_addr)
{
    struct v3_mem_region * entry = NULL;

    entry = v3_create_mem_region(vm, core_id,
                                 guest_addr_start,
                                 guest_addr_end);

    if (entry == NULL) {
        return -1;
    }

    entry->host_addr = host_addr;

    entry->flags.read = 1;
    entry->flags.write = 1;
    entry->flags.exec = 1;
    entry->flags.alloced = 1;

    if (v3_insert_mem_region(vm, entry) == -1) {
        V3_Free(entry);
        return -1;
    }

    return 0;
}
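
/* Usage sketch for v3_add_shadow_mem (addresses are illustrative only):
 * map one page of guest physical space, visible to all cores, onto host
 * memory at host_page_pa:
 *
 *   if (v3_add_shadow_mem(vm, V3_MEM_CORE_ANY, 0xc0000000,
 *                         0xc0000000 + PAGE_SIZE_4KB, host_page_pa) == -1) {
 *       PrintError(vm, VCORE_NONE, "Could not add shadow region\n");
 *   }
 */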
static struct v3_mem_region * __insert_mem_region(struct v3_vm_info * vm,
                                                  struct v3_mem_region * region) {
    struct rb_node ** p = &(vm->mem_map.mem_regions.rb_node);
    struct rb_node * parent = NULL;
    struct v3_mem_region * tmp_region;

    while (*p) {
        parent = *p;
        tmp_region = rb_entry(parent, struct v3_mem_region, tree_node);

        if (region->guest_end <= tmp_region->guest_start) {
            p = &(*p)->rb_left;
        } else if (region->guest_start >= tmp_region->guest_end) {
            p = &(*p)->rb_right;
        } else {
            if ((region->guest_end != tmp_region->guest_end) ||
                (region->guest_start != tmp_region->guest_start)) {
                PrintError(vm, VCORE_NONE, "Trying to map a partially overlapping core-specific page...\n");
                return tmp_region; // This is ugly...
            } else if (region->core_id == tmp_region->core_id) {
                PrintError(vm, VCORE_NONE, "Trying to map a core-overlapping page\n");
                return tmp_region;
            } else if (region->core_id < tmp_region->core_id) {
                p = &(*p)->rb_left;
            } else {
                p = &(*p)->rb_right;
            }
        }
    }

    rb_link_node(&(region->tree_node), parent, p);

    return NULL;
}
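
/* Inserts a region into the map and flushes any cached translations
 * that could now be stale.  Regions are ordered first by guest address
 * range and then by core_id, so a per-core region can coexist with a
 * V3_MEM_CORE_ANY region covering the same range.
 */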
int v3_insert_mem_region(struct v3_vm_info * vm, struct v3_mem_region * region) {
    struct v3_mem_region * ret;
    int i = 0;
    int rc = 0;

    if ((ret = __insert_mem_region(vm, region))) {
        PrintError(vm, VCORE_NONE, "Internal insert failed; returned region is from 0x%p to 0x%p on vcore %d\n",
                   (void *)(ret->guest_start), (void *)(ret->guest_end), ret->core_id);
        return -1;
    }

    v3_rb_insert_color(&(region->tree_node), &(vm->mem_map.mem_regions));

    for (i = 0; i < vm->num_cores; i++) {
        struct guest_info * info = &(vm->cores[i]);

        // flush virtual page tables
        // 3 cases: shadow, shadow passthrough, and nested

        if (info->shdw_pg_mode == SHADOW_PAGING) {
            v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);

            if (mem_mode == PHYSICAL_MEM) {
                rc |= v3_invalidate_passthrough_addr_range(info, region->guest_start, region->guest_end - 1, NULL, NULL);
            } else {
                rc |= v3_invalidate_shadow_pts(info);
            }

        } else if (info->shdw_pg_mode == NESTED_PAGING) {
            rc |= v3_invalidate_nested_addr_range(info, region->guest_start, region->guest_end - 1, NULL, NULL);
        }
    }

    return rc;
}
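
/* Looks up the region containing guest_addr for the given core.  The
 * tree search keys on the address range first and then on core_id, so
 * both a core-specific region and a V3_MEM_CORE_ANY region over the
 * same range are found correctly.  Falls back to the base regions if
 * no registered region matches.
 */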
struct v3_mem_region * v3_get_mem_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
    struct rb_node * n = vm->mem_map.mem_regions.rb_node;
    struct v3_mem_region * reg = NULL;

    while (n) {

        reg = rb_entry(n, struct v3_mem_region, tree_node);

        if (guest_addr < reg->guest_start) {
            n = n->rb_left;
        } else if (guest_addr >= reg->guest_end) {
            n = n->rb_right;
        } else {
            if (reg->core_id == V3_MEM_CORE_ANY) {
                // found relevant region, it's available on all cores
                return reg;
            } else if (core_id == reg->core_id) {
                // found relevant region, it's available on the indicated core
                return reg;
            } else if (core_id < reg->core_id) {
                // go left, core too big
                n = n->rb_left;
            } else if (core_id > reg->core_id) {
                // go right, core too small
                n = n->rb_right;
            } else {
                PrintDebug(vm, VCORE_NONE, "v3_get_mem_region: Impossible!\n");
                return NULL;
            }
        }
    }

    // There is no registered region, so we check whether it is a valid address in the base regions
    return v3_get_base_region(vm, guest_addr);
}
/* This returns the next memory region based on a given address.
 * If the address falls inside a sub region, that region is returned.
 * If the address falls outside a sub region, the next sub region is returned.
 * NOTE that we have to be careful about core_ids here...
 */
static struct v3_mem_region * get_next_mem_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
    struct rb_node * n = vm->mem_map.mem_regions.rb_node;
    struct v3_mem_region * reg = NULL;
    struct v3_mem_region * parent = NULL;

    while (n) {

        reg = rb_entry(n, struct v3_mem_region, tree_node);

        if (guest_addr < reg->guest_start) {
            n = n->rb_left;
        } else if (guest_addr >= reg->guest_end) {
            n = n->rb_right;
        } else {
            if (reg->core_id == V3_MEM_CORE_ANY) {
                // found relevant region, it's available on all cores
                return reg;
            } else if (core_id == reg->core_id) {
                // found relevant region, it's available on the indicated core
                return reg;
            } else if (core_id < reg->core_id) {
                // go left, core too big
                n = n->rb_left;
            } else if (core_id > reg->core_id) {
                // go right, core too small
                n = n->rb_right;
            } else {
                PrintError(vm, VCORE_NONE, "get_next_mem_region: Impossible!\n");
                return NULL;
            }
        }

        // remember the last region usable by this core that we passed
        if ((reg->core_id == core_id) || (reg->core_id == V3_MEM_CORE_ANY)) {
            parent = reg;
        }
    }

    if (parent == NULL) {
        // no region usable by this core was encountered
        return NULL;
    }

    if (parent->guest_start > guest_addr) {
        return parent;
    } else if (parent->guest_end < guest_addr) {
        struct rb_node * node = &(parent->tree_node);

        while ((node = v3_rb_next(node)) != NULL) {
            struct v3_mem_region * next_reg = rb_entry(node, struct v3_mem_region, tree_node);

            if ((next_reg->core_id == V3_MEM_CORE_ANY) ||
                (next_reg->core_id == core_id)) {

                // This check is not strictly necessary, but it makes it clearer
                if (next_reg->guest_start > guest_addr) {
                    return next_reg;
                }
            }
        }
    }

    return NULL;
}
/* Given an address range, check whether any regions overlap with it.
 * The range must lie within a single region; that region (either the
 * base region or a sub region) is returned if so.
 * If multiple regions fall within the range, NULL is returned.
 */
static struct v3_mem_region * get_overlapping_region(struct v3_vm_info * vm, uint16_t core_id,
                                                     addr_t start_gpa, addr_t end_gpa) {
    struct v3_mem_region * start_region = v3_get_mem_region(vm, core_id, start_gpa);

    if (start_region == NULL) {
        PrintError(vm, VCORE_NONE, "No overlapping region for core=%d, start_gpa=%p\n", core_id, (void *)start_gpa);
        v3_print_mem_map(vm);
        return NULL;
    }

    if (start_region->guest_end < end_gpa) {
        // Region ends before the end of the range
        return NULL;
    } else if (start_region->flags.base == 0) {
        // sub region overlaps range
        return start_region;
    } else {
        // Base region, now we have to scan forward for the next sub region
        struct v3_mem_region * next_reg = get_next_mem_region(vm, core_id, start_gpa);

        if (next_reg == NULL) {
            // no sub regions after start_gpa, base region is ok
            return start_region;
        } else if (next_reg->guest_start >= end_gpa) {
            // Next sub region begins outside the range
            return start_region;
        } else {
            return NULL;
        }
    }

    // Should never get here
    return NULL;
}
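
/* Removes a region from the map, invalidates any translations built
 * from it on every core, and frees it.  Safe to call with NULL.
 */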
void v3_delete_mem_region(struct v3_vm_info * vm, struct v3_mem_region * reg) {
    int i = 0;
    int rc = 0;

    if (reg == NULL) {
        return;
    }

    v3_rb_erase(&(reg->tree_node), &(vm->mem_map.mem_regions));

    // If the guest isn't running then there shouldn't be anything to invalidate.
    // Page tables should __always__ be created on demand during execution
    // NOTE: This is a sanity check, and can be removed if that assumption changes
    if (vm->run_state != VM_RUNNING) {
        V3_Free(reg);
        return;
    }

    for (i = 0; i < vm->num_cores; i++) {
        struct guest_info * info = &(vm->cores[i]);

        // flush virtual page tables
        // 3 cases: shadow, shadow passthrough, and nested
        if (info->shdw_pg_mode == SHADOW_PAGING) {
            v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);

            if (mem_mode == PHYSICAL_MEM) {
                rc |= v3_invalidate_passthrough_addr_range(info, reg->guest_start, reg->guest_end - 1, NULL, NULL);
            } else {
                rc |= v3_invalidate_shadow_pts(info);
            }
        } else if (info->shdw_pg_mode == NESTED_PAGING) {
            rc |= v3_invalidate_nested_addr_range(info, reg->guest_start, reg->guest_end - 1, NULL, NULL);
        }
    }

    V3_Free(reg);

    if (rc) { PrintError(vm, VCORE_NONE, "Error in deleting memory region\n"); }
}
// Determine if a given address can be handled by a large page of the requested size
uint32_t v3_get_max_page_size(struct guest_info * core, addr_t page_addr, v3_cpu_mode_t mode) {
    addr_t pg_start = 0;
    addr_t pg_end = 0;
    uint32_t page_size = PAGE_SIZE_4KB;
    struct v3_mem_region * reg = NULL;

    switch (mode) {
        case PROTECTED:
            if (core->use_large_pages == 1) {
                pg_start = PAGE_ADDR_4MB(page_addr);
                pg_end = (pg_start + PAGE_SIZE_4MB);
                reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);

                if ((reg) && ((reg->host_addr % PAGE_SIZE_4MB) == 0)) {
                    page_size = PAGE_SIZE_4MB;
                }
            }
            break;
        case PROTECTED_PAE:
            if (core->use_large_pages == 1) {
                pg_start = PAGE_ADDR_2MB(page_addr);
                pg_end = (pg_start + PAGE_SIZE_2MB);
                reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);

                if ((reg) && ((reg->host_addr % PAGE_SIZE_2MB) == 0)) {
                    page_size = PAGE_SIZE_2MB;
                }
            }
            break;
        case LONG:
        case LONG_32_COMPAT:
        case LONG_16_COMPAT:
            if (core->use_giant_pages == 1) {
                pg_start = PAGE_ADDR_1GB(page_addr);
                pg_end = (pg_start + PAGE_SIZE_1GB);
                reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);

                if ((reg) && ((reg->host_addr % PAGE_SIZE_1GB) == 0)) {
                    page_size = PAGE_SIZE_1GB;
                    break;
                }
            }

            if (core->use_large_pages == 1) {
                pg_start = PAGE_ADDR_2MB(page_addr);
                pg_end = (pg_start + PAGE_SIZE_2MB);
                reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);

                if ((reg) && ((reg->host_addr % PAGE_SIZE_2MB) == 0)) {
                    page_size = PAGE_SIZE_2MB;
                }
            }
            break;
        default:
            PrintError(core->vm_info, core, "Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
            return -1;
    }

    return page_size;
}
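
/* Example for v3_get_max_page_size (illustrative): in PROTECTED mode
 * with use_large_pages set, an access at 0x00432000 is checked against
 * the 4 MB page spanning 0x00400000 - 0x007fffff.  If that whole span
 * maps through a single region whose host_addr is 4 MB aligned, the
 * caller may use a 4 MB page; otherwise it falls back to PAGE_SIZE_4KB.
 */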
void v3_print_mem_map(struct v3_vm_info * vm) {
    struct v3_mem_map * map = &(vm->mem_map);
    struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
    struct v3_mem_region * reg = NULL;
    int i = 0;

    V3_Print(vm, VCORE_NONE, "Memory Layout (all cores):\n");

    V3_Print(vm, VCORE_NONE, "Base Memory: (%d regions)\n", map->num_base_regions);

    for (i = 0; i < map->num_base_regions; i++) {
        reg = &(map->base_regions[i]);

        V3_Print(vm, VCORE_NONE, "Base Region[%d] (all cores): 0x%p - 0x%p -> 0x%p\n",
                 i,
                 (void *)(reg->guest_start),
                 (void *)(reg->guest_end - 1),
                 (void *)(reg->host_addr));
    }

    // If the memory map is empty, don't print it
    if (node == NULL) {
        return;
    }

    do {
        reg = rb_entry(node, struct v3_mem_region, tree_node);

        V3_Print(vm, VCORE_NONE, "%d: 0x%p - 0x%p -> 0x%p\n", i,
                 (void *)(reg->guest_start),
                 (void *)(reg->guest_end - 1),
                 (void *)(reg->host_addr));

        V3_Print(vm, VCORE_NONE, "\t(flags=0x%x) (core=0x%x) (unhandled = 0x%p)\n",
                 reg->flags.value,
                 reg->core_id,
                 reg->unhandled);

        i++;
    } while ((node = v3_rb_next(node)));
}
int v3_init_mem()
{
    char * arg = v3_lookup_option("mem_block_size");

    if (arg) {
        v3_mem_block_size = atoi(arg);
        V3_Print(VM_NONE, VCORE_NONE, "memory block size set to %llu bytes\n", v3_mem_block_size);
    } else {
        V3_Print(VM_NONE, VCORE_NONE, "default memory block size of %llu bytes is in use\n", v3_mem_block_size);
    }

    return 0;
}
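
/* The block size can be overridden at load time through the Palacios
 * option string, e.g. (value illustrative):
 *
 *   mem_block_size=134217728
 *
 * Values are parsed with atoi(), so they must be given in decimal.
 */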