/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
#include <palacios/vmm_mem.h>
#include <palacios/vmm.h>
#include <palacios/vmm_util.h>
#include <palacios/vmm_emulator.h>
#include <palacios/vm_guest.h>
#include <palacios/vmm_debug.h>

#include <palacios/vmm_shadow_paging.h>
#include <palacios/vmm_direct_paging.h>

struct v3_mem_region * v3_get_base_region(struct v3_vm_info * vm, addr_t gpa) {
    struct v3_mem_map * map = &(vm->mem_map);
    uint32_t block_index = gpa / V3_CONFIG_MEM_BLOCK_SIZE;

    if ((gpa > (map->num_base_regions * V3_CONFIG_MEM_BLOCK_SIZE)) ||
        (block_index >= map->num_base_regions)) {
        PrintError(vm, VCORE_NONE, "Guest Address Exceeds Base Memory Size (ga=0x%p), (limit=0x%p)\n",
                   (void *)gpa, (void *)vm->mem_size);
        v3_print_mem_map(vm);

        return NULL;
    }

    return &(map->base_regions[block_index]);
}
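
/* Usage sketch (illustrative, not part of the original file): translating a
 * guest physical address to a host virtual address through its base block.
 *
 *   struct v3_mem_region * reg = v3_get_base_region(vm, gpa);
 *
 *   if (reg != NULL) {
 *       // host_addr is the host physical base of the block; add the offset into it
 *       void * hva = V3_VAddr((void *)(reg->host_addr + (gpa - reg->guest_start)));
 *   }
 */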
static int mem_offset_hypercall(struct guest_info * info, uint_t hcall_id, void * private_data) {
    // Note: with the multi-block base map this reports the host address of the
    // first base block only; the guest must account for the block layout itself.
    PrintDebug(info->vm_info, info, "V3Vee: Memory offset hypercall (offset=%p)\n",
               (void *)(info->vm_info->mem_map.base_regions[0].host_addr));

    info->vm_regs.rbx = info->vm_info->mem_map.base_regions[0].host_addr;

    return 0;
}
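
/* Default .unhandled callback installed on every region: dump the memory map
 * and the faulting core's state, then report failure to the paging code. */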
static int unhandled_err(struct guest_info * core, addr_t guest_va, addr_t guest_pa,
                         struct v3_mem_region * reg, pf_error_t access_info) {

    PrintError(core->vm_info, core, "Unhandled memory access error (gpa=%p, gva=%p, error_code=%d)\n",
               (void *)guest_pa, (void *)guest_va, *(uint32_t *)&access_info);

    v3_print_mem_map(core->vm_info);

    v3_print_guest_state(core);

    return -1;
}
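
/* Map a guest physical address to the NUMA node given in the VM's XML config,
 * or -1 if no <region> entry covers it. A layout of the shape assumed here
 * (the names are the ones the code reads; the values are made up):
 *
 *   <mem_layout>
 *       <region start_addr="0x0" end_addr="0x20000000" node="0" />
 *       <region start_addr="0x20000000" end_addr="0x40000000" node="1" />
 *   </mem_layout>
 */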
static int gpa_to_node_from_cfg(struct v3_vm_info * vm, addr_t gpa) {
    v3_cfg_tree_t * layout_cfg = v3_cfg_subtree(vm->cfg_data->cfg, "mem_layout");
    v3_cfg_tree_t * region_desc = v3_cfg_subtree(layout_cfg, "region");

    while (region_desc) {
        char * start_addr_str = v3_cfg_val(region_desc, "start_addr");
        char * end_addr_str = v3_cfg_val(region_desc, "end_addr");
        char * node_id_str = v3_cfg_val(region_desc, "node");

        addr_t start_addr = 0;
        addr_t end_addr = 0;
        int node_id = 0;

        if ((!start_addr_str) || (!end_addr_str) || (!node_id_str)) {
            PrintError(vm, VCORE_NONE, "Invalid memory layout in configuration\n");
            return -1;
        }

        start_addr = atox(start_addr_str);
        end_addr = atox(end_addr_str);
        node_id = atoi(node_id_str);

        if ((gpa >= start_addr) && (gpa < end_addr)) {
            return node_id;
        }

        region_desc = v3_cfg_next_branch(region_desc);
    }

    // No region matched: no NUMA preference for this address
    return -1;
}
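
/* Build the VM's base memory map: one region per V3_CONFIG_MEM_BLOCK_SIZE
 * block, allocated on the NUMA node the config assigns it to (when one is
 * given), zeroed, and marked RWX. Also registers the memory offset hypercall. */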
int v3_init_mem_map(struct v3_vm_info * vm) {
    struct v3_mem_map * map = &(vm->mem_map);
    addr_t block_pages = V3_CONFIG_MEM_BLOCK_SIZE >> 12;
    int i = 0;

    map->num_base_regions = (vm->mem_size / V3_CONFIG_MEM_BLOCK_SIZE) + \
        ((vm->mem_size % V3_CONFIG_MEM_BLOCK_SIZE) > 0);

    map->mem_regions.rb_node = NULL;

    map->base_regions = V3_Malloc(sizeof(struct v3_mem_region) * map->num_base_regions);

    if (map->base_regions == NULL) {
        PrintError(vm, VCORE_NONE, "Could not allocate base region array\n");
        return -1;
    }

    memset(map->base_regions, 0, sizeof(struct v3_mem_region) * map->num_base_regions);

    for (i = 0; i < map->num_base_regions; i++) {
        struct v3_mem_region * region = &(map->base_regions[i]);
        int node_id = -1;

        // 2MB page alignment needed for 2MB hardware nested paging
        region->guest_start = V3_CONFIG_MEM_BLOCK_SIZE * i;
        region->guest_end = region->guest_start + V3_CONFIG_MEM_BLOCK_SIZE;

        // We assume that the XML config was smart enough to align the layout
        // to the block size. If it didn't, we ignore its settings and use
        // whatever node the first byte of the block is assigned to.
        node_id = gpa_to_node_from_cfg(vm, region->guest_start);

        V3_Print(vm, VCORE_NONE, "Allocating block %d on node %d\n", i, node_id);

        if (node_id != -1) {
            region->host_addr = (addr_t)V3_AllocPagesNode(block_pages, node_id);
        } else {
            region->host_addr = (addr_t)V3_AllocPages(block_pages);
        }

        if ((void *)region->host_addr == NULL) {
            PrintError(vm, VCORE_NONE, "Could not allocate guest memory\n");
            return -1;
        }

        // Clear the memory...
        memset(V3_VAddr((void *)region->host_addr), 0, V3_CONFIG_MEM_BLOCK_SIZE);

        region->flags.read = 1;
        region->flags.write = 1;
        region->flags.exec = 1;
        region->flags.base = 1;
        region->flags.alloced = 1;

        region->unhandled = unhandled_err;
    }

    v3_register_hypercall(vm, MEM_OFFSET_HCALL, mem_offset_hypercall, NULL);

    return 0;
}
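
/* Tear-down mirror of v3_init_mem_map: delete every registered sub region,
 * then free each base block and the base region array itself. */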
void v3_delete_mem_map(struct v3_vm_info * vm) {
    struct v3_mem_map * map = &(vm->mem_map);
    struct rb_node * node = v3_rb_first(&(map->mem_regions));
    struct v3_mem_region * reg;
    struct rb_node * tmp_node = NULL;
    addr_t block_pages = V3_CONFIG_MEM_BLOCK_SIZE >> 12;
    int i = 0;

    while (node) {
        reg = rb_entry(node, struct v3_mem_region, tree_node);
        tmp_node = node;
        node = v3_rb_next(node);

        v3_delete_mem_region(vm, reg);
    }

    for (i = 0; i < map->num_base_regions; i++) {
        struct v3_mem_region * region = &(map->base_regions[i]);
        V3_FreePages((void *)(region->host_addr), block_pages);
    }

    V3_Free(map->base_regions);
}
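
/* Usage sketch (illustrative; the addresses and the one-page size are made up):
 * back one page of guest physical address space with freshly allocated host
 * memory, visible to all cores.
 *
 *   addr_t hpa = (addr_t)V3_AllocPages(1);
 *
 *   if ((hpa == 0) ||
 *       (v3_add_shadow_mem(vm, V3_MEM_CORE_ANY, 0xc0000, 0xc1000, hpa) == -1)) {
 *       PrintError(vm, VCORE_NONE, "Failed to add shadow region\n");
 *   }
 */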
struct v3_mem_region * v3_create_mem_region(struct v3_vm_info * vm, uint16_t core_id,
                                            addr_t guest_addr_start, addr_t guest_addr_end) {
    struct v3_mem_region * entry = NULL;

    if (guest_addr_start >= guest_addr_end) {
        PrintError(vm, VCORE_NONE, "Region start is after region end\n");
        return NULL;
    }

    entry = (struct v3_mem_region *)V3_Malloc(sizeof(struct v3_mem_region));

    if (entry == NULL) {
        PrintError(vm, VCORE_NONE, "Cannot allocate in creating a memory region\n");
        return NULL;
    }

    memset(entry, 0, sizeof(struct v3_mem_region));

    entry->guest_start = guest_addr_start;
    entry->guest_end = guest_addr_end;
    entry->core_id = core_id;
    entry->unhandled = unhandled_err;

    return entry;
}
int v3_add_shadow_mem(struct v3_vm_info * vm, uint16_t core_id,
                      addr_t guest_addr_start,
                      addr_t guest_addr_end,
                      addr_t host_addr) {
    struct v3_mem_region * entry = NULL;

    entry = v3_create_mem_region(vm, core_id,
                                 guest_addr_start,
                                 guest_addr_end);

    if (entry == NULL) {
        return -1;
    }

    entry->host_addr = host_addr;

    entry->flags.read = 1;
    entry->flags.write = 1;
    entry->flags.exec = 1;
    entry->flags.alloced = 1;

    if (v3_insert_mem_region(vm, entry) == -1) {
        V3_Free(entry);
        return -1;
    }

    return 0;
}
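
/* Core of the region rb-tree insert: regions are ordered first by guest
 * address range, then by core_id, so per-core regions with identical bounds
 * can coexist. Returns NULL on success, or the colliding region on overlap. */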
static inline
struct v3_mem_region * __insert_mem_region(struct v3_vm_info * vm,
                                           struct v3_mem_region * region) {
    struct rb_node ** p = &(vm->mem_map.mem_regions.rb_node);
    struct rb_node * parent = NULL;
    struct v3_mem_region * tmp_region;

    while (*p) {
        parent = *p;
        tmp_region = rb_entry(parent, struct v3_mem_region, tree_node);

        if (region->guest_end <= tmp_region->guest_start) {
            p = &(*p)->rb_left;
        } else if (region->guest_start >= tmp_region->guest_end) {
            p = &(*p)->rb_right;
        } else {
            // The regions overlap in guest physical address space
            if ((region->guest_end != tmp_region->guest_end) ||
                (region->guest_start != tmp_region->guest_start)) {
                PrintError(vm, VCORE_NONE, "Trying to map a partially overlapping core-specific page...\n");
                return tmp_region; // This is ugly...
            } else if (region->core_id == tmp_region->core_id) {
                PrintError(vm, VCORE_NONE, "Trying to map a core-overlapping page\n");
                return tmp_region;
            } else if (region->core_id < tmp_region->core_id) {
                p = &(*p)->rb_left;
            } else {
                p = &(*p)->rb_right;
            }
        }
    }

    rb_link_node(&(region->tree_node), parent, p);

    return NULL;
}
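
/* Public insert: link the region into the rb-tree, rebalance, and invalidate
 * any cached translations (shadow, passthrough, or nested page tables) that
 * cover the new range on every core. Returns -1 on collision, otherwise the
 * OR of the per-core invalidation results. */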
int v3_insert_mem_region(struct v3_vm_info * vm, struct v3_mem_region * region) {
    struct v3_mem_region * ret;
    int i = 0;
    int rc = 0;

    if ((ret = __insert_mem_region(vm, region))) {
        PrintError(vm, VCORE_NONE, "Internal insert failed; returned region is from 0x%p to 0x%p on vcore %d\n",
                   (void *)(ret->guest_start), (void *)(ret->guest_end), ret->core_id);
        return -1;
    }

    v3_rb_insert_color(&(region->tree_node), &(vm->mem_map.mem_regions));

    for (i = 0; i < vm->num_cores; i++) {
        struct guest_info * info = &(vm->cores[i]);

        // flush virtual page tables
        // 3 cases: shadow, shadow passthrough, and nested

        if (info->shdw_pg_mode == SHADOW_PAGING) {
            v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);

            if (mem_mode == PHYSICAL_MEM) {
                rc |= v3_invalidate_passthrough_addr_range(info, region->guest_start, region->guest_end - 1);
            } else {
                rc |= v3_invalidate_shadow_pts(info);
            }

        } else if (info->shdw_pg_mode == NESTED_PAGING) {
            rc |= v3_invalidate_nested_addr_range(info, region->guest_start, region->guest_end - 1);
        }
    }

    return rc;
}
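
/* Look up the region containing guest_addr for the given core: registered sub
 * regions (core-specific or V3_MEM_CORE_ANY) take precedence; addresses with
 * no sub region fall through to the base memory blocks. */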
struct v3_mem_region * v3_get_mem_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
    struct rb_node * n = vm->mem_map.mem_regions.rb_node;
    struct v3_mem_region * reg = NULL;

    while (n) {

        reg = rb_entry(n, struct v3_mem_region, tree_node);

        if (guest_addr < reg->guest_start) {
            n = n->rb_left;
        } else if (guest_addr >= reg->guest_end) {
            n = n->rb_right;
        } else {
            if (reg->core_id == V3_MEM_CORE_ANY) {
                // found relevant region, it's available on all cores
                return reg;
            } else if (core_id == reg->core_id) {
                // found relevant region, it's available on the indicated core
                return reg;
            } else if (core_id < reg->core_id) {
                // go left, core too big
                n = n->rb_left;
            } else if (core_id > reg->core_id) {
                // go right, core too small
                n = n->rb_right;
            } else {
                PrintDebug(vm, VCORE_NONE, "v3_get_mem_region: Impossible!\n");
                return NULL;
            }
        }
    }

    // There is no registered region, so we check whether it is a valid address in the base region
    return v3_get_base_region(vm, guest_addr);
}
/* This returns the next memory region based on a given address.
 * If the address falls inside a sub region, that region is returned.
 * If the address falls outside a sub region, the next sub region is returned.
 * NOTE that we have to be careful about core_ids here...
 */
static struct v3_mem_region * get_next_mem_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
    struct rb_node * n = vm->mem_map.mem_regions.rb_node;
    struct v3_mem_region * reg = NULL;
    struct v3_mem_region * parent = NULL;

    if (n == NULL) {
        return NULL;
    }

    while (n) {

        reg = rb_entry(n, struct v3_mem_region, tree_node);

        if (guest_addr < reg->guest_start) {
            n = n->rb_left;
        } else if (guest_addr >= reg->guest_end) {
            n = n->rb_right;
        } else {
            if (reg->core_id == V3_MEM_CORE_ANY) {
                // found relevant region, it's available on all cores
                return reg;
            } else if (core_id == reg->core_id) {
                // found relevant region, it's available on the indicated core
                return reg;
            } else if (core_id < reg->core_id) {
                // go left, core too big
                n = n->rb_left;
            } else if (core_id > reg->core_id) {
                // go right, core too small
                n = n->rb_right;
            } else {
                PrintError(vm, VCORE_NONE, "get_next_mem_region: Impossible!\n");
                return NULL;
            }
        }

        // remember the last region visible to this core as a fallback
        if ((reg->core_id == core_id) || (reg->core_id == V3_MEM_CORE_ANY)) {
            parent = reg;
        }
    }

    if (parent == NULL) {
        // no region visible to this core was seen on the search path
        return NULL;
    }

    if (parent->guest_start > guest_addr) {
        return parent;
    } else if (parent->guest_end < guest_addr) {
        struct rb_node * node = &(parent->tree_node);

        while ((node = v3_rb_next(node)) != NULL) {
            struct v3_mem_region * next_reg = rb_entry(node, struct v3_mem_region, tree_node);

            if ((next_reg->core_id == V3_MEM_CORE_ANY) ||
                (next_reg->core_id == core_id)) {

                // This check is not strictly necessary, but it makes it clearer
                if (next_reg->guest_start > guest_addr) {
                    return next_reg;
                }
            }
        }
    }

    return NULL;
}
/* Given an address range of guest memory, find whether any regions overlap with it.
 * This checks that the range lies in a single region, and returns that region if it does;
 * this can be either the base region or a sub region.
 * If there are multiple regions in the range then it returns NULL.
 */
static struct v3_mem_region * get_overlapping_region(struct v3_vm_info * vm, uint16_t core_id,
                                                     addr_t start_gpa, addr_t end_gpa) {
    struct v3_mem_region * start_region = v3_get_mem_region(vm, core_id, start_gpa);

    if (start_region == NULL) {
        PrintError(vm, VCORE_NONE, "Invalid memory region\n");
        return NULL;
    }

    if (start_region->guest_end < end_gpa) {
        // Region ends before range
        return NULL;
    } else if (start_region->flags.base == 0) {
        // sub region overlaps range
        return start_region;
    } else {
        // Base region, now we have to scan forward for the next sub region
        struct v3_mem_region * next_reg = get_next_mem_region(vm, core_id, start_gpa);

        if (next_reg == NULL) {
            // no sub regions after start_addr, base region is ok
            return start_region;
        } else if (next_reg->guest_start >= end_gpa) {
            // Next sub region begins outside range
            return start_region;
        } else {
            return NULL;
        }
    }

    // Should never get here
    return NULL;
}
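
/* Unlink a region from the rb-tree, flush any translations that cached it
 * (only needed while the VM is running), and free it. */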
void v3_delete_mem_region(struct v3_vm_info * vm, struct v3_mem_region * reg) {
    int i = 0;
    int rc = 0;

    if (reg == NULL) {
        return;
    }

    v3_rb_erase(&(reg->tree_node), &(vm->mem_map.mem_regions));

    // If the guest isn't running then there shouldn't be anything to invalidate.
    // Page tables should __always__ be created on demand during execution.
    // NOTE: This is a sanity check, and can be removed if that assumption changes
    if (vm->run_state != VM_RUNNING) {
        V3_Free(reg);
        return;
    }

    for (i = 0; i < vm->num_cores; i++) {
        struct guest_info * info = &(vm->cores[i]);

        // flush virtual page tables
        // 3 cases: shadow, shadow passthrough, and nested

        if (info->shdw_pg_mode == SHADOW_PAGING) {
            v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);

            if (mem_mode == PHYSICAL_MEM) {
                rc |= v3_invalidate_passthrough_addr_range(info, reg->guest_start, reg->guest_end - 1);
            } else {
                rc |= v3_invalidate_shadow_pts(info);
            }

        } else if (info->shdw_pg_mode == NESTED_PAGING) {
            rc |= v3_invalidate_nested_addr_range(info, reg->guest_start, reg->guest_end - 1);
        }
    }

    V3_Free(reg);

    if (rc) { PrintError(vm, VCORE_NONE, "Error in deleting memory region\n"); }
}
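
/* Usage sketch (illustrative): the paging code can ask how large a page may
 * safely back a faulting address in the guest's current CPU mode.
 *
 *   uint32_t psize = v3_get_max_page_size(core, fault_addr, v3_get_vm_cpu_mode(core));
 *
 *   if (psize == PAGE_SIZE_2MB) {
 *       // map fault_addr with a 2MB page entry
 *   }
 */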
// Determine if a given address can be handled by a large page of the requested size
uint32_t v3_get_max_page_size(struct guest_info * core, addr_t page_addr, v3_cpu_mode_t mode) {
    addr_t pg_start = 0;
    addr_t pg_end = 0;
    uint32_t page_size = PAGE_SIZE_4KB;
    struct v3_mem_region * reg = NULL;

    switch (mode) {
        case PROTECTED:
            if (core->use_large_pages == 1) {
                pg_start = PAGE_ADDR_4MB(page_addr);
                pg_end = (pg_start + PAGE_SIZE_4MB);

                reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);

                if ((reg) && ((reg->host_addr % PAGE_SIZE_4MB) == 0)) {
                    page_size = PAGE_SIZE_4MB;
                }
            }
            break;
        case PROTECTED_PAE:
            if (core->use_large_pages == 1) {
                pg_start = PAGE_ADDR_2MB(page_addr);
                pg_end = (pg_start + PAGE_SIZE_2MB);

                reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);

                if ((reg) && ((reg->host_addr % PAGE_SIZE_2MB) == 0)) {
                    page_size = PAGE_SIZE_2MB;
                }
            }
            break;
        case LONG:
        case LONG_32_COMPAT:
        case LONG_16_COMPAT:
            if (core->use_giant_pages == 1) {
                pg_start = PAGE_ADDR_1GB(page_addr);
                pg_end = (pg_start + PAGE_SIZE_1GB);

                reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);

                if ((reg) && ((reg->host_addr % PAGE_SIZE_1GB) == 0)) {
                    page_size = PAGE_SIZE_1GB;
                    break;
                }
            }

            if (core->use_large_pages == 1) {
                pg_start = PAGE_ADDR_2MB(page_addr);
                pg_end = (pg_start + PAGE_SIZE_2MB);

                reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);

                if ((reg) && ((reg->host_addr % PAGE_SIZE_2MB) == 0)) {
                    page_size = PAGE_SIZE_2MB;
                }
            }
            break;
        default:
            PrintError(core->vm_info, core, "Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
            return -1;
    }

    return page_size;
}
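
/* Debug dump of the full memory map: every base block, followed by every
 * registered sub region with its flags, core binding, and unhandled callback. */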
void v3_print_mem_map(struct v3_vm_info * vm) {
    struct v3_mem_map * map = &(vm->mem_map);
    struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
    struct v3_mem_region * reg = NULL;
    int i = 0;

    V3_Print(vm, VCORE_NONE, "Memory Layout (all cores):\n");

    V3_Print(vm, VCORE_NONE, "Base Memory: (%d regions)\n", map->num_base_regions);

    for (i = 0; i < map->num_base_regions; i++) {
        reg = &(map->base_regions[i]);

        V3_Print(vm, VCORE_NONE, "Base Region[%d] (all cores): 0x%p - 0x%p -> 0x%p\n",
                 i,
                 (void *)(reg->guest_start),
                 (void *)(reg->guest_end - 1),
                 (void *)(reg->host_addr));
    }

    // If the memory map is empty, don't print it
    if (node == NULL) {
        return;
    }

    do {
        reg = rb_entry(node, struct v3_mem_region, tree_node);

        V3_Print(vm, VCORE_NONE, "%d: 0x%p - 0x%p -> 0x%p\n", i,
                 (void *)(reg->guest_start),
                 (void *)(reg->guest_end - 1),
                 (void *)(reg->host_addr));

        V3_Print(vm, VCORE_NONE, "\t(flags=0x%x) (core=0x%x) (unhandled = 0x%p)\n",
                 reg->flags.value, reg->core_id, reg->unhandled);

        i++;
    } while ((node = v3_rb_next(node)));
}