/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
#include <palacios/vmm_mem.h>
#include <palacios/vmm.h>
#include <palacios/vmm_util.h>
#include <palacios/vmm_emulator.h>
#include <palacios/vm_guest.h>
#include <palacios/vmm_debug.h>

#include <palacios/vmm_shadow_paging.h>
#include <palacios/vmm_direct_paging.h>
static int mem_offset_hypercall(struct guest_info * info, uint_t hcall_id, void * private_data) {
    PrintDebug(info->vm_info, info, "V3Vee: Memory offset hypercall (offset=%p)\n",
               (void *)(info->vm_info->mem_map.base_region.host_addr));

    info->vm_regs.rbx = info->vm_info->mem_map.base_region.host_addr;

    return 0;
}
static int unhandled_err(struct guest_info * core, addr_t guest_va, addr_t guest_pa,
                         struct v3_mem_region * reg, pf_error_t access_info) {

    PrintError(core->vm_info, core, "Unhandled memory access error (gpa=%p, gva=%p, error_code=%d)\n",
               (void *)guest_pa, (void *)guest_va, *(uint32_t *)&access_info);

    v3_print_mem_map(core->vm_info);
    v3_print_guest_state(core);

    return -1;
}
int v3_init_mem_map(struct v3_vm_info * vm) {
    struct v3_mem_map * map = &(vm->mem_map);
    addr_t mem_pages = vm->mem_size >> 12;

    memset(&(map->base_region), 0, sizeof(struct v3_mem_region));

    map->mem_regions.rb_node = NULL;

    // There is an underlying region that contains all of the guest memory
    // PrintDebug(info->vm_info, info, "Mapping %d pages of memory (%u bytes)\n", (int)mem_pages, (uint_t)info->mem_size);

    // 2MB page alignment needed for 2MB hardware nested paging
    map->base_region.guest_start = 0;
    map->base_region.guest_end = mem_pages * PAGE_SIZE_4KB;

#ifdef V3_CONFIG_ALIGNED_PG_ALLOC
    map->base_region.host_addr = (addr_t)V3_AllocAlignedPages(mem_pages, vm->mem_align);
#else
    map->base_region.host_addr = (addr_t)V3_AllocPages(mem_pages);
#endif

    if ((void *)map->base_region.host_addr == NULL) {
        PrintError(vm, VCORE_NONE, "Could not allocate guest memory\n");
        return -1;
    }

    // Clear the memory...
    memset(V3_VAddr((void *)map->base_region.host_addr), 0, mem_pages * PAGE_SIZE_4KB);

    map->base_region.flags.read = 1;
    map->base_region.flags.write = 1;
    map->base_region.flags.exec = 1;
    map->base_region.flags.base = 1;
    map->base_region.flags.alloced = 1;

    map->base_region.unhandled = unhandled_err;

    v3_register_hypercall(vm, MEM_OFFSET_HCALL, mem_offset_hypercall, NULL);

    return 0;
}
void v3_delete_mem_map(struct v3_vm_info * vm) {
    struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
    struct v3_mem_region * reg;
    struct rb_node * tmp_node = NULL;
    addr_t mem_pages = vm->mem_size >> 12;

    while (node) {
        reg = rb_entry(node, struct v3_mem_region, tree_node);
        tmp_node = node;
        node = v3_rb_next(node);

        v3_delete_mem_region(vm, reg);
    }

    V3_FreePages((void *)(vm->mem_map.base_region.host_addr), mem_pages);
}
struct v3_mem_region * v3_create_mem_region(struct v3_vm_info * vm, uint16_t core_id,
                                            addr_t guest_addr_start, addr_t guest_addr_end) {
    struct v3_mem_region * entry = NULL;

    if (guest_addr_start >= guest_addr_end) {
        PrintError(vm, VCORE_NONE, "Region start is after region end\n");
        return NULL;
    }

    entry = (struct v3_mem_region *)V3_Malloc(sizeof(struct v3_mem_region));

    if (entry == NULL) {
        PrintError(vm, VCORE_NONE, "Cannot allocate in creating a memory region\n");
        return NULL;
    }

    memset(entry, 0, sizeof(struct v3_mem_region));

    entry->guest_start = guest_addr_start;
    entry->guest_end = guest_addr_end;
    entry->core_id = core_id;
    entry->unhandled = unhandled_err;

    return entry;
}
int v3_add_shadow_mem(struct v3_vm_info * vm, uint16_t core_id,
                      addr_t guest_addr_start,
                      addr_t guest_addr_end,
                      addr_t host_addr) {
    struct v3_mem_region * entry = NULL;

    entry = v3_create_mem_region(vm, core_id,
                                 guest_addr_start, guest_addr_end);

    entry->host_addr = host_addr;
    entry->flags.read = 1;
    entry->flags.write = 1;
    entry->flags.exec = 1;
    entry->flags.alloced = 1;

    if (v3_insert_mem_region(vm, entry) == -1) {
        V3_Free(entry);
        return -1;
    }

    return 0;
}
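
/* A minimal usage sketch (not part of the original file): how a caller such as a
 * device backend might map one freshly allocated host page into the guest-physical
 * address space for all cores. The guest-physical base address and the helper name
 * example_map_backing_page are hypothetical.
 */
#if 0
static int example_map_backing_page(struct v3_vm_info * vm) {
    addr_t host_page = (addr_t)V3_AllocPages(1);     // one 4KB host page
    addr_t gpa_start = 0xc0000000;                   // hypothetical guest-physical base
    addr_t gpa_end   = gpa_start + PAGE_SIZE_4KB;

    if ((void *)host_page == NULL) {
        return -1;
    }

    // Region is visible to every core (V3_MEM_CORE_ANY) with the R/W/X flags set above
    if (v3_add_shadow_mem(vm, V3_MEM_CORE_ANY, gpa_start, gpa_end, host_page) == -1) {
        V3_FreePages((void *)host_page, 1);
        return -1;
    }

    return 0;
}
#endif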
static struct v3_mem_region * __insert_mem_region(struct v3_vm_info * vm,
                                                  struct v3_mem_region * region) {
    struct rb_node ** p = &(vm->mem_map.mem_regions.rb_node);
    struct rb_node * parent = NULL;
    struct v3_mem_region * tmp_region;

    while (*p) {
        parent = *p;
        tmp_region = rb_entry(parent, struct v3_mem_region, tree_node);

        if (region->guest_end <= tmp_region->guest_start) {
            p = &(*p)->rb_left;
        } else if (region->guest_start >= tmp_region->guest_end) {
            p = &(*p)->rb_right;
        } else {
            if ((region->guest_end != tmp_region->guest_end) ||
                (region->guest_start != tmp_region->guest_start)) {
                PrintError(vm, VCORE_NONE, "Trying to map a partially overlapping core-specific page...\n");
                return tmp_region; // This is ugly...
            } else if (region->core_id == tmp_region->core_id) {
                PrintError(vm, VCORE_NONE, "Trying to map a core-overlapping page\n");
                return tmp_region;
            } else if (region->core_id < tmp_region->core_id) {
                p = &(*p)->rb_left;
            } else {
                p = &(*p)->rb_right;
            }
        }
    }

    rb_link_node(&(region->tree_node), parent, p);
    return NULL;
}
int v3_insert_mem_region(struct v3_vm_info * vm, struct v3_mem_region * region) {
    struct v3_mem_region * ret;
    int i = 0;

    if ((ret = __insert_mem_region(vm, region))) {
        PrintError(vm, VCORE_NONE, "Internal insert failed: returned region is from 0x%p to 0x%p on vcore %d\n",
                   (void *)(ret->guest_start), (void *)(ret->guest_end), ret->core_id);
        return -1;
    }

    v3_rb_insert_color(&(region->tree_node), &(vm->mem_map.mem_regions));

    for (i = 0; i < vm->num_cores; i++) {
        struct guest_info * info = &(vm->cores[i]);

        // flush virtual page tables
        // 3 cases: shadow, shadow passthrough, and nested
        if (info->shdw_pg_mode == SHADOW_PAGING) {
            v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);

            if (mem_mode == PHYSICAL_MEM) {
                addr_t cur_addr;

                for (cur_addr = region->guest_start;
                     cur_addr < region->guest_end;
                     cur_addr += PAGE_SIZE_4KB) {
                    v3_invalidate_passthrough_addr(info, cur_addr);
                }
            } else {
                v3_invalidate_shadow_pts(info);
            }
        } else if (info->shdw_pg_mode == NESTED_PAGING) {
            addr_t cur_addr;

            for (cur_addr = region->guest_start;
                 cur_addr < region->guest_end;
                 cur_addr += PAGE_SIZE_4KB) {
                v3_invalidate_nested_addr(info, cur_addr);
            }
        }
    }

    return 0;
}
struct v3_mem_region * v3_get_mem_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
    struct rb_node * n = vm->mem_map.mem_regions.rb_node;
    struct v3_mem_region * reg = NULL;

    while (n) {
        reg = rb_entry(n, struct v3_mem_region, tree_node);

        if (guest_addr < reg->guest_start) {
            n = n->rb_left;
        } else if (guest_addr >= reg->guest_end) {
            n = n->rb_right;
        } else {
            if (reg->core_id == V3_MEM_CORE_ANY) {
                // found relevant region, it's available on all cores
                return reg;
            } else if (core_id == reg->core_id) {
                // found relevant region, it's available on the indicated core
                return reg;
            } else if (core_id < reg->core_id) {
                // go left, core too big
                n = n->rb_left;
            } else if (core_id > reg->core_id) {
                // go right, core too small
                n = n->rb_right;
            } else {
                PrintDebug(vm, VCORE_NONE, "v3_get_mem_region: Impossible!\n");
                return NULL;
            }
        }
    }

    // There is no registered region, so we check whether it is a valid address in the base region
    if (guest_addr > vm->mem_map.base_region.guest_end) {
        PrintError(vm, VCORE_NONE, "Guest Address Exceeds Base Memory Size (ga=0x%p), (limit=0x%p) (core=0x%x)\n",
                   (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end, core_id);
        v3_print_mem_map(vm);
        return NULL;
    }

    return &(vm->mem_map.base_region);
}
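
/* A minimal sketch (not part of the original file) of how a lookup result can be
 * turned into a guest-physical to host-physical translation, assuming the region
 * maps its guest range contiguously starting at reg->host_addr (as the base region
 * and the regions built by v3_add_shadow_mem do). example_gpa_to_hpa is hypothetical.
 */
#if 0
static int example_gpa_to_hpa(struct guest_info * core, addr_t gpa, addr_t * hpa) {
    struct v3_mem_region * reg = v3_get_mem_region(core->vm_info, core->vcpu_id, gpa);

    if ((reg == NULL) || (reg->flags.alloced == 0)) {
        // no backing host memory for this guest-physical address
        return -1;
    }

    // offset into the region plus the region's host base address
    *hpa = reg->host_addr + (gpa - reg->guest_start);
    return 0;
}
#endif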
/* This returns the next memory region based on a given address.
 * If the address falls inside a sub region, that region is returned.
 * If the address falls outside a sub region, the next sub region is returned.
 * NOTE that we have to be careful about core_ids here...
 */
static struct v3_mem_region * get_next_mem_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
    struct rb_node * n = vm->mem_map.mem_regions.rb_node;
    struct v3_mem_region * reg = NULL;
    struct v3_mem_region * parent = NULL;

    if (n == NULL) {
        return NULL;
    }

    while (n) {
        reg = rb_entry(n, struct v3_mem_region, tree_node);

        if (guest_addr < reg->guest_start) {
            n = n->rb_left;
        } else if (guest_addr >= reg->guest_end) {
            n = n->rb_right;
        } else {
            if (reg->core_id == V3_MEM_CORE_ANY) {
                // found relevant region, it's available on all cores
                return reg;
            } else if (core_id == reg->core_id) {
                // found relevant region, it's available on the indicated core
                return reg;
            } else if (core_id < reg->core_id) {
                // go left, core too big
                n = n->rb_left;
            } else if (core_id > reg->core_id) {
                // go right, core too small
                n = n->rb_right;
            } else {
                PrintError(vm, VCORE_NONE, "get_next_mem_region: Impossible!\n");
                return NULL;
            }
        }

        // remember the last region usable by this core as a search anchor
        if ((reg->core_id == core_id) || (reg->core_id == V3_MEM_CORE_ANY)) {
            parent = reg;
        }
    }

    if (parent->guest_start > guest_addr) {
        return parent;
    } else if (parent->guest_end < guest_addr) {
        struct rb_node * node = &(parent->tree_node);

        while ((node = v3_rb_next(node)) != NULL) {
            struct v3_mem_region * next_reg = rb_entry(node, struct v3_mem_region, tree_node);

            if ((next_reg->core_id == V3_MEM_CORE_ANY) ||
                (next_reg->core_id == core_id)) {
                // This check is not strictly necessary, but it makes it clearer
                if (next_reg->guest_start > guest_addr) {
                    return next_reg;
                }
            }
        }
    }

    return NULL;
}
/* Given an address range of memory, find if there are any regions that overlap with it.
 * This checks that the range lies in a single region, and returns that region if it does;
 * this can be either the base region or a sub region.
 * If there are multiple regions in the range then it returns NULL.
 */
static struct v3_mem_region * get_overlapping_region(struct v3_vm_info * vm, uint16_t core_id,
                                                     addr_t start_gpa, addr_t end_gpa) {
    struct v3_mem_region * start_region = v3_get_mem_region(vm, core_id, start_gpa);

    if (start_region == NULL) {
        PrintError(vm, VCORE_NONE, "Invalid memory region\n");
        return NULL;
    }

    if (start_region->guest_end < end_gpa) {
        // Region ends before range
        return NULL;
    } else if (start_region->flags.base == 0) {
        // sub region overlaps range
        return start_region;
    } else {
        // Base region, now we have to scan forward for the next sub region
        struct v3_mem_region * next_reg = get_next_mem_region(vm, core_id, start_gpa);

        if (next_reg == NULL) {
            // no sub regions after start_addr, base region is ok
            return start_region;
        } else if (next_reg->guest_start >= end_gpa) {
            // Next sub region begins outside the range, base region is ok
            return start_region;
        } else {
            return NULL;
        }
    }

    // Should never get here
    return NULL;
}
void v3_delete_mem_region(struct v3_vm_info * vm, struct v3_mem_region * reg) {
    int i = 0;

    if (reg == NULL) {
        return;
    }

    v3_rb_erase(&(reg->tree_node), &(vm->mem_map.mem_regions));

    // If the guest isn't running then there shouldn't be anything to invalidate.
    // Page tables should __always__ be created on demand during execution
    // NOTE: This is a sanity check, and can be removed if that assumption changes
    if (vm->run_state != VM_RUNNING) {
        V3_Free(reg);
        return;
    }

    for (i = 0; i < vm->num_cores; i++) {
        struct guest_info * info = &(vm->cores[i]);

        // flush virtual page tables
        // 3 cases: shadow, shadow passthrough, and nested
        if (info->shdw_pg_mode == SHADOW_PAGING) {
            v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);

            if (mem_mode == PHYSICAL_MEM) {
                addr_t cur_addr;

                for (cur_addr = reg->guest_start;
                     cur_addr < reg->guest_end;
                     cur_addr += PAGE_SIZE_4KB) {
                    v3_invalidate_passthrough_addr(info, cur_addr);
                }
            } else {
                v3_invalidate_shadow_pts(info);
            }
        } else if (info->shdw_pg_mode == NESTED_PAGING) {
            addr_t cur_addr;

            for (cur_addr = reg->guest_start;
                 cur_addr < reg->guest_end;
                 cur_addr += PAGE_SIZE_4KB) {
                v3_invalidate_nested_addr(info, cur_addr);
            }
        }
    }

    V3_Free(reg);
}
// Determine if a given address can be handled by a large page of the requested size
uint32_t v3_get_max_page_size(struct guest_info * core, addr_t page_addr, v3_cpu_mode_t mode) {
    addr_t pg_start = 0;
    addr_t pg_end = 0;
    uint32_t page_size = PAGE_SIZE_4KB;
    struct v3_mem_region * reg = NULL;

    switch (mode) {
        case PROTECTED:
            if (core->use_large_pages == 1) {
                pg_start = PAGE_ADDR_4MB(page_addr);
                pg_end = (pg_start + PAGE_SIZE_4MB);
                reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);

                if ((reg) && ((reg->host_addr % PAGE_SIZE_4MB) == 0)) {
                    page_size = PAGE_SIZE_4MB;
                }
            }
            break;
        case PROTECTED_PAE:
            if (core->use_large_pages == 1) {
                pg_start = PAGE_ADDR_2MB(page_addr);
                pg_end = (pg_start + PAGE_SIZE_2MB);
                reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);

                if ((reg) && ((reg->host_addr % PAGE_SIZE_2MB) == 0)) {
                    page_size = PAGE_SIZE_2MB;
                }
            }
            break;
        case LONG:
        case LONG_32_COMPAT:
        case LONG_16_COMPAT:
            if (core->use_giant_pages == 1) {
                pg_start = PAGE_ADDR_1GB(page_addr);
                pg_end = (pg_start + PAGE_SIZE_1GB);
                reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);

                if ((reg) && ((reg->host_addr % PAGE_SIZE_1GB) == 0)) {
                    page_size = PAGE_SIZE_1GB;
                    break;
                }
            }

            if (core->use_large_pages == 1) {
                pg_start = PAGE_ADDR_2MB(page_addr);
                pg_end = (pg_start + PAGE_SIZE_2MB);
                reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);

                if ((reg) && ((reg->host_addr % PAGE_SIZE_2MB) == 0)) {
                    page_size = PAGE_SIZE_2MB;
                }
            }
            break;
        default:
            PrintError(core->vm_info, core, "Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
            return -1;
    }

    return page_size;
}
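
/* A minimal sketch (not part of the original file): how a page-fault handler might
 * consult v3_get_max_page_size when deciding whether a faulting guest-physical
 * address can be backed by a large page. example_choose_page_size and its fault
 * argument are hypothetical.
 */
#if 0
static uint32_t example_choose_page_size(struct guest_info * core, addr_t fault_gpa) {
    v3_cpu_mode_t mode = v3_get_vm_cpu_mode(core);
    uint32_t pg_size = v3_get_max_page_size(core, fault_gpa, mode);

    // pg_size is PAGE_SIZE_4KB unless the whole large-page frame around fault_gpa
    // lies in one suitably aligned region, in which case a larger size is returned
    return pg_size;
}
#endif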
void v3_print_mem_map(struct v3_vm_info * vm) {
    struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
    struct v3_mem_region * reg = &(vm->mem_map.base_region);
    int i = 0;

    V3_Print(vm, VCORE_NONE, "Memory Layout (all cores):\n");

    V3_Print(vm, VCORE_NONE, "Base Region (all cores): 0x%p - 0x%p -> 0x%p\n",
             (void *)(reg->guest_start),
             (void *)(reg->guest_end - 1),
             (void *)(reg->host_addr));

    // If the memory map is empty, don't print it
    if (node == NULL) {
        return;
    }

    do {
        reg = rb_entry(node, struct v3_mem_region, tree_node);

        V3_Print(vm, VCORE_NONE, "%d: 0x%p - 0x%p -> 0x%p\n", i,
                 (void *)(reg->guest_start),
                 (void *)(reg->guest_end - 1),
                 (void *)(reg->host_addr));

        V3_Print(vm, VCORE_NONE, "\t(flags=0x%x) (core=0x%x) (unhandled = 0x%p)\n",
                 reg->flags.value, reg->core_id, reg->unhandled);

        i++;
    } while ((node = v3_rb_next(node)));
}