2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm_mem.h>
21 #include <palacios/vmm.h>
22 #include <palacios/vmm_util.h>
23 #include <palacios/vmm_emulator.h>
24 #include <palacios/vm_guest.h>
26 #include <palacios/vmm_shadow_paging.h>
27 #include <palacios/vmm_direct_paging.h>
// Forward declaration (defined later in this file): insert a shadow region
// into the VM's region red-black tree and flush affected paging state.
31 struct v3_shadow_region * insert_shadow_region(struct v3_vm_info * vm,
32 					       struct v3_shadow_region * region);
// Hypercall handler for MEM_OFFSET_HCALL: reports the host physical base
// address of the guest's base memory region back to the guest in RBX.
// (hcall_id and private_data are unused here.)
35 static int mem_offset_hypercall(struct guest_info * info, uint_t hcall_id, void * private_data) {
36     PrintDebug("V3Vee: Memory offset hypercall (offset=%p)\n",
37 	       (void *)(info->vm_info->mem_map.base_region.host_addr));
    // Return value is delivered to the guest through its RBX register.
39     info->vm_regs.rbx = info->vm_info->mem_map.base_region.host_addr;
// Initialize the VM's memory map: allocate the contiguous base region backing
// all guest physical memory, set up the (initially empty) shadow-region tree,
// allocate per-core hook scratch pages, and register the mem-offset hypercall.
45 int v3_init_mem_map(struct v3_vm_info * vm) {
46     struct v3_mem_map * map = &(vm->mem_map);
    // mem_size is in bytes; >> 12 converts to a count of 4KB pages.
47     addr_t mem_pages = vm->mem_size >> 12;
49     memset(&(map->base_region), 0, sizeof(struct v3_shadow_region));
    // Empty red-black tree of per-VM shadow regions.
51     map->shdw_regions.rb_node = NULL;
    // One scratch page per core, used by get_hook_hva() when emulating
    // accesses to unbacked hooked regions.
    // NOTE(review): this V3_AllocPages result is not NULL-checked — confirm
    // whether allocation failure is possible/handled at this point.
54     map->hook_hvas = V3_VAddr(V3_AllocPages(vm->num_cores));
57     // There is an underlying region that contains all of the guest memory
58     //  PrintDebug("Mapping %d pages of memory (%u bytes)\n", (int)mem_pages, (uint_t)info->mem_size);
    // Base region spans guest physical [0, mem_size) and is identity-backed
    // by one contiguous host allocation.
60     map->base_region.guest_start = 0;
61     map->base_region.guest_end = mem_pages * PAGE_SIZE_4KB;
62     map->base_region.host_addr = (addr_t)V3_AllocPages(mem_pages);
    // Fully accessible, flagged as the base (fallback) region, and backed.
64     map->base_region.flags.read = 1;
65     map->base_region.flags.write = 1;
66     map->base_region.flags.exec = 1;
67     map->base_region.flags.base = 1;
68     map->base_region.flags.alloced = 1;
70     if ((void *)map->base_region.host_addr == NULL) {
71 	PrintError("Could not allocate Guest memory\n");
75     //memset(V3_VAddr((void *)map->base_region.host_addr), 0xffffffff, map->base_region.guest_end);
77     v3_register_hypercall(vm, MEM_OFFSET_HCALL, mem_offset_hypercall, NULL);
// Per-core scratch host virtual address for emulating accesses to hooked,
// unbacked regions: core N uses page N of the hook_hvas area allocated in
// v3_init_mem_map().
83 static inline addr_t get_hook_hva(struct guest_info * info) {
84     return (addr_t)(info->vm_info->mem_map.hook_hvas + (PAGE_SIZE_4KB * info->cpu_id));
// Tear down the VM's memory map: delete every shadow region in the tree,
// then free the base guest-memory allocation and the per-core hook pages.
87 void v3_delete_shadow_map(struct v3_vm_info * vm) {
88     struct rb_node * node = v3_rb_first(&(vm->mem_map.shdw_regions));
89     struct v3_shadow_region * reg;
90     struct rb_node * tmp_node = NULL;
93 	reg = rb_entry(node, struct v3_shadow_region, tree_node);
	// Advance the iterator BEFORE deleting: v3_delete_shadow_region()
	// erases reg's node from the tree, invalidating it.
95 	node = v3_rb_next(node);
97 	v3_delete_shadow_region(vm, reg);
    // NOTE(review): base_region.host_addr was allocated with
    // V3_AllocPages(mem_pages) and hook_hvas with V3_AllocPages(num_cores),
    // but both are released via single-page V3_FreePage calls — confirm that
    // the host API frees the whole allocation, otherwise this leaks.
100     V3_FreePage((void *)(vm->mem_map.base_region.host_addr));
101     V3_FreePage(V3_PAddr((void *)(vm->mem_map.hook_hvas)));
// Add an unhooked, host-backed shadow region mapping guest physical
// [guest_addr_start, guest_addr_end) to host_addr for the given core
// (or V3_MEM_CORE_ANY). The region is fully accessible (r/w/x).
107 int v3_add_shadow_mem( struct v3_vm_info * vm, uint16_t core_id,
108 		       addr_t guest_addr_start,
109 		       addr_t guest_addr_end,
    // NOTE(review): V3_Malloc return is not NULL-checked before the memset.
112     struct v3_shadow_region * entry = (struct v3_shadow_region *)V3_Malloc(sizeof(struct v3_shadow_region));
113     memset(entry, 0, sizeof(struct v3_shadow_region));
115     entry->guest_start = guest_addr_start;
116     entry->guest_end = guest_addr_end;
117     entry->host_addr = host_addr;
    // No hooks: accesses go straight to the backing host memory.
118     entry->write_hook = NULL;
119     entry->read_hook = NULL;
120     entry->priv_data = NULL;
121     entry->core_id = core_id;
123     entry->flags.read = 1;
124     entry->flags.write = 1;
125     entry->flags.exec = 1;
    // alloced=1: region has real host backing (see v3_get_shadow_addr()).
126     entry->flags.alloced = 1;
    // insert_shadow_region() returns non-NULL on overlap/conflict.
128     if (insert_shadow_region(vm, entry)) {
// Register a write-hooked region: reads/execs go directly to the backing
// host memory (read=1, exec=1), while writes fault and are emulated through
// the supplied write() callback (flags.write is deliberately left 0).
138 int v3_hook_write_mem(struct v3_vm_info * vm, uint16_t core_id,
139 		      addr_t guest_addr_start, addr_t guest_addr_end, addr_t host_addr,
140 		      int (*write)(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data),
    // NOTE(review): V3_Malloc return is not NULL-checked before the memset.
143     struct v3_shadow_region * entry = (struct v3_shadow_region *)V3_Malloc(sizeof(struct v3_shadow_region));
144     memset(entry, 0, sizeof(struct v3_shadow_region));
146     entry->guest_start = guest_addr_start;
147     entry->guest_end = guest_addr_end;
148     entry->host_addr = host_addr;
149     entry->write_hook = write;
150     entry->read_hook = NULL;
151     entry->priv_data = priv_data;
152     entry->core_id = core_id;
154     entry->flags.hook = 1;
155     entry->flags.read = 1;
156     entry->flags.exec = 1;
    // Backed by host memory, so v3_handle_mem_hook() can read the real page.
157     entry->flags.alloced = 1;
    // insert_shadow_region() returns non-NULL on overlap/conflict.
160     if (insert_shadow_region(vm, entry)) {
// Register a fully-hooked region with no host backing (host_addr = NULL,
// alloced stays 0): every read and write faults and is emulated through the
// supplied callbacks, using the per-core scratch page (see get_hook_hva()).
168 int v3_hook_full_mem(struct v3_vm_info * vm, uint16_t core_id,
169 		     addr_t guest_addr_start, addr_t guest_addr_end,
170 		     int (*read)(struct guest_info * core, addr_t guest_addr, void * dst, uint_t length, void * priv_data),
171 		     int (*write)(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data),
    // NOTE(review): V3_Malloc return is not NULL-checked before the memset.
174     struct v3_shadow_region * entry = (struct v3_shadow_region *)V3_Malloc(sizeof(struct v3_shadow_region));
175     memset(entry, 0, sizeof(struct v3_shadow_region));
177     entry->guest_start = guest_addr_start;
178     entry->guest_end = guest_addr_end;
179     entry->host_addr = (addr_t)NULL;
180     entry->write_hook = write;
181     entry->read_hook = read;
182     entry->priv_data = priv_data;
183     entry->core_id = core_id;
    // Only hook=1: read/write/exec/alloced all remain 0 so every access traps.
185     entry->flags.hook = 1;
    // insert_shadow_region() returns non-NULL on overlap/conflict.
187     if (insert_shadow_region(vm, entry)) {
196 // This will unhook the memory hook registered at start address
197 // We do not support unhooking subregions
// Removes (deletes) the hooked region covering guest_addr_start for the
// given core. Fails if the region found there is not a hooked region.
// NOTE(review): reg is dereferenced without a NULL check — v3_get_shadow_region()
// appears to always return at least the base region, but confirm.
198 int v3_unhook_mem(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr_start) {
199     struct v3_shadow_region * reg = v3_get_shadow_region(vm, core_id, guest_addr_start);
201     if (!reg->flags.hook) {
202 	PrintError("Trying to unhook a non hooked memory region (addr=%p)\n", (void *)guest_addr_start);
206     v3_delete_shadow_region(vm, reg);
// Core red-black-tree insertion. Regions are ordered first by guest address
// range; regions with identical ranges are further ordered by core_id so a
// range can have distinct per-core entries. On conflict (exact duplicate
// range+core, or a partial overlap) the clashing region is returned;
// presumably NULL is returned on successful link — the final return is
// outside this visible span, so confirm.
214 struct v3_shadow_region * __insert_shadow_region(struct v3_vm_info * vm,
215 						 struct v3_shadow_region * region) {
216     struct rb_node ** p = &(vm->mem_map.shdw_regions.rb_node);
217     struct rb_node * parent = NULL;
218     struct v3_shadow_region * tmp_region;
222 	tmp_region = rb_entry(parent, struct v3_shadow_region, tree_node);
	// Entirely below the current node -> descend left;
	// entirely above -> descend right (the else branches are not visible here).
224 	if (region->guest_end <= tmp_region->guest_start) {
226 	} else if (region->guest_start >= tmp_region->guest_end) {
	    // Ranges overlap: only an EXACT range match is tolerated
	    // (for per-core variants); partial overlaps are rejected.
229 	    if ((region->guest_end != tmp_region->guest_end) ||
230 		(region->guest_start != tmp_region->guest_start)) {
231 		PrintError("Trying to map a partial overlapped core specific page...\n");
232 		return tmp_region; // This is ugly...
233 	    } else if (region->core_id == tmp_region->core_id) {
		// Same range, lower core_id -> descend left (body not visible).
235 	    } else if (region->core_id < tmp_region->core_id) {
    // Link the new node at the leaf position found above; the caller
    // (insert_shadow_region) performs the rb rebalancing.
243     rb_link_node(&(region->tree_node), parent, p);
// Public-ish wrapper around __insert_shadow_region(): on success, rebalances
// the tree and invalidates any paging-structure entries covering the new
// region on every core, so the next access refaults through the new mapping.
// Returns the conflicting region on failure (propagated from the helper).
250 struct v3_shadow_region * insert_shadow_region(struct v3_vm_info * vm,
251 					       struct v3_shadow_region * region) {
252     struct v3_shadow_region * ret;
    // Non-NULL means a conflicting region was found; bail out (return is
    // outside this visible span).
255     if ((ret = __insert_shadow_region(vm, region))) {
    // Restore red-black invariants after the raw link.
259     v3_rb_insert_color(&(region->tree_node), &(vm->mem_map.shdw_regions));
263     for (i = 0; i < vm->num_cores; i++) {
264 	struct guest_info * info = &(vm->cores[i]);
266 	// flush virtual page tables
267 	// 3 cases shadow, shadow passthrough, and nested
269 	if (info->shdw_pg_mode == SHADOW_PAGING) {
270 	    v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);
	    // Shadow-passthrough (guest paging off): invalidate each 4KB
	    // passthrough mapping in the new region's range.
272 	    if (mem_mode == PHYSICAL_MEM) {
275 		for (cur_addr = region->guest_start;
276 		     cur_addr < region->guest_end;
277 		     cur_addr += PAGE_SIZE_4KB) {
278 		    v3_invalidate_passthrough_addr(info, cur_addr);
		// Full shadow paging: cheaper to drop all shadow page tables.
281 		v3_invalidate_shadow_pts(info);
284 	} else if (info->shdw_pg_mode == NESTED_PAGING) {
	    // Nested paging: invalidate each 4KB nested mapping in range.
287 	    for (cur_addr = region->guest_start;
288 		 cur_addr < region->guest_end;
289 		 cur_addr += PAGE_SIZE_4KB) {
291 		v3_invalidate_nested_addr(info, cur_addr);
// Page-fault path for hooked regions: emulate the faulting guest instruction,
// directing the data through the region's read/write hooks. op_addr is the
// host VA the emulator operates on — either the real backing page, or (for
// unbacked full hooks) this core's scratch page.
304 int v3_handle_mem_hook(struct guest_info * info, addr_t guest_va, addr_t guest_pa,
305 		       struct v3_shadow_region * reg, pf_error_t access_info) {
    // No host backing (full hook): use the per-core scratch page.
309     if (reg->flags.alloced == 0) {
310 	op_addr = get_hook_hva(info);
    // Otherwise translate the faulting GPA to its backing host VA.
312 	op_addr = (addr_t)V3_VAddr((void *)v3_get_shadow_addr(reg, info->cpu_id, guest_pa));
316     if (access_info.write == 1) {
319 	if (v3_emulate_write_op(info, guest_va, guest_pa, op_addr,
320 				reg->write_hook, reg->priv_data) == -1) {
321 	    PrintError("Write Full Hook emulation failed\n");
	// Sanity check: a read fault should not occur on a region whose
	// read flag allows direct access.
327 	if (reg->flags.read == 1) {
328 	    PrintError("Tried to emulate read for a guest Readable page\n");
	// Read emulation also takes the write hook, presumably so a
	// read-modify-write instruction can complete — confirm against
	// v3_emulate_read_op().
332 	if (v3_emulate_read_op(info, guest_va, guest_pa, op_addr,
333 			       reg->read_hook, reg->write_hook,
334 			       reg->priv_data) == -1) {
335 	    PrintError("Read Full Hook emulation failed\n");
// Look up the shadow region covering guest_addr for the given core:
// binary-search the rb-tree (descent branches for the miss cases fall
// outside this visible span); a range hit only counts if the region is for
// this core or for V3_MEM_CORE_ANY. If no registered region matches, fall
// back to the base region after bounds-checking against guest memory size.
348 struct v3_shadow_region * v3_get_shadow_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
349     struct rb_node * n = vm->mem_map.shdw_regions.rb_node;
350     struct v3_shadow_region * reg = NULL;
353 	reg = rb_entry(n, struct v3_shadow_region, tree_node);
355 	if (guest_addr < reg->guest_start) {
357 	} else if (guest_addr >= reg->guest_end) {
	    // Address is inside this region's range; match on core.
360 	    if ((core_id == reg->core_id) ||
361 		(reg->core_id == V3_MEM_CORE_ANY)) {
370     // There is not registered region, so we check if its a valid address in the base region
    // NOTE(review): this uses '>' — an address exactly equal to guest_end
    // (one past the last valid byte) passes the check; confirm intended.
372     if (guest_addr > vm->mem_map.base_region.guest_end) {
373 	PrintError("Guest Address Exceeds Base Memory Size (ga=%p), (limit=%p)\n",
374 		   (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end);
375 	v3_print_mem_map(vm);
380     return &(vm->mem_map.base_region);
// Remove a shadow region: first invalidate every paging-structure entry that
// could reference it on every core (mirrors insert_shadow_region()'s flush
// logic), then erase it from the rb-tree. Freeing of reg itself, if any,
// happens outside this visible span.
386 void v3_delete_shadow_region(struct v3_vm_info * vm, struct v3_shadow_region * reg) {
393     for (i = 0; i < vm->num_cores; i++) {
394 	struct guest_info * info = &(vm->cores[i]);
396 	// flush virtual page tables
397 	// 3 cases shadow, shadow passthrough, and nested
399 	if (info->shdw_pg_mode == SHADOW_PAGING) {
400 	    v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);
	    // Shadow-passthrough (guest paging off): invalidate the region's
	    // 4KB passthrough mappings one page at a time.
402 	    if (mem_mode == PHYSICAL_MEM) {
405 		for (cur_addr = reg->guest_start;
406 		     cur_addr < reg->guest_end;
407 		     cur_addr += PAGE_SIZE_4KB) {
408 		    v3_invalidate_passthrough_addr(info, cur_addr);
		// Full shadow paging: drop all shadow page tables.
411 		v3_invalidate_shadow_pts(info);
414 	} else if (info->shdw_pg_mode == NESTED_PAGING) {
417 	    for (cur_addr = reg->guest_start;
418 		 cur_addr < reg->guest_end;
419 		 cur_addr += PAGE_SIZE_4KB) {
421 		v3_invalidate_nested_addr(info, cur_addr);
    // Flushes done on all cores; now unlink from the region tree.
426     v3_rb_erase(&(reg->tree_node), &(vm->mem_map.shdw_regions));
430 // flush virtual page tables
431 // 3 cases shadow, shadow passthrough, and nested
// Translate a guest physical address to its host address within a backed
// region: offset into the region plus the region's host base. Only valid
// for regions with host backing (alloced == 1); the failure-path return
// falls outside this visible span. core_id is unused in the visible code.
438 addr_t v3_get_shadow_addr(struct v3_shadow_region * reg, uint16_t core_id, addr_t guest_addr) {
439     if (reg && (reg->flags.alloced == 1)) {
440 	return (guest_addr - reg->guest_start) + reg->host_addr;
442 	//  PrintError("MEM Region Invalid\n");
// Debug dump of the VM's memory layout: the base region first, then every
// registered shadow region in tree (address) order with its flags and hooks.
450 void v3_print_mem_map(struct v3_vm_info * vm) {
451     struct rb_node * node = v3_rb_first(&(vm->mem_map.shdw_regions));
452     struct v3_shadow_region * reg = &(vm->mem_map.base_region);
455     V3_Print("Memory Layout:\n");
    // guest_end is exclusive, hence the -1 to show the last valid address.
458     V3_Print("Base Region:  0x%p - 0x%p -> 0x%p\n",
459 	     (void *)(reg->guest_start),
460 	     (void *)(reg->guest_end - 1),
461 	     (void *)(reg->host_addr));
464     // If the memory map is empty, don't print it
470 	reg = rb_entry(node, struct v3_shadow_region, tree_node);
472 	V3_Print("%d:  0x%p - 0x%p -> 0x%p\n", i,
473 		 (void *)(reg->guest_start),
474 		 (void *)(reg->guest_end - 1),
475 		 (void *)(reg->host_addr));
477 	V3_Print("\t(flags=%x) (WriteHook = 0x%p) (ReadHook = 0x%p)\n",
479 		 (void *)(reg->write_hook),
480 		 (void *)(reg->read_hook));
483     } while ((node = v3_rb_next(node)));