/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
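
/*
 * Guest physical memory map management.
 *
 * A VM's memory is described by one contiguous, allocated base region
 * covering all of guest RAM, plus a red-black tree of shadow regions
 * that override it: directly mapped regions (v3_add_shadow_mem),
 * write-hooked regions (v3_hook_write_mem), and fully hooked regions
 * (v3_hook_full_mem) whose reads and writes are both emulated.
 */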

#include <palacios/vmm_mem.h>
#include <palacios/vmm.h>
#include <palacios/vmm_util.h>
#include <palacios/vmm_emulator.h>
#include <palacios/vm_guest.h>

#include <palacios/vmm_shadow_paging.h>
#include <palacios/vmm_direct_paging.h>

struct v3_shadow_region * insert_shadow_region(struct v3_vm_info * vm,
                                               struct v3_shadow_region * region);
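
/*
 * MEM_OFFSET_HCALL: lets a cooperating guest discover where its physical
 * memory lives in the host. The host address of the guest's base region
 * is returned in RBX.
 */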
static int mem_offset_hypercall(struct guest_info * info, uint_t hcall_id, void * private_data) {
    PrintDebug("V3Vee: Memory offset hypercall (offset=%p)\n",
               (void *)(info->vm_info->mem_map.base_region.host_addr));

    info->vm_regs.rbx = info->vm_info->mem_map.base_region.host_addr;

    return 0;
}
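
/*
 * Create the VM's memory map: an allocated base region spanning all of
 * guest physical memory, an empty shadow-region tree, and one scratch
 * page per core (hook_hvas) used when emulating accesses to hooked
 * regions that have no backing memory.
 */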
int v3_init_mem_map(struct v3_vm_info * vm) {
    struct v3_mem_map * map = &(vm->mem_map);
    addr_t mem_pages = vm->mem_size >> 12;

    memset(&(map->base_region), 0, sizeof(struct v3_shadow_region));

    map->shdw_regions.rb_node = NULL;

    map->hook_hvas = V3_VAddr(V3_AllocPages(vm->num_cores));

    // There is an underlying region that contains all of the guest memory
    // PrintDebug("Mapping %d pages of memory (%u bytes)\n", (int)mem_pages, (uint_t)vm->mem_size);

    map->base_region.guest_start = 0;
    map->base_region.guest_end = mem_pages * PAGE_SIZE_4KB;
    map->base_region.host_type = SHDW_REGION_ALLOCATED;
    map->base_region.host_addr = (addr_t)V3_AllocPages(mem_pages);

    map->base_region.flags.read = 1;
    map->base_region.flags.write = 1;
    map->base_region.flags.exec = 1;
    map->base_region.flags.base = 1;
    map->base_region.flags.alloced = 1;

    if ((void *)map->base_region.host_addr == NULL) {
        PrintError("Could not allocate Guest memory\n");
        return -1;
    }

    //memset(V3_VAddr((void *)map->base_region.host_addr), 0xffffffff, map->base_region.guest_end);

    v3_register_hypercall(vm, MEM_OFFSET_HCALL, mem_offset_hypercall, NULL);

    return 0;
}
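
/*
 * Each core gets its own 4KB scratch page out of the hook_hvas block,
 * indexed by cpu_id, so concurrent hook emulations do not collide.
 */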
static inline addr_t get_hook_hva(struct guest_info * info) {
    return (addr_t)(info->vm_info->mem_map.hook_hvas + (PAGE_SIZE_4KB * info->cpu_id));
}
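
/*
 * Tear down the memory map: delete every shadow region, then free the
 * base region's backing memory and the per-core hook pages.
 */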
void v3_delete_shadow_map(struct v3_vm_info * vm) {
    struct rb_node * node = v3_rb_first(&(vm->mem_map.shdw_regions));
    struct v3_shadow_region * reg;

    while (node) {
        reg = rb_entry(node, struct v3_shadow_region, tree_node);

        // Advance before deleting: deletion erases the node from the tree
        node = v3_rb_next(node);

        v3_delete_shadow_region(vm, reg);
    }

    V3_FreePage((void *)(vm->mem_map.base_region.host_addr));
    V3_FreePage(V3_PAddr((void *)(vm->mem_map.hook_hvas)));
}
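
/*
 * Map [guest_addr_start, guest_addr_end) directly to host memory at
 * host_addr with full read/write/execute permissions and no hooks.
 */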
int v3_add_shadow_mem(struct v3_vm_info * vm, uint16_t core_id,
                      addr_t guest_addr_start,
                      addr_t guest_addr_end,
                      addr_t host_addr) {
    struct v3_shadow_region * entry = (struct v3_shadow_region *)V3_Malloc(sizeof(struct v3_shadow_region));
    memset(entry, 0, sizeof(struct v3_shadow_region));

    entry->guest_start = guest_addr_start;
    entry->guest_end = guest_addr_end;
    entry->host_type = SHDW_REGION_ALLOCATED;
    entry->host_addr = host_addr;
    entry->write_hook = NULL;
    entry->read_hook = NULL;
    entry->priv_data = NULL;
    entry->core_id = core_id;

    entry->flags.read = 1;
    entry->flags.write = 1;
    entry->flags.exec = 1;
    entry->flags.alloced = 1;

    if (insert_shadow_region(vm, entry)) {
        V3_Free(entry);
        return -1;
    }

    return 0;
}
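
/*
 * Register a write hook over [guest_addr_start, guest_addr_end). The
 * region stays readable and executable through host_addr, but the write
 * permission is left clear, so guest writes fault and are forwarded to
 * the write() callback.
 *
 * An illustrative sketch of emulating ROM (drop_rom_write,
 * rom_backing_hpa, and the address range are hypothetical, not part of
 * this file):
 *
 *   static int drop_rom_write(struct guest_info * core, addr_t guest_addr,
 *                             void * src, uint_t length, void * priv_data) {
 *       return 0; // silently discard the write
 *   }
 *
 *   v3_hook_write_mem(vm, V3_MEM_CORE_ANY, 0xc0000, 0xc8000,
 *                     rom_backing_hpa, drop_rom_write, NULL);
 */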
int v3_hook_write_mem(struct v3_vm_info * vm, uint16_t core_id,
                      addr_t guest_addr_start, addr_t guest_addr_end, addr_t host_addr,
                      int (*write)(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data),
                      void * priv_data) {
    struct v3_shadow_region * entry = (struct v3_shadow_region *)V3_Malloc(sizeof(struct v3_shadow_region));
    memset(entry, 0, sizeof(struct v3_shadow_region));

    entry->guest_start = guest_addr_start;
    entry->guest_end = guest_addr_end;
    entry->host_type = SHDW_REGION_WRITE_HOOK;
    entry->host_addr = host_addr;
    entry->write_hook = write;
    entry->read_hook = NULL;
    entry->priv_data = priv_data;
    entry->core_id = core_id;

    entry->flags.hook = 1;
    entry->flags.read = 1;
    entry->flags.exec = 1;
    entry->flags.alloced = 1;

    if (insert_shadow_region(vm, entry)) {
        V3_Free(entry);
        return -1;
    }

    return 0;
}
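
/*
 * Register a full hook: the region gets no backing memory and no guest
 * permissions, so every read and write faults and is emulated through
 * the supplied callbacks. Typically used for device (MMIO) emulation.
 */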
int v3_hook_full_mem(struct v3_vm_info * vm, uint16_t core_id,
                     addr_t guest_addr_start, addr_t guest_addr_end,
                     int (*read)(struct guest_info * core, addr_t guest_addr, void * dst, uint_t length, void * priv_data),
                     int (*write)(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data),
                     void * priv_data) {
    struct v3_shadow_region * entry = (struct v3_shadow_region *)V3_Malloc(sizeof(struct v3_shadow_region));
    memset(entry, 0, sizeof(struct v3_shadow_region));

    entry->guest_start = guest_addr_start;
    entry->guest_end = guest_addr_end;
    entry->host_type = SHDW_REGION_FULL_HOOK;
    entry->host_addr = (addr_t)NULL;
    entry->write_hook = write;
    entry->read_hook = read;
    entry->priv_data = priv_data;
    entry->core_id = core_id;

    entry->flags.hook = 1;

    if (insert_shadow_region(vm, entry)) {
        V3_Free(entry);
        return -1;
    }

    return 0;
}
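
/*
 * Illustrative sketch of hooking one page of device MMIO (my_dev_read,
 * my_dev_write, dev_base, and my_dev_state are hypothetical):
 *
 *   v3_hook_full_mem(vm, V3_MEM_CORE_ANY, dev_base,
 *                    dev_base + PAGE_SIZE_4KB,
 *                    my_dev_read, my_dev_write, my_dev_state);
 */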

// This will unhook the memory hook registered at the start address.
// We do not support unhooking subregions.
int v3_unhook_mem(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr_start) {
    struct v3_shadow_region * reg = v3_get_shadow_region(vm, core_id, guest_addr_start);

    if ((reg == NULL) || (reg->flags.hook == 0)) {
        PrintError("Trying to unhook a non-hooked memory region (addr=%p)\n", (void *)guest_addr_start);
        return -1;
    }

    v3_delete_shadow_region(vm, reg);
    return 0;
}
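
/*
 * Insert a region into the red-black tree without rebalancing. Regions
 * are ordered by guest address range; identical ranges are ordered by
 * core_id so per-core overrides of the same range can coexist. Returns
 * NULL on success, or the conflicting region on overlap.
 */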
static inline struct v3_shadow_region * __insert_shadow_region(struct v3_vm_info * vm,
                                                               struct v3_shadow_region * region) {
    struct rb_node ** p = &(vm->mem_map.shdw_regions.rb_node);
    struct rb_node * parent = NULL;
    struct v3_shadow_region * tmp_region;

    while (*p) {
        parent = *p;
        tmp_region = rb_entry(parent, struct v3_shadow_region, tree_node);

        if (region->guest_end <= tmp_region->guest_start) {
            p = &(*p)->rb_left;
        } else if (region->guest_start >= tmp_region->guest_end) {
            p = &(*p)->rb_right;
        } else {
            if ((region->guest_end != tmp_region->guest_end) ||
                (region->guest_start != tmp_region->guest_start)) {
                PrintError("Trying to map a partially overlapping core-specific page...\n");
                return tmp_region; // This is ugly...
            } else if (region->core_id == tmp_region->core_id) {
                return tmp_region;
            } else if (region->core_id < tmp_region->core_id) {
                p = &(*p)->rb_left;
            } else {
                p = &(*p)->rb_right;
            }
        }
    }

    rb_link_node(&(region->tree_node), parent, p);
    return NULL;
}
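
/*
 * Insert a region and rebalance the tree, then invalidate any cached
 * translations covering the new range on every core: passthrough
 * entries for shadow paging in physical mode, the full shadow page
 * tables in virtual mode, and nested page table entries for nested
 * paging.
 */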
struct v3_shadow_region * insert_shadow_region(struct v3_vm_info * vm,
                                               struct v3_shadow_region * region) {
    struct v3_shadow_region * ret;
    int i = 0;

    if ((ret = __insert_shadow_region(vm, region))) {
        return ret;
    }

    v3_rb_insert_color(&(region->tree_node), &(vm->mem_map.shdw_regions));

    for (i = 0; i < vm->num_cores; i++) {
        struct guest_info * info = &(vm->cores[i]);

        // flush virtual page tables
        // 3 cases: shadow, shadow passthrough, and nested
        if (info->shdw_pg_mode == SHADOW_PAGING) {
            v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);

            if (mem_mode == PHYSICAL_MEM) {
                addr_t cur_addr;

                for (cur_addr = region->guest_start;
                     cur_addr < region->guest_end;
                     cur_addr += PAGE_SIZE_4KB) {
                    v3_invalidate_passthrough_addr(info, cur_addr);
                }
            } else {
                v3_invalidate_shadow_pts(info);
            }
        } else if (info->shdw_pg_mode == NESTED_PAGING) {
            addr_t cur_addr;

            for (cur_addr = region->guest_start;
                 cur_addr < region->guest_end;
                 cur_addr += PAGE_SIZE_4KB) {
                v3_invalidate_nested_addr(info, cur_addr);
            }
        }
    }

    return NULL;
}
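
/*
 * Page-fault handler for hooked regions. The faulting access is
 * emulated against op_addr: the region's real backing memory when the
 * region is allocated (write hooks), or this core's scratch page when
 * it is not (full hooks). The emulator is handed the region's hook
 * callbacks and private data.
 */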
int v3_handle_mem_hook(struct guest_info * info, addr_t guest_va, addr_t guest_pa,
                       struct v3_shadow_region * reg, pf_error_t access_info) {
    addr_t op_addr = 0;

    if (reg->flags.alloced == 0) {
        op_addr = get_hook_hva(info);
    } else {
        op_addr = (addr_t)V3_VAddr((void *)v3_get_shadow_addr(reg, info->cpu_id, guest_pa));
    }

    if (access_info.write == 1) {
        // Write operation
        if (v3_emulate_write_op(info, guest_va, guest_pa, op_addr,
                                reg->write_hook, reg->priv_data) == -1) {
            PrintError("Write Full Hook emulation failed\n");
            return -1;
        }
    } else {
        // Read operation
        if (reg->flags.read == 1) {
            PrintError("Tried to emulate a read on a guest-readable page\n");
            return -1;
        }

        if (v3_emulate_read_op(info, guest_va, guest_pa, op_addr,
                               reg->read_hook, reg->write_hook,
                               reg->priv_data) == -1) {
            PrintError("Read Full Hook emulation failed\n");
            return -1;
        }
    }

    return 0;
}
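
/*
 * Look up the region covering guest_addr for a given core, honoring
 * V3_MEM_CORE_ANY wildcard regions. Falls back to the base region for
 * addresses inside guest RAM, and returns NULL (after dumping the
 * memory map) for addresses beyond it.
 */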
struct v3_shadow_region * v3_get_shadow_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
    struct rb_node * n = vm->mem_map.shdw_regions.rb_node;
    struct v3_shadow_region * reg = NULL;

    while (n) {
        reg = rb_entry(n, struct v3_shadow_region, tree_node);

        if (guest_addr < reg->guest_start) {
            n = n->rb_left;
        } else if (guest_addr >= reg->guest_end) {
            n = n->rb_right;
        } else {
            if ((core_id == reg->core_id) ||
                (reg->core_id == V3_MEM_CORE_ANY)) {
                return reg;
            } else {
                n = n->rb_right;
            }
        }
    }

    // No registered region, so check whether it is a valid address in the base region
    if (guest_addr >= vm->mem_map.base_region.guest_end) {
        PrintError("Guest Address Exceeds Base Memory Size (ga=%p), (limit=%p)\n",
                   (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end);
        v3_print_mem_map(vm);
        return NULL;
    }

    return &(vm->mem_map.base_region);
}
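
/*
 * Remove a region from the tree and free it, first invalidating any
 * cached translations for its range (same three cases as insertion).
 */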
void v3_delete_shadow_region(struct v3_vm_info * vm, struct v3_shadow_region * reg) {
    int i = 0;

    if (reg == NULL) {
        return;
    }

    for (i = 0; i < vm->num_cores; i++) {
        struct guest_info * info = &(vm->cores[i]);

        // flush virtual page tables
        // 3 cases: shadow, shadow passthrough, and nested
        if (info->shdw_pg_mode == SHADOW_PAGING) {
            v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);

            if (mem_mode == PHYSICAL_MEM) {
                addr_t cur_addr;

                for (cur_addr = reg->guest_start;
                     cur_addr < reg->guest_end;
                     cur_addr += PAGE_SIZE_4KB) {
                    v3_invalidate_passthrough_addr(info, cur_addr);
                }
            } else {
                v3_invalidate_shadow_pts(info);
            }
        } else if (info->shdw_pg_mode == NESTED_PAGING) {
            addr_t cur_addr;

            for (cur_addr = reg->guest_start;
                 cur_addr < reg->guest_end;
                 cur_addr += PAGE_SIZE_4KB) {
                v3_invalidate_nested_addr(info, cur_addr);
            }
        }
    }

    v3_rb_erase(&(reg->tree_node), &(vm->mem_map.shdw_regions));

    V3_Free(reg);
}
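
/*
 * Translate a guest physical address to its host address via the
 * region's linear offset. Returns 0 for unallocated (fully hooked)
 * regions, which have no backing memory.
 */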
addr_t v3_get_shadow_addr(struct v3_shadow_region * reg, uint16_t core_id, addr_t guest_addr) {
    if (reg && (reg->flags.alloced == 1)) {
        return (guest_addr - reg->guest_start) + reg->host_addr;
    } else {
        // PrintError("MEM Region Invalid\n");
        return 0;
    }
}
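
/*
 * Dump the memory layout: the base region, then every shadow region in
 * guest-address order with its flags and hook callbacks.
 */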
void v3_print_mem_map(struct v3_vm_info * vm) {
    struct rb_node * node = v3_rb_first(&(vm->mem_map.shdw_regions));
    struct v3_shadow_region * reg = &(vm->mem_map.base_region);
    int i = 0;

    V3_Print("Memory Layout:\n");

    V3_Print("Base Region: 0x%p - 0x%p -> 0x%p\n",
             (void *)(reg->guest_start),
             (void *)(reg->guest_end - 1),
             (void *)(reg->host_addr));

    // If the memory map is empty, don't print it
    if (node == NULL) {
        return;
    }

    do {
        reg = rb_entry(node, struct v3_shadow_region, tree_node);

        V3_Print("%d: 0x%p - 0x%p -> 0x%p\n", i,
                 (void *)(reg->guest_start),
                 (void *)(reg->guest_end - 1),
                 (void *)(reg->host_addr));

        V3_Print("\t(flags=%x) (WriteHook = 0x%p) (ReadHook = 0x%p)\n",
                 reg->flags.value,
                 (void *)(reg->write_hook),
                 (void *)(reg->read_hook));

        i++;
    } while ((node = v3_rb_next(node)));
}