/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm_mem.h>
#include <palacios/vmm.h>
#include <palacios/vmm_util.h>
#include <palacios/vmm_emulator.h>
#include <palacios/vm_guest.h>

#include <palacios/vmm_shadow_paging.h>
#include <palacios/vmm_direct_paging.h>

#define MEM_OFFSET_HCALL 0x1000

struct v3_shadow_region * insert_shadow_region(struct v3_vm_info * vm,
                                               struct v3_shadow_region * region);

static int mem_offset_hypercall(struct guest_info * info, uint_t hcall_id, void * private_data) {
    PrintDebug("V3Vee: Memory offset hypercall (offset=%p)\n",
               (void *)(info->vm_info->mem_map.base_region.host_addr));

    // Return the host physical base of guest memory to the guest in RBX
    info->vm_regs.rbx = info->vm_info->mem_map.base_region.host_addr;

    return 0;
}

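/*
 * Guest-side usage sketch (illustrative, not part of this file): a
 * cooperating guest can recover the host base address of its memory by
 * issuing hypercall MEM_OFFSET_HCALL and reading the result out of RBX,
 * per the register convention of mem_offset_hypercall() above. The
 * VMMCALL opcode and inline asm below are an assumption about the guest
 * toolchain, not something this file defines.
 *
 *   static inline unsigned long get_v3_mem_offset(void) {
 *       unsigned long offset = 0;
 *       asm volatile ("vmmcall"
 *                     : "=b"(offset)
 *                     : "a"(0x1000)      // MEM_OFFSET_HCALL
 *                     : "memory");
 *       return offset;
 *   }
 */
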
int v3_init_mem_map(struct v3_vm_info * vm) {
    struct v3_mem_map * map = &(vm->mem_map);
    addr_t mem_pages = vm->mem_size >> 12;

    map->shdw_regions.rb_node = NULL;

    // One scratch page per core, used as the target of full-hook emulation
    map->hook_hvas = V3_VAddr(V3_AllocPages(vm->num_cores));

    // There is an underlying base region that contains all of the guest memory
    // PrintDebug("Mapping %d pages of memory (%u bytes)\n", (int)mem_pages, (uint_t)vm->mem_size);

    map->base_region.guest_start = 0;
    map->base_region.guest_end = mem_pages * PAGE_SIZE_4KB;
    map->base_region.host_type = SHDW_REGION_ALLOCATED;
    map->base_region.host_addr = (addr_t)V3_AllocPages(mem_pages);

    if ((void *)map->base_region.host_addr == NULL) {
        PrintError("Could not allocate Guest memory\n");
        return -1;
    }

    //memset(V3_VAddr((void *)map->base_region.host_addr), 0xffffffff, map->base_region.guest_end);

    v3_register_hypercall(vm, MEM_OFFSET_HCALL, mem_offset_hypercall, NULL);

    return 0;
}

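// Each core gets its own page out of the hook_hvas block, so concurrent
// full-hook emulation on different cores never shares a scratch page.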
static inline addr_t get_hook_hva(struct guest_info * info) {
    return (addr_t)(info->vm_info->mem_map.hook_hvas + (PAGE_SIZE_4KB * info->cpu_id));
}

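// Tears down the entire shadow map: every registered region is deleted
// (which also flushes the relevant page tables), then the base guest memory
// and the per-core hook pages are returned to the host.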
void v3_delete_shadow_map(struct v3_vm_info * vm) {
    struct rb_node * node = v3_rb_first(&(vm->mem_map.shdw_regions));
    struct v3_shadow_region * reg;

    while (node) {
        reg = rb_entry(node, struct v3_shadow_region, tree_node);

        // Advance before deleting: v3_delete_shadow_region() unlinks reg's node
        node = v3_rb_next(node);

        v3_delete_shadow_region(vm, reg);
    }

    V3_FreePage((void *)(vm->mem_map.base_region.host_addr));
    V3_FreePage(V3_PAddr((void *)(vm->mem_map.hook_hvas)));
}

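/*
 * Usage sketch (illustrative): to back a 4KB guest-physical page at
 * 0xa0000 on every core with a freshly allocated host page, a caller
 * could do the following. V3_MEM_CORE_ANY makes the region visible to
 * all cores; the address constants here are made up for the example.
 *
 *   addr_t host_page = (addr_t)V3_AllocPages(1);
 *   if (v3_add_shadow_mem(vm, V3_MEM_CORE_ANY, 0xa0000,
 *                         0xa0000 + PAGE_SIZE_4KB, host_page) == -1) {
 *       PrintError("Could not add shadow region\n");
 *   }
 */
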
int v3_add_shadow_mem( struct v3_vm_info * vm, uint16_t core_id,
                       addr_t guest_addr_start,
                       addr_t guest_addr_end,
                       addr_t host_addr) {
    struct v3_shadow_region * entry = (struct v3_shadow_region *)V3_Malloc(sizeof(struct v3_shadow_region));

    entry->guest_start = guest_addr_start;
    entry->guest_end = guest_addr_end;
    entry->host_type = SHDW_REGION_ALLOCATED;
    entry->host_addr = host_addr;
    entry->write_hook = NULL;
    entry->read_hook = NULL;
    entry->priv_data = NULL;
    entry->core_id = core_id;

    if (insert_shadow_region(vm, entry)) {
        V3_Free(entry);
        return -1;
    }

    return 0;
}

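// Write-hooked regions are backed by real host memory (host_addr), but
// every guest write faults into the VMM so the write callback can run;
// reads proceed at full speed against the backing page.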
int v3_hook_write_mem(struct v3_vm_info * vm, uint16_t core_id,
                      addr_t guest_addr_start, addr_t guest_addr_end, addr_t host_addr,
                      int (*write)(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data),
                      void * priv_data) {
    struct v3_shadow_region * entry = (struct v3_shadow_region *)V3_Malloc(sizeof(struct v3_shadow_region));

    entry->guest_start = guest_addr_start;
    entry->guest_end = guest_addr_end;
    entry->host_type = SHDW_REGION_WRITE_HOOK;
    entry->host_addr = host_addr;
    entry->write_hook = write;
    entry->read_hook = NULL;
    entry->priv_data = priv_data;
    entry->core_id = core_id;

    if (insert_shadow_region(vm, entry)) {
        V3_Free(entry);
        return -1;
    }

    return 0;
}

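// Fully hooked regions have no backing memory at all (host_addr is NULL):
// both reads and writes fault into the VMM and are emulated through the
// read/write callbacks, using the per-core scratch page from get_hook_hva().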
int v3_hook_full_mem(struct v3_vm_info * vm, uint16_t core_id,
                     addr_t guest_addr_start, addr_t guest_addr_end,
                     int (*read)(struct guest_info * core, addr_t guest_addr, void * dst, uint_t length, void * priv_data),
                     int (*write)(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data),
                     void * priv_data) {
    struct v3_shadow_region * entry = (struct v3_shadow_region *)V3_Malloc(sizeof(struct v3_shadow_region));

    entry->guest_start = guest_addr_start;
    entry->guest_end = guest_addr_end;
    entry->host_type = SHDW_REGION_FULL_HOOK;
    entry->host_addr = (addr_t)NULL;
    entry->write_hook = write;
    entry->read_hook = read;
    entry->priv_data = priv_data;
    entry->core_id = core_id;

    if (insert_shadow_region(vm, entry)) {
        V3_Free(entry);
        return -1;
    }

    return 0;
}

// This will unhook the memory hook registered at the start address.
// We do not support unhooking subregions.
int v3_unhook_mem(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr_start) {
    struct v3_shadow_region * reg = v3_get_shadow_region(vm, core_id, guest_addr_start);

    if (reg == NULL) {
        PrintError("Could not find region to unhook (addr=%p)\n", (void *)guest_addr_start);
        return -1;
    }

    // Only hooked regions (write hooks or full hooks) can be unhooked
    if ((reg->host_type != SHDW_REGION_FULL_HOOK) &&
        (reg->host_type != SHDW_REGION_WRITE_HOOK)) {
        PrintError("Trying to unhook a non hooked memory region (addr=%p)\n", (void *)guest_addr_start);
        return -1;
    }

    v3_delete_shadow_region(vm, reg);

    return 0;
}

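/*
 * The shadow regions live in a red-black tree keyed primarily by guest
 * address range and secondarily by core_id. Non-overlapping ranges order
 * the tree left/right as usual; regions covering an identical range are
 * ordered by core_id, which is what lets different cores register
 * core-private regions over the same guest addresses. Partial overlaps
 * are rejected.
 */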
struct v3_shadow_region * __insert_shadow_region(struct v3_vm_info * vm,
                                                 struct v3_shadow_region * region) {
    struct rb_node ** p = &(vm->mem_map.shdw_regions.rb_node);
    struct rb_node * parent = NULL;
    struct v3_shadow_region * tmp_region;

    while (*p) {
        parent = *p;
        tmp_region = rb_entry(parent, struct v3_shadow_region, tree_node);

        if (region->guest_end <= tmp_region->guest_start) {
            p = &(*p)->rb_left;
        } else if (region->guest_start >= tmp_region->guest_end) {
            p = &(*p)->rb_right;
        } else {
            if ((region->guest_end != tmp_region->guest_end) ||
                (region->guest_start != tmp_region->guest_start)) {
                PrintError("Trying to map a partially overlapped core specific page...\n");
                return tmp_region; // This is ugly...
            } else if (region->core_id == tmp_region->core_id) {
                return tmp_region;
            } else if (region->core_id < tmp_region->core_id) {
                p = &(*p)->rb_left;
            } else {
                p = &(*p)->rb_right;
            }
        }
    }

    rb_link_node(&(region->tree_node), parent, p);

    return NULL;
}

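// Inserts the region and rebalances the tree, then flushes any cached
// translations that could alias the new range on every core. Returns NULL
// on success, or the conflicting region already in the tree on failure.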
struct v3_shadow_region * insert_shadow_region(struct v3_vm_info * vm,
                                               struct v3_shadow_region * region) {
    struct v3_shadow_region * ret;
    int i = 0;

    if ((ret = __insert_shadow_region(vm, region))) {
        return ret;
    }

    v3_rb_insert_color(&(region->tree_node), &(vm->mem_map.shdw_regions));

    for (i = 0; i < vm->num_cores; i++) {
        struct guest_info * info = &(vm->cores[i]);

        // Flush virtual page tables.
        // 3 cases: shadow, shadow passthrough, and nested

        if (info->shdw_pg_mode == SHADOW_PAGING) {
            v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);

            if (mem_mode == PHYSICAL_MEM) {
                addr_t cur_addr;

                for (cur_addr = region->guest_start;
                     cur_addr < region->guest_end;
                     cur_addr += PAGE_SIZE_4KB) {
                    v3_invalidate_passthrough_addr(info, cur_addr);
                }
            } else {
                v3_invalidate_shadow_pts(info);
            }

        } else if (info->shdw_pg_mode == NESTED_PAGING) {
            addr_t cur_addr;

            for (cur_addr = region->guest_start;
                 cur_addr < region->guest_end;
                 cur_addr += PAGE_SIZE_4KB) {
                v3_invalidate_nested_addr(info, cur_addr);
            }
        }
    }

    return NULL;
}

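// Dispatches a page fault on a hooked guest-physical page to the proper
// emulation handler based on the region type covering the faulting address.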
int handle_special_page_fault(struct guest_info * info,
                              addr_t fault_gva, addr_t fault_gpa, pf_error_t access_info)
{
    struct v3_shadow_region * reg = v3_get_shadow_region(info->vm_info, info->cpu_id, fault_gpa);

    PrintDebug("Handling Special Page Fault\n");

    switch (reg->host_type) {
        case SHDW_REGION_WRITE_HOOK:
            return v3_handle_mem_wr_hook(info, fault_gva, fault_gpa, reg, access_info);
        case SHDW_REGION_FULL_HOOK:
            return v3_handle_mem_full_hook(info, fault_gva, fault_gpa, reg, access_info);
        default:
            return -1;
    }
}

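// Write hooks emulate the faulting instruction against the region's real
// backing page (dst_addr), presumably letting v3_emulate_write_op() hand
// the written bytes to the registered write callback as well.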
int v3_handle_mem_wr_hook(struct guest_info * info, addr_t guest_va, addr_t guest_pa,
                          struct v3_shadow_region * reg, pf_error_t access_info) {

    addr_t dst_addr = (addr_t)V3_VAddr((void *)v3_get_shadow_addr(reg, info->cpu_id, guest_pa));

    if (v3_emulate_write_op(info, guest_va, guest_pa, dst_addr,
                            reg->write_hook, reg->priv_data) == -1) {
        PrintError("Write hook emulation failed\n");
        return -1;
    }

    return 0;
}

int v3_handle_mem_full_hook(struct guest_info * info, addr_t guest_va, addr_t guest_pa,
                            struct v3_shadow_region * reg, pf_error_t access_info) {

    // Full hooks have no backing memory, so emulate against this core's scratch page
    addr_t op_addr = get_hook_hva(info);

    if (access_info.write == 1) {
        if (v3_emulate_write_op(info, guest_va, guest_pa, op_addr,
                                reg->write_hook, reg->priv_data) == -1) {
            PrintError("Write Full Hook emulation failed\n");
            return -1;
        }
    } else {
        if (v3_emulate_read_op(info, guest_va, guest_pa, op_addr,
                               reg->read_hook, reg->write_hook,
                               reg->priv_data) == -1) {
            PrintError("Read Full Hook emulation failed\n");
            return -1;
        }
    }

    return 0;
}

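// Looks up the region covering guest_addr for the given core. Core-private
// regions and V3_MEM_CORE_ANY regions both match; if no registered region
// covers the address, the base region is returned for any address inside
// guest memory, and NULL for anything beyond it.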
struct v3_shadow_region * v3_get_shadow_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
    struct rb_node * n = vm->mem_map.shdw_regions.rb_node;
    struct v3_shadow_region * reg = NULL;

    while (n) {
        reg = rb_entry(n, struct v3_shadow_region, tree_node);

        if (guest_addr < reg->guest_start) {
            n = n->rb_left;
        } else if (guest_addr >= reg->guest_end) {
            n = n->rb_right;
        } else {
            if ((core_id == reg->core_id) ||
                (reg->core_id == V3_MEM_CORE_ANY)) {
                return reg;
            } else if (core_id < reg->core_id) {
                // Same guest range, different core: follow the core_id ordering
                n = n->rb_left;
            } else {
                n = n->rb_right;
            }
        }
    }

    // No registered region covers this address, so check whether it is a
    // valid address within the base region (guest_end is exclusive)
    if (guest_addr >= vm->mem_map.base_region.guest_end) {
        PrintError("Guest Address Exceeds Base Memory Size (ga=%p), (limit=%p)\n",
                   (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end);
        v3_print_mem_map(vm);

        return NULL;
    }

    return &(vm->mem_map.base_region);
}

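/*
 * Translation sketch (illustrative): converting a guest-physical address
 * to a host virtual address for a given core combines the two lookups in
 * this file. The helper name is hypothetical; it returns NULL for hooked
 * or out-of-range addresses.
 *
 *   static void * gpa_to_hva(struct v3_vm_info * vm, uint16_t core_id, addr_t gpa) {
 *       struct v3_shadow_region * reg = v3_get_shadow_region(vm, core_id, gpa);
 *       addr_t hpa;
 *
 *       if (reg == NULL) {
 *           return NULL;                 // beyond guest memory
 *       }
 *
 *       hpa = v3_get_shadow_addr(reg, core_id, gpa);
 *
 *       return (hpa == 0) ? NULL : V3_VAddr((void *)hpa);
 *   }
 */
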
void v3_delete_shadow_region(struct v3_vm_info * vm, struct v3_shadow_region * reg) {
    int i = 0;

    if (reg == NULL) {
        return;
    }

    for (i = 0; i < vm->num_cores; i++) {
        struct guest_info * info = &(vm->cores[i]);

        // Flush virtual page tables.
        // 3 cases: shadow, shadow passthrough, and nested

        if (info->shdw_pg_mode == SHADOW_PAGING) {
            v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);

            if (mem_mode == PHYSICAL_MEM) {
                addr_t cur_addr;

                for (cur_addr = reg->guest_start;
                     cur_addr < reg->guest_end;
                     cur_addr += PAGE_SIZE_4KB) {
                    v3_invalidate_passthrough_addr(info, cur_addr);
                }
            } else {
                v3_invalidate_shadow_pts(info);
            }

        } else if (info->shdw_pg_mode == NESTED_PAGING) {
            addr_t cur_addr;

            for (cur_addr = reg->guest_start;
                 cur_addr < reg->guest_end;
                 cur_addr += PAGE_SIZE_4KB) {
                v3_invalidate_nested_addr(info, cur_addr);
            }
        }
    }

    v3_rb_erase(&(reg->tree_node), &(vm->mem_map.shdw_regions));

    V3_Free(reg);
}

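// Translates a guest-physical address to its host-physical address within
// a region. Only valid for regions with real backing memory; full hooks
// (and a NULL region) yield 0.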
addr_t v3_get_shadow_addr(struct v3_shadow_region * reg, uint16_t core_id, addr_t guest_addr) {
    if ((reg) &&
        (reg->host_type != SHDW_REGION_FULL_HOOK)) {
        return (guest_addr - reg->guest_start) + reg->host_addr;
    } else {
        // PrintError("MEM Region Invalid\n");
        return 0;
    }
}

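// Dumps the full memory layout: the base region first, then every
// registered shadow region in tree order with its type and hook pointers.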
void v3_print_mem_map(struct v3_vm_info * vm) {
    struct rb_node * node = v3_rb_first(&(vm->mem_map.shdw_regions));
    struct v3_shadow_region * reg = &(vm->mem_map.base_region);
    int i = 0;

    V3_Print("Memory Layout:\n");

    V3_Print("Base Region: 0x%p - 0x%p -> 0x%p\n",
             (void *)(reg->guest_start),
             (void *)(reg->guest_end - 1),
             (void *)(reg->host_addr));

    // If the memory map is empty, don't print it
    if (node == NULL) {
        return;
    }

    do {
        reg = rb_entry(node, struct v3_shadow_region, tree_node);

        V3_Print("%d: 0x%p - 0x%p -> 0x%p\n", i,
                 (void *)(reg->guest_start),
                 (void *)(reg->guest_end - 1),
                 (void *)(reg->host_addr));

        V3_Print("\t(%s) (WriteHook = 0x%p) (ReadHook = 0x%p)\n",
                 v3_shdw_region_type_to_str(reg->host_type),
                 (void *)(reg->write_hook),
                 (void *)(reg->read_hook));

        i++;
    } while ((node = v3_rb_next(node)));
}

static const uchar_t SHDW_REGION_WRITE_HOOK_STR[] = "SHDW_REGION_WRITE_HOOK";
static const uchar_t SHDW_REGION_FULL_HOOK_STR[] = "SHDW_REGION_FULL_HOOK";
static const uchar_t SHDW_REGION_ALLOCATED_STR[] = "SHDW_REGION_ALLOCATED";

const uchar_t * v3_shdw_region_type_to_str(v3_shdw_region_type_t type) {
    switch (type) {
        case SHDW_REGION_WRITE_HOOK:
            return SHDW_REGION_WRITE_HOOK_STR;
        case SHDW_REGION_FULL_HOOK:
            return SHDW_REGION_FULL_HOOK_STR;
        case SHDW_REGION_ALLOCATED:
            return SHDW_REGION_ALLOCATED_STR;
        default:
            return (uchar_t *)"SHDW_REGION_INVALID";
    }
}