X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm_mem.c;h=659fd279b8471d60ef3c5750c64d47b2bc7ccf9a;hb=b06d0d0e5e57f5c4163a69efcabe6f96594ad814;hp=c24479363dccaaafd0279e3ea4101f711ea41f62;hpb=d3c1f093de953d47c55c11692f27aa2220c840b4;p=palacios.git
diff --git a/palacios/src/palacios/vmm_mem.c b/palacios/src/palacios/vmm_mem.c
index c244793..659fd27 100644
--- a/palacios/src/palacios/vmm_mem.c
+++ b/palacios/src/palacios/vmm_mem.c
@@ -20,12 +20,13 @@
 #include 
 #include 
 #include 
-//#include 
 #include 
+#include 
+#include 
 
 
-
+#define MEM_OFFSET_HCALL 0x1000
 
 
 
 static inline
@@ -33,13 +34,27 @@
 struct v3_shadow_region * insert_shadow_region(struct guest_info * info,
                                                struct v3_shadow_region * region);
 
 
 
+static int mem_offset_hypercall(struct guest_info * info, uint_t hcall_id, void * private_data) {
+    info->vm_regs.rbx = info->mem_map.base_region.host_addr;
+
+    return 0;
+}
+
 void v3_init_shadow_map(struct guest_info * info) {
     v3_shdw_map_t * map = &(info->mem_map);
+    addr_t mem_pages = info->mem_size >> 12;
 
     map->shdw_regions.rb_node = NULL;
     map->hook_hva = (addr_t)V3_VAddr(V3_AllocPages(1));
 
+    // There is an underlying region that contains all of the guest memory
+    map->base_region.guest_start = 0;
+    map->base_region.guest_end = info->mem_size;
+    map->base_region.host_type = SHDW_REGION_ALLOCATED;
+    map->base_region.host_addr = (addr_t)V3_AllocPages(mem_pages);
+
+    v3_register_hypercall(info, MEM_OFFSET_HCALL, mem_offset_hypercall, NULL);
 }
 
@@ -54,6 +69,9 @@ void v3_delete_shadow_map(struct guest_info * info) {
 
 	v3_delete_shadow_region(info, reg);
     }
+
+    V3_FreePage((void *)(info->mem_map.base_region.host_addr));
+    V3_FreePage(V3_PAddr((void *)(info->mem_map.hook_hva)));
 
 
 }
@@ -171,13 +189,39 @@ struct v3_shadow_region * insert_shadow_region(struct guest_info * info,
 
     v3_rb_insert_color(&(region->tree_node), &(info->mem_map.shdw_regions));
 
-    return NULL;
-}
-
+    // flush virtual page tables
+    // 3 cases shadow, shadow passthrough, and nested
+    if (info->shdw_pg_mode == SHADOW_PAGING) {
+        v3_vm_mem_mode_t mem_mode = v3_get_mem_mode(info);
+
+        if (mem_mode == PHYSICAL_MEM) {
+            addr_t cur_addr;
+
+            for (cur_addr = region->guest_start;
+                 cur_addr < region->guest_end;
+                 cur_addr += PAGE_SIZE_4KB) {
+                v3_invalidate_passthrough_addr(info, cur_addr);
+            }
+        } else {
+            v3_invalidate_shadow_pts(info);
+        }
+
+    } else if (info->shdw_pg_mode == NESTED_PAGING) {
+        addr_t cur_addr;
+
+        for (cur_addr = region->guest_start;
+             cur_addr < region->guest_end;
+             cur_addr += PAGE_SIZE_4KB) {
+
+            v3_invalidate_nested_addr(info, cur_addr);
+        }
+    }
+    return NULL;
+}
+
@@ -207,7 +251,8 @@ int v3_handle_mem_wr_hook(struct guest_info * info, addr_t guest_va, addr_t gues
 
     addr_t dst_addr = (addr_t)V3_VAddr((void *)v3_get_shadow_addr(reg, guest_pa));
 
-    if (v3_emulate_write_op(info, guest_va, guest_pa, dst_addr, reg->write_hook, reg->priv_data) == -1) {
+    if (v3_emulate_write_op(info, guest_va, guest_pa, dst_addr,
+                            reg->write_hook, reg->priv_data) == -1) {
         PrintError("Write hook emulation failed\n");
         return -1;
     }
@@ -221,12 +266,15 @@ int v3_handle_mem_full_hook(struct guest_info * info, addr_t guest_va, addr_t gu
     addr_t op_addr = info->mem_map.hook_hva;
 
     if (access_info.write == 1) {
-        if (v3_emulate_write_op(info, guest_va, guest_pa, op_addr, reg->write_hook, reg->priv_data) == -1) {
+        if (v3_emulate_write_op(info, guest_va, guest_pa, op_addr,
+                                reg->write_hook, reg->priv_data) == -1) {
             PrintError("Write Full Hook emulation failed\n");
             return -1;
         }
     } else {
-        if (v3_emulate_read_op(info, guest_va, guest_pa, op_addr, reg->read_hook, reg->write_hook, reg->priv_data) == -1) {
+        if (v3_emulate_read_op(info, guest_va, guest_pa, op_addr,
+                               reg->read_hook, reg->write_hook,
+                               reg->priv_data) == -1) {
             PrintError("Read Full Hook emulation failed\n");
             return -1;
         }
@@ -253,42 +301,91 @@ struct v3_shadow_region * v3_get_shadow_region(struct guest_info * info, addr_t
         }
     }
 
-    return NULL;
+
+    // There is not registered region, so we check if its a valid address in the base region
+
+    if (guest_addr > info->mem_map.base_region.guest_end) {
+        PrintError("Guest Address Exceeds Base Memory Size (ga=%p), (limit=%p)\n",
+                   (void *)guest_addr, (void *)info->mem_map.base_region.guest_end);
+        return NULL;
+    }
+
+    return &(info->mem_map.base_region);
 }
 
 
+void v3_delete_shadow_region(struct guest_info * info, struct v3_shadow_region * reg) {
+    if (reg == NULL) {
+        return;
+    }
 
-addr_t v3_get_shadow_addr(struct v3_shadow_region * reg, addr_t guest_addr) {
-    if ( (reg) &&
-         (reg->host_type != SHDW_REGION_FULL_HOOK) &&
-         (reg->host_type != SHDW_REGION_INVALID) ) {
-        return (guest_addr - reg->guest_start) + reg->host_addr;
-    } else {
-        PrintDebug("MEM Region Invalid\n");
-        return 0;
+    // flush virtual page tables
+    // 3 cases shadow, shadow passthrough, and nested
+    if (info->shdw_pg_mode == SHADOW_PAGING) {
+        v3_vm_mem_mode_t mem_mode = v3_get_mem_mode(info);
+
+        if (mem_mode == PHYSICAL_MEM) {
+            addr_t cur_addr;
+
+            for (cur_addr = reg->guest_start;
+                 cur_addr < reg->guest_end;
+                 cur_addr += PAGE_SIZE_4KB) {
+                v3_invalidate_passthrough_addr(info, cur_addr);
+            }
+        } else {
+            v3_invalidate_shadow_pts(info);
+        }
+
+    } else if (info->shdw_pg_mode == NESTED_PAGING) {
+        addr_t cur_addr;
+
+        for (cur_addr = reg->guest_start;
+             cur_addr < reg->guest_end;
+             cur_addr += PAGE_SIZE_4KB) {
+
+            v3_invalidate_nested_addr(info, cur_addr);
+        }
     }
+
+
+    v3_rb_erase(&(reg->tree_node), &(info->mem_map.shdw_regions));
+
+    V3_Free(reg);
+
+    // flush virtual page tables
+    // 3 cases shadow, shadow passthrough, and nested
+
 }
 
 
-void v3_delete_shadow_region(struct guest_info * info, struct v3_shadow_region * reg) {
-    if (reg != NULL) {
-        v3_rb_erase(&(reg->tree_node), &(info->mem_map.shdw_regions));
-        V3_Free(reg);
+addr_t v3_get_shadow_addr(struct v3_shadow_region * reg, addr_t guest_addr) {
+    if ( (reg) &&
+         (reg->host_type != SHDW_REGION_FULL_HOOK)) {
+        return (guest_addr - reg->guest_start) + reg->host_addr;
+    } else {
+        PrintDebug("MEM Region Invalid\n");
+        return 0;
     }
-}
+}
 
 
 void print_shadow_map(struct guest_info * info) {
     struct rb_node * node = v3_rb_first(&(info->mem_map.shdw_regions));
-    struct v3_shadow_region * reg;
+    struct v3_shadow_region * reg = &(info->mem_map.base_region);
     int i = 0;
 
     PrintDebug("Memory Layout:\n");
+
+    PrintDebug("Base Region: 0x%p - 0x%p -> 0x%p\n",
+               (void *)(reg->guest_start),
+               (void *)(reg->guest_end - 1),
+               (void *)(reg->host_addr));
+
 
     do {
         reg = rb_entry(node, struct v3_shadow_region, tree_node);
@@ -307,13 +404,10 @@ void print_shadow_map(struct guest_info * info) {
 }
 
 
-static const uchar_t SHDW_REGION_INVALID_STR[] = "SHDW_REGION_INVALID";
 static const uchar_t SHDW_REGION_WRITE_HOOK_STR[] = "SHDW_REGION_WRITE_HOOK";
 static const uchar_t SHDW_REGION_FULL_HOOK_STR[] = "SHDW_REGION_FULL_HOOK";
 static const uchar_t SHDW_REGION_ALLOCATED_STR[] = "SHDW_REGION_ALLOCATED";
 
-
-
 const uchar_t * v3_shdw_region_type_to_str(v3_shdw_region_type_t type) {
     switch (type) {
     case SHDW_REGION_WRITE_HOOK:
@@ -323,7 +417,7 @@ const uchar_t * v3_shdw_region_type_to_str(v3_shdw_region_type_t type) {
     case SHDW_REGION_ALLOCATED:
         return SHDW_REGION_ALLOCATED_STR;
     default:
-        return SHDW_REGION_INVALID_STR;
+        return (uchar_t *)"SHDW_REGION_INVALID";
     }
 }
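
Below is a minimal, hypothetical sketch of how a guest might use the memory-offset hypercall this patch introduces. The diff itself only establishes the hypercall ID (MEM_OFFSET_HCALL, 0x1000) and that the handler places base_region.host_addr in the guest's RBX; the vmmcall instruction and the ID-in-EAX convention below are assumptions about Palacios' hypercall ABI (on Intel VMX hardware the equivalent instruction would be vmcall), and guest_query_mem_offset is an invented helper name, not part of this patch.

#include <stdint.h>

#define MEM_OFFSET_HCALL 0x1000   /* hypercall ID registered by this patch */

/* Hypothetical guest helper: ask the VMM for the host address of the base
 * memory region.  Assumes the hypercall ID is passed in EAX and issued with
 * vmmcall; the result is read back from RBX, which is where the patch's
 * mem_offset_hypercall() handler stores base_region.host_addr. */
static inline uintptr_t guest_query_mem_offset(void) {
    uintptr_t host_base;

    __asm__ __volatile__ ("vmmcall"
                          : "=b" (host_base)         /* result returned in RBX */
                          : "a" (MEM_OFFSET_HCALL)   /* hypercall ID in EAX (assumed) */
                          : "memory");

    return host_base;
}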