X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm_shadow_paging.c;h=9650f135cb0f528b44fd2bf35b405f22d430f93e;hb=a5d2c00cc461b4a60a1360a2a0bba55cef467bab;hp=d87ee112f198076ea78799ff1480eab202f65a7d;hpb=acaadd79c597c8d5180fbfbec79c01fef3dff003;p=palacios.git

diff --git a/palacios/src/palacios/vmm_shadow_paging.c b/palacios/src/palacios/vmm_shadow_paging.c
index d87ee11..9650f13 100644
--- a/palacios/src/palacios/vmm_shadow_paging.c
+++ b/palacios/src/palacios/vmm_shadow_paging.c
@@ -74,19 +74,26 @@ static int shdw_pg_eq_fn(addr_t key1, addr_t key2) {
 
 static int have_callbacks(struct guest_info *core)
 {
+    // lock acquisition unnecessary
+    // caller will acquire the lock before *iterating* through the list
+    // so any race will be resolved then
     return !list_empty(&(core->vm_info->shdw_impl.event_callback_list));
 }
 
 static void dispatch_event(struct guest_info *core, struct v3_shdw_pg_event *event)
 {
     struct event_callback *cb,*temp;
-    
+
+    v3_read_lock(&(core->vm_info->shdw_impl.event_callback_lock));
+
     list_for_each_entry_safe(cb,
                              temp,
                              &(core->vm_info->shdw_impl.event_callback_list),
                              node) {
         cb->callback(core,event,cb->priv_data);
     }
+
+    v3_read_unlock(&(core->vm_info->shdw_impl.event_callback_lock));
 }
 
 
@@ -170,9 +177,15 @@ int v3_init_shdw_pg_state(struct guest_info * core) {
 
 int v3_deinit_shdw_pg_state(struct guest_info * core) {
-    struct v3_shdw_pg_impl * impl = core->vm_info->shdw_impl.current_impl;
+    struct v3_shdw_pg_impl * impl = NULL;
+
+    if (!core || !core->vm_info) {
+        return -1;
+    }
+
+    impl = core->vm_info->shdw_impl.current_impl;
 
-    if (impl->local_deinit(core) == -1) {
+    if (impl && impl->local_deinit(core) == -1) {
         PrintError(core->vm_info, core, "Error deinitializing shadow paging state\n");
         return -1;
     }
 
@@ -191,9 +204,27 @@ int v3_init_shdw_impl(struct v3_vm_info * vm) {
     struct v3_shdw_pg_impl * impl = NULL;
 
     PrintDebug(vm, VCORE_NONE, "Checking if shadow paging requested.\n");
-    if ((pg_mode != NULL) && (strcasecmp(pg_mode, "nested") == 0)) {
-        PrintDebug(vm, VCORE_NONE, "Nested paging specified - not initializing shadow paging.\n");
-        return 0;
+    if (pg_mode == NULL) {
+        V3_Print(vm, VCORE_NONE, "No paging mode specified, assuming shadow with defaults\n");
+        pg_mode = "shadow";
+    } else {
+        if (strcasecmp(pg_mode, "nested") == 0) {
+            // this check is repeated here (compare to vmm_config's determine paging mode) since
+            // shadow paging initialization *precedes* per-core pre-config.
+            extern v3_cpu_arch_t v3_mach_type;
+            if ((v3_mach_type == V3_SVM_REV3_CPU) ||
+                (v3_mach_type == V3_VMX_EPT_CPU) ||
+                (v3_mach_type == V3_VMX_EPT_UG_CPU)) {
+                PrintDebug(vm, VCORE_NONE, "Nested paging specified on machine that supports it - not initializing shadow paging\n");
+                return 0;
+            } else {
+                V3_Print(vm, VCORE_NONE, "Nested paging specified but machine does not support it - falling back to shadow paging with defaults\n");
+                pg_mode = "shadow";
+            }
+        } else if (strcasecmp(pg_mode, "shadow") != 0) {
+            V3_Print(vm, VCORE_NONE, "Unknown paging mode '%s' specified - falling back to shadow paging with defaults\n",pg_mode);
+            pg_mode = "shadow";
+        }
     }
 
     if (pg_strat == NULL) {
@@ -210,6 +241,7 @@ int v3_init_shdw_impl(struct v3_vm_info * vm) {
     }
 
     INIT_LIST_HEAD(&(impl_state->event_callback_list));
+    v3_rw_lock_init(&(impl_state->event_callback_lock));
 
     impl_state->current_impl = impl;
 
@@ -226,6 +258,7 @@ int v3_init_shdw_impl(struct v3_vm_info * vm) {
 int v3_deinit_shdw_impl(struct v3_vm_info * vm) {
     struct v3_shdw_pg_impl * impl = vm->shdw_impl.current_impl;
     struct event_callback *cb,*temp;
+    addr_t flags;
 
     if (impl == NULL) {
         // Shadow paging not implemented
@@ -237,6 +270,8 @@ int v3_deinit_shdw_impl(struct v3_vm_info * vm) {
         return -1;
     }
 
+    flags=v3_write_lock_irqsave(&(vm->shdw_impl.event_callback_lock));
+
     list_for_each_entry_safe(cb,
                              temp,
                              &(vm->shdw_impl.event_callback_list),
@@ -245,6 +280,10 @@ int v3_deinit_shdw_impl(struct v3_vm_info * vm) {
         V3_Free(cb);
     }
 
+    v3_write_unlock_irqrestore(&(vm->shdw_impl.event_callback_lock),flags);
+
+    v3_rw_lock_deinit(&(vm->shdw_impl.event_callback_lock));
+
     return 0;
 }
 
@@ -311,7 +350,7 @@ int v3_handle_shadow_pagefault(struct guest_info * core, addr_t fault_addr, pf_e
 
     if (v3_get_vm_mem_mode(core) == PHYSICAL_MEM) {
         // If paging is not turned on we need to handle the special cases
-        rc = v3_handle_passthrough_pagefault(core, fault_addr, error_code);
+        rc = v3_handle_passthrough_pagefault(core, fault_addr, error_code,NULL,NULL);
     } else if (v3_get_vm_mem_mode(core) == VIRTUAL_MEM) {
         struct v3_shdw_impl_state * state = &(core->vm_info->shdw_impl);
         struct v3_shdw_pg_impl * impl = state->current_impl;
@@ -454,6 +493,7 @@ int v3_register_shadow_paging_event_callback(struct v3_vm_info *vm,
                                              void *priv_data)
 {
     struct event_callback *ec = V3_Malloc(sizeof(struct event_callback));
+    addr_t flags;
 
     if (!ec) {
         PrintError(vm, VCORE_NONE, "Unable to allocate for a shadow paging event callback\n");
@@ -463,7 +503,9 @@ int v3_register_shadow_paging_event_callback(struct v3_vm_info *vm,
     ec->callback = callback;
     ec->priv_data = priv_data;
 
+    flags=v3_write_lock_irqsave(&(vm->shdw_impl.event_callback_lock));
     list_add(&(ec->node),&(vm->shdw_impl.event_callback_list));
+    v3_write_unlock_irqrestore(&(vm->shdw_impl.event_callback_lock),flags);
 
     return 0;
 
@@ -476,6 +518,9 @@ int v3_unregister_shadow_paging_event_callback(struct v3_vm_info *vm,
                                                void *priv_data)
 {
     struct event_callback *cb,*temp;
+    addr_t flags;
+
+    flags=v3_write_lock_irqsave(&(vm->shdw_impl.event_callback_lock));
 
     list_for_each_entry_safe(cb,
                              temp,
@@ -483,11 +528,14 @@ int v3_unregister_shadow_paging_event_callback(struct v3_vm_info *vm,
                              node) {
         if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
             list_del(&(cb->node));
+            v3_write_unlock_irqrestore(&(vm->shdw_impl.event_callback_lock),flags);
             V3_Free(cb);
             return 0;
         }
     }
 
+    v3_write_unlock_irqrestore(&(vm->shdw_impl.event_callback_lock),flags);
+
     PrintError(vm, VCORE_NONE, "No callback found!\n");
 
     return -1;
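
The discipline this patch adopts for the event-callback list is a classic reader/writer split: dispatch_event only walks the list, so it takes the read lock, while register, unregister, and deinit mutate the list, so they take the write lock using the irqsave/irqrestore variants. Below is a minimal, self-contained sketch of the same pattern, using POSIX rwlocks and a hand-rolled singly linked list in place of Palacios' v3_rw_lock_* primitives and list_head macros; every identifier in it (event_cb, cb_list, cb_lock, dispatch_events, and so on) is an illustrative stand-in, not taken from the Palacios source.

    /* Sketch of the reader/writer callback-list pattern from the patch above,
     * assuming a POSIX environment.  All names are hypothetical stand-ins. */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct event_cb {
        void (*callback)(int event, void *priv);
        void *priv;
        struct event_cb *next;
    };

    static struct event_cb *cb_list = NULL;
    static pthread_rwlock_t cb_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Writer: insert at the head under the exclusive lock. */
    static int register_cb(void (*callback)(int, void *), void *priv) {
        struct event_cb *cb = malloc(sizeof(*cb));
        if (!cb) {
            return -1;
        }
        cb->callback = callback;
        cb->priv = priv;
        pthread_rwlock_wrlock(&cb_lock);
        cb->next = cb_list;
        cb_list = cb;
        pthread_rwlock_unlock(&cb_lock);
        return 0;
    }

    /* Writer: unlink under the exclusive lock, then free outside it,
     * mirroring the patch's unlock-before-V3_Free ordering. */
    static int unregister_cb(void (*callback)(int, void *), void *priv) {
        struct event_cb **pp;
        pthread_rwlock_wrlock(&cb_lock);
        for (pp = &cb_list; *pp != NULL; pp = &(*pp)->next) {
            if ((*pp)->callback == callback && (*pp)->priv == priv) {
                struct event_cb *victim = *pp;
                *pp = victim->next;   /* no reader can reach it now */
                pthread_rwlock_unlock(&cb_lock);
                free(victim);
                return 0;
            }
        }
        pthread_rwlock_unlock(&cb_lock);
        return -1;  /* no callback found */
    }

    /* Reader: many dispatchers may iterate concurrently, but none can
     * observe a half-unlinked node while a writer holds the lock. */
    static void dispatch_events(int event) {
        struct event_cb *cb;
        pthread_rwlock_rdlock(&cb_lock);
        for (cb = cb_list; cb != NULL; cb = cb->next) {
            cb->callback(event, cb->priv);
        }
        pthread_rwlock_unlock(&cb_lock);
    }

    static void print_cb(int event, void *priv) {
        printf("event %d for %s\n", event, (const char *)priv);
    }

    int main(void) {
        register_cb(print_cb, "demo");
        dispatch_events(42);
        unregister_cb(print_cb, "demo");
        return 0;
    }

The same reasoning backs the comment the patch adds to have_callbacks: the unlocked list_empty check is only a hint, and any dispatcher that acts on it re-reads the list under the read lock before touching a node, so a race on the emptiness check is resolved at iteration time rather than needing a lock of its own.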