X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm_direct_paging.c;h=2884923c26010e21b9363aec27a230802bdc4401;hb=85c259546c85d19af43b443f9724c44caffb9b20;hp=a736571916a1f7bcf274e18aee1a147bad9b044c;hpb=894733a198529cfaf93f4f4b68b255906154ab56;p=palacios.git

diff --git a/palacios/src/palacios/vmm_direct_paging.c b/palacios/src/palacios/vmm_direct_paging.c
index a736571..2884923 100644
--- a/palacios/src/palacios/vmm_direct_paging.c
+++ b/palacios/src/palacios/vmm_direct_paging.c
@@ -99,19 +99,27 @@ struct passthrough_event_callback {
 
 static int have_passthrough_callbacks(struct guest_info *core)
 {
+    // lock acquisition unnecessary
+    // caller will acquire the lock before *iterating* through the list
+    // so any race will be resolved then
     return !list_empty(&(core->vm_info->passthrough_impl.event_callback_list));
 }
 
 static void dispatch_passthrough_event(struct guest_info *core, struct v3_passthrough_pg_event *event)
 {
     struct passthrough_event_callback *cb,*temp;
-    
+
+    v3_read_lock(&(core->vm_info->passthrough_impl.event_callback_lock));
+
     list_for_each_entry_safe(cb,
                              temp,
                              &(core->vm_info->passthrough_impl.event_callback_list),
                              node) {
        cb->callback(core,event,cb->priv_data);
     }
+
+    v3_read_unlock(&(core->vm_info->passthrough_impl.event_callback_lock));
+
 }
 
 struct nested_event_callback {
@@ -124,6 +132,9 @@ struct nested_event_callback {
 
 static int have_nested_callbacks(struct guest_info *core)
 {
+    // lock acquisition unnecessary
+    // caller will acquire the lock before *iterating* through the list
+    // so any race will be resolved then
     return !list_empty(&(core->vm_info->nested_impl.event_callback_list));
 }
 
@@ -131,12 +142,16 @@ static void dispatch_nested_event(struct guest_info *core, struct v3_nested_pg_e
 {
     struct nested_event_callback *cb,*temp;
 
+    v3_read_lock(&(core->vm_info->nested_impl.event_callback_lock));
+
     list_for_each_entry_safe(cb,
                              temp,
                              &(core->vm_info->nested_impl.event_callback_list),
                              node) {
        cb->callback(core,event,cb->priv_data);
     }
+
+    v3_read_unlock(&(core->vm_info->nested_impl.event_callback_lock));
 }
 
@@ -185,7 +200,9 @@ int v3_free_passthrough_pts(struct guest_info * core) {
        case LONG:
        case LONG_32_COMPAT:
            // Long mode will only use 32PAE page tables...
-           delete_page_tables_32pae((pdpe32pae_t *)V3_VAddr((void *)(core->direct_map_pt)));
+           if (core->direct_map_pt) {
+               delete_page_tables_32pae((pdpe32pae_t *)V3_VAddr((void *)(core->direct_map_pt)));
+           }
            break;
        default:
            PrintError(core->vm_info, core, "Unknown CPU Mode\n");
@@ -370,12 +387,16 @@ int v3_invalidate_passthrough_addr_range(struct guest_info * info,
 int v3_init_passthrough_paging(struct v3_vm_info *vm)
 {
     INIT_LIST_HEAD(&(vm->passthrough_impl.event_callback_list));
+    v3_rw_lock_init(&(vm->passthrough_impl.event_callback_lock));
     return 0;
 }
 
 int v3_deinit_passthrough_paging(struct v3_vm_info *vm)
 {
     struct passthrough_event_callback *cb,*temp;
+    addr_t flags;
+
+    flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));
 
     list_for_each_entry_safe(cb,
                              temp,
@@ -384,6 +405,10 @@ int v3_deinit_passthrough_paging(struct v3_vm_info *vm)
        list_del(&(cb->node));
        V3_Free(cb);
     }
+
+    v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
+
+    v3_rw_lock_deinit(&(vm->passthrough_impl.event_callback_lock));
 
     return 0;
 }
@@ -401,6 +426,64 @@ int v3_deinit_passthrough_paging_core(struct guest_info *core)
 }
 
 
+int v3_register_passthrough_paging_event_callback(struct v3_vm_info *vm,
+                                                  int (*callback)(struct guest_info *core,
+                                                                  struct v3_passthrough_pg_event *,
+                                                                  void *priv_data),
+                                                  void *priv_data)
+{
+    struct passthrough_event_callback *ec = V3_Malloc(sizeof(struct passthrough_event_callback));
+    addr_t flags;
+
+    if (!ec) {
+       PrintError(vm, VCORE_NONE, "Unable to allocate for a passthrough paging event callback\n");
+       return -1;
+    }
+
+    ec->callback = callback;
+    ec->priv_data = priv_data;
+
+    flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));
+    list_add(&(ec->node),&(vm->passthrough_impl.event_callback_list));
+    v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
+
+    return 0;
+
+}
+
+
+
+int v3_unregister_passthrough_paging_event_callback(struct v3_vm_info *vm,
+                                                    int (*callback)(struct guest_info *core,
+                                                                    struct v3_passthrough_pg_event *,
+                                                                    void *priv_data),
+                                                    void *priv_data)
+{
+    struct passthrough_event_callback *cb,*temp;
+    addr_t flags;
+
+    flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));
+
+    list_for_each_entry_safe(cb,
+                             temp,
+                             &(vm->passthrough_impl.event_callback_list),
+                             node) {
+       if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
+           list_del(&(cb->node));
+           V3_Free(cb);
+           v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
+           return 0;
+       }
+    }
+
+    v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
+
+    PrintError(vm, VCORE_NONE, "No callback found!\n");
+
+    return -1;
+}
+
+
 // inline nested paging support for Intel and AMD
 #include "svm_npt.h"
 #include "vmx_npt.h"
@@ -516,6 +599,7 @@ int v3_invalidate_nested_addr_range(struct guest_info * info,
 int v3_init_nested_paging(struct v3_vm_info *vm)
 {
     INIT_LIST_HEAD(&(vm->nested_impl.event_callback_list));
+    v3_rw_lock_init(&(vm->nested_impl.event_callback_lock));
     return 0;
 }
 
@@ -532,7 +616,10 @@ int v3_init_nested_paging_core(struct guest_info *core, void *hwinfo)
 int v3_deinit_nested_paging(struct v3_vm_info *vm)
 {
     struct nested_event_callback *cb,*temp;
+    addr_t flags;
 
+    flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));
+
     list_for_each_entry_safe(cb,
                              temp,
                              &(vm->nested_impl.event_callback_list),
@@ -541,6 +628,10 @@ int v3_deinit_nested_paging(struct v3_vm_info *vm)
        V3_Free(cb);
     }
 
+    v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
+
+    v3_rw_lock_deinit(&(vm->nested_impl.event_callback_lock));
+
     return 0;
 }
 
@@ -550,3 +641,61 @@ int v3_deinit_nested_paging_core(struct guest_info *core)
 
     return 0;
 }
+
+
+int v3_register_nested_paging_event_callback(struct v3_vm_info *vm,
+                                             int (*callback)(struct guest_info *core,
+                                                             struct v3_nested_pg_event *,
+                                                             void *priv_data),
+                                             void *priv_data)
+{
+    struct nested_event_callback *ec = V3_Malloc(sizeof(struct nested_event_callback));
+    addr_t flags;
+
+    if (!ec) {
+       PrintError(vm, VCORE_NONE, "Unable to allocate for a nested paging event callback\n");
+       return -1;
+    }
+
+    ec->callback = callback;
+    ec->priv_data = priv_data;
+
+    flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));
+    list_add(&(ec->node),&(vm->nested_impl.event_callback_list));
+    v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
+
+    return 0;
+
+}
+
+
+
+int v3_unregister_nested_paging_event_callback(struct v3_vm_info *vm,
+                                               int (*callback)(struct guest_info *core,
+                                                               struct v3_nested_pg_event *,
+                                                               void *priv_data),
+                                               void *priv_data)
+{
+    struct nested_event_callback *cb,*temp;
+    addr_t flags;
+
+    flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));
+
+    list_for_each_entry_safe(cb,
+                             temp,
+                             &(vm->nested_impl.event_callback_list),
+                             node) {
+       if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
+           list_del(&(cb->node));
+           V3_Free(cb);
+           v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
+           return 0;
+       }
+    }
+
+    v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
+
+    PrintError(vm, VCORE_NONE, "No callback found!\n");
+
+    return -1;
+}
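
Usage sketch (not part of the patch): the change above protects the paging event callback lists with a reader/writer lock, so that dispatch takes the read lock while registration, unregistration, and teardown take the IRQ-saving write lock. The C fragment below illustrates how a client might consume the passthrough half of the API. Only the v3_register/v3_unregister calls and the callback signature come from the patch itself; pt_event_counter, count_pt_event, the attach/detach wrappers, and the header names are assumptions made for the example.

// Hypothetical client of the callback API added above (illustrative only).
// Only the register/unregister calls and the callback signature are taken
// from the patch; everything else here is an assumption for the example.
#include <palacios/vmm.h>
#include <palacios/vmm_direct_paging.h>

struct pt_event_counter {
    uint64_t count;    // passthrough paging events observed so far
};

static int count_pt_event(struct guest_info *core,
                          struct v3_passthrough_pg_event *event,
                          void *priv_data)
{
    struct pt_event_counter *c = (struct pt_event_counter *)priv_data;

    (void)core; (void)event;   // unused in this trivial example

    // We run under the read lock taken by dispatch_passthrough_event(), so
    // unregistration cannot race with us; however, different cores can
    // dispatch concurrently, so a production counter would use an atomic
    // increment here.
    c->count++;

    return 0;
}

static int counter_attach(struct v3_vm_info *vm, struct pt_event_counter *c)
{
    c->count = 0;
    // Registration takes the write lock with IRQs saved, so it is safe even
    // while other cores are dispatching events.
    return v3_register_passthrough_paging_event_callback(vm, count_pt_event, c);
}

static int counter_detach(struct v3_vm_info *vm, struct pt_event_counter *c)
{
    // Unregistration matches on the (callback, priv_data) pair, mirroring
    // the lookup loop in the patch.
    return v3_unregister_passthrough_paging_event_callback(vm, count_pt_event, c);
}

The nested-paging half of the API is symmetric: swap passthrough for nested in the function names and use struct v3_nested_pg_event in the callback signature.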