X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm_direct_paging.c;h=716e029f92a12617c80c66ce9f5c924e940bcfa8;hb=42b80fddfbfea5c5603b71d80d3e2ba2a53a2074;hp=a736571916a1f7bcf274e18aee1a147bad9b044c;hpb=894733a198529cfaf93f4f4b68b255906154ab56;p=palacios.git

diff --git a/palacios/src/palacios/vmm_direct_paging.c b/palacios/src/palacios/vmm_direct_paging.c
index a736571..716e029 100644
--- a/palacios/src/palacios/vmm_direct_paging.c
+++ b/palacios/src/palacios/vmm_direct_paging.c
@@ -99,19 +99,27 @@ struct passthrough_event_callback {
 
 static int have_passthrough_callbacks(struct guest_info *core)
 {
+    // lock acquisition unnecessary:
+    // the caller will acquire the lock before *iterating* through the list,
+    // so any race will be resolved then
     return !list_empty(&(core->vm_info->passthrough_impl.event_callback_list));
 }
 
 static void dispatch_passthrough_event(struct guest_info *core, struct v3_passthrough_pg_event *event)
 {
     struct passthrough_event_callback *cb,*temp;
-    
+
+    v3_read_lock(&(core->vm_info->passthrough_impl.event_callback_lock));
+
     list_for_each_entry_safe(cb, temp, &(core->vm_info->passthrough_impl.event_callback_list), node) {
        cb->callback(core,event,cb->priv_data);
     }
+
+    v3_read_unlock(&(core->vm_info->passthrough_impl.event_callback_lock));
+
 }
 
@@ -124,6 +132,9 @@ struct nested_event_callback {
 
 static int have_nested_callbacks(struct guest_info *core)
 {
+    // lock acquisition unnecessary:
+    // the caller will acquire the lock before *iterating* through the list,
+    // so any race will be resolved then
     return !list_empty(&(core->vm_info->nested_impl.event_callback_list));
 }
 
@@ -131,12 +142,16 @@ static void dispatch_nested_event(struct guest_info *core, struct v3_nested_pg_event *event)
 {
     struct nested_event_callback *cb,*temp;
 
+    v3_read_lock(&(core->vm_info->nested_impl.event_callback_lock));
+
     list_for_each_entry_safe(cb, temp, &(core->vm_info->nested_impl.event_callback_list), node) {
        cb->callback(core,event,cb->priv_data);
     }
+
+    v3_read_unlock(&(core->vm_info->nested_impl.event_callback_lock));
 }
 
@@ -146,7 +161,10 @@ static addr_t create_generic_pt_page(struct guest_info *core) {
     void * page = 0;
     void *temp;
 
-    temp = V3_AllocPagesExtended(1, PAGE_SIZE_4KB, -1, 0); // no constraints
+    temp = V3_AllocPagesExtended(1, PAGE_SIZE_4KB,
+                                 core->resource_control.pg_node_id,
+                                 core->resource_control.pg_filter_func,
+                                 core->resource_control.pg_filter_state);
 
     if (!temp) {
        PrintError(VM_NONE, VCORE_NONE,"Cannot allocate page\n");
@@ -167,6 +185,10 @@ static addr_t create_generic_pt_page(struct guest_info *core) {
 
 
 int v3_init_passthrough_pts(struct guest_info * info) {
+    if (info->shdw_pg_mode == NESTED_PAGING && is_vmx_nested()) {
+        // skip - ept_init will do this allocation
+        return 0;
+    }
     info->direct_map_pt = (addr_t)V3_PAddr((void *)create_generic_pt_page(info));
     return 0;
 }
 
@@ -175,6 +197,18 @@ int v3_init_passthrough_pts(struct guest_info * info) {
 int v3_free_passthrough_pts(struct guest_info * core) {
     v3_cpu_mode_t mode = v3_get_vm_cpu_mode(core);
 
+    if (core->shdw_pg_mode == NESTED_PAGING && is_vmx_nested()) {
+        // there are no passthrough page tables, but
+        // the EPT implementation is using direct_map_pt to store
+        // the EPT root table pointer... and the EPT tables
+        // are not compatible with regular x86 tables, so we
+        // must not attempt to free them here...
+        return 0;
+    }
+
+    // we are either in shadow paging or in SVM nested paging;
+    // in either case, we can nuke the PTs
+
     // Delete the old direct map page tables
     switch(mode) {
       case REAL:
@@ -185,7 +219,9 @@ int v3_free_passthrough_pts(struct guest_info * core) {
       case LONG:
       case LONG_32_COMPAT:
          // Long mode will only use 32PAE page tables...
-         delete_page_tables_32pae((pdpe32pae_t *)V3_VAddr((void *)(core->direct_map_pt)));
+         if (core->direct_map_pt) {
+             delete_page_tables_32pae((pdpe32pae_t *)V3_VAddr((void *)(core->direct_map_pt)));
+         }
          break;
       default:
          PrintError(core->vm_info, core, "Unknown CPU Mode\n");
@@ -370,12 +406,21 @@ int v3_invalidate_passthrough_addr_range(struct guest_info * info,
 
 int v3_init_passthrough_paging(struct v3_vm_info *vm)
 {
     INIT_LIST_HEAD(&(vm->passthrough_impl.event_callback_list));
+    v3_rw_lock_init(&(vm->passthrough_impl.event_callback_lock));
+    vm->passthrough_impl.inited=1;
     return 0;
 }
 
 int v3_deinit_passthrough_paging(struct v3_vm_info *vm)
 {
     struct passthrough_event_callback *cb,*temp;
+    addr_t flags;
+
+    if (!vm->passthrough_impl.inited) {
+        return 0;
+    }
+
+    flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));
 
     list_for_each_entry_safe(cb,
                              temp,
@@ -384,6 +429,10 @@ int v3_deinit_passthrough_paging(struct v3_vm_info *vm)
        list_del(&(cb->node));
        V3_Free(cb);
     }
+
+    v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
+
+    v3_rw_lock_deinit(&(vm->passthrough_impl.event_callback_lock));
 
     return 0;
 }
@@ -401,6 +450,64 @@ int v3_deinit_passthrough_paging_core(struct guest_info *core)
 }
 
 
+int v3_register_passthrough_paging_event_callback(struct v3_vm_info *vm,
+                                                  int (*callback)(struct guest_info *core,
+                                                                  struct v3_passthrough_pg_event *,
+                                                                  void *priv_data),
+                                                  void *priv_data)
+{
+    struct passthrough_event_callback *ec = V3_Malloc(sizeof(struct passthrough_event_callback));
+    addr_t flags;
+
+    if (!ec) {
+        PrintError(vm, VCORE_NONE, "Unable to allocate for a passthrough paging event callback\n");
+        return -1;
+    }
+
+    ec->callback = callback;
+    ec->priv_data = priv_data;
+
+    flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));
+    list_add(&(ec->node),&(vm->passthrough_impl.event_callback_list));
+    v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
+
+    return 0;
+}
+
+
+int v3_unregister_passthrough_paging_event_callback(struct v3_vm_info *vm,
+                                                    int (*callback)(struct guest_info *core,
+                                                                    struct v3_passthrough_pg_event *,
+                                                                    void *priv_data),
+                                                    void *priv_data)
+{
+    struct passthrough_event_callback *cb,*temp;
+    addr_t flags;
+
+    flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));
+
+    list_for_each_entry_safe(cb,
+                             temp,
+                             &(vm->passthrough_impl.event_callback_list),
+                             node) {
+        if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
+            list_del(&(cb->node));
+            V3_Free(cb);
+            v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
+            return 0;
+        }
+    }
+
+    v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
+
+    PrintError(vm, VCORE_NONE, "No callback found!\n");
+
+    return -1;
+}
+
+
 // inline nested paging support for Intel and AMD
 #include "svm_npt.h"
 #include "vmx_npt.h"
@@ -516,6 +623,8 @@ int v3_invalidate_nested_addr_range(struct guest_info * info,
 
 int v3_init_nested_paging(struct v3_vm_info *vm)
 {
     INIT_LIST_HEAD(&(vm->nested_impl.event_callback_list));
+    v3_rw_lock_init(&(vm->nested_impl.event_callback_lock));
+    vm->nested_impl.inited=1;
     return 0;
 }
 
@@ -525,6 +634,8 @@ int v3_init_nested_paging_core(struct guest_info *core, void *hwinfo)
        return init_ept(core, (struct vmx_hw_info *) hwinfo);
     } else {
        // no initialization for SVM
+       // the direct map page tables are used, since the
+       // nested pt format is identical to the main pt format
        return 0;
     }
 }
 
@@ -532,7 +643,14 @@ int v3_init_nested_paging_core(struct guest_info *core, void *hwinfo)
 
 int v3_deinit_nested_paging(struct v3_vm_info *vm)
 {
     struct nested_event_callback *cb,*temp;
+    addr_t flags;
 
+    if (!vm->nested_impl.inited) {
+        return 0;
+    }
+
+    flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));
+
     list_for_each_entry_safe(cb,
                              temp,
                              &(vm->nested_impl.event_callback_list),
@@ -541,12 +659,82 @@ int v3_deinit_nested_paging(struct v3_vm_info *vm)
        V3_Free(cb);
     }
 
+    v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
+
+    v3_rw_lock_deinit(&(vm->nested_impl.event_callback_lock));
+
     return 0;
 }
 
 int v3_deinit_nested_paging_core(struct guest_info *core)
 {
-    // nothing to do.. probably dealloc? FIXME PAD
+    if (core->shdw_pg_mode == NESTED_PAGING) {
+        if (is_vmx_nested()) {
+            return deinit_ept(core);
+        } else {
+            // SVM nested deinit is handled by the passthrough paging teardown
+            return 0;
+        }
+    } else {
+        // not relevant
+        return 0;
+    }
+}
 
-    return 0;
+
+int v3_register_nested_paging_event_callback(struct v3_vm_info *vm,
+                                             int (*callback)(struct guest_info *core,
+                                                             struct v3_nested_pg_event *,
+                                                             void *priv_data),
+                                             void *priv_data)
+{
+    struct nested_event_callback *ec = V3_Malloc(sizeof(struct nested_event_callback));
+    addr_t flags;
+
+    if (!ec) {
+        PrintError(vm, VCORE_NONE, "Unable to allocate for a nested paging event callback\n");
+        return -1;
+    }
+
+    ec->callback = callback;
+    ec->priv_data = priv_data;
+
+    flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));
+    list_add(&(ec->node),&(vm->nested_impl.event_callback_list));
+    v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
+
+    return 0;
+}
+
+
+int v3_unregister_nested_paging_event_callback(struct v3_vm_info *vm,
+                                               int (*callback)(struct guest_info *core,
+                                                               struct v3_nested_pg_event *,
+                                                               void *priv_data),
+                                               void *priv_data)
+{
+    struct nested_event_callback *cb,*temp;
+    addr_t flags;
+
+    flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));
+
+    list_for_each_entry_safe(cb,
+                             temp,
+                             &(vm->nested_impl.event_callback_list),
+                             node) {
+        if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
+            list_del(&(cb->node));
+            V3_Free(cb);
+            v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
+            return 0;
+        }
+    }
+
+    v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
+
+    PrintError(vm, VCORE_NONE, "No callback found!\n");
+
+    return -1;
 }
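
The locking discipline is identical in the passthrough and nested paths: dispatch takes the new lock for reading, while registration, unregistration, and deinit take it for writing with interrupts saved. A condensed sketch of the pattern follows, assuming a v3_rw_lock_t type behind the v3_rw_lock_init() call (only the function names, not the type name, appear in this diff):

    static v3_rw_lock_t example_lock;   // type name assumed

    static void dispatch_path(void)
    {
        v3_read_lock(&example_lock);          // concurrent dispatchers may overlap
        /* walk the callback list and invoke each callback */
        v3_read_unlock(&example_lock);
    }

    static void update_path(void)
    {
        addr_t flags;
        flags = v3_write_lock_irqsave(&example_lock);     // exclusive, IRQ-safe
        /* list_add() or list_del() on the callback list */
        v3_write_unlock_irqrestore(&example_lock, flags);
    }

This is also why have_passthrough_callbacks() and have_nested_callbacks() can skip the lock, as their comments note: a stale list_empty() answer at worst skips or delays one dispatch, and dispatch itself iterates under the read lock, so an empty list simply produces zero callback invocations.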
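The create_generic_pt_page() change threads a NUMA node id and a filter function/state pair from core->resource_control into V3_AllocPagesExtended(), replacing the unconstrained (-1, 0) arguments. The filter's signature is not visible in this diff, so the sketch below assumes a predicate over a candidate physical address with the state pointer passed through; below_4g_filter and the argument order are assumptions for illustration only.

    // hypothetical filter: accept only frames below 4 GB, e.g. for page
    // tables that must remain addressable by a 32-bit consumer
    static int below_4g_filter(void *paddr, void *filter_state)
    {
        (void)filter_state;                 // unused in this sketch
        return ((addr_t)paddr) < 0x100000000ULL;
    }

    // a core could then be configured as:
    //   core->resource_control.pg_filter_func  = below_4g_filter;
    //   core->resource_control.pg_filter_state = NULL;
    //   core->resource_control.pg_node_id      = -1;   // any NUMA node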
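Several hunks guard on the same condition, core->shdw_pg_mode == NESTED_PAGING combined with is_vmx_nested(), because direct_map_pt is overloaded. Restated as a predicate (core_uses_ept is a hypothetical helper, not part of the patch):

    // When this is true, direct_map_pt holds an EPT root table that only
    // init_ept()/deinit_ept() from vmx_npt.h may allocate or free, and the
    // EPT entry format is incompatible with regular x86 page tables.
    // Otherwise direct_map_pt holds ordinary passthrough x86 page tables
    // owned by vmm_direct_paging.c (shadow paging, or SVM nested paging,
    // whose table format is identical to the normal x86 format).
    static inline int core_uses_ept(struct guest_info *core)
    {
        return (core->shdw_pg_mode == NESTED_PAGING) && is_vmx_nested();
    }

The null check added to the LONG case of v3_free_passthrough_pts() fits the same story: when the allocation in v3_init_passthrough_pts() is skipped or fails, direct_map_pt may legitimately be unset by the time a teardown path runs.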
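The register/unregister pairs added above are the public face of this patch: external subsystems can now observe passthrough and nested paging events. A minimal sketch of a consumer follows, using only the signatures visible in the diff; the event structure's fields are not shown here, so the callback treats it as opaque, and my_counter_state / count_pt_events are hypothetical names introduced for illustration.

    struct my_counter_state {
        uint64_t events_seen;
    };

    // counts dispatched events; the event itself is treated as opaque
    static int count_pt_events(struct guest_info *core,
                               struct v3_passthrough_pg_event *event,
                               void *priv_data)
    {
        struct my_counter_state *s = (struct my_counter_state *)priv_data;
        s->events_seen++;
        return 0;
    }

    // registration and matching teardown:
    //   v3_register_passthrough_paging_event_callback(vm, count_pt_events, &state);
    //   ...
    //   v3_unregister_passthrough_paging_event_callback(vm, count_pt_events, &state);

Unregistration matches on the (callback, priv_data) pair, so the same function can be registered several times with distinct state and removed individually.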