X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm_direct_paging.c;h=1175b2560c05c176e755212cd3ff49630a7d82a9;hb=b58fe2254858e3ecc94be5d86f2a93f2cfe0a0d5;hp=e2fad695db4ee8d68bee82dec2f1f8dafd633c94;hpb=ce0f119828348c3c57a00c4aa268a8a223ccd7f8;p=palacios.git

diff --git a/palacios/src/palacios/vmm_direct_paging.c b/palacios/src/palacios/vmm_direct_paging.c
index e2fad69..1175b25 100644
--- a/palacios/src/palacios/vmm_direct_paging.c
+++ b/palacios/src/palacios/vmm_direct_paging.c
@@ -99,19 +99,27 @@ struct passthrough_event_callback {
 
 static int have_passthrough_callbacks(struct guest_info *core)
 {
+    // lock acquisition unnecessary
+    // the caller will acquire the lock before *iterating* through the list,
+    // so any race will be resolved then
     return !list_empty(&(core->vm_info->passthrough_impl.event_callback_list));
 }
 
 static void dispatch_passthrough_event(struct guest_info *core, struct v3_passthrough_pg_event *event)
 {
     struct passthrough_event_callback *cb,*temp;
-    
+
+    v3_read_lock(&(core->vm_info->passthrough_impl.event_callback_lock));
+
     list_for_each_entry_safe(cb,
                              temp,
                              &(core->vm_info->passthrough_impl.event_callback_list),
                              node) {
        cb->callback(core,event,cb->priv_data);
     }
+
+    v3_read_unlock(&(core->vm_info->passthrough_impl.event_callback_lock));
+
 }
 
 struct nested_event_callback {
@@ -124,6 +132,9 @@ struct nested_event_callback {
 
 static int have_nested_callbacks(struct guest_info *core)
 {
+    // lock acquisition unnecessary
+    // the caller will acquire the lock before *iterating* through the list,
+    // so any race will be resolved then
     return !list_empty(&(core->vm_info->nested_impl.event_callback_list));
 }
 
@@ -131,12 +142,16 @@ static void dispatch_nested_event(struct guest_info *core, struct v3_nested_pg_e
 {
     struct nested_event_callback *cb,*temp;
 
+    v3_read_lock(&(core->vm_info->nested_impl.event_callback_lock));
+
     list_for_each_entry_safe(cb,
                              temp,
                              &(core->vm_info->nested_impl.event_callback_list),
                              node) {
        cb->callback(core,event,cb->priv_data);
     }
+
+    v3_read_unlock(&(core->vm_info->nested_impl.event_callback_lock));
 }
 
@@ -146,7 +161,7 @@ static addr_t create_generic_pt_page(struct guest_info *core) {
     void * page = 0;
     void *temp;
 
-    temp = V3_AllocPagesExtended(1, PAGE_SIZE_4KB, -1, 0); // no constraints
+    temp = V3_AllocPagesExtended(1, PAGE_SIZE_4KB, -1, 0, 0); // no constraints
 
     if (!temp) {
        PrintError(VM_NONE, VCORE_NONE,"Cannot allocate page\n");
@@ -167,6 +182,10 @@ static addr_t create_generic_pt_page(struct guest_info *core) {
 
 int v3_init_passthrough_pts(struct guest_info * info) {
+    if (info->shdw_pg_mode == NESTED_PAGING && is_vmx_nested()) {
+       // skip - ept_init will do this allocation
+       return 0;
+    }
     info->direct_map_pt = (addr_t)V3_PAddr((void *)create_generic_pt_page(info));
     return 0;
 }
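The two functions above establish the dispatch pattern used throughout this patch: an unlocked emptiness check gates event construction, and the dispatch itself walks the callback list under the read lock. A minimal caller sketch follows; the function name example_fault_path and the event field names are hypothetical illustrations, not part of this patch:

    /* Caller sketch (not part of the patch). The cheap unlocked check
     * skips event construction when no callbacks are registered; a racing
     * unregister simply yields an empty walk once dispatch takes the read
     * lock. Field names on v3_passthrough_pg_event are assumed. */
    static int example_fault_path(struct guest_info *core, addr_t fault_addr,
                                  pf_error_t error_code)
    {
        if (have_passthrough_callbacks(core)) {       // unlocked peek
            struct v3_passthrough_pg_event event;

            event.event_type = PASSTHROUGH_PAGEFAULT; // assumed enum value
            event.gpa        = fault_addr;            // assumed field
            event.error_code = error_code;            // assumed field

            dispatch_passthrough_event(core, &event); // read lock taken inside
        }
        return 0;
    }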
@@ -175,6 +194,18 @@ int v3_init_passthrough_pts(struct guest_info * info) {
 int v3_free_passthrough_pts(struct guest_info * core) {
     v3_cpu_mode_t mode = v3_get_vm_cpu_mode(core);
 
+    if (core->shdw_pg_mode == NESTED_PAGING && is_vmx_nested()) {
+       // there are no passthrough page tables, but
+       // the EPT implementation is using direct_map_pt to store
+       // the EPT root table pointer... and the EPT tables
+       // are not compatible with regular x86 tables, so we
+       // must not attempt to free them here...
+       return 0;
+    }
+
+    // we are either in shadow or in SVM nested
+    // in either case, we can nuke the PTs
+
     // Delete the old direct map page tables
     switch(mode) {
     case REAL:
@@ -185,7 +216,9 @@ int v3_free_passthrough_pts(struct guest_info * core) {
     case LONG:
     case LONG_32_COMPAT:
        // Long mode will only use 32PAE page tables...
-       delete_page_tables_32pae((pdpe32pae_t *)V3_VAddr((void *)(core->direct_map_pt)));
+       if (core->direct_map_pt) {
+           delete_page_tables_32pae((pdpe32pae_t *)V3_VAddr((void *)(core->direct_map_pt)));
+       }
        break;
     default:
        PrintError(core->vm_info, core, "Unknown CPU Mode\n");
@@ -370,12 +403,16 @@ int v3_invalidate_passthrough_addr_range(struct guest_info * info,
 int v3_init_passthrough_paging(struct v3_vm_info *vm)
 {
     INIT_LIST_HEAD(&(vm->passthrough_impl.event_callback_list));
+    v3_rw_lock_init(&(vm->passthrough_impl.event_callback_lock));
     return 0;
 }
 
 int v3_deinit_passthrough_paging(struct v3_vm_info *vm)
 {
     struct passthrough_event_callback *cb,*temp;
+    addr_t flags;
+
+    flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));
 
     list_for_each_entry_safe(cb,
                              temp,
@@ -384,6 +421,10 @@ int v3_deinit_passthrough_paging(struct v3_vm_info *vm)
        list_del(&(cb->node));
        V3_Free(cb);
     }
+
+    v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
+
+    v3_rw_lock_deinit(&(vm->passthrough_impl.event_callback_lock));
 
     return 0;
 }
@@ -408,6 +449,7 @@ int v3_register_passthrough_paging_event_callback(struct v3_vm_info *vm,
                                   void *priv_data)
 {
     struct passthrough_event_callback *ec = V3_Malloc(sizeof(struct passthrough_event_callback));
+    addr_t flags;
 
     if (!ec) {
        PrintError(vm, VCORE_NONE, "Unable to allocate for a nested paging event callback\n");
@@ -417,7 +459,9 @@ int v3_register_passthrough_paging_event_callback(struct v3_vm_info *vm,
     ec->callback = callback;
     ec->priv_data = priv_data;
 
+    flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));
     list_add(&(ec->node),&(vm->passthrough_impl.event_callback_list));
+    v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
 
     return 0;
@@ -432,7 +476,10 @@ int v3_unregister_passthrough_paging_event_callback(struct v3_vm_info *vm,
                                     void *priv_data)
 {
     struct passthrough_event_callback *cb,*temp;
-    
+    addr_t flags;
+
+    flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));
+
     list_for_each_entry_safe(cb,
                              temp,
                              &(vm->passthrough_impl.event_callback_list),
@@ -440,10 +487,13 @@ int v3_unregister_passthrough_paging_event_callback(struct v3_vm_info *vm,
        if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
            list_del(&(cb->node));
            V3_Free(cb);
+           v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
            return 0;
        }
     }
+
+    v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
+
     PrintError(vm, VCORE_NONE, "No callback found!\n");
 
     return -1;
@@ -565,6 +615,7 @@ int v3_invalidate_nested_addr_range(struct guest_info * info,
 int v3_init_nested_paging(struct v3_vm_info *vm)
 {
     INIT_LIST_HEAD(&(vm->nested_impl.event_callback_list));
+    v3_rw_lock_init(&(vm->nested_impl.event_callback_lock));
     return 0;
 }
@@ -574,6 +625,8 @@ int v3_init_nested_paging_core(struct guest_info *core, void *hwinfo)
        return init_ept(core, (struct vmx_hw_info *) hwinfo);
     } else {
        // no initialization for SVM
+       // the direct map page tables are used since the
+       // nested pt format is identical to the main pt format
        return 0;
     }
 }
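Every list mutation in the patch follows the same write-side discipline: take the writer lock with interrupts saved, mutate, then restore. A standalone sketch of that discipline, assuming the v3_rw_lock API behaves as it is used above (the lock type name v3_rw_lock_t and the demo_* names are assumptions for illustration):

    /* Discipline sketch (not part of the patch). Readers may iterate
     * concurrently under the read lock; writers are exclusive and run
     * with IRQs saved, presumably so a dispatch from interrupt context
     * on the same CPU cannot deadlock against a local writer. */
    static struct list_head demo_list;
    static v3_rw_lock_t demo_lock;                   // type name assumed

    static void demo_init(void)
    {
        INIT_LIST_HEAD(&demo_list);
        v3_rw_lock_init(&demo_lock);
    }

    static void demo_add(struct passthrough_event_callback *ec)
    {
        addr_t flags;

        flags = v3_write_lock_irqsave(&demo_lock);   // exclusive, IRQs off
        list_add(&(ec->node), &demo_list);
        v3_write_unlock_irqrestore(&demo_lock, flags);
    }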
@@ -581,7 +634,10 @@ int
 v3_deinit_nested_paging(struct v3_vm_info *vm)
 {
     struct nested_event_callback *cb,*temp;
+    addr_t flags;
+    flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));
+
     list_for_each_entry_safe(cb,
                              temp,
                              &(vm->nested_impl.event_callback_list),
@@ -590,14 +646,26 @@ int v3_deinit_nested_paging(struct v3_vm_info *vm)
        V3_Free(cb);
     }
 
+    v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
+
+    v3_rw_lock_deinit(&(vm->nested_impl.event_callback_lock));
+
     return 0;
 }
 
 int v3_deinit_nested_paging_core(struct guest_info *core)
 {
-    // nothing to do.. probably dealloc? FIXME PAD
-    
-    return 0;
+    if (core->shdw_pg_mode == NESTED_PAGING) {
+       if (is_vmx_nested()) {
+           return deinit_ept(core);
+       } else {
+           // SVM nested deinit is handled by the passthrough paging teardown
+           return 0;
+       }
+    } else {
+       // not relevant
+       return 0;
+    }
 }
@@ -608,6 +676,7 @@ int v3_register_nested_paging_event_callback(struct v3_vm_info *vm,
                              void *priv_data)
 {
     struct nested_event_callback *ec = V3_Malloc(sizeof(struct nested_event_callback));
+    addr_t flags;
 
     if (!ec) {
        PrintError(vm, VCORE_NONE, "Unable to allocate for a nested paging event callback\n");
@@ -617,7 +686,9 @@ int v3_register_nested_paging_event_callback(struct v3_vm_info *vm,
     ec->callback = callback;
     ec->priv_data = priv_data;
 
+    flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));
     list_add(&(ec->node),&(vm->nested_impl.event_callback_list));
+    v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
 
     return 0;
@@ -632,6 +703,9 @@ int v3_unregister_nested_paging_event_callback(struct v3_vm_info *vm,
                                void *priv_data)
 {
     struct nested_event_callback *cb,*temp;
+    addr_t flags;
+
+    flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));
 
     list_for_each_entry_safe(cb,
                              temp,
@@ -640,10 +714,13 @@ int v3_unregister_nested_paging_event_callback(struct v3_vm_info *vm,
        if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
            list_del(&(cb->node));
            V3_Free(cb);
+           v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
            return 0;
        }
     }
+
+    v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
+
     PrintError(vm, VCORE_NONE, "No callback found!\n");
 
     return -1;
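Note that both unregister paths release the write lock on the found and not-found exits alike; returning from inside list_for_each_entry_safe() without the extra unlock would leave the lock held with interrupts masked. A usage sketch of the callback API, with hypothetical consumer names (my_nested_cb, my_state):

    /* Hypothetical consumer (not part of the patch). The callback runs
     * under the read lock, so it must not call the register/unregister
     * (write-side) functions, or it may deadlock on a non-recursive lock. */
    struct my_state {
        int events_seen;
    };

    static int my_nested_cb(struct guest_info *core,
                            struct v3_nested_pg_event *event,
                            void *priv_data)
    {
        struct my_state *s = (struct my_state *)priv_data;
        s->events_seen++;      // inspect event here as needed
        return 0;
    }

    // at setup:    v3_register_nested_paging_event_callback(vm, my_nested_cb, &state);
    // at teardown: v3_unregister_nested_paging_event_callback(vm, my_nested_cb, &state);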