diff --git a/palacios/src/palacios/vmm_direct_paging.c b/palacios/src/palacios/vmm_direct_paging.c
index 32aba7c..1175b25 100644
--- a/palacios/src/palacios/vmm_direct_paging.c
+++ b/palacios/src/palacios/vmm_direct_paging.c
@@ -99,19 +99,27 @@ struct passthrough_event_callback {
 
 static int have_passthrough_callbacks(struct guest_info *core)
 {
+    // lock acquisition unnecessary
+    // caller will acquire the lock before *iterating* through the list
+    // so any race will be resolved then
     return !list_empty(&(core->vm_info->passthrough_impl.event_callback_list));
 }
 
 static void dispatch_passthrough_event(struct guest_info *core, struct v3_passthrough_pg_event *event)
 {
     struct passthrough_event_callback *cb,*temp;
-    
+
+    v3_read_lock(&(core->vm_info->passthrough_impl.event_callback_lock));
+
     list_for_each_entry_safe(cb,
                              temp,
                              &(core->vm_info->passthrough_impl.event_callback_list),
                              node) {
        cb->callback(core,event,cb->priv_data);
     }
+
+    v3_read_unlock(&(core->vm_info->passthrough_impl.event_callback_lock));
+
 }
 
 struct nested_event_callback {
@@ -124,6 +132,9 @@ struct nested_event_callback {
 
 static int have_nested_callbacks(struct guest_info *core)
 {
+    // lock acquisition unnecessary
+    // caller will acquire the lock before *iterating* through the list
+    // so any race will be resolved then
     return !list_empty(&(core->vm_info->nested_impl.event_callback_list));
 }
 
@@ -131,12 +142,16 @@ static void dispatch_nested_event(struct guest_info *core, struct v3_nested_pg_e
 {
     struct nested_event_callback *cb,*temp;
 
+    v3_read_lock(&(core->vm_info->nested_impl.event_callback_lock));
+
     list_for_each_entry_safe(cb,
                              temp,
                              &(core->vm_info->nested_impl.event_callback_list),
                              node) {
        cb->callback(core,event,cb->priv_data);
     }
+
+    v3_read_unlock(&(core->vm_info->nested_impl.event_callback_lock));
 }
 
 
@@ -146,7 +161,7 @@ static addr_t create_generic_pt_page(struct guest_info *core) {
     void * page = 0;
     void *temp;
 
-    temp = V3_AllocPagesExtended(1, PAGE_SIZE_4KB, -1, 0); // no constraints
+    temp = V3_AllocPagesExtended(1, PAGE_SIZE_4KB, -1, 0, 0); // no constraints
 
     if (!temp) {
        PrintError(VM_NONE, VCORE_NONE,"Cannot allocate page\n");
@@ -167,6 +182,10 @@ static addr_t create_generic_pt_page(struct guest_info *core) {
 
 
 int v3_init_passthrough_pts(struct guest_info * info) {
+    if (info->shdw_pg_mode == NESTED_PAGING && is_vmx_nested()) {
+        // skip - ept_init will do this allocation
+        return 0;
+    }
     info->direct_map_pt = (addr_t)V3_PAddr((void *)create_generic_pt_page(info));
     return 0;
 }
@@ -175,6 +194,18 @@ int v3_init_passthrough_pts(struct guest_info * info) {
 int v3_free_passthrough_pts(struct guest_info * core) {
     v3_cpu_mode_t mode = v3_get_vm_cpu_mode(core);
 
+    if (core->shdw_pg_mode == NESTED_PAGING && is_vmx_nested()) {
+        // there are no passthrough page tables, but
+        // the EPT implementation is using direct_map_pt to store
+        // the EPT root table pointer... and the EPT tables
+        // are not compatible with regular x86 tables, so we
+        // must not attempt to free them here...
+        return 0;
+    }
+
+    // we are either in shadow or in SVM nested
+    // in either case, we can nuke the PTs
+
     // Delete the old direct map page tables
     switch(mode) {
     case REAL:
@@ -185,7 +216,9 @@ int v3_free_passthrough_pts(struct guest_info * core) {
     case LONG:
     case LONG_32_COMPAT:
        // Long mode will only use 32PAE page tables...
-       delete_page_tables_32pae((pdpe32pae_t *)V3_VAddr((void *)(core->direct_map_pt)));
+       if (core->direct_map_pt) {
+           delete_page_tables_32pae((pdpe32pae_t *)V3_VAddr((void *)(core->direct_map_pt)));
+       }
        break;
     default:
        PrintError(core->vm_info, core, "Unknown CPU Mode\n");
@@ -264,7 +297,7 @@ int v3_handle_passthrough_pagefault(struct guest_info * info, addr_t fault_addr,
     case LONG_32_COMPAT:
        // Long mode will only use 32PAE page tables...
        rc=handle_passthrough_pagefault_32pae(info, fault_addr, error_code, actual_start, actual_end);
-
+       break;
     default:
        PrintError(info->vm_info, info, "Unknown CPU Mode\n");
        break;
@@ -308,8 +341,8 @@ int v3_invalidate_passthrough_addr(struct guest_info * info, addr_t inv_addr,
     case LONG:
     case LONG_32_COMPAT:
        // Long mode will only use 32PAE page tables...
-       rc=invalidate_addr_32pae(info, inv_addr, actual_start, actual_end); 
-
+       rc=invalidate_addr_32pae(info, inv_addr, actual_start, actual_end);
+       break;
     default:
        PrintError(info->vm_info, info, "Unknown CPU Mode\n");
        break;
@@ -352,7 +385,7 @@ int v3_invalidate_passthrough_addr_range(struct guest_info * info,
     case LONG_32_COMPAT:
        // Long mode will only use 32PAE page tables...
        rc=invalidate_addr_32pae_range(info, inv_addr_start, inv_addr_end, actual_start, actual_end);
-
+       break;
     default:
        PrintError(info->vm_info, info, "Unknown CPU Mode\n");
        break;
@@ -370,12 +403,16 @@ int v3_invalidate_passthrough_addr_range(struct guest_info * info,
 int v3_init_passthrough_paging(struct v3_vm_info *vm)
 {
     INIT_LIST_HEAD(&(vm->passthrough_impl.event_callback_list));
+    v3_rw_lock_init(&(vm->passthrough_impl.event_callback_lock));
     return 0;
 }
 
 int v3_deinit_passthrough_paging(struct v3_vm_info *vm)
 {
     struct passthrough_event_callback *cb,*temp;
+    addr_t flags;
+
+    flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));
 
     list_for_each_entry_safe(cb,
                              temp,
@@ -384,6 +421,10 @@ int v3_deinit_passthrough_paging(struct v3_vm_info *vm)
        list_del(&(cb->node));
        V3_Free(cb);
     }
+
+    v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
+
+    v3_rw_lock_deinit(&(vm->passthrough_impl.event_callback_lock));
 
     return 0;
 }
@@ -401,6 +442,64 @@ int v3_deinit_passthrough_paging_core(struct guest_info *core)
 }
 
 
+int v3_register_passthrough_paging_event_callback(struct v3_vm_info *vm,
+                                                  int (*callback)(struct guest_info *core,
+                                                                  struct v3_passthrough_pg_event *,
+                                                                  void *priv_data),
+                                                  void *priv_data)
+{
+    struct passthrough_event_callback *ec = V3_Malloc(sizeof(struct passthrough_event_callback));
+    addr_t flags;
+
+    if (!ec) {
+        PrintError(vm, VCORE_NONE, "Unable to allocate for a passthrough paging event callback\n");
+        return -1;
+    }
+
+    ec->callback = callback;
+    ec->priv_data = priv_data;
+
+    flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));
+    list_add(&(ec->node),&(vm->passthrough_impl.event_callback_list));
+    v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
+
+    return 0;
+
+}
+
+
+
+int v3_unregister_passthrough_paging_event_callback(struct v3_vm_info *vm,
+                                                    int (*callback)(struct guest_info *core,
+                                                                    struct v3_passthrough_pg_event *,
+                                                                    void *priv_data),
+                                                    void *priv_data)
+{
+    struct passthrough_event_callback *cb,*temp;
+    addr_t flags;
+
+    flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));
+
+    list_for_each_entry_safe(cb,
+                             temp,
+                             &(vm->passthrough_impl.event_callback_list),
+                             node) {
+        if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
+            list_del(&(cb->node));
+            V3_Free(cb);
+            v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
+            return 0;
+        }
+    }
+
+    v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
+
+    PrintError(vm, VCORE_NONE, "No callback found!\n");
+
+    return -1;
+}
+
+
 // inline nested paging support for Intel and AMD
 #include "svm_npt.h"
 #include "vmx_npt.h"
@@ -516,6 +615,7 @@ int v3_invalidate_nested_addr_range(struct guest_info * info,
 
 int v3_init_nested_paging(struct v3_vm_info *vm)
 {
     INIT_LIST_HEAD(&(vm->nested_impl.event_callback_list));
+    v3_rw_lock_init(&(vm->nested_impl.event_callback_lock));
     return 0;
 }
@@ -525,6 +625,8 @@ int v3_init_nested_paging_core(struct guest_info *core, void *hwinfo)
        return init_ept(core, (struct vmx_hw_info *) hwinfo);
     } else {
        // no initialization for SVM
+       // the direct map page tables are used since the
+       // nested pt format is identical to the main pt format
        return 0;
     }
 }
@@ -532,7 +634,10 @@ int v3_init_nested_paging_core(struct guest_info *core, void *hwinfo)
 int v3_deinit_nested_paging(struct v3_vm_info *vm)
 {
     struct nested_event_callback *cb,*temp;
+    addr_t flags;
 
+    flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));
+
     list_for_each_entry_safe(cb,
                              temp,
                              &(vm->nested_impl.event_callback_list),
@@ -541,12 +646,82 @@ int v3_deinit_nested_paging(struct v3_vm_info *vm)
        V3_Free(cb);
     }
 
+    v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
+
+    v3_rw_lock_deinit(&(vm->nested_impl.event_callback_lock));
+
     return 0;
 }
 
 int v3_deinit_nested_paging_core(struct guest_info *core)
 {
-    // nothing to do.. probably dealloc? FIXME PAD
+    if (core->shdw_pg_mode == NESTED_PAGING) {
+        if (is_vmx_nested()) {
+            return deinit_ept(core);
+        } else {
+            // SVM nested deinit is handled by the passthrough paging teardown
+            return 0;
+        }
+    } else {
+        // not relevant
+        return 0;
+    }
+}
 
-    return 0;
+
+int v3_register_nested_paging_event_callback(struct v3_vm_info *vm,
+                                             int (*callback)(struct guest_info *core,
+                                                             struct v3_nested_pg_event *,
+                                                             void *priv_data),
+                                             void *priv_data)
+{
+    struct nested_event_callback *ec = V3_Malloc(sizeof(struct nested_event_callback));
+    addr_t flags;
+
+    if (!ec) {
+        PrintError(vm, VCORE_NONE, "Unable to allocate for a nested paging event callback\n");
+        return -1;
+    }
+
+    ec->callback = callback;
+    ec->priv_data = priv_data;
+
+    flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));
+    list_add(&(ec->node),&(vm->nested_impl.event_callback_list));
+    v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
+
+    return 0;
+
+}
+
+
+
+int v3_unregister_nested_paging_event_callback(struct v3_vm_info *vm,
+                                               int (*callback)(struct guest_info *core,
+                                                               struct v3_nested_pg_event *,
+                                                               void *priv_data),
+                                               void *priv_data)
+{
+    struct nested_event_callback *cb,*temp;
+    addr_t flags;
+
+    flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));
+
+    list_for_each_entry_safe(cb,
+                             temp,
+                             &(vm->nested_impl.event_callback_list),
+                             node) {
+        if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
+            list_del(&(cb->node));
+            V3_Free(cb);
+            v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
+            return 0;
+        }
+    }
+
+    v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
+
+    PrintError(vm, VCORE_NONE, "No callback found!\n");
+
+    return -1;
 }
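
The locking discipline in this patch follows a common pattern: have_passthrough_callbacks() and have_nested_callbacks() test list_empty() without taking the lock, because any caller that acts on the answer takes the read lock before iterating, while registration and unregistration mutate the list only under the write lock. The standalone sketch below illustrates the same pattern with POSIX primitives; pthread_rwlock_t and the hand-rolled linked list are stand-ins for Palacios' v3_rw_lock and list_head machinery, and every name in it is illustrative rather than part of the Palacios API.

/* rwlock_callbacks.c - illustrative only, not Palacios code.
 * Mirrors the event-callback pattern used in the patch:
 *   - emptiness check without the lock
 *   - dispatch under a read lock
 *   - register/unregister under the write lock
 * Build: cc -pthread rwlock_callbacks.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct callback {
    void (*fn)(int event, void *priv);
    void *priv;
    struct callback *next;
};

static struct callback *cb_list = NULL;
static pthread_rwlock_t cb_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Unlocked check, as in have_passthrough_callbacks(): a racy, stale
 * answer is tolerated because dispatch() re-reads the list under the
 * read lock before acting on it. */
static int have_callbacks(void)
{
    return cb_list != NULL;
}

static void dispatch(int event)
{
    pthread_rwlock_rdlock(&cb_lock);   /* many dispatchers may run concurrently */
    for (struct callback *c = cb_list; c; c = c->next) {
        c->fn(event, c->priv);
    }
    pthread_rwlock_unlock(&cb_lock);
}

static int register_cb(void (*fn)(int, void *), void *priv)
{
    struct callback *c = malloc(sizeof(*c));

    if (!c) {
        return -1;
    }
    c->fn = fn;
    c->priv = priv;

    pthread_rwlock_wrlock(&cb_lock);   /* writers are exclusive */
    c->next = cb_list;
    cb_list = c;
    pthread_rwlock_unlock(&cb_lock);
    return 0;
}

static int unregister_cb(void (*fn)(int, void *), void *priv)
{
    pthread_rwlock_wrlock(&cb_lock);
    for (struct callback **pp = &cb_list; *pp; pp = &(*pp)->next) {
        if ((*pp)->fn == fn && (*pp)->priv == priv) {
            struct callback *dead = *pp;
            *pp = dead->next;          /* unlink while holding the write lock */
            pthread_rwlock_unlock(&cb_lock);
            free(dead);                /* already unreachable, safe to free unlocked */
            return 0;
        }
    }
    pthread_rwlock_unlock(&cb_lock);
    return -1;                         /* no callback found */
}

static void print_event(int event, void *priv)
{
    printf("%s: event %d\n", (const char *)priv, event);
}

int main(void)
{
    register_cb(print_event, (void *)"observer");
    if (have_callbacks()) {
        dispatch(42);
    }
    unregister_cb(print_event, (void *)"observer");
    return 0;
}

The only liberty taken is freeing the node after the unlock rather than before it, as the patch's v3_unregister_* functions do; both orders are safe once the node is unlinked, since no new reader can reach it.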
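
The break statements added to the LONG_32_COMPAT arms of v3_handle_passthrough_pagefault(), v3_invalidate_passthrough_addr(), and v3_invalidate_passthrough_addr_range() are a behavioral fix, not cleanup: without them, control fell through into default: and the "Unknown CPU Mode" error path ran even after a successful 32PAE operation. A minimal reproduction of the failure mode, with hypothetical names rather than Palacios code:

/* fallthrough.c - minimal reproduction of the bug the added breaks fix. */
#include <stdio.h>

enum cpu_mode { REAL, LONG_32_COMPAT };

static int handle(enum cpu_mode mode)
{
    int rc = -1;

    switch (mode) {
    case LONG_32_COMPAT:
        rc = 0;          /* the 32PAE handler succeeded... */
        /* ...but with no break here, control falls into default: */
    default:
        printf("Unknown CPU Mode\n");
        break;
    }
    return rc;
}

int main(void)
{
    /* Prints "Unknown CPU Mode" even though the case succeeded;
     * adding `break;` after rc = 0 is exactly the patch's fix. */
    return handle(LONG_32_COMPAT);
}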
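
Finally, a sketch of how a consumer might use the new registration API. This fragment only compiles inside a Palacios tree; the two header names are assumptions, and struct v3_passthrough_pg_event is treated as opaque because its fields are not shown in this patch.

/* Hypothetical consumer of the new registration API - illustrative only. */
#include <palacios/vmm.h>                /* assumed: core types and V3_* helpers */
#include <palacios/vmm_direct_paging.h>  /* assumed: callback API declarations */

struct pf_stats {
    unsigned long long events;
};

static struct pf_stats stats;

static int count_pf(struct guest_info *core,
                    struct v3_passthrough_pg_event *event,
                    void *priv_data)
{
    struct pf_stats *s = (struct pf_stats *)priv_data;

    (void)core;
    (void)event;    /* opaque here; the dispatcher ignores our return value */
    s->events++;
    return 0;
}

static int pf_stats_attach(struct v3_vm_info *vm)
{
    return v3_register_passthrough_paging_event_callback(vm, count_pf, &stats);
}

static int pf_stats_detach(struct v3_vm_info *vm)
{
    return v3_unregister_passthrough_paging_event_callback(vm, count_pf, &stats);
}

Because registration and unregistration take the write lock internally, a callback must not call them from inside a dispatch, which runs under the read lock; with a non-recursive reader-writer lock that would self-deadlock.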