static int have_passthrough_callbacks(struct guest_info *core)
{
+ // lock acquisition unnecessary
+ // caller will acquire the lock before *iterating* through the list
+ // so any race will be resolved then
return !list_empty(&(core->vm_info->passthrough_impl.event_callback_list));
}
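+ // note: dispatch runs the callbacks with the read lock held, so a
+ // callback must not try to register or unregister callbacks itself -
+ // those paths take the write lock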
static void dispatch_passthrough_event(struct guest_info *core, struct v3_passthrough_pg_event *event)
{
struct passthrough_event_callback *cb,*temp;
-
+
+ v3_read_lock(&(core->vm_info->passthrough_impl.event_callback_lock));
+
list_for_each_entry_safe(cb,
temp,
&(core->vm_info->passthrough_impl.event_callback_list),
node) {
cb->callback(core,event,cb->priv_data);
}
+
+ v3_read_unlock(&(core->vm_info->passthrough_impl.event_callback_lock));
+
}
struct nested_event_callback {
struct list_head node;
int (*callback)(struct guest_info *core, struct v3_nested_pg_event *event, void *priv_data);
void *priv_data;
};
static int have_nested_callbacks(struct guest_info *core)
{
+ // lock acquisition unnecessary
+ // caller will acquire the lock before *iterating* through the list
+ // so any race will be resolved then
return !list_empty(&(core->vm_info->nested_impl.event_callback_list));
}
static void dispatch_nested_event(struct guest_info *core, struct v3_nested_pg_event *event)
{
struct nested_event_callback *cb,*temp;
+ v3_read_lock(&(core->vm_info->nested_impl.event_callback_lock));
+
list_for_each_entry_safe(cb,
temp,
&(core->vm_info->nested_impl.event_callback_list),
node) {
cb->callback(core,event,cb->priv_data);
}
+
+ v3_read_unlock(&(core->vm_info->nested_impl.event_callback_lock));
}
void * page = 0;
void *temp;
- temp = V3_AllocPagesExtended(1, PAGE_SIZE_4KB, -1, 0); // no constraints
+ temp = V3_AllocPagesExtended(1, PAGE_SIZE_4KB, -1, 0, 0); // no constraints
if (!temp) {
PrintError(VM_NONE, VCORE_NONE,"Cannot allocate page\n");
int v3_init_passthrough_pts(struct guest_info * info) {
+ if (info->shdw_pg_mode == NESTED_PAGING && is_vmx_nested()) {
+ // skip - init_ept() will do this allocation
+ return 0;
+ }
info->direct_map_pt = (addr_t)V3_PAddr((void *)create_generic_pt_page(info));
return 0;
}
int v3_free_passthrough_pts(struct guest_info * core) {
v3_cpu_mode_t mode = v3_get_vm_cpu_mode(core);
+ if (core->shdw_pg_mode == NESTED_PAGING && is_vmx_nested()) {
+ // there are no passthrough page tables, but
+ // the EPT implementation is using direct_map_pt to store
+ // the EPT root table pointer... and the EPT tables
+ // are not compatible with regular x86 tables, so we
+ // must not attempt to free them here...
+ return 0;
+ }
+
+ // we are either in shadow or in SVM nested
+ // in either case, we can nuke the PTs
+
// Delete the old direct map page tables
switch(mode) {
case REAL:
case PROTECTED:
case PROTECTED_PAE:
case LONG:
case LONG_32_COMPAT:
// Long mode will only use 32PAE page tables...
- delete_page_tables_32pae((pdpe32pae_t *)V3_VAddr((void *)(core->direct_map_pt)));
+ if (core->direct_map_pt) {
+ delete_page_tables_32pae((pdpe32pae_t *)V3_VAddr((void *)(core->direct_map_pt)));
+ }
break;
default:
PrintError(core->vm_info, core, "Unknown CPU Mode\n");
int v3_init_passthrough_paging(struct v3_vm_info *vm)
{
INIT_LIST_HEAD(&(vm->passthrough_impl.event_callback_list));
+ v3_rw_lock_init(&(vm->passthrough_impl.event_callback_lock));
return 0;
}
int v3_deinit_passthrough_paging(struct v3_vm_info *vm)
{
struct passthrough_event_callback *cb,*temp;
+ addr_t flags;
+
+ flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));
list_for_each_entry_safe(cb,
temp,
&(vm->passthrough_impl.event_callback_list),
node) {
list_del(&(cb->node));
V3_Free(cb);
}
+
+ v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
+
+ v3_rw_lock_deinit(&(vm->passthrough_impl.event_callback_lock));
return 0;
}
}
+int v3_register_passthrough_paging_event_callback(struct v3_vm_info *vm,
+ int (*callback)(struct guest_info *core,
+ struct v3_passthrough_pg_event *,
+ void *priv_data),
+ void *priv_data)
+{
+ struct passthrough_event_callback *ec = V3_Malloc(sizeof(struct passthrough_event_callback));
+ addr_t flags;
+
+ if (!ec) {
+ PrintError(vm, VCORE_NONE, "Unable to allocate for a passthrough paging event callback\n");
+ return -1;
+ }
+
+ ec->callback = callback;
+ ec->priv_data = priv_data;
+
+ flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));
+ list_add(&(ec->node),&(vm->passthrough_impl.event_callback_list));
+ v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
+
+ return 0;
+
+}
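+
+ /* Usage sketch (illustrative only, not part of this file): a client
+ * registers a callback once and later removes it by presenting the
+ * same (callback, priv_data) pair, which is what the unregister path
+ * below matches on. The names my_pt_event and my_state are hypothetical.
+ *
+ *   static int my_pt_event(struct guest_info *core,
+ *                          struct v3_passthrough_pg_event *event,
+ *                          void *priv_data)
+ *   {
+ *       // inspect the event; return 0 on success, -1 on error
+ *       return 0;
+ *   }
+ *
+ *   v3_register_passthrough_paging_event_callback(vm, my_pt_event, &my_state);
+ *   ...
+ *   v3_unregister_passthrough_paging_event_callback(vm, my_pt_event, &my_state);
+ */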
+
+
+
+int v3_unregister_passthrough_paging_event_callback(struct v3_vm_info *vm,
+ int (*callback)(struct guest_info *core,
+ struct v3_passthrough_pg_event *,
+ void *priv_data),
+ void *priv_data)
+{
+ struct passthrough_event_callback *cb,*temp;
+ addr_t flags;
+
+ flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));
+
+ list_for_each_entry_safe(cb,
+ temp,
+ &(vm->passthrough_impl.event_callback_list),
+ node) {
+ if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
+ list_del(&(cb->node));
+ V3_Free(cb);
+ v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
+ return 0;
+ }
+ }
+
+ v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
+
+ PrintError(vm, VCORE_NONE, "No callback found!\n");
+
+ return -1;
+}
+
+
// inline nested paging support for Intel and AMD
#include "svm_npt.h"
#include "vmx_npt.h"
int v3_init_nested_paging(struct v3_vm_info *vm)
{
INIT_LIST_HEAD(&(vm->nested_impl.event_callback_list));
+ v3_rw_lock_init(&(vm->nested_impl.event_callback_lock));
return 0;
}
return init_ept(core, (struct vmx_hw_info *) hwinfo);
} else {
// no initialization for SVM
+ // the direct map page tables are used since the
+ // nested pt format is identical to the main pt format
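+ // (for example, the SVM side can hand core->direct_map_pt directly
+ // to the hardware as its nested CR3 (N_CR3) root)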
return 0;
}
}
int v3_deinit_nested_paging(struct v3_vm_info *vm)
{
struct nested_event_callback *cb,*temp;
+ addr_t flags;
+ flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));
+
list_for_each_entry_safe(cb,
temp,
&(vm->nested_impl.event_callback_list),
V3_Free(cb);
}
+ v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
+
+ v3_rw_lock_deinit(&(vm->nested_impl.event_callback_lock));
+
return 0;
}
int v3_deinit_nested_paging_core(struct guest_info *core)
{
- // nothing to do.. probably dealloc? FIXME PAD
- return 0;
+ if (core->shdw_pg_mode == NESTED_PAGING) {
+ if (is_vmx_nested()) {
+ return deinit_ept(core);
+ } else {
+ // SVM nested deinit is handled by the passthrough paging teardown
+ return 0;
+ }
+ } else {
+ // not relevant
+ return 0;
+ }
+}
+
+
+int v3_register_nested_paging_event_callback(struct v3_vm_info *vm,
+ int (*callback)(struct guest_info *core,
+ struct v3_nested_pg_event *,
+ void *priv_data),
+ void *priv_data)
+{
+ struct nested_event_callback *ec = V3_Malloc(sizeof(struct nested_event_callback));
+ addr_t flags;
+
+ if (!ec) {
+ PrintError(vm, VCORE_NONE, "Unable to allocate for a nested paging event callback\n");
+ return -1;
+ }
+
+ ec->callback = callback;
+ ec->priv_data = priv_data;
+
+ flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));
+ list_add(&(ec->node),&(vm->nested_impl.event_callback_list));
+ v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
+
+ return 0;
+
+}
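+
+ /* The nested-paging variant mirrors the passthrough API above: the
+ * same (callback, priv_data) pair registered here is the key the
+ * unregister path below matches on, and callbacks receive a struct
+ * v3_nested_pg_event instead. */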
+
+
+
+int v3_unregister_nested_paging_event_callback(struct v3_vm_info *vm,
+ int (*callback)(struct guest_info *core,
+ struct v3_nested_pg_event *,
+ void *priv_data),
+ void *priv_data)
+{
+ struct nested_event_callback *cb,*temp;
+ addr_t flags;
+
+ flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));
+
+ list_for_each_entry_safe(cb,
+ temp,
+ &(vm->nested_impl.event_callback_list),
+ node) {
+ if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
+ list_del(&(cb->node));
+ V3_Free(cb);
+ v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
+ return 0;
+ }
+ }
+
+ v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
+
+ PrintError(vm, VCORE_NONE, "No callback found!\n");
+
+ return -1;
}