static int have_passthrough_callbacks(struct guest_info *core)
{
+ // lock acquistion unnecessary
+ // caller will acquire the lock before *iterating* through the list
+ // so any race will be resolved then
return !list_empty(&(core->vm_info->passthrough_impl.event_callback_list));
}
static void dispatch_passthrough_event(struct guest_info *core, struct v3_passthrough_pg_event *event)
{
    struct passthrough_event_callback *entry, *next;

    // Take the lock as a reader so that concurrent registration or
    // unregistration (writers) cannot mutate the list while we walk it.
    v3_read_lock(&(core->vm_info->passthrough_impl.event_callback_lock));

    list_for_each_entry_safe(entry,
                             next,
                             &(core->vm_info->passthrough_impl.event_callback_list),
                             node) {
        // Invoke each registered passthrough-paging event callback.
        entry->callback(core, event, entry->priv_data);
    }

    v3_read_unlock(&(core->vm_info->passthrough_impl.event_callback_lock));
}
struct nested_event_callback {
static int have_nested_callbacks(struct guest_info *core)
{
+ // lock acquistion unnecessary
+ // caller will acquire the lock before *iterating* through the list
+ // so any race will be resolved then
return !list_empty(&(core->vm_info->nested_impl.event_callback_list));
}
{
// Body of the nested-paging event dispatcher (the signature line is not
// visible in this hunk — presumably dispatch_nested_event; confirm in the
// full file). Walks the callback list under a read lock and invokes each
// registered callback.
struct nested_event_callback *cb,*temp;
+ v3_read_lock(&(core->vm_info->nested_impl.event_callback_lock));
+
list_for_each_entry_safe(cb,
temp,
&(core->vm_info->nested_impl.event_callback_list),
node) {
cb->callback(core,event,cb->priv_data);
}
+
+ v3_read_unlock(&(core->vm_info->nested_impl.event_callback_lock));
}
int v3_init_passthrough_paging(struct v3_vm_info *vm)
{
    // Prepare the per-VM passthrough paging callback registry: a rw-lock
    // plus the (initially empty) list of event callbacks it guards.
    v3_rw_lock_init(&(vm->passthrough_impl.event_callback_lock));
    INIT_LIST_HEAD(&(vm->passthrough_impl.event_callback_list));

    return 0; // always succeeds
}
// Tear down passthrough paging state: free every remaining callback
// registration under the write lock, then destroy the lock. Returns 0.
int v3_deinit_passthrough_paging(struct v3_vm_info *vm)
{
struct passthrough_event_callback *cb,*temp;
+ addr_t flags;
+
+ flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));
// NOTE(review): the list-head and "node) {" arguments of this iteration
// appear to be elided by the diff context — verify against the full file.
list_for_each_entry_safe(cb,
temp,
list_del(&(cb->node));
V3_Free(cb);
}
+
+ v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
+
+ v3_rw_lock_deinit(&(vm->passthrough_impl.event_callback_lock));
return 0;
}
// Tail of the passthrough callback registration function (the signature is
// truncated above this hunk): allocate a record for (callback, priv_data)
// and append it to the list under the write lock. Returns 0 on success.
void *priv_data)
{
struct passthrough_event_callback *ec = V3_Malloc(sizeof(struct passthrough_event_callback));
+ addr_t flags;
if (!ec) {
// NOTE(review): message says "nested paging" but this registers a
// *passthrough* paging callback (see the list_add target below) —
// looks like a copy/paste error in the string.
PrintError(vm, VCORE_NONE, "Unable to allocate for a nested paging event callback\n");
// NOTE(review): an early "return -1;" and the closing brace of this if
// appear to be elided by the diff context — confirm in the full file.
ec->callback = callback;
ec->priv_data = priv_data;
+ flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));
list_add(&(ec->node),&(vm->passthrough_impl.event_callback_list));
+ v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
return 0;
// Tail of the passthrough callback unregistration function (signature
// truncated above): find the matching (callback, priv_data) pair under the
// write lock, unlink and free it. Returns 0 on success, -1 if not found.
void *priv_data)
{
struct passthrough_event_callback *cb,*temp;
-
+ addr_t flags;
+
+ flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));
+
list_for_each_entry_safe(cb,
temp,
&(vm->passthrough_impl.event_callback_list),
// NOTE(review): the "node) {" argument line appears to be elided by the
// diff context here — verify against the full file.
if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
list_del(&(cb->node));
V3_Free(cb);
// cb is already unlinked and freed, so releasing the lock now is safe.
+ v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
return 0;
}
}
+ v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
+
PrintError(vm, VCORE_NONE, "No callback found!\n");
return -1;
int v3_init_nested_paging(struct v3_vm_info *vm)
{
    // Prepare the per-VM nested paging callback registry: a rw-lock plus
    // the (initially empty) list of event callbacks it guards.
    v3_rw_lock_init(&(vm->nested_impl.event_callback_lock));
    INIT_LIST_HEAD(&(vm->nested_impl.event_callback_list));

    return 0; // always succeeds
}
// Tear down nested paging state: free every remaining callback
// registration under the write lock, then destroy the lock. Returns 0.
int v3_deinit_nested_paging(struct v3_vm_info *vm)
{
struct nested_event_callback *cb,*temp;
+ addr_t flags;
+ flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));
+
list_for_each_entry_safe(cb,
temp,
&(vm->nested_impl.event_callback_list),
// NOTE(review): the "node) {" line and a list_del(&(cb->node)) appear to
// be elided by the diff context here — verify against the full file.
V3_Free(cb);
}
+ v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
+
+ v3_rw_lock_deinit(&(vm->nested_impl.event_callback_lock));
+
return 0;
}
// Tail of the nested callback registration function (the signature is
// truncated above this hunk): allocate a record for (callback, priv_data)
// and append it to the list under the write lock. Returns 0 on success.
void *priv_data)
{
struct nested_event_callback *ec = V3_Malloc(sizeof(struct nested_event_callback));
+ addr_t flags;
if (!ec) {
PrintError(vm, VCORE_NONE, "Unable to allocate for a nested paging event callback\n");
// NOTE(review): an early "return -1;" and the closing brace of this if
// appear to be elided by the diff context — confirm in the full file.
ec->callback = callback;
ec->priv_data = priv_data;
+ flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));
list_add(&(ec->node),&(vm->nested_impl.event_callback_list));
+ v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
return 0;
// Tail of the nested callback unregistration function (signature truncated
// above): find the matching (callback, priv_data) pair under the write
// lock, unlink and free it. Returns 0 on success, -1 if not found.
void *priv_data)
{
struct nested_event_callback *cb,*temp;
+ addr_t flags;
+
+ flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));
list_for_each_entry_safe(cb,
temp,
// NOTE(review): the list-head and "node) {" argument lines appear to be
// elided by the diff context here — verify against the full file.
if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
list_del(&(cb->node));
V3_Free(cb);
// cb is already unlinked and freed, so releasing the lock now is safe.
+ v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
return 0;
}
}
+ v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
+
PrintError(vm, VCORE_NONE, "No callback found!\n");
return -1;
static int have_callbacks(struct guest_info *core)
{
+ // lock acquistion unnecessary
+ // caller will acquire the lock before *iterating* through the list
+ // so any race will be resolved then
return !list_empty(&(core->vm_info->shdw_impl.event_callback_list));
}
static void dispatch_event(struct guest_info *core, struct v3_shdw_pg_event *event)
{
    struct event_callback *entry, *next;

    // Take the lock as a reader so that concurrent registration or
    // unregistration (writers) cannot mutate the list while we walk it.
    v3_read_lock(&(core->vm_info->shdw_impl.event_callback_lock));

    list_for_each_entry_safe(entry,
                             next,
                             &(core->vm_info->shdw_impl.event_callback_list),
                             node) {
        // Invoke each registered shadow-paging event callback.
        entry->callback(core, event, entry->priv_data);
    }

    v3_read_unlock(&(core->vm_info->shdw_impl.event_callback_lock));
}
}
INIT_LIST_HEAD(&(impl_state->event_callback_list));
+ v3_rw_lock_init(&(impl_state->event_callback_lock));
impl_state->current_impl = impl;
// Tear down shadow paging implementation state: returns -1 if shadow
// paging was never set up, otherwise frees every remaining callback
// registration under the write lock, destroys the lock, and returns 0.
int v3_deinit_shdw_impl(struct v3_vm_info * vm) {
struct v3_shdw_pg_impl * impl = vm->shdw_impl.current_impl;
struct event_callback *cb,*temp;
+ addr_t flags;
if (impl == NULL) {
// Shadow paging not implemented
return -1;
}
+ flags=v3_write_lock_irqsave(&(vm->shdw_impl.event_callback_lock));
+
list_for_each_entry_safe(cb,
temp,
&(vm->shdw_impl.event_callback_list),
// NOTE(review): the "node) {" line and a list_del(&(cb->node)) appear to
// be elided by the diff context here — verify against the full file.
V3_Free(cb);
}
+ v3_write_unlock_irqrestore(&(vm->shdw_impl.event_callback_lock),flags);
+
+ v3_rw_lock_deinit(&(vm->shdw_impl.event_callback_lock));
+
return 0;
}
// Tail of the shadow-paging callback registration function (the signature
// is truncated above this hunk): allocate a record for (callback,
// priv_data) and append it to the list under the write lock. Returns 0 on
// success.
void *priv_data)
{
struct event_callback *ec = V3_Malloc(sizeof(struct event_callback));
+ addr_t flags;
if (!ec) {
PrintError(vm, VCORE_NONE, "Unable to allocate for a shadow paging event callback\n");
// NOTE(review): an early "return -1;" and the closing brace of this if
// appear to be elided by the diff context — confirm in the full file.
ec->callback = callback;
ec->priv_data = priv_data;
+ flags=v3_write_lock_irqsave(&(vm->shdw_impl.event_callback_lock));
list_add(&(ec->node),&(vm->shdw_impl.event_callback_list));
+ v3_write_unlock_irqrestore(&(vm->shdw_impl.event_callback_lock),flags);
return 0;
// Tail of the shadow-paging callback unregistration function (signature
// truncated above): find the matching (callback, priv_data) pair under the
// write lock, unlink and free it. Returns 0 on success, -1 if not found.
void *priv_data)
{
struct event_callback *cb,*temp;
+ addr_t flags;
+
+ flags=v3_write_lock_irqsave(&(vm->shdw_impl.event_callback_lock));
list_for_each_entry_safe(cb,
temp,
// NOTE(review): the list-head argument line appears to be elided by the
// diff context here — verify against the full file.
node) {
if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
list_del(&(cb->node));
// Unlock before freeing (safe: cb is already unlinked). Note this is
// the reverse order of the passthrough/nested variants — harmless, but
// inconsistent across the three copies of this routine.
+ v3_write_unlock_irqrestore(&(vm->shdw_impl.event_callback_lock),flags);
V3_Free(cb);
return 0;
}
}
+ v3_write_unlock_irqrestore(&(vm->shdw_impl.event_callback_lock),flags);
+
PrintError(vm, VCORE_NONE, "No callback found!\n");
return -1;