static int have_callbacks(struct guest_info *core)
{
+ // lock acquisition unnecessary
+ // caller will acquire the lock before *iterating* through the list
+ // so any race will be resolved then
return !list_empty(&(core->vm_info->shdw_impl.event_callback_list));
}
static void dispatch_event(struct guest_info *core, struct v3_shdw_pg_event *event)
{
struct event_callback *cb,*temp;
-
+
+ v3_read_lock(&(core->vm_info->shdw_impl.event_callback_lock));
+
list_for_each_entry_safe(cb,
temp,
&(core->vm_info->shdw_impl.event_callback_list),
node) {
cb->callback(core,event,cb->priv_data);
}
+
+ v3_read_unlock(&(core->vm_info->shdw_impl.event_callback_lock));
}
while (tmp_impl != __stop__v3_shdw_pg_impls) {
- V3_Print("Registering Shadow Paging Impl (%s)\n", (*tmp_impl)->name);
+ V3_Print(VM_NONE, VCORE_NONE, "Registering Shadow Paging Impl (%s)\n", (*tmp_impl)->name);
if (v3_htable_search(master_shdw_pg_table, (addr_t)((*tmp_impl)->name))) {
- PrintError("Multiple instances of shadow paging impl (%s)\n", (*tmp_impl)->name);
+ PrintError(VM_NONE, VCORE_NONE, "Multiple instances of shadow paging impl (%s)\n", (*tmp_impl)->name);
return -1;
}
if (v3_htable_insert(master_shdw_pg_table,
(addr_t)((*tmp_impl)->name),
(addr_t)(*tmp_impl)) == 0) {
- PrintError("Could not register shadow paging impl (%s)\n", (*tmp_impl)->name);
+ PrintError(VM_NONE, VCORE_NONE, "Could not register shadow paging impl (%s)\n", (*tmp_impl)->name);
return -1;
}
for (i = 0; i < vm->num_cores; i++) {
struct guest_info * core = &(vm->cores[i]);
- V3_Print("%s Guest Page faults: %d\n", hdr, core->shdw_pg_state.guest_faults);
+ V3_Print(vm, core, "%s Guest Page faults: %d\n", hdr, core->shdw_pg_state.guest_faults);
}
}
#endif
state->guest_efer.value = 0x0LL;
if (impl->local_init(core) == -1) {
- PrintError("Error in Shadow paging local initialization (%s)\n", impl->name);
+ PrintError(core->vm_info, core, "Error in Shadow paging local initialization (%s)\n", impl->name);
return -1;
}
int v3_deinit_shdw_pg_state(struct guest_info * core) {
- struct v3_shdw_pg_impl * impl = core->vm_info->shdw_impl.current_impl;
+ struct v3_shdw_pg_impl * impl = NULL;
+
+ if (!core || !core->vm_info) {
+ return -1;
+ }
+
+ impl = core->vm_info->shdw_impl.current_impl;
- if (impl->local_deinit(core) == -1) {
- PrintError("Error deinitializing shadow paging state\n");
+ if (impl && impl->local_deinit(core) == -1) {
+ PrintError(core->vm_info, core, "Error deinitializing shadow paging state\n");
return -1;
}
char * pg_strat = v3_cfg_val(pg_cfg, "strategy");
struct v3_shdw_pg_impl * impl = NULL;
- PrintDebug("Checking if shadow paging requested.\n");
- if ((pg_mode != NULL) && (strcasecmp(pg_mode, "nested") == 0)) {
- PrintDebug("Nested paging specified - not initializing shadow paging.\n");
- return 0;
+ PrintDebug(vm, VCORE_NONE, "Checking if shadow paging requested.\n");
+ if (pg_mode == NULL) {
+ V3_Print(vm, VCORE_NONE, "No paging mode specified, assuming shadow with defaults\n");
+ pg_mode = "shadow";
+ } else {
+ if (strcasecmp(pg_mode, "nested") == 0) {
+ // this check is repeated here (compare to vmm_config's determine paging mode) since
+ // shadow paging initialization *precedes* per-core pre-config.
+ extern v3_cpu_arch_t v3_mach_type;
+ if ((v3_mach_type == V3_SVM_REV3_CPU) ||
+ (v3_mach_type == V3_VMX_EPT_CPU) ||
+ (v3_mach_type == V3_VMX_EPT_UG_CPU)) {
+ PrintDebug(vm, VCORE_NONE, "Nested paging specified on machine that supports it - not initializing shadow paging\n");
+ return 0;
+ } else {
+ V3_Print(vm, VCORE_NONE, "Nested paging specified but machine does not support it - falling back to shadow paging with defaults\n");
+ pg_mode = "shadow";
+ }
+ } else if (strcasecmp(pg_mode, "shadow") != 0) {
+ V3_Print(vm, VCORE_NONE, "Unknown paging mode '%s' specified - falling back to shadow paging with defaults\n",pg_mode);
+ pg_mode = "shadow";
+ }
}
if (pg_strat == NULL) {
pg_strat = (char *)default_strategy;
}
- V3_Print("Initialization of Shadow Paging implementation\n");
+ V3_Print(vm, VCORE_NONE,"Initialization of Shadow Paging implementation\n");
impl = (struct v3_shdw_pg_impl *)v3_htable_search(master_shdw_pg_table, (addr_t)pg_strat);
if (impl == NULL) {
- PrintError("Could not find shadow paging impl (%s)\n", pg_strat);
+ PrintError(vm, VCORE_NONE, "Could not find shadow paging impl (%s)\n", pg_strat);
return -1;
}
INIT_LIST_HEAD(&(impl_state->event_callback_list));
+ v3_rw_lock_init(&(impl_state->event_callback_lock));
impl_state->current_impl = impl;
if (impl->init(vm, pg_cfg) == -1) {
- PrintError("Could not initialize Shadow paging implemenation (%s)\n", impl->name);
+ PrintError(vm, VCORE_NONE, "Could not initialize Shadow paging implemenation (%s)\n", impl->name);
return -1;
}
int v3_deinit_shdw_impl(struct v3_vm_info * vm) {
struct v3_shdw_pg_impl * impl = vm->shdw_impl.current_impl;
struct event_callback *cb,*temp;
+ addr_t flags;
if (impl == NULL) {
// Shadow paging not implemented
}
if (impl->deinit(vm) == -1) {
- PrintError("Error deinitializing shadow paging implementation\n");
+ PrintError(vm, VCORE_NONE,"Error deinitializing shadow paging implementation\n");
return -1;
}
+ flags=v3_write_lock_irqsave(&(vm->shdw_impl.event_callback_lock));
+
list_for_each_entry_safe(cb,
temp,
&(vm->shdw_impl.event_callback_list),
V3_Free(cb);
}
+ v3_write_unlock_irqrestore(&(vm->shdw_impl.event_callback_lock),flags);
+
+ v3_rw_lock_deinit(&(vm->shdw_impl.event_callback_lock));
+
return 0;
}
if (v3_get_vm_mem_mode(core) == PHYSICAL_MEM) {
// If paging is not turned on we need to handle the special cases
- rc = v3_handle_passthrough_pagefault(core, fault_addr, error_code);
+ rc = v3_handle_passthrough_pagefault(core, fault_addr, error_code,NULL,NULL);
} else if (v3_get_vm_mem_mode(core) == VIRTUAL_MEM) {
struct v3_shdw_impl_state * state = &(core->vm_info->shdw_impl);
struct v3_shdw_pg_impl * impl = state->current_impl;
rc = impl->handle_pagefault(core, fault_addr, error_code);
} else {
- PrintError("Invalid Memory mode\n");
+ PrintError(core->vm_info, core, "Invalid Memory mode\n");
rc = -1;
}
if (v3_get_vm_mem_mode(core) != VIRTUAL_MEM) {
// Paging must be turned on...
// should handle with some sort of fault I think
- PrintError("ERROR: INVLPG called in non paged mode\n");
+ PrintError(core->vm_info, core, "ERROR: INVLPG called in non paged mode\n");
return -1;
}
}
if (ret == -1) {
- PrintError("Could not read instruction into buffer\n");
+ PrintError(core->vm_info, core, "Could not read instruction into buffer\n");
return -1;
}
if (v3_decode(core, (addr_t)instr, &dec_instr) == -1) {
- PrintError("Decoding Error\n");
+ PrintError(core->vm_info, core, "Decoding Error\n");
return -1;
}
if ((dec_instr.op_type != V3_OP_INVLPG) ||
(dec_instr.num_operands != 1) ||
(dec_instr.dst_operand.type != MEM_OPERAND)) {
- PrintError("Decoder Error: Not a valid INVLPG instruction...\n");
+ PrintError(core->vm_info, core, "Decoder Error: Not a valid INVLPG instruction...\n");
return -1;
}
void *priv_data)
{
struct event_callback *ec = V3_Malloc(sizeof(struct event_callback));
+ addr_t flags;
if (!ec) {
- PrintError("Unable to allocate for a shadow paging event callback\n");
+ PrintError(vm, VCORE_NONE, "Unable to allocate for a shadow paging event callback\n");
return -1;
}
ec->callback = callback;
ec->priv_data = priv_data;
+ flags=v3_write_lock_irqsave(&(vm->shdw_impl.event_callback_lock));
list_add(&(ec->node),&(vm->shdw_impl.event_callback_list));
+ v3_write_unlock_irqrestore(&(vm->shdw_impl.event_callback_lock),flags);
return 0;
void *priv_data)
{
struct event_callback *cb,*temp;
+ addr_t flags;
+
+ flags=v3_write_lock_irqsave(&(vm->shdw_impl.event_callback_lock));
list_for_each_entry_safe(cb,
temp,
node) {
if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
list_del(&(cb->node));
+ v3_write_unlock_irqrestore(&(vm->shdw_impl.event_callback_lock),flags);
V3_Free(cb);
return 0;
}
}
- PrintError("No callback found!\n");
+ v3_write_unlock_irqrestore(&(vm->shdw_impl.event_callback_lock),flags);
+
+ PrintError(vm, VCORE_NONE, "No callback found!\n");
return -1;
}