#include <palacios/vmm_paging.h>
int v3_init_passthrough_pts(struct guest_info * guest_info);
+int v3_free_passthrough_pts(struct guest_info * core);
+
int v3_reset_passthrough_pts(struct guest_info * guest_info);
int v3_handle_passthrough_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
int (*init)(struct v3_vm_info * vm, v3_cfg_tree_t * cfg);
int (*deinit)(struct v3_vm_info * vm);
int (*local_init)(struct guest_info * core);
+ int (*local_deinit)(struct guest_info * core);
int (*handle_pagefault)(struct guest_info * core, addr_t fault_addr, pf_error_t error_code);
int (*handle_invlpg)(struct guest_info * core, addr_t vaddr);
int (*activate_shdw_pt)(struct guest_info * core);
int v3_init_shdw_impl(struct v3_vm_info * vm);
-int v3_init_shdw_pg_state(struct guest_info * info);
+int v3_deinit_shdw_impl(struct v3_vm_info * vm);
+
+int v3_init_shdw_pg_state(struct guest_info * core);
+int v3_deinit_shdw_pg_state(struct guest_info * core);
/* Handler implementations */
}
static int vtlb_deinit(struct v3_vm_info * vm) {
- return -1;
+ // VTLB keeps no VM-wide state (everything is per-core and released by
+ // vtlb_local_deinit), so VM-level teardown is a successful no-op rather
+ // than an unconditional failure.
+ return 0;
}
static int vtlb_local_init(struct guest_info * core) {
V3_Print("VTLB local initialization\n");
-
+ // NOTE(review): the V3_Malloc result is used without a NULL check —
+ // confirm the allocator cannot fail (or panics on failure) in this build.
vtlb_state = (struct vtlb_local_state *)V3_Malloc(sizeof(struct vtlb_local_state));
INIT_LIST_HEAD(&(vtlb_state->page_list));
}
+// Per-core VTLB teardown, the counterpart of vtlb_local_init: walk the
+// core's shadow page list with the deletion-safe iterator, returning each
+// shadow page frame and its tracking struct, then free the per-core state.
+// Always returns 0.
+static int vtlb_local_deinit(struct guest_info * core) {
+ struct v3_shdw_pg_state * state = &(core->shdw_pg_state);
+ struct vtlb_local_state * vtlb_state = state->local_impl_data;
+
+ struct shadow_page_data * shdw_pg = NULL;
+ struct shadow_page_data * tmp = NULL;
+
+ // free page list...
+ list_for_each_entry_safe(shdw_pg, tmp, &(vtlb_state->page_list), page_list_node) {
+ list_del(&(shdw_pg->page_list_node));
+ V3_FreePages((void *)shdw_pg->page_pa, 1);
+ V3_Free(shdw_pg);
+ }
+
+
+ // NOTE(review): state->local_impl_data is left pointing at freed memory —
+ // confirm no caller touches it after this, or clear it here.
+ V3_Free(vtlb_state);
+
+ return 0;
+}
+
+
static int vtlb_activate_shdw_pt(struct guest_info * core) {
switch (v3_get_vm_cpu_mode(core)) {
.init = vtlb_init,
.deinit = vtlb_deinit,
.local_init = vtlb_local_init,
+ .local_deinit = vtlb_local_deinit,
.handle_pagefault = vtlb_handle_pf,
.handle_invlpg = vtlb_handle_invlpg,
.activate_shdw_pt = vtlb_activate_shdw_pt,
#include <palacios/vmm_sprintf.h>
#include <palacios/vmm_muxer.h>
#include <palacios/vmm_xed.h>
+#include <palacios/vmm_direct_paging.h>
v3_cpu_mode_t v3_get_vm_cpu_mode(struct guest_info * info) {
v3_deinit_intr_controllers(core);
v3_deinit_time_core(core);
+ if (core->shdw_pg_mode == SHADOW_PAGING) {
+ v3_deinit_shdw_pg_state(core);
+ }
+
+ v3_free_passthrough_pts(core);
+
switch (cpu_type) {
#ifdef CONFIG_SVM
case V3_SVM_CPU:
return 0;
}
-int v3_reset_passthrough_pts(struct guest_info * info) {
- v3_cpu_mode_t mode = v3_get_vm_cpu_mode(info);
+
+// Free this core's passthrough (direct-map) page tables.  The current CPU
+// mode selects the table format to walk: REAL/PROTECTED use 32-bit tables,
+// while PAE and both long modes share the 32-bit PAE layout (see comment
+// below).  Returns 0 on success, -1 for an unrecognized CPU mode.
+int v3_free_passthrough_pts(struct guest_info * core) {
+ v3_cpu_mode_t mode = v3_get_vm_cpu_mode(core);
// Delete the old direct map page tables
switch(mode) {
case REAL:
case PROTECTED:
- delete_page_tables_32((pde32_t *)V3_VAddr((void *)(info->direct_map_pt)));
+ delete_page_tables_32((pde32_t *)V3_VAddr((void *)(core->direct_map_pt)));
break;
case PROTECTED_PAE:
case LONG:
case LONG_32_COMPAT:
// Long mode will only use 32PAE page tables...
- delete_page_tables_32pae((pdpe32pae_t *)V3_VAddr((void *)(info->direct_map_pt)));
+ delete_page_tables_32pae((pdpe32pae_t *)V3_VAddr((void *)(core->direct_map_pt)));
break;
default:
PrintError("Unknown CPU Mode\n");
+ // Nothing was freed in this case; report the failure to the caller.
+ return -1;
break;
}
-
+
+ return 0;
+}
+
+
+// Rebuild the core's passthrough page tables from scratch: free the current
+// direct-map tables, then re-run initialization.
+// NOTE(review): the return value of v3_free_passthrough_pts() is ignored, so
+// an unknown CPU mode still falls through to re-initialization — confirm
+// that is intended.
+int v3_reset_passthrough_pts(struct guest_info * core) {
+
+ v3_free_passthrough_pts(core);
+
// create new direct map page table
- v3_init_passthrough_pts(info);
+ v3_init_passthrough_pts(core);
return 0;
}
+
int v3_activate_passthrough_pt(struct guest_info * info) {
// For now... But we need to change this....
// As soon as shadow paging becomes active the passthrough tables are hosed
}
+// Per-core shadow-paging teardown: delegate to the active implementation's
+// local_deinit hook, then (when shadow-paging telemetry is compiled in)
+// unregister the telemetry callback for this VM.
+// Returns 0 on success, -1 if the implementation hook fails.
+int v3_deinit_shdw_pg_state(struct guest_info * core) {
+ struct v3_shdw_pg_impl * impl = core->vm_info->shdw_impl.current_impl;
+
+ if (impl->local_deinit(core) == -1) {
+ PrintError("Error deinitializing shadow paging state\n");
+ return -1;
+ }
+
+#ifdef CONFIG_SHADOW_PAGING_TELEMETRY
+ v3_remove_telemetry_cb(core->vm_info, telemetry_cb, NULL);
+#endif
+
+ return 0;
+}
+
+
int v3_init_shdw_impl(struct v3_vm_info * vm) {
struct v3_shdw_impl_state * impl_state = &(vm->shdw_impl);
return -1;
}
-
+ return 0;
+}
+
+// VM-wide shadow-paging teardown, the counterpart of v3_init_shdw_impl:
+// invoke the current implementation's deinit hook.
+// Returns 0 on success, -1 if the hook fails.
+int v3_deinit_shdw_impl(struct v3_vm_info * vm) {
+ struct v3_shdw_pg_impl * impl = vm->shdw_impl.current_impl;
+ if (impl->deinit(vm) == -1) {
+ PrintError("Error deinitializing shadow paging implementation\n");
+ return -1;
+ }
+ return 0;
+}
/* Yield until that host time is reached */
host_time = v3_get_host_time(time_state);
+
while (host_time < target_host_time) {
v3_yield(info);
host_time = v3_get_host_time(time_state);
time_state->guest_host_offset = (sint64_t)guest_time - (sint64_t)host_time;
}
+
return 0;
}
* ra/c/dx here since they're modified by this instruction anyway. */
info->vm_regs.rcx = TSC_AUX_MSR;
ret = v3_handle_msr_read(info);
- if (ret) return ret;
+
+ if (ret != 0) {
+ return ret;
+ }
+
info->vm_regs.rcx = info->vm_regs.rax;
/* Now do the TSC half of the instruction */
ret = v3_rdtsc(info);
- if (ret) return ret;
-
+
+ if (ret != 0) {
+ return ret;
+ }
+
return 0;
}
struct vm_time * time_state = &(info->time_state);
V3_ASSERT(msr_num == TSC_AUX_MSR);
+
msr_val->lo = time_state->tsc_aux.lo;
msr_val->hi = time_state->tsc_aux.hi;
struct vm_time * time_state = &(info->time_state);
V3_ASSERT(msr_num == TSC_AUX_MSR);
+
time_state->tsc_aux.lo = msr_val.lo;
time_state->tsc_aux.hi = msr_val.hi;
uint64_t time = v3_get_guest_tsc(&info->time_state);
V3_ASSERT(msr_num == TSC_MSR);
+
msr_val->hi = time >> 32;
msr_val->lo = time & 0xffffffffLL;
struct v3_msr msr_val, void *priv) {
struct vm_time * time_state = &(info->time_state);
uint64_t guest_time, new_tsc;
+
V3_ASSERT(msr_num == TSC_MSR);
+
new_tsc = (((uint64_t)msr_val.hi) << 32) | (uint64_t)msr_val.lo;
guest_time = v3_get_guest_time(time_state);
time_state->tsc_guest_offset = (sint64_t)new_tsc - (sint64_t)guest_time;