struct v3_bitmap modified_pages;
};
-static int paging_callback(struct guest_info *core,
- struct v3_shdw_pg_event *event,
- void *priv_data)
+static int shadow_paging_callback(struct guest_info *core,
+ struct v3_shdw_pg_event *event,
+ void *priv_data)
{
struct mem_migration_state *m = (struct mem_migration_state *)priv_data;
if (event->event_type==SHADOW_PAGEFAULT &&
event->event_order==SHADOW_PREIMPL &&
- event->error_code.write) {
+ event->error_code.write) { // Note, assumes VTLB behavior where we will see the write even if preceded by a read
addr_t gpa;
if (!v3_gva_to_gpa(core,event->gva,&gpa)) {
// write to this page
return 0;
}
-
+
+
+/*
+static int nested_paging_callback(struct guest_info *core,
+ struct v3_nested_pg_event *event,
+ void *priv_data)
+{
+ struct mem_migration_state *m = (struct mem_migration_state *)priv_data;
+
+ if (event->event_type==NESTED_PAGEFAULT &&
+ event->event_order==NESTED_PREIMPL &&
+ event->error_code.write) { // Assumes we will see a write after reads
+ if (event->gpa<core->vm_info->mem_size) {
+ v3_bitmap_set(&(m->modified_pages),(event->gpa)>>12);
+ } else {
+ // no worries, this isn't physical memory
+ }
+ } else {
+ // we don't care about other events
+ }
+
+ return 0;
+}
+*/
static struct mem_migration_state *start_page_tracking(struct v3_vm_info *vm)
V3_Free(m);
}
- v3_register_shadow_paging_event_callback(vm,paging_callback,m);
+ // We assume that the migrator has already verified that all cores are
+ // using the identical model (shadow or nested)
+ // This must not change over the execution of the migration
- for (i=0;i<vm->num_cores;i++) {
+ if (vm->cores[0].shdw_pg_mode==SHADOW_PAGING) {
+ v3_register_shadow_paging_event_callback(vm,shadow_paging_callback,m);
+
+ for (i=0;i<vm->num_cores;i++) {
v3_invalidate_shadow_pts(&(vm->cores[i]));
+ }
+ } else if (vm->cores[0].shdw_pg_mode==NESTED_PAGING) {
+ //v3_register_nested_paging_event_callback(vm,nested_paging_callback,m);
+
+ for (i=0;i<vm->num_cores;i++) {
+ //v3_invalidate_nested_addr_range(&(vm->cores[i]),0,vm->mem_size-1);
+ }
+ } else {
+ PrintError(vm, VCORE_NONE, "Unsupported paging mode\n");
+ v3_bitmap_deinit(&(m->modified_pages));
+ V3_Free(m);
+ return 0;
}
// and now we should get callbacks as writes happen
static void stop_page_tracking(struct mem_migration_state *m)
{
-  v3_unregister_shadow_paging_event_callback(m->vm,paging_callback,m);
-
-  v3_bitmap_deinit(&(m->modified_pages));
+  // Tear down in reverse order of start_page_tracking(): unregister the
+  // callback first so no further bitmap writes occur, then release state.
+  // Assumes the paging model has not changed since tracking was started.
+  if (m->vm->cores[0].shdw_pg_mode==SHADOW_PAGING) {
+    v3_unregister_shadow_paging_event_callback(m->vm,shadow_paging_callback,m);
+  } else {
+    // nested-paging support is not yet enabled (see commented-out callback)
+    //v3_unregister_nested_paging_event_callback(m->vm,nested_paging_callback,m);
+  }
-  V3_Free(m);
+  v3_bitmap_deinit(&(m->modified_pages));
+
+  V3_Free(m);
}
ctx = v3_chkpt_open_ctx(chkpt, "header");
+ if (!ctx) {
+ PrintError(vm, VCORE_NONE, "Cannot open context to load header\n");
+ return -1;
+ }
+
switch (v3_mach_type) {
case V3_SVM_CPU:
case V3_SVM_REV3_CPU: {
PrintError(info->vm_info, info, "Could not open context to load core\n");
goto loadfailout;
}
+
+ // Run state is needed to determine when AP cores need
+ // to be immediately run after resume
+ V3_CHKPT_LOAD(ctx,"run_state",info->core_run_state,loadfailout);
+ V3_CHKPT_LOAD(ctx,"cpu_mode",info->cpu_mode,loadfailout);
+ V3_CHKPT_LOAD(ctx,"mem_mode",info->mem_mode,loadfailout);
+
+ V3_CHKPT_LOAD(ctx,"CPL",info->cpl,loadfailout);
+
+ if (info->cpl != info->segments.ss.dpl) {
+ V3_Print(info->vm_info,info,"Strange, CPL=%d but ss.dpl=%d on core save\n",info->cpl,info->segments.ss.dpl);
+ }
+
V3_CHKPT_LOAD(ctx, "RIP", info->rip, loadfailout);
V3_CHKPT_LOAD(ctx, "GDTR", info->segments.gdtr, loadfailout);
V3_CHKPT_LOAD(ctx, "IDTR", info->segments.idtr, loadfailout);
V3_CHKPT_LOAD(ctx, "TR", info->segments.tr, loadfailout);
-
+
+ if (info->cpl != info->segments.ss.dpl) {
+ V3_Print(info->vm_info,info,"Strange, CPL=%d but ss.dpl=%d on core load\n",info->cpl,info->segments.ss.dpl);
+ }
+
// several MSRs...
V3_CHKPT_LOAD(ctx, "STAR", info->msrs.star, loadfailout);
V3_CHKPT_LOAD(ctx, "LSTAR", info->msrs.lstar, loadfailout);
V3_CHKPT_LOAD(ctx, "GUEST_CR0", info->shdw_pg_state.guest_cr0, loadfailout);
V3_CHKPT_LOAD(ctx, "GUEST_EFER", info->shdw_pg_state.guest_efer, loadfailout);
+ // floating point
+ if (v3_load_fp_state(ctx,info)) {
+ goto loadfailout;
+ }
+
v3_chkpt_close_ctx(ctx); ctx=0;
PrintDebug(info->vm_info, info, "Finished reading guest_info information\n");
goto savefailout;
}
+ V3_CHKPT_SAVE(ctx,"run_state",info->core_run_state,savefailout);
+ V3_CHKPT_SAVE(ctx,"cpu_mode",info->cpu_mode,savefailout);
+ V3_CHKPT_SAVE(ctx,"mem_mode",info->mem_mode,savefailout);
+
+ V3_CHKPT_SAVE(ctx,"CPL",info->cpl,savefailout);
V3_CHKPT_SAVE(ctx, "RIP", info->rip, savefailout);
-
+
// GPRs
V3_CHKPT_SAVE(ctx,"RDI",info->vm_regs.rdi, savefailout);
V3_CHKPT_SAVE(ctx,"RSI",info->vm_regs.rsi, savefailout);
V3_CHKPT_SAVE(ctx, "GUEST_CR0", info->shdw_pg_state.guest_cr0, savefailout);
V3_CHKPT_SAVE(ctx, "GUEST_EFER", info->shdw_pg_state.guest_efer, savefailout);
+ // floating point
+ if (v3_save_fp_state(ctx,info)) {
+ goto savefailout;
+ }
+
v3_chkpt_close_ctx(ctx); ctx=0;
if (opts & V3_CHKPT_OPT_SKIP_ARCHDEP) {
struct mem_migration_state *mm_state;
int i;
- // Currently will work only for shadow paging
- for (i=0;i<vm->num_cores;i++) {
- if (vm->cores[i].shdw_pg_mode!=SHADOW_PAGING && !(opts & V3_CHKPT_OPT_SKIP_MEM)) {
- PrintError(vm, VCORE_NONE, "Cannot currently handle nested paging\n");
- return -1;
+ // Cores must all be in the same mode
+ // or we must be skipping memory
+ if (!(opts & V3_CHKPT_OPT_SKIP_MEM)) {
+ v3_paging_mode_t mode = vm->cores[0].shdw_pg_mode;
+ for (i=1;i<vm->num_cores;i++) {
+ if (vm->cores[i].shdw_pg_mode != mode) {
+ PrintError(vm, VCORE_NONE, "Cores having different paging modes (nested and shadow) are not supported\n");
+ return -1;
+ }
}
}