X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm_checkpoint.c;h=a67560e99351e61d89f734f3d5980d3812eaa325;hb=c0ecfba627c1d6c3f46d59bd4e5e6f883a494dc4;hp=92c5e166b403d4b6fb1630facba6ca5baa99ebc7;hpb=5d20e4fc3a9b5b9cf762e67c23f817986d802bb7;p=palacios.git

diff --git a/palacios/src/palacios/vmm_checkpoint.c b/palacios/src/palacios/vmm_checkpoint.c
index 92c5e16..a67560e 100644
--- a/palacios/src/palacios/vmm_checkpoint.c
+++ b/palacios/src/palacios/vmm_checkpoint.c
@@ -260,7 +260,7 @@ int v3_chkpt_close_ctx(struct v3_chkpt_ctx * ctx) {
 
 int v3_chkpt_save(struct v3_chkpt_ctx * ctx, char * tag, uint64_t len, void * buf) {
-    struct v3_chkpt * chkpt = ctx->chkpt;
+    struct v3_chkpt * chkpt;
     int rc;
 
     if (!ctx) {
@@ -268,6 +268,8 @@ int v3_chkpt_save(struct v3_chkpt_ctx * ctx, char * tag, uint64_t len, void * bu
         return -1;
     }
 
+    chkpt = ctx->chkpt;
+
     if (chkpt->current_ctx != ctx) {
         PrintError(VM_NONE, VCORE_NONE, "Attempt to save on context that is not the current context for the store\n");
         return -1;
@@ -285,13 +287,15 @@ int v3_chkpt_save(struct v3_chkpt_ctx * ctx, char * tag, uint64_t len, void * bu
 
 
 int v3_chkpt_load(struct v3_chkpt_ctx * ctx, char * tag, uint64_t len, void * buf) {
-    struct v3_chkpt * chkpt = ctx->chkpt;
+    struct v3_chkpt * chkpt;
     int rc;
 
     if (!ctx) {
         PrintError(VM_NONE, VCORE_NONE, "Attempt to load tag %s from null context\n",tag);
         return -1;
     }
+
+    chkpt = ctx->chkpt;
 
     if (chkpt->current_ctx != ctx) {
         PrintError(VM_NONE, VCORE_NONE, "Attempt to load from context that is not the current context for the store\n");
@@ -412,15 +416,15 @@ struct mem_migration_state {
     struct v3_bitmap modified_pages;
 };
 
-static int paging_callback(struct guest_info *core,
-                           struct v3_shdw_pg_event *event,
-                           void *priv_data)
+static int shadow_paging_callback(struct guest_info *core,
+                                  struct v3_shdw_pg_event *event,
+                                  void *priv_data)
 {
     struct mem_migration_state *m = (struct mem_migration_state *)priv_data;
 
     if (event->event_type==SHADOW_PAGEFAULT &&
         event->event_order==SHADOW_PREIMPL &&
-        event->error_code.write) {
+        event->error_code.write) { // Note, assumes VTLB behavior where we will see the write even if preceded by a read
         addr_t gpa;
         if (!v3_gva_to_gpa(core,event->gva,&gpa)) {
             // write to this page
@@ -434,7 +438,30 @@ static int paging_callback(struct guest_info *core,
 
     return 0;
 }
-
+
+
+/*
+static int nested_paging_callback(struct guest_info *core,
+                                  struct v3_nested_pg_event *event,
+                                  void *priv_data)
+{
+    struct mem_migration_state *m = (struct mem_migration_state *)priv_data;
+
+    if (event->event_type==NESTED_PAGEFAULT &&
+        event->event_order==NESTED_PREIMPL &&
+        event->error_code.write) { // Assumes we will see a write after reads
+        if (event->gpa < core->vm_info->mem_size) {
+            v3_bitmap_set(&(m->modified_pages),(event->gpa)>>12);
+        } else {
+            // no worries, this isn't physical memory
+        }
+    } else {
+        // we don't care about other events
+    }
+
+    return 0;
+}
+*/
 
 
 static struct mem_migration_state *start_page_tracking(struct v3_vm_info *vm)
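Both callbacks record writes at 4 KiB page granularity: the faulting guest-physical address becomes a bit index via gpa >> 12 and is set in the modified_pages bitmap. The following is a minimal, self-contained sketch of that bookkeeping; dirty_bitmap, mark_dirty, and PAGE_SHIFT are illustrative stand-ins for struct v3_bitmap and v3_bitmap_set, not Palacios APIs.

#include <stdint.h>

#define PAGE_SHIFT 12   /* 4 KiB pages, matching the (gpa)>>12 above */

/* Hypothetical stand-in for struct v3_bitmap: one bit per guest page. */
struct dirty_bitmap {
    uint8_t  *bits;       /* num_pages/8 bytes, zeroed when tracking starts */
    uint64_t  num_pages;
};

/* Record a write to the page containing gpa, as the callbacks do with
 * v3_bitmap_set(&(m->modified_pages), gpa >> 12). */
static void mark_dirty(struct dirty_bitmap *bm, uint64_t gpa)
{
    uint64_t page = gpa >> PAGE_SHIFT;

    if (page < bm->num_pages) {    /* ignore GPAs beyond guest memory */
        bm->bits[page / 8] |= (uint8_t)(1u << (page % 8));
    }
}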
@@ -456,10 +483,27 @@ static struct mem_migration_state *start_page_tracking(struct v3_vm_info *vm)
         V3_Free(m);
     }
 
-    v3_register_shadow_paging_event_callback(vm,paging_callback,m);
+    // We assume that the migrator has already verified that all cores are
+    // using the identical model (shadow or nested)
+    // This must not change over the execution of the migration
 
-    for (i=0;i<vm->num_cores;i++) {
+    if (vm->cores[0].shdw_pg_mode==SHADOW_PAGING) {
+        v3_register_shadow_paging_event_callback(vm,shadow_paging_callback,m);
+
+        for (i=0;i<vm->num_cores;i++) {
             v3_invalidate_shadow_pts(&(vm->cores[i]));
+        }
+    } else if (vm->cores[0].shdw_pg_mode==NESTED_PAGING) {
+        //v3_register_nested_paging_event_callback(vm,nested_paging_callback,m);
+
+        for (i=0;i<vm->num_cores;i++) {
+            //v3_invalidate_nested_addr_range(&(vm->cores[i]),0,vm->mem_size-1);
+        }
+    } else {
+        PrintError(vm, VCORE_NONE, "Unsupported paging mode\n");
+        v3_bitmap_deinit(&(m->modified_pages));
+        V3_Free(m);
+        return 0;
+    }
 
     // and now we should get callbacks as writes happen
@@ -469,11 +513,15 @@ static struct mem_migration_state *start_page_tracking(struct v3_vm_info *vm)
 
 static void stop_page_tracking(struct mem_migration_state *m)
 {
-    v3_unregister_shadow_paging_event_callback(m->vm,paging_callback,m);
-
-    v3_bitmap_deinit(&(m->modified_pages));
+    if (m->vm->cores[0].shdw_pg_mode==SHADOW_PAGING) {
+        v3_unregister_shadow_paging_event_callback(m->vm,shadow_paging_callback,m);
+    } else {
+        //v3_unregister_nested_paging_event_callback(m->vm,nested_paging_callback,m);
+    }
 
-    V3_Free(m);
+    v3_bitmap_deinit(&(m->modified_pages));
+
+    V3_Free(m);
 }
@@ -671,6 +719,11 @@ static int load_header(struct v3_vm_info * vm, struct v3_chkpt * chkpt) {
 
     ctx = v3_chkpt_open_ctx(chkpt, "header");
 
+    if (!ctx) {
+        PrintError(vm, VCORE_NONE, "Cannot open context to load header\n");
+        return -1;
+    }
+
     switch (v3_mach_type) {
         case V3_SVM_CPU:
         case V3_SVM_REV3_CPU: {
@@ -731,6 +784,19 @@ static int load_core(struct guest_info * info, struct v3_chkpt * chkpt, v3_chkpt
         PrintError(info->vm_info, info, "Could not open context to load core\n");
         goto loadfailout;
     }
+
+    // Run state is needed to determine when AP cores need
+    // to be immediately run after resume
+    V3_CHKPT_LOAD(ctx,"run_state",info->core_run_state,loadfailout);
+    V3_CHKPT_LOAD(ctx,"cpu_mode",info->cpu_mode,loadfailout);
+    V3_CHKPT_LOAD(ctx,"mem_mode",info->mem_mode,loadfailout);
+
+    V3_CHKPT_LOAD(ctx,"CPL",info->cpl,loadfailout);
+
+    if (info->cpl != info->segments.ss.dpl) {
+        V3_Print(info->vm_info,info,"Strange, CPL=%d but ss.dpl=%d on core load\n",info->cpl,info->segments.ss.dpl);
+    }
+
     V3_CHKPT_LOAD(ctx, "RIP", info->rip, loadfailout);
 
@@ -786,7 +852,11 @@ static int load_core(struct guest_info * info, struct v3_chkpt * chkpt, v3_chkpt
     V3_CHKPT_LOAD(ctx, "GDTR", info->segments.gdtr, loadfailout);
     V3_CHKPT_LOAD(ctx, "IDTR", info->segments.idtr, loadfailout);
     V3_CHKPT_LOAD(ctx, "TR", info->segments.tr, loadfailout);
-
+
+    if (info->cpl != info->segments.ss.dpl) {
+        V3_Print(info->vm_info,info,"Strange, CPL=%d but ss.dpl=%d on core load\n",info->cpl,info->segments.ss.dpl);
+    }
+
     // several MSRs...
     V3_CHKPT_LOAD(ctx, "STAR", info->msrs.star, loadfailout);
     V3_CHKPT_LOAD(ctx, "LSTAR", info->msrs.lstar, loadfailout);
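Each V3_CHKPT_LOAD call pairs a string tag with a guest_info field and a failure label. Given the v3_chkpt_load(ctx, tag, len, buf) signature in the first hunk, the macro plausibly expands along the following lines; this is a sketch under that assumption, not the actual definition in vmm_checkpoint.h.

/* Hypothetical expansion of V3_CHKPT_LOAD: read sizeof(var) bytes for the
 * named tag into var, and bail to the failure label on any error. The real
 * macro in palacios/include/palacios/vmm_checkpoint.h may differ. */
#define V3_CHKPT_LOAD(ctx, tag, var, faildo)                          \
    do {                                                              \
        if (v3_chkpt_load((ctx), (tag), sizeof(var), &(var)) < 0) {   \
            goto faildo;                                              \
        }                                                             \
    } while (0)

V3_CHKPT_SAVE would mirror this with v3_chkpt_save, which is why every field added here (run_state, cpu_mode, mem_mode, CPL, the FP state) appears symmetrically in load_core and save_core below.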
V3_CHKPT_LOAD(ctx, "STAR", info->msrs.star, loadfailout); V3_CHKPT_LOAD(ctx, "LSTAR", info->msrs.lstar, loadfailout); @@ -798,6 +868,11 @@ static int load_core(struct guest_info * info, struct v3_chkpt * chkpt, v3_chkpt V3_CHKPT_LOAD(ctx, "GUEST_CR0", info->shdw_pg_state.guest_cr0, loadfailout); V3_CHKPT_LOAD(ctx, "GUEST_EFER", info->shdw_pg_state.guest_efer, loadfailout); + // floating point + if (v3_load_fp_state(ctx,info)) { + goto loadfailout; + } + v3_chkpt_close_ctx(ctx); ctx=0; PrintDebug(info->vm_info, info, "Finished reading guest_info information\n"); @@ -912,9 +987,14 @@ static int save_core(struct guest_info * info, struct v3_chkpt * chkpt, v3_chkpt goto savefailout; } + V3_CHKPT_SAVE(ctx,"run_state",info->core_run_state,savefailout); + V3_CHKPT_SAVE(ctx,"cpu_mode",info->cpu_mode,savefailout); + V3_CHKPT_SAVE(ctx,"mem_mode",info->mem_mode,savefailout); + + V3_CHKPT_SAVE(ctx,"CPL",info->cpl,savefailout); V3_CHKPT_SAVE(ctx, "RIP", info->rip, savefailout); - + // GPRs V3_CHKPT_SAVE(ctx,"RDI",info->vm_regs.rdi, savefailout); V3_CHKPT_SAVE(ctx,"RSI",info->vm_regs.rsi, savefailout); @@ -979,6 +1059,11 @@ static int save_core(struct guest_info * info, struct v3_chkpt * chkpt, v3_chkpt V3_CHKPT_SAVE(ctx, "GUEST_CR0", info->shdw_pg_state.guest_cr0, savefailout); V3_CHKPT_SAVE(ctx, "GUEST_EFER", info->shdw_pg_state.guest_efer, savefailout); + // floating point + if (v3_save_fp_state(ctx,info)) { + goto savefailout; + } + v3_chkpt_close_ctx(ctx); ctx=0; if (opts & V3_CHKPT_OPT_SKIP_ARCHDEP) { @@ -1194,17 +1279,21 @@ int v3_chkpt_send_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_ int iter = 0; bool last_modpage_iteration=false; struct v3_bitmap modified_pages_to_send; - uint64_t start_time; + uint64_t start_time=0; uint64_t stop_time; int num_mod_pages=0; struct mem_migration_state *mm_state; int i; - // Currently will work only for shadow paging - for (i=0;inum_cores;i++) { - if (vm->cores[i].shdw_pg_mode!=SHADOW_PAGING && !(opts & V3_CHKPT_OPT_SKIP_MEM)) { - PrintError(vm, VCORE_NONE, "Cannot currently handle nested paging\n"); - return -1; + // Cores must all be in the same mode + // or we must be skipping mmeory + if (!(opts & V3_CHKPT_OPT_SKIP_MEM)) { + v3_paging_mode_t mode = vm->cores[0].shdw_pg_mode; + for (i=1;inum_cores;i++) { + if (vm->cores[i].shdw_pg_mode != mode) { + PrintError(vm, VCORE_NONE, "Cores having different paging modes (nested and shadow) are not supported\n"); + return -1; + } } }