2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2011, Madhav Suresh <madhav@u.northwestern.edu>
11 * Copyright (c) 2011, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Madhav Suresh <madhav@u.northwestern.edu>
15 * Arefin Huq <fig@arefin.net>
16 * Peter Dinda <pdinda@northwestern.edu> (store interface changes)
19 * This is free software. You are permitted to use,
20 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
23 #include <palacios/vmm.h>
24 #include <palacios/vmm_sprintf.h>
25 #include <palacios/vm_guest.h>
26 #include <palacios/svm.h>
27 #include <palacios/vmx.h>
28 #include <palacios/vmm_checkpoint.h>
29 #include <palacios/vmm_hashtable.h>
30 #include <palacios/vmm_direct_paging.h>
31 #include <palacios/vmm_debug.h>
33 #include <palacios/vmm_dev_mgr.h>
35 #ifdef V3_CONFIG_LIVE_MIGRATION
36 #include <palacios/vmm_time.h>
37 #include <palacios/vm_guest_mem.h>
38 #include <palacios/vmm_shadow_paging.h>
41 #ifndef V3_CONFIG_DEBUG_CHECKPOINT
43 #define PrintDebug(fmt, args...)
// Global registry mapping a backing-store name (char *) to its
// registered struct chkpt_interface. Populated by V3_init_checkpoint().
47 static struct hashtable * store_table = NULL;
// Direction of a checkpoint operation: SAVE writes guest state out to the
// store, LOAD reads it back in.
51 typedef enum {SAVE, LOAD} chkpt_mode_t;
// Driver interface that each checkpoint backing store implements and
// registers (instances are collected from the __v3_chkpt_stores linker
// section in V3_init_checkpoint()).
53 struct chkpt_interface {
55   // Opening a checkpoint should return a pointer to the internal representation
56   // of the checkpoint in the store. This will be passed back
57   // as "store_data".  Return NULL if the context cannot be opened
58   void * (*open_chkpt)(char * url, chkpt_mode_t mode);
59   // Closing the checkpoint should return -1 on failure, 0 on success
60   int    (*close_chkpt)(void * store_data);
62   // Opening a context on the checkpoint with a given name should return
63   // a pointer to an internal representation of the context.  This pointer
64   // is then passed back as "ctx".
65   // We will open only a single context at a time.
66   void * (*open_ctx)(void * store_data, char *name);
67   // Closing the context should return -1 on failure, 0 on success
68   int    (*close_ctx)(void * store_data, void * ctx);
70   // Save and load include a tagged data buffer.  These are
71   // "all or nothing" writes and reads.
72   // return -1 on failure, and 0 on success
74   int (*save)(void * store_data, void * ctx, char * tag, uint64_t len, void * buf);
75   int (*load)(void * store_data, void * ctx, char * tag, uint64_t len, void * buf);
// Fields of struct v3_chkpt (declaration header not visible in this view):
// the VM being checkpointed, the single currently-open context (contexts are
// opened one at a time; see v3_chkpt_open_ctx), and the backing-store driver.
80   struct v3_vm_info * vm;
82   struct v3_chkpt_ctx *current_ctx;
84   struct chkpt_interface * interface;
// Hash function for store_table: key is a NUL-terminated store name.
92 static uint_t store_hash_fn(addr_t key) {
93     char * name = (char *)key;
94     return v3_hash_buffer((uint8_t *)name, strlen(name));
// Key-equality function for store_table: store names match iff the
// strings compare equal.
97 static int store_eq_fn(addr_t key1, addr_t key2) {
98     char * name1 = (char *)key1;
99     char * name2 = (char *)key2;
101     return (strcmp(name1, name2) == 0);
106 #include "vmm_chkpt_stores.h"
// Build the backing-store registry.  Walks the array of chkpt_interface
// pointers that the linker collected between __start__v3_chkpt_stores and
// __stop__v3_chkpt_stores and inserts each by name, rejecting duplicates.
109 int V3_init_checkpoint() {
110     extern struct chkpt_interface * __start__v3_chkpt_stores[];
111     extern struct chkpt_interface * __stop__v3_chkpt_stores[];
112     struct chkpt_interface ** tmp_store = __start__v3_chkpt_stores;
115     store_table = v3_create_htable(0, store_hash_fn, store_eq_fn);
117     while (tmp_store != __stop__v3_chkpt_stores) {
118 	V3_Print(VM_NONE, VCORE_NONE, "Registering Checkpoint Backing Store (%s)\n", (*tmp_store)->name);
        // Duplicate registration of the same store name is an error
120 	if (v3_htable_search(store_table, (addr_t)((*tmp_store)->name))) {
121 	    PrintError(VM_NONE, VCORE_NONE, "Multiple instances of Checkpoint backing Store (%s)\n", (*tmp_store)->name);
        // Insert keyed by name; 0 return from the hashtable means failure
125 	if (v3_htable_insert(store_table, (addr_t)((*tmp_store)->name), (addr_t)(*tmp_store)) == 0) {
126 	    PrintError(VM_NONE, VCORE_NONE, "Could not register Checkpoint backing store (%s)\n", (*tmp_store)->name);
130 	tmp_store = &(__start__v3_chkpt_stores[++i]);
// Tear down the store registry.  The (0,0) arguments ask the hashtable not
// to free keys or values: the interfaces live in a linker section.
136 int V3_deinit_checkpoint() {
137     v3_free_htable(store_table, 0, 0);
// Architecture-tagged header strings written/verified at the start of a
// checkpoint ("x.x" fields are literal placeholders, compared verbatim).
142 static char svm_chkpt_header[] = "v3vee palacios checkpoint version: x.x, SVM x.x";
143 static char vmx_chkpt_header[] = "v3vee palacios checkpoint version: x.x, VMX x.x";
// Close a checkpoint via its backing store's close_chkpt().  Logs an error
// if the store fails to close a valid checkpoint, or if chkpt is NULL.
145 static int chkpt_close(struct v3_chkpt * chkpt) {
149 	rc = chkpt->interface->close_chkpt(chkpt->store_data);
154 	    PrintError(VM_NONE, VCORE_NONE, "Internal store failed to close valid checkpoint\n");
160 	PrintError(VM_NONE, VCORE_NONE, "Attempt to close null checkpoint\n");
// Open a checkpoint on the named backing store at the given url, in SAVE or
// LOAD mode.  Looks the store up in store_table, asks it to open the url,
// then allocates and initializes the v3_chkpt wrapper.  Returns NULL on any
// failure (unknown store, store open failure, or allocation failure).
166 static struct v3_chkpt * chkpt_open(struct v3_vm_info * vm, char * store, char * url, chkpt_mode_t mode) {
167     struct chkpt_interface * iface = NULL;
168     struct v3_chkpt * chkpt = NULL;
169     void * store_data = NULL;
171     iface = (void *)v3_htable_search(store_table, (addr_t)store);
174 	V3_Print(vm, VCORE_NONE, "Error: Could not locate Checkpoint interface for store (%s)\n", store);
178     store_data = iface->open_chkpt(url, mode);
180     if (store_data == NULL) {
181 	PrintError(vm, VCORE_NONE, "Could not open url (%s) for backing store (%s)\n", url, store);
186     chkpt = V3_Malloc(sizeof(struct v3_chkpt));
189 	PrintError(vm, VCORE_NONE, "Could not allocate checkpoint state, closing checkpoint\n");
        // Undo the store-level open before bailing out
190 	iface->close_chkpt(store_data);
194     memset(chkpt,0,sizeof(struct v3_chkpt));
196     chkpt->interface = iface;
198     chkpt->store_data = store_data;
199     chkpt->current_ctx = NULL;
// Open a named context on the checkpoint.  Only one context may be open at
// a time, so this fails if chkpt->current_ctx is still set.  On success the
// new context becomes the checkpoint's current context.
204 struct v3_chkpt_ctx * v3_chkpt_open_ctx(struct v3_chkpt * chkpt, char * name) {
205     struct v3_chkpt_ctx * ctx;
207     if (chkpt->current_ctx) {
208 	PrintError(VM_NONE, VCORE_NONE, "Attempt to open context %s before old context has been closed\n", name);
212     ctx = V3_Malloc(sizeof(struct v3_chkpt_ctx));
215 	PrintError(VM_NONE, VCORE_NONE, "Unable to allocate context\n");
219     memset(ctx, 0, sizeof(struct v3_chkpt_ctx));
        // Delegate the actual open to the backing store
222     ctx->store_ctx = chkpt->interface->open_ctx(chkpt->store_data, name);
224     if (!(ctx->store_ctx)) {
225 	PrintError(VM_NONE, VCORE_NONE, "Underlying store failed to open context %s\n",name);
230     chkpt->current_ctx = ctx;
// Close the given context.  It must be the checkpoint's current context.
// Even if the store's close_ctx() fails, the store-independent context is
// torn down and current_ctx is cleared so a new context can be opened.
235 int v3_chkpt_close_ctx(struct v3_chkpt_ctx * ctx) {
236     struct v3_chkpt * chkpt = ctx->chkpt;
239     if (chkpt->current_ctx != ctx) {
240 	PrintError(VM_NONE, VCORE_NONE, "Attempt to close a context that is not the current context on the store\n");
244     ret = chkpt->interface->close_ctx(chkpt->store_data, ctx->store_ctx);
247 	PrintError(VM_NONE, VCORE_NONE, "Failed to close context on store, closing device-independent context anyway - bad\n");
251     chkpt->current_ctx=NULL;
// All-or-nothing tagged write of len bytes from buf to the current context.
// Fails if ctx is NULL, is not the checkpoint's current context, or the
// backing store's save() rejects it.
262 int v3_chkpt_save(struct v3_chkpt_ctx * ctx, char * tag, uint64_t len, void * buf) {
263     struct v3_chkpt * chkpt = ctx->chkpt;
267 	PrintError(VM_NONE, VCORE_NONE, "Attempt to save tag %s on null context\n",tag);
271     if (chkpt->current_ctx != ctx) {
272 	PrintError(VM_NONE, VCORE_NONE, "Attempt to save on context that is not the current context for the store\n");
276     rc = chkpt->interface->save(chkpt->store_data, ctx->store_ctx, tag , len, buf);
279 	PrintError(VM_NONE, VCORE_NONE, "Underlying store failed to save tag %s on valid context\n",tag);
// All-or-nothing tagged read of len bytes into buf from the current context.
// Mirror of v3_chkpt_save(); same validity checks apply.
287 int v3_chkpt_load(struct v3_chkpt_ctx * ctx, char * tag, uint64_t len, void * buf) {
288     struct v3_chkpt * chkpt = ctx->chkpt;
292 	PrintError(VM_NONE, VCORE_NONE, "Attempt to load tag %s from null context\n",tag);
296     if (chkpt->current_ctx != ctx) {
297 	PrintError(VM_NONE, VCORE_NONE, "Attempt to load from context that is not the current context for the store\n");
301     rc = chkpt->interface->load(chkpt->store_data, ctx->store_ctx, tag, len, buf);
304 	PrintError(VM_NONE, VCORE_NONE, "Underlying store failed to load tag %s from valid context\n",tag);
// Restore all guest base memory regions from the "memory_img" context.
// Verifies that the checkpoint's region size and region count match this
// VM's configuration (mismatches are currently fatal), then loads each
// region's contents ("memory_img<i>") directly into the host mapping of
// that base region.  Returns -1 on failure.
// Fixes: "laod" typo and unbalanced paren in error messages.
313 static int load_memory(struct v3_vm_info * vm, struct v3_chkpt * chkpt) {
315     void * guest_mem_base = NULL;
318     uint64_t saved_mem_block_size;
319     uint32_t saved_num_base_regions;
322     extern uint64_t v3_mem_block_size;
324     ctx = v3_chkpt_open_ctx(chkpt, "memory_img");
327 	PrintError(vm, VCORE_NONE, "Unable to open context for memory load\n");
331     if (V3_CHKPT_LOAD(ctx, "region_size",saved_mem_block_size)) {
332 	PrintError(vm, VCORE_NONE, "Unable to load memory region size\n");
336     if (V3_CHKPT_LOAD(ctx, "num_regions",saved_num_base_regions)) {
337 	PrintError(vm, VCORE_NONE, "Unable to load number of regions\n");
341     if (saved_mem_block_size != v3_mem_block_size) {
342 	PrintError(vm, VCORE_NONE, "Unable to load as memory block size differs\n");
344     } // support will eventually be added for this
346     if (saved_num_base_regions != vm->mem_map.num_base_regions) {
347 	PrintError(vm, VCORE_NONE, "Unable to load as number of base regions differs\n");
349     } // support will eventually be added for this
351     for (i=0;i<vm->mem_map.num_base_regions;i++) {
352 	guest_mem_base = V3_VAddr((void *)vm->mem_map.base_regions[i].host_addr);
353 	sprintf(buf,"memory_img%d",i);
354 	if (v3_chkpt_load(ctx, buf, v3_mem_block_size, guest_mem_base)) {
            // NOTE(review): 'ret' printed here is not visibly assigned in this
            // view — confirm it holds the byte count from the store
355 	    PrintError(vm, VCORE_NONE, "Unable to load all of memory (region %d) (requested=%llu bytes, result=%llu bytes)\n",i,(uint64_t)(vm->mem_size),ret);
356 	    v3_chkpt_close_ctx(ctx);
361     v3_chkpt_close_ctx(ctx);
// Save all guest base memory to the "memory_img" context: first the region
// size and region count (so load_memory can validate), then each base
// region's contents under the tag "memory_img<i>".  Returns -1 on failure.
367 static int save_memory(struct v3_vm_info * vm, struct v3_chkpt * chkpt) {
368     void * guest_mem_base = NULL;
370     char buf[128]; // region name
372     extern uint64_t v3_mem_block_size;
376     ctx = v3_chkpt_open_ctx(chkpt, "memory_img");
379 	PrintError(vm, VCORE_NONE, "Unable to open context to save memory\n");
383     if (V3_CHKPT_SAVE(ctx, "region_size",v3_mem_block_size)) {
384 	PrintError(vm, VCORE_NONE, "Unable to save memory region size\n");
388     if (V3_CHKPT_SAVE(ctx, "num_regions",vm->mem_map.num_base_regions)) {
389 	PrintError(vm, VCORE_NONE, "Unable to save number of regions\n");
393     for (i=0;i<vm->mem_map.num_base_regions;i++) {
394 	guest_mem_base = V3_VAddr((void *)vm->mem_map.base_regions[i].host_addr);
395 	sprintf(buf,"memory_img%d",i);
396 	if (v3_chkpt_save(ctx, buf, v3_mem_block_size, guest_mem_base)) {
397 	    PrintError(vm, VCORE_NONE, "Unable to save all of memory (region %d) (requested=%llu, received=%llu)\n",i,(uint64_t)(vm->mem_size),ret);
398 	    v3_chkpt_close_ctx(ctx);
403     v3_chkpt_close_ctx(ctx);
408 #ifdef V3_CONFIG_LIVE_MIGRATION
// Live-migration dirty-page tracking state: the VM being tracked and a
// bitmap with one bit per 4KB guest physical page (bit set = page written
// since tracking started).
410 struct mem_migration_state {
411     struct v3_vm_info *vm;
412     struct v3_bitmap  modified_pages; 
// Shadow-paging event callback used during live migration.  On a write
// pagefault (pre-implementation stage), translates the faulting GVA to a
// GPA and marks that 4KB page dirty in the tracking bitmap.  Translation
// failure just means the address is not guest physical memory.
415 static int shadow_paging_callback(struct guest_info *core, 
416 				  struct v3_shdw_pg_event *event,
419     struct mem_migration_state *m = (struct mem_migration_state *)priv_data;
421     if (event->event_type==SHADOW_PAGEFAULT &&
422 	event->event_order==SHADOW_PREIMPL &&
423 	event->error_code.write) { // Note, assumes VTLB behavior where we will see the write even if preceded by a read
425 	if (!v3_gva_to_gpa(core,event->gva,&gpa)) {
426 	    // write to this page
            // gpa>>12 converts the physical address to a 4KB page index
427 	    v3_bitmap_set(&(m->modified_pages),gpa>>12);
429 	    // no worries, this isn't physical memory
432 	// we don't care about other events
// Nested-paging counterpart of shadow_paging_callback: on a write nested
// pagefault (pre-implementation stage) within guest memory bounds, marks
// the faulting GPA's 4KB page dirty in the tracking bitmap.
440 static int nested_paging_callback(struct guest_info *core, 
441 				  struct v3_nested_pg_event *event,
444     struct mem_migration_state *m = (struct mem_migration_state *)priv_data;
446     if (event->event_type==NESTED_PAGEFAULT &&
447 	event->event_order==NESTED_PREIMPL &&
448 	event->error_code.write) { // Assumes we will see a write after reads
449 	if (event->gpa<core->vm_info->mem_size) { 
450 	    v3_bitmap_set(&(m->modified_pages),(event->gpa)>>12);
452 	    // no worries, this isn't physical memory
455 	// we don't care about other events
// Begin dirty-page tracking for live migration.  Allocates tracking state,
// sizes the bitmap at one bit per 4KB page of guest memory, registers the
// paging-event callback appropriate to the VM's paging mode, and flushes
// existing page tables so future writes fault and get recorded.
// Returns NULL on allocation/initialization failure or unsupported mode.
463 static struct mem_migration_state *start_page_tracking(struct v3_vm_info *vm)
465     struct mem_migration_state *m;
468     m = (struct mem_migration_state *)V3_Malloc(sizeof(struct mem_migration_state));
471 	PrintError(vm, VCORE_NONE, "Cannot allocate\n");
477     if (v3_bitmap_init(&(m->modified_pages),vm->mem_size >> 12) == -1) { 
478 	PrintError(vm, VCORE_NONE, "Failed to initialize modified_pages bit vector");
482     // We assume that the migrator has already verified that all cores are
483     // using the identical model (shadow or nested)
484     // This must not change over the execution of the migration
486     if (vm->cores[0].shdw_pg_mode==SHADOW_PAGING) { 
487 	v3_register_shadow_paging_event_callback(vm,shadow_paging_callback,m);
        // Invalidate every core's shadow page tables so all writes refault
489 	for (i=0;i<vm->num_cores;i++) {
490 	    v3_invalidate_shadow_pts(&(vm->cores[i]));
492     } else if (vm->cores[0].shdw_pg_mode==NESTED_PAGING) { 
        // NOTE(review): nested-paging registration/invalidation is commented
        // out — nested tracking appears not yet enabled; confirm intent
493 	//v3_register_nested_paging_event_callback(vm,nested_paging_callback,m);
495 	for (i=0;i<vm->num_cores;i++) {
496 	    //v3_invalidate_nested_addr_range(&(vm->cores[i]),0,vm->mem_size-1);
499 	PrintError(vm, VCORE_NONE, "Unsupported paging mode\n");
500 	v3_bitmap_deinit(&(m->modified_pages));
505     // and now we should get callbacks as writes happen
// End dirty-page tracking: unregister the paging callback (shadow mode;
// the nested unregister is commented out, matching start_page_tracking)
// and release the bitmap.
510 static void stop_page_tracking(struct mem_migration_state *m)
512     if (m->vm->cores[0].shdw_pg_mode==SHADOW_PAGING) { 
513 	v3_unregister_shadow_paging_event_callback(m->vm,shadow_paging_callback,m);
515 	//v3_unregister_nested_paging_event_callback(m->vm,nested_paging_callback,m);
518     v3_bitmap_deinit(&(m->modified_pages));
531 // zero: done with this round
// Send one round of incremental memory: first the dirty-page bitmap itself
// (context "memory_bitmap_bits"), then each page whose bit is set, in
// bitmap order, each in its own "memory_page" context.  Pages are located
// by translating the 4KB page index through the VM's base regions.
532 static int save_inc_memory(struct v3_vm_info * vm, 
533 			   struct v3_bitmap * mod_pgs_to_send, 
534 			   struct v3_chkpt * chkpt) {
535     int page_size_bytes = 1 << 12; // assuming 4k pages right now
    // Round bits up to whole bytes for the bitmap transfer
538     int bitmap_num_bytes = (mod_pgs_to_send->num_bits / 8)
539         + ((mod_pgs_to_send->num_bits % 8) > 0);
542     PrintDebug(vm, VCORE_NONE, "Saving incremental memory.\n");
544     ctx = v3_chkpt_open_ctx(chkpt,"memory_bitmap_bits");
547 	PrintError(vm, VCORE_NONE, "Cannot open context for dirty memory bitmap\n");
552     if (v3_chkpt_save(ctx,
553 		      "memory_bitmap_bits",
555 		      mod_pgs_to_send->bits)) {
556 	PrintError(vm, VCORE_NONE, "Unable to write all of the dirty memory bitmap\n");
557 	v3_chkpt_close_ctx(ctx);
561     v3_chkpt_close_ctx(ctx);
563     PrintDebug(vm, VCORE_NONE, "Sent bitmap bits.\n");
565     // Dirty memory pages are sent in bitmap order
566     for (i = 0; i < mod_pgs_to_send->num_bits; i++) {
567 	if (v3_bitmap_check(mod_pgs_to_send, i)) {
568 	    struct v3_mem_region *region = v3_get_base_region(vm,page_size_bytes * i);
570 		PrintError(vm, VCORE_NONE, "Failed to find base region for page %d\n",i);
573 	    // PrintDebug(vm, VCORE_NONE, "Sending memory page %d.\n",i);
574 	    ctx = v3_chkpt_open_ctx(chkpt, "memory_page");
576 		PrintError(vm, VCORE_NONE, "Unable to open context to send memory page\n");
579 	    if (v3_chkpt_save(ctx, 
                              // host address of the page within its base region
582 			      (void*)(region->host_addr + page_size_bytes * i - region->guest_start))) {
583 		PrintError(vm, VCORE_NONE, "Unable to send a memory page\n");
584 		v3_chkpt_close_ctx(ctx);
588 	    v3_chkpt_close_ctx(ctx);
599 // zero: ok, but not done
600 // positive: ok, and also done
// Receive one round of incremental memory (mirror of save_inc_memory):
// read the dirty-page bitmap, then each set page in bitmap order into its
// base region.  Tracks whether the received bitmap was empty, which marks
// the end of the incremental transfer.
601 static int load_inc_memory(struct v3_vm_info * vm, 
602 			   struct v3_bitmap * mod_pgs,
603 			   struct v3_chkpt * chkpt) {
604     int page_size_bytes = 1 << 12; // assuming 4k pages right now
607     bool empty_bitmap = true;
    // Round bits up to whole bytes for the bitmap transfer
608     int bitmap_num_bytes = (mod_pgs->num_bits / 8)
609         + ((mod_pgs->num_bits % 8) > 0);
612     ctx = v3_chkpt_open_ctx(chkpt, "memory_bitmap_bits");
615 	PrintError(vm, VCORE_NONE, "Cannot open context to receive memory bitmap\n");
619     if (v3_chkpt_load(ctx,
620 		      "memory_bitmap_bits",
623 	PrintError(vm, VCORE_NONE, "Did not receive all of memory bitmap\n");
624 	v3_chkpt_close_ctx(ctx);
628     v3_chkpt_close_ctx(ctx);
630     // Receive also follows bitmap order
631     for (i = 0; i < mod_pgs->num_bits; i ++) {
632 	if (v3_bitmap_check(mod_pgs, i)) {
633 	    struct v3_mem_region *region = v3_get_base_region(vm,page_size_bytes * i);
635 		PrintError(vm, VCORE_NONE, "Failed to find base region for page %d\n",i);
638 	    //PrintDebug(vm, VCORE_NONE, "Loading page %d\n", i);
	    // at least one dirty page arrived this round
639 	    empty_bitmap = false;
640 	    ctx = v3_chkpt_open_ctx(chkpt, "memory_page");
642 		PrintError(vm, VCORE_NONE, "Cannot open context to receive memory page\n");
646 	    if (v3_chkpt_load(ctx, 
                              // host address of the page within its base region
649 			      (void*)(region->host_addr + page_size_bytes * i - region->guest_start))) {
650 		PrintError(vm, VCORE_NONE, "Did not receive all of memory page\n");
651 		v3_chkpt_close_ctx(ctx);
654 	    v3_chkpt_close_ctx(ctx);
659     // signal end of receiving pages
660     PrintDebug(vm, VCORE_NONE, "Finished receiving pages.\n");
// Write the architecture-identifying header string (SVM or VMX flavor) into
// the "header" context, dispatching on the host CPU architecture.
// NOTE(review): unlike load_header below, this is not declared static —
// confirm whether external linkage is intentional.
671 int save_header(struct v3_vm_info * vm, struct v3_chkpt * chkpt) {
672     extern v3_cpu_arch_t v3_mach_type;
675     ctx = v3_chkpt_open_ctx(chkpt, "header");
677 	PrintError(vm, VCORE_NONE, "Cannot open context to save header\n");
681     switch (v3_mach_type) {
683 	case V3_SVM_REV3_CPU: {
	    // Header is written without its NUL terminator (strlen bytes)
684 	    if (v3_chkpt_save(ctx, "header", strlen(svm_chkpt_header), svm_chkpt_header)) { 
685 		PrintError(vm, VCORE_NONE, "Could not save all of SVM header\n");
686 		v3_chkpt_close_ctx(ctx);
693 	case V3_VMX_EPT_UG_CPU: {
694 	    if (v3_chkpt_save(ctx, "header", strlen(vmx_chkpt_header), vmx_chkpt_header)) { 
695 		PrintError(vm, VCORE_NONE, "Could not save all of VMX header\n");
696 		v3_chkpt_close_ctx(ctx);
702 	    PrintError(vm, VCORE_NONE, "checkpoint not supported on this architecture\n");
703 	    v3_chkpt_close_ctx(ctx);
707     v3_chkpt_close_ctx(ctx);
// Read back the architecture header written by save_header into a
// stack buffer sized from the expected header string, NUL-terminating it
// after the load.  Dispatches on the host CPU architecture.
712 static int load_header(struct v3_vm_info * vm, struct v3_chkpt * chkpt) {
713     extern v3_cpu_arch_t v3_mach_type;
716     ctx = v3_chkpt_open_ctx(chkpt, "header");
719 	PrintError(vm, VCORE_NONE, "Cannot open context to load header\n");
723     switch (v3_mach_type) {
725 	case V3_SVM_REV3_CPU: {
	    // VLA sized to hold the expected header plus a terminator
726 	    char header[strlen(svm_chkpt_header) + 1];
728 	    if (v3_chkpt_load(ctx, "header", strlen(svm_chkpt_header), header)) {
729 		PrintError(vm, VCORE_NONE, "Could not load all of SVM header\n");
730 		v3_chkpt_close_ctx(ctx);
734 	    header[strlen(svm_chkpt_header)] = 0;
740 	case V3_VMX_EPT_UG_CPU: {
741 	    char header[strlen(vmx_chkpt_header) + 1];
743 	    if (v3_chkpt_load(ctx, "header", strlen(vmx_chkpt_header), header)) {
744 		PrintError(vm, VCORE_NONE, "Could not load all of VMX header\n");
745 		v3_chkpt_close_ctx(ctx);
749 	    header[strlen(vmx_chkpt_header)] = 0;
754 	    PrintError(vm, VCORE_NONE, "checkpoint not supported on this architecture\n");
755 	    v3_chkpt_close_ctx(ctx);
759     v3_chkpt_close_ctx(ctx);
// Restore one core's full register and paging state from the checkpoint
// context "guest_info<vcpu_id>": run state, modes, CPL, RIP, GPRs, control
// registers (CR8 reconstructed from apic_tpr), debug registers, segments,
// MSRs, shadow-pager guest state, and FP state.  Then recomputes cpu/mem
// mode, re-activates shadow or passthrough page tables as appropriate, and
// (unless V3_CHKPT_OPT_SKIP_ARCHDEP) loads the arch-specific VMCB/VMCS
// state from its own context.  Errors jump to loadfailout, which closes any
// open context and fails the load.
765 static int load_core(struct guest_info * info, struct v3_chkpt * chkpt, v3_chkpt_options_t opts) {
766     extern v3_cpu_arch_t v3_mach_type;
771     PrintDebug(info->vm_info, info, "Loading core\n");
773     memset(key_name, 0, 16);
775     snprintf(key_name, 16, "guest_info%d", info->vcpu_id);
777     ctx = v3_chkpt_open_ctx(chkpt, key_name);
780 	PrintError(info->vm_info, info, "Could not open context to load core\n");
784     // Run state is needed to determine when AP cores need
785     // to be immediately run after resume
786     V3_CHKPT_LOAD(ctx,"run_state",info->core_run_state,loadfailout);
787     V3_CHKPT_LOAD(ctx,"cpu_mode",info->cpu_mode,loadfailout);
788     V3_CHKPT_LOAD(ctx,"mem_mode",info->mem_mode,loadfailout);
790     V3_CHKPT_LOAD(ctx,"CPL",info->cpl,loadfailout);
    // Sanity check: CPL should agree with SS.DPL.  NOTE(review): message
    // says "on core save" in this load path — looks like a copy-paste;
    // also ss.dpl has not been loaded yet at this point — verify intent.
792     if (info->cpl != info->segments.ss.dpl) { 
793 	V3_Print(info->vm_info,info,"Strange, CPL=%d but ss.dpl=%d on core save\n",info->cpl,info->segments.ss.dpl);
797     V3_CHKPT_LOAD(ctx, "RIP", info->rip, loadfailout);
    // General-purpose registers
800     V3_CHKPT_LOAD(ctx,"RDI",info->vm_regs.rdi, loadfailout); 
801     V3_CHKPT_LOAD(ctx,"RSI",info->vm_regs.rsi, loadfailout); 
802     V3_CHKPT_LOAD(ctx,"RBP",info->vm_regs.rbp, loadfailout); 
803     V3_CHKPT_LOAD(ctx,"RSP",info->vm_regs.rsp, loadfailout); 
804     V3_CHKPT_LOAD(ctx,"RBX",info->vm_regs.rbx, loadfailout); 
805     V3_CHKPT_LOAD(ctx,"RDX",info->vm_regs.rdx, loadfailout); 
806     V3_CHKPT_LOAD(ctx,"RCX",info->vm_regs.rcx, loadfailout); 
807     V3_CHKPT_LOAD(ctx,"RAX",info->vm_regs.rax, loadfailout);
808     V3_CHKPT_LOAD(ctx,"R8",info->vm_regs.r8, loadfailout);
809     V3_CHKPT_LOAD(ctx,"R9",info->vm_regs.r9, loadfailout);
810     V3_CHKPT_LOAD(ctx,"R10",info->vm_regs.r10, loadfailout);
811     V3_CHKPT_LOAD(ctx,"R11",info->vm_regs.r11, loadfailout);
812     V3_CHKPT_LOAD(ctx,"R12",info->vm_regs.r12, loadfailout);
813     V3_CHKPT_LOAD(ctx,"R13",info->vm_regs.r13, loadfailout);
814     V3_CHKPT_LOAD(ctx,"R14",info->vm_regs.r14, loadfailout);
815     V3_CHKPT_LOAD(ctx,"R15",info->vm_regs.r15, loadfailout);
    // Control registers
818     V3_CHKPT_LOAD(ctx, "CR0", info->ctrl_regs.cr0, loadfailout);
820     V3_CHKPT_LOAD(ctx, "CR2", info->ctrl_regs.cr2, loadfailout);
821     V3_CHKPT_LOAD(ctx, "CR3", info->ctrl_regs.cr3, loadfailout);
822     V3_CHKPT_LOAD(ctx, "CR4", info->ctrl_regs.cr4, loadfailout);
823     // There are no CR5,6,7
824     // CR8 is derived from apic_tpr
825     tempreg = (info->ctrl_regs.apic_tpr >> 4) & 0xf;
826     V3_CHKPT_LOAD(ctx, "CR8", tempreg, loadfailout);
827     V3_CHKPT_LOAD(ctx, "APIC_TPR", info->ctrl_regs.apic_tpr, loadfailout);
828     V3_CHKPT_LOAD(ctx, "RFLAGS", info->ctrl_regs.rflags, loadfailout);
829     V3_CHKPT_LOAD(ctx, "EFER", info->ctrl_regs.efer, loadfailout);
    // Debug registers
832     V3_CHKPT_LOAD(ctx, "DR0", info->dbg_regs.dr0, loadfailout);
833     V3_CHKPT_LOAD(ctx, "DR1", info->dbg_regs.dr1, loadfailout);
834     V3_CHKPT_LOAD(ctx, "DR2", info->dbg_regs.dr2, loadfailout);
835     V3_CHKPT_LOAD(ctx, "DR3", info->dbg_regs.dr3, loadfailout);
836     // there is no DR4 or DR5
837     V3_CHKPT_LOAD(ctx, "DR6", info->dbg_regs.dr6, loadfailout);
838     V3_CHKPT_LOAD(ctx, "DR7", info->dbg_regs.dr7, loadfailout);
    // Segment registers
841     V3_CHKPT_LOAD(ctx, "CS", info->segments.cs, loadfailout);
842     V3_CHKPT_LOAD(ctx, "DS", info->segments.ds, loadfailout);
843     V3_CHKPT_LOAD(ctx, "ES", info->segments.es, loadfailout);
844     V3_CHKPT_LOAD(ctx, "FS", info->segments.fs, loadfailout);
845     V3_CHKPT_LOAD(ctx, "GS", info->segments.gs, loadfailout);
846     V3_CHKPT_LOAD(ctx, "SS", info->segments.ss, loadfailout);
847     V3_CHKPT_LOAD(ctx, "LDTR", info->segments.ldtr, loadfailout);
848     V3_CHKPT_LOAD(ctx, "GDTR", info->segments.gdtr, loadfailout);
849     V3_CHKPT_LOAD(ctx, "IDTR", info->segments.idtr, loadfailout);
850     V3_CHKPT_LOAD(ctx, "TR", info->segments.tr, loadfailout);
    // Re-check CPL vs. SS.DPL now that segments have been loaded
852     if (info->cpl != info->segments.ss.dpl) { 
853 	V3_Print(info->vm_info,info,"Strange, CPL=%d but ss.dpl=%d on core load\n",info->cpl,info->segments.ss.dpl);
    // Syscall-related MSRs
857     V3_CHKPT_LOAD(ctx, "STAR", info->msrs.star, loadfailout);
858     V3_CHKPT_LOAD(ctx, "LSTAR", info->msrs.lstar, loadfailout);
859     V3_CHKPT_LOAD(ctx, "SFMASK", info->msrs.sfmask, loadfailout);
860     V3_CHKPT_LOAD(ctx, "KERN_GS_BASE", info->msrs.kern_gs_base, loadfailout);
862     // Some components of guest state captured in the shadow pager
863     V3_CHKPT_LOAD(ctx, "GUEST_CR3", info->shdw_pg_state.guest_cr3, loadfailout);
864     V3_CHKPT_LOAD(ctx, "GUEST_CR0", info->shdw_pg_state.guest_cr0, loadfailout);
865     V3_CHKPT_LOAD(ctx, "GUEST_EFER", info->shdw_pg_state.guest_efer, loadfailout);
868     if (v3_load_fp_state(ctx,info)) {
872     v3_chkpt_close_ctx(ctx); ctx=0;
874     PrintDebug(info->vm_info, info, "Finished reading guest_info information\n");
    // Derive modes from the freshly loaded register state
876     info->cpu_mode = v3_get_vm_cpu_mode(info);
877     info->mem_mode = v3_get_vm_mem_mode(info);
879     if (info->shdw_pg_mode == SHADOW_PAGING) {
880 	if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
881 	    if (v3_activate_shadow_pt(info) == -1) {
882 		PrintError(info->vm_info, info, "Failed to activate shadow page tables\n");
886 	    if (v3_activate_passthrough_pt(info) == -1) {
887 		PrintError(info->vm_info, info, "Failed to activate passthrough page tables\n");
    // Optionally skip the architecture-dependent (VMCB/VMCS) portion
894     if (opts & V3_CHKPT_OPT_SKIP_ARCHDEP) { 
898     switch (v3_mach_type) {
900 	case V3_SVM_REV3_CPU: {
903 	    snprintf(key_name, 16, "vmcb_data%d", info->vcpu_id);
904 	    ctx = v3_chkpt_open_ctx(chkpt, key_name);
907 		PrintError(info->vm_info, info, "Could not open context to load SVM core\n");
911 	    if (v3_svm_load_core(info, ctx) < 0 ) {
912 		PrintError(info->vm_info, info, "Failed to patch core %d\n", info->vcpu_id);
916 	    v3_chkpt_close_ctx(ctx); ctx=0;
922 	case V3_VMX_EPT_UG_CPU: {
925 	    snprintf(key_name, 16, "vmcs_data%d", info->vcpu_id);
927 	    ctx = v3_chkpt_open_ctx(chkpt, key_name);
930 		PrintError(info->vm_info, info, "Could not open context to load VMX core\n");
934 	    if (v3_vmx_load_core(info, ctx) < 0) {
935 		PrintError(info->vm_info, info, "VMX checkpoint failed\n");
939 	    v3_chkpt_close_ctx(ctx); ctx=0;
944 	    PrintError(info->vm_info, info, "Invalid CPU Type (%d)\n", v3_mach_type);
950     PrintDebug(info->vm_info, info, "Load of core succeeded\n");
952     v3_print_guest_state(info);
    // loadfailout path: report failure and close any still-open context
957     PrintError(info->vm_info, info, "Failed to load core\n");
958     if (ctx) { v3_chkpt_close_ctx(ctx);}
963 // GEM5 - Hypercall for initiating transfer to gem5 (checkpoint)
// Save one core's full register and paging state to the checkpoint context
// "guest_info<vcpu_id>": mirror of load_core.  Writes run state, modes,
// CPL, RIP, GPRs, control registers (CR8 derived from apic_tpr), debug
// registers, segments, MSRs, shadow-pager guest state, and FP state; then
// (unless V3_CHKPT_OPT_SKIP_ARCHDEP) the arch-specific VMCB/VMCS state in
// its own context.  Errors jump to savefailout, which closes any open
// context and fails the save.
965 static int save_core(struct guest_info * info, struct v3_chkpt * chkpt, v3_chkpt_options_t opts) {
966     extern v3_cpu_arch_t v3_mach_type;
971     PrintDebug(info->vm_info, info, "Saving core\n");
973     v3_print_guest_state(info);
975     memset(key_name, 0, 16);
977     snprintf(key_name, 16, "guest_info%d", info->vcpu_id);
979     ctx = v3_chkpt_open_ctx(chkpt, key_name);
982 	PrintError(info->vm_info, info, "Unable to open context to save core\n");
986     V3_CHKPT_SAVE(ctx,"run_state",info->core_run_state,savefailout);
987     V3_CHKPT_SAVE(ctx,"cpu_mode",info->cpu_mode,savefailout);
988     V3_CHKPT_SAVE(ctx,"mem_mode",info->mem_mode,savefailout);
990     V3_CHKPT_SAVE(ctx,"CPL",info->cpl,savefailout);
992     V3_CHKPT_SAVE(ctx, "RIP", info->rip, savefailout);
    // General-purpose registers
995     V3_CHKPT_SAVE(ctx,"RDI",info->vm_regs.rdi, savefailout); 
996     V3_CHKPT_SAVE(ctx,"RSI",info->vm_regs.rsi, savefailout); 
997     V3_CHKPT_SAVE(ctx,"RBP",info->vm_regs.rbp, savefailout); 
998     V3_CHKPT_SAVE(ctx,"RSP",info->vm_regs.rsp, savefailout); 
999     V3_CHKPT_SAVE(ctx,"RBX",info->vm_regs.rbx, savefailout); 
1000     V3_CHKPT_SAVE(ctx,"RDX",info->vm_regs.rdx, savefailout); 
1001     V3_CHKPT_SAVE(ctx,"RCX",info->vm_regs.rcx, savefailout); 
1002     V3_CHKPT_SAVE(ctx,"RAX",info->vm_regs.rax, savefailout);
1003     V3_CHKPT_SAVE(ctx,"R8",info->vm_regs.r8, savefailout);
1004     V3_CHKPT_SAVE(ctx,"R9",info->vm_regs.r9, savefailout);
1005     V3_CHKPT_SAVE(ctx,"R10",info->vm_regs.r10, savefailout);
1006     V3_CHKPT_SAVE(ctx,"R11",info->vm_regs.r11, savefailout);
1007     V3_CHKPT_SAVE(ctx,"R12",info->vm_regs.r12, savefailout);
1008     V3_CHKPT_SAVE(ctx,"R13",info->vm_regs.r13, savefailout);
1009     V3_CHKPT_SAVE(ctx,"R14",info->vm_regs.r14, savefailout);
1010     V3_CHKPT_SAVE(ctx,"R15",info->vm_regs.r15, savefailout);
1012     // Control registers
1013     V3_CHKPT_SAVE(ctx, "CR0", info->ctrl_regs.cr0, savefailout);
1015     V3_CHKPT_SAVE(ctx, "CR2", info->ctrl_regs.cr2, savefailout);
1016     V3_CHKPT_SAVE(ctx, "CR3", info->ctrl_regs.cr3, savefailout);
1017     V3_CHKPT_SAVE(ctx, "CR4", info->ctrl_regs.cr4, savefailout);
1018     // There are no CR5,6,7
1019     // CR8 is derived from apic_tpr
1020     tempreg = (info->ctrl_regs.apic_tpr >> 4) & 0xf;
1021     V3_CHKPT_SAVE(ctx, "CR8", tempreg, savefailout);
1022     V3_CHKPT_SAVE(ctx, "APIC_TPR", info->ctrl_regs.apic_tpr, savefailout);
1023     V3_CHKPT_SAVE(ctx, "RFLAGS", info->ctrl_regs.rflags, savefailout);
1024     V3_CHKPT_SAVE(ctx, "EFER", info->ctrl_regs.efer, savefailout);
    // Debug registers
1027     V3_CHKPT_SAVE(ctx, "DR0", info->dbg_regs.dr0, savefailout);
1028     V3_CHKPT_SAVE(ctx, "DR1", info->dbg_regs.dr1, savefailout);
1029     V3_CHKPT_SAVE(ctx, "DR2", info->dbg_regs.dr2, savefailout);
1030     V3_CHKPT_SAVE(ctx, "DR3", info->dbg_regs.dr3, savefailout);
1031     // there is no DR4 or DR5
1032     V3_CHKPT_SAVE(ctx, "DR6", info->dbg_regs.dr6, savefailout);
1033     V3_CHKPT_SAVE(ctx, "DR7", info->dbg_regs.dr7, savefailout);
1035     // Segment registers
1036     V3_CHKPT_SAVE(ctx, "CS", info->segments.cs, savefailout);
1037     V3_CHKPT_SAVE(ctx, "DS", info->segments.ds, savefailout);
1038     V3_CHKPT_SAVE(ctx, "ES", info->segments.es, savefailout);
1039     V3_CHKPT_SAVE(ctx, "FS", info->segments.fs, savefailout);
1040     V3_CHKPT_SAVE(ctx, "GS", info->segments.gs, savefailout);
1041     V3_CHKPT_SAVE(ctx, "SS", info->segments.ss, savefailout);
1042     V3_CHKPT_SAVE(ctx, "LDTR", info->segments.ldtr, savefailout);
1043     V3_CHKPT_SAVE(ctx, "GDTR", info->segments.gdtr, savefailout);
1044     V3_CHKPT_SAVE(ctx, "IDTR", info->segments.idtr, savefailout);
1045     V3_CHKPT_SAVE(ctx, "TR", info->segments.tr, savefailout);
    // Syscall-related MSRs
1048     V3_CHKPT_SAVE(ctx, "STAR", info->msrs.star, savefailout);
1049     V3_CHKPT_SAVE(ctx, "LSTAR", info->msrs.lstar, savefailout);
1050     V3_CHKPT_SAVE(ctx, "SFMASK", info->msrs.sfmask, savefailout);
1051     V3_CHKPT_SAVE(ctx, "KERN_GS_BASE", info->msrs.kern_gs_base, savefailout);
1053     // Some components of guest state captured in the shadow pager
1054     V3_CHKPT_SAVE(ctx, "GUEST_CR3", info->shdw_pg_state.guest_cr3, savefailout);
1055     V3_CHKPT_SAVE(ctx, "GUEST_CR0", info->shdw_pg_state.guest_cr0, savefailout);
1056     V3_CHKPT_SAVE(ctx, "GUEST_EFER", info->shdw_pg_state.guest_efer, savefailout);
1059     if (v3_save_fp_state(ctx,info)) {
1063     v3_chkpt_close_ctx(ctx); ctx=0;
    // Optionally skip the architecture-dependent (VMCB/VMCS) portion
1065     if (opts & V3_CHKPT_OPT_SKIP_ARCHDEP) { 
1069     //Architecture specific code
1070     switch (v3_mach_type) {
1072 	case V3_SVM_REV3_CPU: {
1075 	    snprintf(key_name, 16, "vmcb_data%d", info->vcpu_id);
1077 	    ctx = v3_chkpt_open_ctx(chkpt, key_name);
1080 		PrintError(info->vm_info, info, "Could not open context to store SVM core\n");
1084 	    if (v3_svm_save_core(info, ctx) < 0) {
1085 		PrintError(info->vm_info, info, "VMCB Unable to be written\n");
1089 	    v3_chkpt_close_ctx(ctx); ctx=0;;
1093 	case V3_VMX_EPT_CPU:
1094 	case V3_VMX_EPT_UG_CPU: {
1097 	    snprintf(key_name, 16, "vmcs_data%d", info->vcpu_id);
1099 	    ctx = v3_chkpt_open_ctx(chkpt, key_name);
1102 		PrintError(info->vm_info, info, "Could not open context to store VMX core\n");
1106 	    if (v3_vmx_save_core(info, ctx) == -1) {
1107 		PrintError(info->vm_info, info, "VMX checkpoint failed\n");
1111 	    v3_chkpt_close_ctx(ctx); ctx=0;
1116 	    PrintError(info->vm_info, info, "Invalid CPU Type (%d)\n", v3_mach_type);
    // savefailout path: report failure and close any still-open context
1126     PrintError(info->vm_info, info, "Failed to save core\n");
1127     if (ctx) { v3_chkpt_close_ctx(ctx); }
1133 // GEM5 - Madhav has debug code here for printing instrucions
// Public entry point: checkpoint an entire VM to the given store/url.
// Pauses a running VM behind a barrier, then saves (subject to opts flags)
// memory, devices, the header, and every core, and finally resumes the VM
// if it was running.
1136 int v3_chkpt_save_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
1137     struct v3_chkpt * chkpt = NULL;
1142     chkpt = chkpt_open(vm, store, url, SAVE);
1144     if (chkpt == NULL) {
1145 	PrintError(vm, VCORE_NONE, "Error creating checkpoint store for url %s\n",url);
1149     /* If this guest is running we need to block it while the checkpoint occurs */
1150     if (vm->run_state == VM_RUNNING) {
	// Retry until the barrier is raised on all cores
1151 	while (v3_raise_barrier(vm, NULL) == -1);
1154     if (!(opts & V3_CHKPT_OPT_SKIP_MEM)) {
1155 	if ((ret = save_memory(vm, chkpt)) == -1) {
1156 	    PrintError(vm, VCORE_NONE, "Unable to save memory\n");
1162     if (!(opts & V3_CHKPT_OPT_SKIP_DEVS)) {
1163 	if ((ret = v3_save_vm_devices(vm, chkpt)) == -1) {
1164 	    PrintError(vm, VCORE_NONE, "Unable to save devices\n");
1169     if ((ret = save_header(vm, chkpt)) == -1) {
1170 	PrintError(vm, VCORE_NONE, "Unable to save header\n");
1174     if (!(opts & V3_CHKPT_OPT_SKIP_CORES)) { 
1175 	for (i = 0; i < vm->num_cores; i++){
1176 	    if ((ret = save_core(&(vm->cores[i]), chkpt, opts)) == -1) {
1177 		PrintError(vm, VCORE_NONE, "chkpt of core %d failed\n", i);
1185     /* Resume the guest if it was running */
1186     if (vm->run_state == VM_RUNNING) {
1187 	v3_lower_barrier(vm);
// Public entry point: restore an entire VM from the given store/url.
// Pauses a running VM behind a barrier, then loads (subject to opts flags)
// memory, devices, the header, and every core.  On the visible resume path
// the VM is marked VM_STOPPED before the barrier is lowered, which halts it
// at the next barrier check.  Fixes: "devies" -> "devices" typo in the
// device-load error message.
1196 int v3_chkpt_load_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
1197     struct v3_chkpt * chkpt = NULL;
1201     chkpt = chkpt_open(vm, store, url, LOAD);
1203     if (chkpt == NULL) {
1204 	PrintError(vm, VCORE_NONE, "Error creating checkpoint store\n");
1208     /* If this guest is running we need to block it while the checkpoint occurs */
1209     if (vm->run_state == VM_RUNNING) {
	// Retry until the barrier is raised on all cores
1210 	while (v3_raise_barrier(vm, NULL) == -1);
1213     if (!(opts & V3_CHKPT_OPT_SKIP_MEM)) {
1214 	if ((ret = load_memory(vm, chkpt)) == -1) {
1215 	    PrintError(vm, VCORE_NONE, "Unable to load memory\n");
1220     if (!(opts & V3_CHKPT_OPT_SKIP_DEVS)) {
1221 	if ((ret = v3_load_vm_devices(vm, chkpt)) == -1) {
1222 	    PrintError(vm, VCORE_NONE, "Unable to load devices\n");
1228     if ((ret = load_header(vm, chkpt)) == -1) {
1229 	PrintError(vm, VCORE_NONE, "Unable to load header\n");
1234     if (!(opts & V3_CHKPT_OPT_SKIP_CORES)) {
1235 	for (i = 0; i < vm->num_cores; i++) {
1236 	    if ((ret = load_core(&(vm->cores[i]), chkpt, opts)) == -1) {
1237 		PrintError(vm, VCORE_NONE, "Error loading core state (core=%d)\n", i);
1245     /* Resume the guest if it was running and we didn't just trash the state*/
1246     if (vm->run_state == VM_RUNNING) {
1249 	    vm->run_state = VM_STOPPED;
1252 	/* We check the run state of the VM after every barrier 
1253 	   So this will immediately halt the VM 
1255 	v3_lower_barrier(vm);
1265 #ifdef V3_CONFIG_LIVE_MIGRATION
1267 #define MOD_THRESHOLD 200 // pages below which we declare victory
1268 #define ITER_THRESHOLD 32 // iters below which we declare victory
1272 int v3_chkpt_send_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
1273 struct v3_chkpt * chkpt = NULL;
1276 bool last_modpage_iteration=false;
1277 struct v3_bitmap modified_pages_to_send;
1278 uint64_t start_time;
1280 int num_mod_pages=0;
1281 struct mem_migration_state *mm_state;
1284 // Cores must all be in the same mode
1285 // or we must be skipping mmeory
1286 if (!(opts & V3_CHKPT_OPT_SKIP_MEM)) {
1287 v3_paging_mode_t mode = vm->cores[0].shdw_pg_mode;
1288 for (i=1;i<vm->num_cores;i++) {
1289 if (vm->cores[i].shdw_pg_mode != mode) {
1290 PrintError(vm, VCORE_NONE, "Cores having different paging modes (nested and shadow) are not supported\n");
1297 chkpt = chkpt_open(vm, store, url, SAVE);
1299 if (chkpt == NULL) {
1300 PrintError(vm, VCORE_NONE, "Error creating checkpoint store\n");
1305 if (opts & V3_CHKPT_OPT_SKIP_MEM) {
1309 // In a send, the memory is copied incrementally first,
1310 // followed by the remainder of the state
1312 if (v3_bitmap_init(&modified_pages_to_send,
1313 vm->mem_size>>12 // number of pages in main region
1315 PrintError(vm, VCORE_NONE, "Could not intialize bitmap.\n");
1319 // 0. Initialize bitmap to all 1s
1320 for (i=0; i < modified_pages_to_send.num_bits; i++) {
1321 v3_bitmap_set(&modified_pages_to_send,i);
1325 while (!last_modpage_iteration) {
1326 PrintDebug(vm, VCORE_NONE, "Modified memory page iteration %d\n",i++);
1328 start_time = v3_get_host_time(&(vm->cores[0].time_state));
1330 // We will pause the VM for a short while
1331 // so that we can collect the set of changed pages
1332 if (v3_pause_vm(vm) == -1) {
1333 PrintError(vm, VCORE_NONE, "Could not pause VM\n");
1339 // special case, we already have the pages to send (all of them)
1340 // they are already in modified_pages_to_send
1342 // normally, we are in the middle of a round
1343 // We need to copy from the current tracking bitmap
1344 // to our send bitmap
1345 v3_bitmap_copy(&modified_pages_to_send,&(mm_state->modified_pages));
1346 // and now we need to remove our tracking
1347 stop_page_tracking(mm_state);
1350 // are we done? (note that we are still paused)
1351 num_mod_pages = v3_bitmap_count(&modified_pages_to_send);
1352 if (num_mod_pages<MOD_THRESHOLD || iter>ITER_THRESHOLD) {
1353 // we are done, so we will not restart page tracking
1354 // the vm is paused, and so we should be able
1355 // to just send the data
1356 PrintDebug(vm, VCORE_NONE, "Last modified memory page iteration.\n");
1357 last_modpage_iteration = true;
1359 // we are not done, so we will restart page tracking
1360 // to prepare for a second round of pages
1361 // we will resume the VM as this happens
1362 if (!(mm_state=start_page_tracking(vm))) {
1363 PrintError(vm, VCORE_NONE, "Error enabling page tracking.\n");
1367 if (v3_continue_vm(vm) == -1) {
1368 PrintError(vm, VCORE_NONE, "Error resuming the VM\n");
1369 stop_page_tracking(mm_state);
1374 stop_time = v3_get_host_time(&(vm->cores[0].time_state));
1375 PrintDebug(vm, VCORE_NONE, "num_mod_pages=%d\ndowntime=%llu\n",num_mod_pages,stop_time-start_time);
1379 // At this point, we are either paused and about to copy
1380 // the last chunk, or we are running, and will copy the last
1381 // round in parallel with current execution
1382 if (num_mod_pages>0) {
1383 if (save_inc_memory(vm, &modified_pages_to_send, chkpt) == -1) {
1384 PrintError(vm, VCORE_NONE, "Error sending incremental memory.\n");
1388 } // we don't want to copy an empty bitmap here
1393 if (v3_bitmap_reset(&modified_pages_to_send) == -1) {
1394 PrintError(vm, VCORE_NONE, "Error reseting bitmap.\n");
1399 // send bitmap of 0s to signal end of modpages
1400 if (save_inc_memory(vm, &modified_pages_to_send, chkpt) == -1) {
1401 PrintError(vm, VCORE_NONE, "Error sending incremental memory.\n");
1407 // save the non-memory state
1408 if (!(opts & V3_CHKPT_OPT_SKIP_DEVS)) {
1409 if ((ret = v3_save_vm_devices(vm, chkpt)) == -1) {
1410 PrintError(vm, VCORE_NONE, "Unable to save devices\n");
1415 if ((ret = save_header(vm, chkpt)) == -1) {
1416 PrintError(vm, VCORE_NONE, "Unable to save header\n");
1420 if (!(opts & V3_CHKPT_OPT_SKIP_CORES)) {
1421 for (i = 0; i < vm->num_cores; i++){
1422 if ((ret = save_core(&(vm->cores[i]), chkpt, opts)) == -1) {
1423 PrintError(vm, VCORE_NONE, "chkpt of core %d failed\n", i);
1429 if (!(opts & V3_CHKPT_OPT_SKIP_MEM)) {
1430 stop_time = v3_get_host_time(&(vm->cores[0].time_state));
1431 PrintDebug(vm, VCORE_NONE, "num_mod_pages=%d\ndowntime=%llu\n",num_mod_pages,stop_time-start_time);
1432 PrintDebug(vm, VCORE_NONE, "Done sending VM!\n");
1434 v3_bitmap_deinit(&modified_pages_to_send);
1443 int v3_chkpt_receive_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
1444 struct v3_chkpt * chkpt = NULL;
1447 struct v3_bitmap mod_pgs;
1449 // Currently will work only for shadow paging
1450 for (i=0;i<vm->num_cores;i++) {
1451 if (vm->cores[i].shdw_pg_mode!=SHADOW_PAGING && !(opts & V3_CHKPT_OPT_SKIP_MEM)) {
1452 PrintError(vm, VCORE_NONE, "Cannot currently handle nested paging\n");
1457 chkpt = chkpt_open(vm, store, url, LOAD);
1459 if (chkpt == NULL) {
1460 PrintError(vm, VCORE_NONE, "Error creating checkpoint store\n");
1466 if (opts & V3_CHKPT_OPT_SKIP_MEM) {
1470 if (v3_bitmap_init(&mod_pgs,vm->mem_size>>12) == -1) {
1472 PrintError(vm, VCORE_NONE, "Could not intialize bitmap.\n");
1476 /* If this guest is running we need to block it while the checkpoint occurs */
1477 if (vm->run_state == VM_RUNNING) {
1478 while (v3_raise_barrier(vm, NULL) == -1);
1483 // 1. Receive copy of bitmap
1485 PrintDebug(vm, VCORE_NONE, "Memory page iteration %d\n",i++);
1486 int retval = load_inc_memory(vm, &mod_pgs, chkpt);
1488 // end of receiving memory pages
1490 } else if (retval == -1) {
1491 PrintError(vm, VCORE_NONE, "Error receiving incremental memory.\n");
1499 if (!(opts & V3_CHKPT_OPT_SKIP_DEVS)) {
1500 if ((ret = v3_load_vm_devices(vm, chkpt)) == -1) {
1501 PrintError(vm, VCORE_NONE, "Unable to load devices\n");
1507 if ((ret = load_header(vm, chkpt)) == -1) {
1508 PrintError(vm, VCORE_NONE, "Unable to load header\n");
1514 if (!(opts & V3_CHKPT_OPT_SKIP_CORES)) {
1515 for (i = 0; i < vm->num_cores; i++) {
1516 if ((ret = load_core(&(vm->cores[i]), chkpt, opts)) == -1) {
1517 PrintError(vm, VCORE_NONE, "Error loading core state (core=%d)\n", i);
1525 PrintError(vm, VCORE_NONE, "Unable to receive VM\n");
1527 PrintDebug(vm, VCORE_NONE, "Done receving the VM\n");
1531 /* Resume the guest if it was running and we didn't just trash the state*/
1532 if (vm->run_state == VM_RUNNING) {
1534 PrintError(vm, VCORE_NONE, "VM was previously running. It is now borked. Pausing it. \n");
1535 vm->run_state = VM_STOPPED;
1538 /* We check the run state of the VM after every barrier
1539 So this will immediately halt the VM
1541 v3_lower_barrier(vm);
1545 if (!(opts & V3_CHKPT_OPT_SKIP_MEM)) {
1546 v3_bitmap_deinit(&mod_pgs);