 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2011, Madhav Suresh <madhav@u.northwestern.edu>
 * Copyright (c) 2011, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Madhav Suresh <madhav@u.northwestern.edu>
 *         Arefin Huq <fig@arefin.net>
 *         Peter Dinda <pdinda@northwestern.edu> (store interface changes)
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm.h>
#include <palacios/vmm_sprintf.h>
#include <palacios/vm_guest.h>
#include <palacios/svm.h>
#include <palacios/vmx.h>
#include <palacios/vmm_checkpoint.h>
#include <palacios/vmm_hashtable.h>
#include <palacios/vmm_direct_paging.h>
#include <palacios/vmm_debug.h>

#include <palacios/vmm_dev_mgr.h>

#ifdef V3_CONFIG_LIVE_MIGRATION
#include <palacios/vmm_time.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_shadow_paging.h>
#endif

#ifndef V3_CONFIG_DEBUG_CHECKPOINT
#define PrintDebug(fmt, args...)
#endif

static struct hashtable * store_table = NULL;

typedef enum {SAVE, LOAD} chkpt_mode_t;

struct chkpt_interface {

    // Opening a checkpoint should return a pointer to the internal representation
    // of the checkpoint in the store. This will be passed back
    // as "store_data". Return NULL if the checkpoint cannot be opened.
    void * (*open_chkpt)(char * url, chkpt_mode_t mode);
    // Closing the checkpoint should return -1 on failure, 0 on success
    int (*close_chkpt)(void * store_data);

    // Opening a context on the checkpoint with a given name should return
    // a pointer to an internal representation of the context. This pointer
    // is then passed back as "ctx".
    // We will open only a single context at a time.
    void * (*open_ctx)(void * store_data, char * name);
    // Closing the context should return -1 on failure, 0 on success
    int (*close_ctx)(void * store_data, void * ctx);

    // Save and load operate on a tagged data buffer. These are
    // "all or nothing" writes and reads.
    // Return -1 on failure, 0 on success.
    int (*save)(void * store_data, void * ctx, char * tag, uint64_t len, void * buf);
    int (*load)(void * store_data, void * ctx, char * tag, uint64_t len, void * buf);
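
    // Illustrative sketch (not part of the original file): a backing store
    // implements the operations above and exposes them through a descriptor.
    // The function-pointer field names match the interface above; the store
    // name and function names here are assumptions for illustration only, and
    // the real stores are defined in vmm_chkpt_stores.h.
    //
    //   static struct chkpt_interface example_store = {
    //       .name        = "example",
    //       .open_chkpt  = example_open_chkpt,
    //       .close_chkpt = example_close_chkpt,
    //       .open_ctx    = example_open_ctx,
    //       .close_ctx   = example_close_ctx,
    //       .save        = example_save,
    //       .load        = example_load,
    //   };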

    struct v3_vm_info * vm;

    struct v3_chkpt_ctx * current_ctx;

    struct chkpt_interface * interface;

static uint_t store_hash_fn(addr_t key) {
    char * name = (char *)key;
    return v3_hash_buffer((uint8_t *)name, strlen(name));
}

static int store_eq_fn(addr_t key1, addr_t key2) {
    char * name1 = (char *)key1;
    char * name2 = (char *)key2;

    return (strcmp(name1, name2) == 0);
}

#include "vmm_chkpt_stores.h"
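
// The registration loop below walks a linker-generated array: each backing
// store places a pointer to its chkpt_interface in the "_v3_chkpt_stores"
// section, and the linker supplies the __start__/__stop__ symbols that bound
// it.  A typical registration is sketched here; the actual macro lives in
// vmm_chkpt_stores.h, so treat the attribute form below as an assumption.
//
//   static struct chkpt_interface * _v3_example_store
//       __attribute__((used, section("_v3_chkpt_stores"))) = &example_store;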

int V3_init_checkpoint() {
    extern struct chkpt_interface * __start__v3_chkpt_stores[];
    extern struct chkpt_interface * __stop__v3_chkpt_stores[];
    struct chkpt_interface ** tmp_store = __start__v3_chkpt_stores;

    store_table = v3_create_htable(0, store_hash_fn, store_eq_fn);

    while (tmp_store != __stop__v3_chkpt_stores) {
        V3_Print(VM_NONE, VCORE_NONE, "Registering Checkpoint Backing Store (%s)\n", (*tmp_store)->name);

        if (v3_htable_search(store_table, (addr_t)((*tmp_store)->name))) {
            PrintError(VM_NONE, VCORE_NONE, "Multiple instances of Checkpoint backing Store (%s)\n", (*tmp_store)->name);

        if (v3_htable_insert(store_table, (addr_t)((*tmp_store)->name), (addr_t)(*tmp_store)) == 0) {
            PrintError(VM_NONE, VCORE_NONE, "Could not register Checkpoint backing store (%s)\n", (*tmp_store)->name);

        tmp_store = &(__start__v3_chkpt_stores[++i]);

int V3_deinit_checkpoint() {
    v3_free_htable(store_table, 0, 0);

static char svm_chkpt_header[] = "v3vee palacios checkpoint version: x.x, SVM x.x";
static char vmx_chkpt_header[] = "v3vee palacios checkpoint version: x.x, VMX x.x";

static int chkpt_close(struct v3_chkpt * chkpt) {

        rc = chkpt->interface->close_chkpt(chkpt->store_data);

            PrintError(VM_NONE, VCORE_NONE, "Internal store failed to close valid checkpoint\n");

        PrintError(VM_NONE, VCORE_NONE, "Attempt to close null checkpoint\n");

static struct v3_chkpt * chkpt_open(struct v3_vm_info * vm, char * store, char * url, chkpt_mode_t mode) {
    struct chkpt_interface * iface = NULL;
    struct v3_chkpt * chkpt = NULL;
    void * store_data = NULL;

    iface = (void *)v3_htable_search(store_table, (addr_t)store);

        V3_Print(vm, VCORE_NONE, "Error: Could not locate Checkpoint interface for store (%s)\n", store);

    store_data = iface->open_chkpt(url, mode);

    if (store_data == NULL) {
        PrintError(vm, VCORE_NONE, "Could not open url (%s) for backing store (%s)\n", url, store);

    chkpt = V3_Malloc(sizeof(struct v3_chkpt));

        PrintError(vm, VCORE_NONE, "Could not allocate checkpoint state, closing checkpoint\n");
        iface->close_chkpt(store_data);

    memset(chkpt, 0, sizeof(struct v3_chkpt));

    chkpt->interface = iface;
    chkpt->store_data = store_data;
    chkpt->current_ctx = NULL;

struct v3_chkpt_ctx * v3_chkpt_open_ctx(struct v3_chkpt * chkpt, char * name) {
    struct v3_chkpt_ctx * ctx;

    if (chkpt->current_ctx) {
        PrintError(VM_NONE, VCORE_NONE, "Attempt to open context %s before old context has been closed\n", name);

    ctx = V3_Malloc(sizeof(struct v3_chkpt_ctx));

        PrintError(VM_NONE, VCORE_NONE, "Unable to allocate context\n");

    memset(ctx, 0, sizeof(struct v3_chkpt_ctx));

    ctx->store_ctx = chkpt->interface->open_ctx(chkpt->store_data, name);

    if (!(ctx->store_ctx)) {
        PrintError(VM_NONE, VCORE_NONE, "Underlying store failed to open context %s\n", name);

    chkpt->current_ctx = ctx;

int v3_chkpt_close_ctx(struct v3_chkpt_ctx * ctx) {
    struct v3_chkpt * chkpt = ctx->chkpt;

    if (chkpt->current_ctx != ctx) {
        PrintError(VM_NONE, VCORE_NONE, "Attempt to close a context that is not the current context on the store\n");

    ret = chkpt->interface->close_ctx(chkpt->store_data, ctx->store_ctx);

        PrintError(VM_NONE, VCORE_NONE, "Failed to close context on store, closing device-independent context anyway - bad\n");

    chkpt->current_ctx = NULL;

int v3_chkpt_save(struct v3_chkpt_ctx * ctx, char * tag, uint64_t len, void * buf) {
    struct v3_chkpt * chkpt;

        PrintError(VM_NONE, VCORE_NONE, "Attempt to save tag %s on null context\n", tag);

    if (chkpt->current_ctx != ctx) {
        PrintError(VM_NONE, VCORE_NONE, "Attempt to save on context that is not the current context for the store\n");

    rc = chkpt->interface->save(chkpt->store_data, ctx->store_ctx, tag, len, buf);

        PrintError(VM_NONE, VCORE_NONE, "Underlying store failed to save tag %s on valid context\n", tag);

int v3_chkpt_load(struct v3_chkpt_ctx * ctx, char * tag, uint64_t len, void * buf) {
    struct v3_chkpt * chkpt;

        PrintError(VM_NONE, VCORE_NONE, "Attempt to load tag %s from null context\n", tag);

    if (chkpt->current_ctx != ctx) {
        PrintError(VM_NONE, VCORE_NONE, "Attempt to load from context that is not the current context for the store\n");

    rc = chkpt->interface->load(chkpt->store_data, ctx->store_ctx, tag, len, buf);

        PrintError(VM_NONE, VCORE_NONE, "Underlying store failed to load tag %s from valid context\n", tag);
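
// Most fixed-size fields below are saved and restored with the V3_CHKPT_SAVE
// and V3_CHKPT_LOAD convenience macros from vmm_checkpoint.h rather than by
// calling v3_chkpt_save()/v3_chkpt_load() directly.  Judging from the call
// sites in this file (an assumption, not a restatement of the header), they
// supply sizeof() and the address of the named variable, and the form that
// takes a label jumps to it on failure.  For example,
//
//   V3_CHKPT_LOAD(ctx, "RIP", info->rip, loadfailout);
//
// behaves roughly like
//
//   if (v3_chkpt_load(ctx, "RIP", sizeof(info->rip), &(info->rip)) != 0) {
//       goto loadfailout;
//   }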

static int load_memory(struct v3_vm_info * vm, struct v3_chkpt * chkpt) {

    void * guest_mem_base = NULL;

    uint64_t saved_mem_block_size;
    uint32_t saved_num_base_regions;

    extern uint64_t v3_mem_block_size;

    ctx = v3_chkpt_open_ctx(chkpt, "memory_img");

        PrintError(vm, VCORE_NONE, "Unable to open context for memory load\n");

    if (V3_CHKPT_LOAD(ctx, "region_size", saved_mem_block_size)) {
        PrintError(vm, VCORE_NONE, "Unable to load memory region size\n");

    if (V3_CHKPT_LOAD(ctx, "num_regions", saved_num_base_regions)) {
        PrintError(vm, VCORE_NONE, "Unable to load number of regions\n");

    if (saved_mem_block_size != v3_mem_block_size) {
        PrintError(vm, VCORE_NONE, "Unable to load as memory block size differs\n");
    } // support will eventually be added for this

    if (saved_num_base_regions != vm->mem_map.num_base_regions) {
        PrintError(vm, VCORE_NONE, "Unable to load as number of base regions differs\n");
    } // support will eventually be added for this

    for (i = 0; i < vm->mem_map.num_base_regions; i++) {
        guest_mem_base = V3_VAddr((void *)vm->mem_map.base_regions[i].host_addr);
        sprintf(buf, "memory_img%d", i);
        if (v3_chkpt_load(ctx, buf, v3_mem_block_size, guest_mem_base)) {
            PrintError(vm, VCORE_NONE, "Unable to load all of memory (region %d) (requested=%llu bytes, result=%llu bytes)\n", i, (uint64_t)(vm->mem_size), ret);
            v3_chkpt_close_ctx(ctx);

    v3_chkpt_close_ctx(ctx);

static int save_memory(struct v3_vm_info * vm, struct v3_chkpt * chkpt) {
    void * guest_mem_base = NULL;

    char buf[128]; // region name

    extern uint64_t v3_mem_block_size;

    ctx = v3_chkpt_open_ctx(chkpt, "memory_img");

        PrintError(vm, VCORE_NONE, "Unable to open context to save memory\n");

    if (V3_CHKPT_SAVE(ctx, "region_size", v3_mem_block_size)) {
        PrintError(vm, VCORE_NONE, "Unable to save memory region size\n");

    if (V3_CHKPT_SAVE(ctx, "num_regions", vm->mem_map.num_base_regions)) {
        PrintError(vm, VCORE_NONE, "Unable to save number of regions\n");

    for (i = 0; i < vm->mem_map.num_base_regions; i++) {
        guest_mem_base = V3_VAddr((void *)vm->mem_map.base_regions[i].host_addr);
        sprintf(buf, "memory_img%d", i);
        if (v3_chkpt_save(ctx, buf, v3_mem_block_size, guest_mem_base)) {
            PrintError(vm, VCORE_NONE, "Unable to save all of memory (region %d) (requested=%llu, received=%llu)\n", i, (uint64_t)(vm->mem_size), ret);
            v3_chkpt_close_ctx(ctx);

    v3_chkpt_close_ctx(ctx);
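
// Layout note (derived from the two functions above): the memory image is a
// single "memory_img" context containing the tags "region_size"
// (v3_mem_block_size), "num_regions", and one "memory_img%d" blob of
// region_size bytes per base region, written in region order.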

#ifdef V3_CONFIG_LIVE_MIGRATION

struct mem_migration_state {
    struct v3_vm_info * vm;
    struct v3_bitmap    modified_pages;

static int shadow_paging_callback(struct guest_info * core,
                                  struct v3_shdw_pg_event * event,
    struct mem_migration_state * m = (struct mem_migration_state *)priv_data;

    if (event->event_type == SHADOW_PAGEFAULT &&
        event->event_order == SHADOW_PREIMPL &&
        event->error_code.write) { // Note, assumes VTLB behavior where we will see the write even if preceded by a read

        if (!v3_gva_to_gpa(core, event->gva, &gpa)) {
            // write to this page
            v3_bitmap_set(&(m->modified_pages), gpa >> 12);
            // no worries, this isn't physical memory

    // we don't care about other events

static int nested_paging_callback(struct guest_info * core,
                                  struct v3_nested_pg_event * event,
    struct mem_migration_state * m = (struct mem_migration_state *)priv_data;

    if (event->event_type == NESTED_PAGEFAULT &&
        event->event_order == NESTED_PREIMPL &&
        event->error_code.write) { // Assumes we will see a write after reads
        if (event->gpa < core->vm_info->mem_size) {
            v3_bitmap_set(&(m->modified_pages), (event->gpa) >> 12);
            // no worries, this isn't physical memory

    // we don't care about other events

static struct mem_migration_state * start_page_tracking(struct v3_vm_info * vm)
{
    struct mem_migration_state * m;

    m = (struct mem_migration_state *)V3_Malloc(sizeof(struct mem_migration_state));

        PrintError(vm, VCORE_NONE, "Cannot allocate\n");

    if (v3_bitmap_init(&(m->modified_pages), vm->mem_size >> 12) == -1) {
        PrintError(vm, VCORE_NONE, "Failed to initialize modified_pages bit vector\n");

    // We assume that the migrator has already verified that all cores are
    // using the identical model (shadow or nested).
    // This must not change over the execution of the migration.

    if (vm->cores[0].shdw_pg_mode == SHADOW_PAGING) {
        v3_register_shadow_paging_event_callback(vm, shadow_paging_callback, m);

        for (i = 0; i < vm->num_cores; i++) {
            v3_invalidate_shadow_pts(&(vm->cores[i]));

    } else if (vm->cores[0].shdw_pg_mode == NESTED_PAGING) {
        //v3_register_nested_paging_event_callback(vm,nested_paging_callback,m);

        for (i = 0; i < vm->num_cores; i++) {
            //v3_invalidate_nested_addr_range(&(vm->cores[i]),0,vm->mem_size-1);

        PrintError(vm, VCORE_NONE, "Unsupported paging mode\n");
        v3_bitmap_deinit(&(m->modified_pages));

    // and now we should get callbacks as writes happen

static void stop_page_tracking(struct mem_migration_state * m)
{
    if (m->vm->cores[0].shdw_pg_mode == SHADOW_PAGING) {
        v3_unregister_shadow_paging_event_callback(m->vm, shadow_paging_callback, m);

        //v3_unregister_nested_paging_event_callback(m->vm,nested_paging_callback,m);

    v3_bitmap_deinit(&(m->modified_pages));
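
// Intended use of the two helpers above (a sketch of the cycle that
// v3_chkpt_send_vm() drives below; "to_send" is a caller-owned bitmap named
// here only for illustration):
//
//   mm_state = start_page_tracking(vm);     // dirty bits accumulate while the VM runs
//   /* ... let the guest execute for a while ... */
//   v3_pause_vm(vm);                        // quiesce so the bitmap stops changing
//   v3_bitmap_copy(&to_send, &(mm_state->modified_pages));
//   stop_page_tracking(mm_state);           // unregisters the callback and frees the bitmap
//   /* ... send the pages named in to_send, or start another round ... */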

// zero: done with this round
static int save_inc_memory(struct v3_vm_info * vm,
                           struct v3_bitmap * mod_pgs_to_send,
                           struct v3_chkpt * chkpt) {
    int page_size_bytes = 1 << 12; // assuming 4k pages right now

    int bitmap_num_bytes = (mod_pgs_to_send->num_bits / 8)
        + ((mod_pgs_to_send->num_bits % 8) > 0);

    PrintDebug(vm, VCORE_NONE, "Saving incremental memory.\n");

    ctx = v3_chkpt_open_ctx(chkpt, "memory_bitmap_bits");

        PrintError(vm, VCORE_NONE, "Cannot open context for dirty memory bitmap\n");

    if (v3_chkpt_save(ctx,
                      "memory_bitmap_bits",
                      mod_pgs_to_send->bits)) {
        PrintError(vm, VCORE_NONE, "Unable to write all of the dirty memory bitmap\n");
        v3_chkpt_close_ctx(ctx);

    v3_chkpt_close_ctx(ctx);

    PrintDebug(vm, VCORE_NONE, "Sent bitmap bits.\n");

    // Dirty memory pages are sent in bitmap order
    for (i = 0; i < mod_pgs_to_send->num_bits; i++) {
        if (v3_bitmap_check(mod_pgs_to_send, i)) {
            struct v3_mem_region * region = v3_get_base_region(vm, page_size_bytes * i);

                PrintError(vm, VCORE_NONE, "Failed to find base region for page %d\n", i);

            // PrintDebug(vm, VCORE_NONE, "Sending memory page %d.\n",i);
            ctx = v3_chkpt_open_ctx(chkpt, "memory_page");

                PrintError(vm, VCORE_NONE, "Unable to open context to send memory page\n");

            if (v3_chkpt_save(ctx,
                              (void *)(region->host_addr + page_size_bytes * i - region->guest_start))) {
                PrintError(vm, VCORE_NONE, "Unable to send a memory page\n");
                v3_chkpt_close_ctx(ctx);

            v3_chkpt_close_ctx(ctx);
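
// Wire format of one incremental round, as written above and parsed by
// load_inc_memory() below: a "memory_bitmap_bits" context holding the raw
// bitmap bytes, followed by one "memory_page" context (one page of data) for
// each set bit, in ascending page-frame order.  A round whose bitmap is all
// zeros marks the end of the page stream.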

// zero: ok, but not done
// positive: ok, and also done
static int load_inc_memory(struct v3_vm_info * vm,
                           struct v3_bitmap * mod_pgs,
                           struct v3_chkpt * chkpt) {
    int page_size_bytes = 1 << 12; // assuming 4k pages right now

    bool empty_bitmap = true;
    int bitmap_num_bytes = (mod_pgs->num_bits / 8)
        + ((mod_pgs->num_bits % 8) > 0);

    ctx = v3_chkpt_open_ctx(chkpt, "memory_bitmap_bits");

        PrintError(vm, VCORE_NONE, "Cannot open context to receive memory bitmap\n");

    if (v3_chkpt_load(ctx,
                      "memory_bitmap_bits",
        PrintError(vm, VCORE_NONE, "Did not receive all of memory bitmap\n");
        v3_chkpt_close_ctx(ctx);

    v3_chkpt_close_ctx(ctx);

    // Receive also follows bitmap order
    for (i = 0; i < mod_pgs->num_bits; i++) {
        if (v3_bitmap_check(mod_pgs, i)) {
            struct v3_mem_region * region = v3_get_base_region(vm, page_size_bytes * i);

                PrintError(vm, VCORE_NONE, "Failed to find base region for page %d\n", i);

            //PrintDebug(vm, VCORE_NONE, "Loading page %d\n", i);
            empty_bitmap = false;
            ctx = v3_chkpt_open_ctx(chkpt, "memory_page");

                PrintError(vm, VCORE_NONE, "Cannot open context to receive memory page\n");

            if (v3_chkpt_load(ctx,
                              (void *)(region->host_addr + page_size_bytes * i - region->guest_start))) {
                PrintError(vm, VCORE_NONE, "Did not receive all of memory page\n");
                v3_chkpt_close_ctx(ctx);

            v3_chkpt_close_ctx(ctx);

    // signal end of receiving pages
    PrintDebug(vm, VCORE_NONE, "Finished receiving pages.\n");

int save_header(struct v3_vm_info * vm, struct v3_chkpt * chkpt) {
    extern v3_cpu_arch_t v3_mach_type;

    ctx = v3_chkpt_open_ctx(chkpt, "header");

        PrintError(vm, VCORE_NONE, "Cannot open context to save header\n");

    switch (v3_mach_type) {

        case V3_SVM_REV3_CPU: {
            if (v3_chkpt_save(ctx, "header", strlen(svm_chkpt_header), svm_chkpt_header)) {
                PrintError(vm, VCORE_NONE, "Could not save all of SVM header\n");
                v3_chkpt_close_ctx(ctx);

        case V3_VMX_EPT_UG_CPU: {
            if (v3_chkpt_save(ctx, "header", strlen(vmx_chkpt_header), vmx_chkpt_header)) {
                PrintError(vm, VCORE_NONE, "Could not save all of VMX header\n");
                v3_chkpt_close_ctx(ctx);

            PrintError(vm, VCORE_NONE, "checkpoint not supported on this architecture\n");
            v3_chkpt_close_ctx(ctx);

    v3_chkpt_close_ctx(ctx);

static int load_header(struct v3_vm_info * vm, struct v3_chkpt * chkpt) {
    extern v3_cpu_arch_t v3_mach_type;

    ctx = v3_chkpt_open_ctx(chkpt, "header");

        PrintError(vm, VCORE_NONE, "Cannot open context to load header\n");

    switch (v3_mach_type) {

        case V3_SVM_REV3_CPU: {
            char header[strlen(svm_chkpt_header) + 1];

            if (v3_chkpt_load(ctx, "header", strlen(svm_chkpt_header), header)) {
                PrintError(vm, VCORE_NONE, "Could not load all of SVM header\n");
                v3_chkpt_close_ctx(ctx);

            header[strlen(svm_chkpt_header)] = 0;

        case V3_VMX_EPT_UG_CPU: {
            char header[strlen(vmx_chkpt_header) + 1];

            if (v3_chkpt_load(ctx, "header", strlen(vmx_chkpt_header), header)) {
                PrintError(vm, VCORE_NONE, "Could not load all of VMX header\n");
                v3_chkpt_close_ctx(ctx);

            header[strlen(vmx_chkpt_header)] = 0;

            PrintError(vm, VCORE_NONE, "checkpoint not supported on this architecture\n");
            v3_chkpt_close_ctx(ctx);

    v3_chkpt_close_ctx(ctx);

static int load_core(struct guest_info * info, struct v3_chkpt * chkpt, v3_chkpt_options_t opts) {
    extern v3_cpu_arch_t v3_mach_type;

    PrintDebug(info->vm_info, info, "Loading core\n");

    memset(key_name, 0, 16);

    snprintf(key_name, 16, "guest_info%d", info->vcpu_id);

    ctx = v3_chkpt_open_ctx(chkpt, key_name);

        PrintError(info->vm_info, info, "Could not open context to load core\n");

    // Run state is needed to determine when AP cores need
    // to be immediately run after resume
    V3_CHKPT_LOAD(ctx, "run_state", info->core_run_state, loadfailout);
    V3_CHKPT_LOAD(ctx, "cpu_mode", info->cpu_mode, loadfailout);
    V3_CHKPT_LOAD(ctx, "mem_mode", info->mem_mode, loadfailout);

    V3_CHKPT_LOAD(ctx, "CPL", info->cpl, loadfailout);

    if (info->cpl != info->segments.ss.dpl) {
        V3_Print(info->vm_info, info, "Strange, CPL=%d but ss.dpl=%d on core save\n", info->cpl, info->segments.ss.dpl);

    V3_CHKPT_LOAD(ctx, "RIP", info->rip, loadfailout);

    V3_CHKPT_LOAD(ctx, "RDI", info->vm_regs.rdi, loadfailout);
    V3_CHKPT_LOAD(ctx, "RSI", info->vm_regs.rsi, loadfailout);
    V3_CHKPT_LOAD(ctx, "RBP", info->vm_regs.rbp, loadfailout);
    V3_CHKPT_LOAD(ctx, "RSP", info->vm_regs.rsp, loadfailout);
    V3_CHKPT_LOAD(ctx, "RBX", info->vm_regs.rbx, loadfailout);
    V3_CHKPT_LOAD(ctx, "RDX", info->vm_regs.rdx, loadfailout);
    V3_CHKPT_LOAD(ctx, "RCX", info->vm_regs.rcx, loadfailout);
    V3_CHKPT_LOAD(ctx, "RAX", info->vm_regs.rax, loadfailout);
    V3_CHKPT_LOAD(ctx, "R8", info->vm_regs.r8, loadfailout);
    V3_CHKPT_LOAD(ctx, "R9", info->vm_regs.r9, loadfailout);
    V3_CHKPT_LOAD(ctx, "R10", info->vm_regs.r10, loadfailout);
    V3_CHKPT_LOAD(ctx, "R11", info->vm_regs.r11, loadfailout);
    V3_CHKPT_LOAD(ctx, "R12", info->vm_regs.r12, loadfailout);
    V3_CHKPT_LOAD(ctx, "R13", info->vm_regs.r13, loadfailout);
    V3_CHKPT_LOAD(ctx, "R14", info->vm_regs.r14, loadfailout);
    V3_CHKPT_LOAD(ctx, "R15", info->vm_regs.r15, loadfailout);

    V3_CHKPT_LOAD(ctx, "CR0", info->ctrl_regs.cr0, loadfailout);
    V3_CHKPT_LOAD(ctx, "CR2", info->ctrl_regs.cr2, loadfailout);
    V3_CHKPT_LOAD(ctx, "CR3", info->ctrl_regs.cr3, loadfailout);
    V3_CHKPT_LOAD(ctx, "CR4", info->ctrl_regs.cr4, loadfailout);
    // There are no CR5, CR6, or CR7
    // CR8 is derived from apic_tpr (CR8 = TPR[7:4])
    tempreg = (info->ctrl_regs.apic_tpr >> 4) & 0xf;
    V3_CHKPT_LOAD(ctx, "CR8", tempreg, loadfailout);
    V3_CHKPT_LOAD(ctx, "APIC_TPR", info->ctrl_regs.apic_tpr, loadfailout);
    V3_CHKPT_LOAD(ctx, "RFLAGS", info->ctrl_regs.rflags, loadfailout);
    V3_CHKPT_LOAD(ctx, "EFER", info->ctrl_regs.efer, loadfailout);

    V3_CHKPT_LOAD(ctx, "DR0", info->dbg_regs.dr0, loadfailout);
    V3_CHKPT_LOAD(ctx, "DR1", info->dbg_regs.dr1, loadfailout);
    V3_CHKPT_LOAD(ctx, "DR2", info->dbg_regs.dr2, loadfailout);
    V3_CHKPT_LOAD(ctx, "DR3", info->dbg_regs.dr3, loadfailout);
    // there is no DR4 or DR5
    V3_CHKPT_LOAD(ctx, "DR6", info->dbg_regs.dr6, loadfailout);
    V3_CHKPT_LOAD(ctx, "DR7", info->dbg_regs.dr7, loadfailout);

    V3_CHKPT_LOAD(ctx, "CS", info->segments.cs, loadfailout);
    V3_CHKPT_LOAD(ctx, "DS", info->segments.ds, loadfailout);
    V3_CHKPT_LOAD(ctx, "ES", info->segments.es, loadfailout);
    V3_CHKPT_LOAD(ctx, "FS", info->segments.fs, loadfailout);
    V3_CHKPT_LOAD(ctx, "GS", info->segments.gs, loadfailout);
    V3_CHKPT_LOAD(ctx, "SS", info->segments.ss, loadfailout);
    V3_CHKPT_LOAD(ctx, "LDTR", info->segments.ldtr, loadfailout);
    V3_CHKPT_LOAD(ctx, "GDTR", info->segments.gdtr, loadfailout);
    V3_CHKPT_LOAD(ctx, "IDTR", info->segments.idtr, loadfailout);
    V3_CHKPT_LOAD(ctx, "TR", info->segments.tr, loadfailout);

    if (info->cpl != info->segments.ss.dpl) {
        V3_Print(info->vm_info, info, "Strange, CPL=%d but ss.dpl=%d on core load\n", info->cpl, info->segments.ss.dpl);

    V3_CHKPT_LOAD(ctx, "STAR", info->msrs.star, loadfailout);
    V3_CHKPT_LOAD(ctx, "LSTAR", info->msrs.lstar, loadfailout);
    V3_CHKPT_LOAD(ctx, "SFMASK", info->msrs.sfmask, loadfailout);
    V3_CHKPT_LOAD(ctx, "KERN_GS_BASE", info->msrs.kern_gs_base, loadfailout);

    // Some components of guest state captured in the shadow pager
    V3_CHKPT_LOAD(ctx, "GUEST_CR3", info->shdw_pg_state.guest_cr3, loadfailout);
    V3_CHKPT_LOAD(ctx, "GUEST_CR0", info->shdw_pg_state.guest_cr0, loadfailout);
    V3_CHKPT_LOAD(ctx, "GUEST_EFER", info->shdw_pg_state.guest_efer, loadfailout);

    if (v3_load_fp_state(ctx, info)) {

    v3_chkpt_close_ctx(ctx); ctx = 0;

    PrintDebug(info->vm_info, info, "Finished reading guest_info information\n");

    info->cpu_mode = v3_get_vm_cpu_mode(info);
    info->mem_mode = v3_get_vm_mem_mode(info);

    if (info->shdw_pg_mode == SHADOW_PAGING) {
        if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
            if (v3_activate_shadow_pt(info) == -1) {
                PrintError(info->vm_info, info, "Failed to activate shadow page tables\n");

            if (v3_activate_passthrough_pt(info) == -1) {
                PrintError(info->vm_info, info, "Failed to activate passthrough page tables\n");

    if (opts & V3_CHKPT_OPT_SKIP_ARCHDEP) {

    switch (v3_mach_type) {

        case V3_SVM_REV3_CPU: {

            snprintf(key_name, 16, "vmcb_data%d", info->vcpu_id);
            ctx = v3_chkpt_open_ctx(chkpt, key_name);

                PrintError(info->vm_info, info, "Could not open context to load SVM core\n");

            if (v3_svm_load_core(info, ctx) < 0) {
                PrintError(info->vm_info, info, "Failed to patch core %d\n", info->vcpu_id);

            v3_chkpt_close_ctx(ctx); ctx = 0;

        case V3_VMX_EPT_UG_CPU: {

            snprintf(key_name, 16, "vmcs_data%d", info->vcpu_id);

            ctx = v3_chkpt_open_ctx(chkpt, key_name);

                PrintError(info->vm_info, info, "Could not open context to load VMX core\n");

            if (v3_vmx_load_core(info, ctx) < 0) {
                PrintError(info->vm_info, info, "VMX checkpoint failed\n");

            v3_chkpt_close_ctx(ctx); ctx = 0;

            PrintError(info->vm_info, info, "Invalid CPU Type (%d)\n", v3_mach_type);

    PrintDebug(info->vm_info, info, "Load of core succeeded\n");

    v3_print_guest_state(info);

    PrintError(info->vm_info, info, "Failed to load core\n");
    if (ctx) { v3_chkpt_close_ctx(ctx); }

// GEM5 - Hypercall for initiating transfer to gem5 (checkpoint)

static int save_core(struct guest_info * info, struct v3_chkpt * chkpt, v3_chkpt_options_t opts) {
    extern v3_cpu_arch_t v3_mach_type;

    PrintDebug(info->vm_info, info, "Saving core\n");

    v3_print_guest_state(info);

    memset(key_name, 0, 16);

    snprintf(key_name, 16, "guest_info%d", info->vcpu_id);

    ctx = v3_chkpt_open_ctx(chkpt, key_name);

        PrintError(info->vm_info, info, "Unable to open context to save core\n");

    V3_CHKPT_SAVE(ctx, "run_state", info->core_run_state, savefailout);
    V3_CHKPT_SAVE(ctx, "cpu_mode", info->cpu_mode, savefailout);
    V3_CHKPT_SAVE(ctx, "mem_mode", info->mem_mode, savefailout);

    V3_CHKPT_SAVE(ctx, "CPL", info->cpl, savefailout);

    V3_CHKPT_SAVE(ctx, "RIP", info->rip, savefailout);

    V3_CHKPT_SAVE(ctx, "RDI", info->vm_regs.rdi, savefailout);
    V3_CHKPT_SAVE(ctx, "RSI", info->vm_regs.rsi, savefailout);
    V3_CHKPT_SAVE(ctx, "RBP", info->vm_regs.rbp, savefailout);
    V3_CHKPT_SAVE(ctx, "RSP", info->vm_regs.rsp, savefailout);
    V3_CHKPT_SAVE(ctx, "RBX", info->vm_regs.rbx, savefailout);
    V3_CHKPT_SAVE(ctx, "RDX", info->vm_regs.rdx, savefailout);
    V3_CHKPT_SAVE(ctx, "RCX", info->vm_regs.rcx, savefailout);
    V3_CHKPT_SAVE(ctx, "RAX", info->vm_regs.rax, savefailout);
    V3_CHKPT_SAVE(ctx, "R8", info->vm_regs.r8, savefailout);
    V3_CHKPT_SAVE(ctx, "R9", info->vm_regs.r9, savefailout);
    V3_CHKPT_SAVE(ctx, "R10", info->vm_regs.r10, savefailout);
    V3_CHKPT_SAVE(ctx, "R11", info->vm_regs.r11, savefailout);
    V3_CHKPT_SAVE(ctx, "R12", info->vm_regs.r12, savefailout);
    V3_CHKPT_SAVE(ctx, "R13", info->vm_regs.r13, savefailout);
    V3_CHKPT_SAVE(ctx, "R14", info->vm_regs.r14, savefailout);
    V3_CHKPT_SAVE(ctx, "R15", info->vm_regs.r15, savefailout);

    // Control registers
    V3_CHKPT_SAVE(ctx, "CR0", info->ctrl_regs.cr0, savefailout);
    V3_CHKPT_SAVE(ctx, "CR2", info->ctrl_regs.cr2, savefailout);
    V3_CHKPT_SAVE(ctx, "CR3", info->ctrl_regs.cr3, savefailout);
    V3_CHKPT_SAVE(ctx, "CR4", info->ctrl_regs.cr4, savefailout);
    // There are no CR5, CR6, or CR7
    // CR8 is derived from apic_tpr (CR8 = TPR[7:4])
    tempreg = (info->ctrl_regs.apic_tpr >> 4) & 0xf;
    V3_CHKPT_SAVE(ctx, "CR8", tempreg, savefailout);
    V3_CHKPT_SAVE(ctx, "APIC_TPR", info->ctrl_regs.apic_tpr, savefailout);
    V3_CHKPT_SAVE(ctx, "RFLAGS", info->ctrl_regs.rflags, savefailout);
    V3_CHKPT_SAVE(ctx, "EFER", info->ctrl_regs.efer, savefailout);

    V3_CHKPT_SAVE(ctx, "DR0", info->dbg_regs.dr0, savefailout);
    V3_CHKPT_SAVE(ctx, "DR1", info->dbg_regs.dr1, savefailout);
    V3_CHKPT_SAVE(ctx, "DR2", info->dbg_regs.dr2, savefailout);
    V3_CHKPT_SAVE(ctx, "DR3", info->dbg_regs.dr3, savefailout);
    // there is no DR4 or DR5
    V3_CHKPT_SAVE(ctx, "DR6", info->dbg_regs.dr6, savefailout);
    V3_CHKPT_SAVE(ctx, "DR7", info->dbg_regs.dr7, savefailout);

    // Segment registers
    V3_CHKPT_SAVE(ctx, "CS", info->segments.cs, savefailout);
    V3_CHKPT_SAVE(ctx, "DS", info->segments.ds, savefailout);
    V3_CHKPT_SAVE(ctx, "ES", info->segments.es, savefailout);
    V3_CHKPT_SAVE(ctx, "FS", info->segments.fs, savefailout);
    V3_CHKPT_SAVE(ctx, "GS", info->segments.gs, savefailout);
    V3_CHKPT_SAVE(ctx, "SS", info->segments.ss, savefailout);
    V3_CHKPT_SAVE(ctx, "LDTR", info->segments.ldtr, savefailout);
    V3_CHKPT_SAVE(ctx, "GDTR", info->segments.gdtr, savefailout);
    V3_CHKPT_SAVE(ctx, "IDTR", info->segments.idtr, savefailout);
    V3_CHKPT_SAVE(ctx, "TR", info->segments.tr, savefailout);

    V3_CHKPT_SAVE(ctx, "STAR", info->msrs.star, savefailout);
    V3_CHKPT_SAVE(ctx, "LSTAR", info->msrs.lstar, savefailout);
    V3_CHKPT_SAVE(ctx, "SFMASK", info->msrs.sfmask, savefailout);
    V3_CHKPT_SAVE(ctx, "KERN_GS_BASE", info->msrs.kern_gs_base, savefailout);

    // Some components of guest state captured in the shadow pager
    V3_CHKPT_SAVE(ctx, "GUEST_CR3", info->shdw_pg_state.guest_cr3, savefailout);
    V3_CHKPT_SAVE(ctx, "GUEST_CR0", info->shdw_pg_state.guest_cr0, savefailout);
    V3_CHKPT_SAVE(ctx, "GUEST_EFER", info->shdw_pg_state.guest_efer, savefailout);

    if (v3_save_fp_state(ctx, info)) {

    v3_chkpt_close_ctx(ctx); ctx = 0;

    if (opts & V3_CHKPT_OPT_SKIP_ARCHDEP) {

    // Architecture-specific code
    switch (v3_mach_type) {

        case V3_SVM_REV3_CPU: {

            snprintf(key_name, 16, "vmcb_data%d", info->vcpu_id);

            ctx = v3_chkpt_open_ctx(chkpt, key_name);

                PrintError(info->vm_info, info, "Could not open context to store SVM core\n");

            if (v3_svm_save_core(info, ctx) < 0) {
                PrintError(info->vm_info, info, "Unable to write VMCB\n");

            v3_chkpt_close_ctx(ctx); ctx = 0;

        case V3_VMX_EPT_CPU:
        case V3_VMX_EPT_UG_CPU: {

            snprintf(key_name, 16, "vmcs_data%d", info->vcpu_id);

            ctx = v3_chkpt_open_ctx(chkpt, key_name);

                PrintError(info->vm_info, info, "Could not open context to store VMX core\n");

            if (v3_vmx_save_core(info, ctx) == -1) {
                PrintError(info->vm_info, info, "VMX checkpoint failed\n");

            v3_chkpt_close_ctx(ctx); ctx = 0;

            PrintError(info->vm_info, info, "Invalid CPU Type (%d)\n", v3_mach_type);

    PrintError(info->vm_info, info, "Failed to save core\n");
    if (ctx) { v3_chkpt_close_ctx(ctx); }

// GEM5 - Madhav has debug code here for printing instructions

int v3_chkpt_save_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
    struct v3_chkpt * chkpt = NULL;

    chkpt = chkpt_open(vm, store, url, SAVE);

    if (chkpt == NULL) {
        PrintError(vm, VCORE_NONE, "Error creating checkpoint store for url %s\n", url);

    /* If this guest is running, we need to block it while the checkpoint occurs */
    if (vm->run_state == VM_RUNNING) {
        while (v3_raise_barrier(vm, NULL) == -1);

    if (!(opts & V3_CHKPT_OPT_SKIP_MEM)) {
        if ((ret = save_memory(vm, chkpt)) == -1) {
            PrintError(vm, VCORE_NONE, "Unable to save memory\n");

    if (!(opts & V3_CHKPT_OPT_SKIP_DEVS)) {
        if ((ret = v3_save_vm_devices(vm, chkpt)) == -1) {
            PrintError(vm, VCORE_NONE, "Unable to save devices\n");

    if ((ret = save_header(vm, chkpt)) == -1) {
        PrintError(vm, VCORE_NONE, "Unable to save header\n");

    if (!(opts & V3_CHKPT_OPT_SKIP_CORES)) {
        for (i = 0; i < vm->num_cores; i++) {
            if ((ret = save_core(&(vm->cores[i]), chkpt, opts)) == -1) {
                PrintError(vm, VCORE_NONE, "chkpt of core %d failed\n", i);

    /* Resume the guest if it was running */
    if (vm->run_state == VM_RUNNING) {
        v3_lower_barrier(vm);
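
// Illustrative calls (the store name and URL are assumptions; valid store
// names are whatever was registered from vmm_chkpt_stores.h at init time):
//
//   // full checkpoint of a running or paused VM
//   v3_chkpt_save_vm(vm, "example_store", "/path/to/checkpoint", 0);
//
//   // later, restore into a VM created from a matching configuration
//   v3_chkpt_load_vm(vm, "example_store", "/path/to/checkpoint", 0);
//
//   // same, but skipping guest memory (caller handles memory separately)
//   v3_chkpt_save_vm(vm, "example_store", "/path/to/checkpoint", V3_CHKPT_OPT_SKIP_MEM);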

int v3_chkpt_load_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
    struct v3_chkpt * chkpt = NULL;

    chkpt = chkpt_open(vm, store, url, LOAD);

    if (chkpt == NULL) {
        PrintError(vm, VCORE_NONE, "Error creating checkpoint store\n");

    /* If this guest is running, we need to block it while the checkpoint occurs */
    if (vm->run_state == VM_RUNNING) {
        while (v3_raise_barrier(vm, NULL) == -1);

    if (!(opts & V3_CHKPT_OPT_SKIP_MEM)) {
        if ((ret = load_memory(vm, chkpt)) == -1) {
            PrintError(vm, VCORE_NONE, "Unable to load memory\n");

    if (!(opts & V3_CHKPT_OPT_SKIP_DEVS)) {
        if ((ret = v3_load_vm_devices(vm, chkpt)) == -1) {
            PrintError(vm, VCORE_NONE, "Unable to load devices\n");

    if ((ret = load_header(vm, chkpt)) == -1) {
        PrintError(vm, VCORE_NONE, "Unable to load header\n");

    if (!(opts & V3_CHKPT_OPT_SKIP_CORES)) {
        for (i = 0; i < vm->num_cores; i++) {
            if ((ret = load_core(&(vm->cores[i]), chkpt, opts)) == -1) {
                PrintError(vm, VCORE_NONE, "Error loading core state (core=%d)\n", i);

    /* Resume the guest if it was running and we didn't just trash the state */
    if (vm->run_state == VM_RUNNING) {

        vm->run_state = VM_STOPPED;

    /* We check the run state of the VM after every barrier,
       so this will immediately halt the VM */
    v3_lower_barrier(vm);

#ifdef V3_CONFIG_LIVE_MIGRATION

#define MOD_THRESHOLD   200  // pages below which we declare victory
#define ITER_THRESHOLD  32   // iterations beyond which we declare victory
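
// End to end, a live migration pairs the two entry points below: the source
// host calls v3_chkpt_send_vm() and the destination calls
// v3_chkpt_receive_vm() against a store that moves data between the two
// machines.  The store name and URL here are purely illustrative assumptions;
// the real choices depend on which stores are built into vmm_chkpt_stores.h
// and on the host-side glue.
//
//   // source host
//   v3_chkpt_send_vm(vm, "example_net_store", "example-dest-host:4455", 0);
//
//   // destination host (VM already created with a matching configuration)
//   v3_chkpt_receive_vm(vm, "example_net_store", "example-dest-host:4455", 0);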

int v3_chkpt_send_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
    struct v3_chkpt * chkpt = NULL;

    bool last_modpage_iteration = false;
    struct v3_bitmap modified_pages_to_send;
    uint64_t start_time = 0;

    int num_mod_pages = 0;
    struct mem_migration_state * mm_state;

    // Cores must all be in the same paging mode,
    // or we must be skipping memory
    if (!(opts & V3_CHKPT_OPT_SKIP_MEM)) {
        v3_paging_mode_t mode = vm->cores[0].shdw_pg_mode;
        for (i = 1; i < vm->num_cores; i++) {
            if (vm->cores[i].shdw_pg_mode != mode) {
                PrintError(vm, VCORE_NONE, "Cores having different paging modes (nested and shadow) are not supported\n");

    chkpt = chkpt_open(vm, store, url, SAVE);

    if (chkpt == NULL) {
        PrintError(vm, VCORE_NONE, "Error creating checkpoint store\n");

    if (opts & V3_CHKPT_OPT_SKIP_MEM) {

    // In a send, the memory is copied incrementally first,
    // followed by the remainder of the state
    if (v3_bitmap_init(&modified_pages_to_send,
                       vm->mem_size >> 12 // number of pages in main region
        PrintError(vm, VCORE_NONE, "Could not initialize bitmap.\n");

    // 0. Initialize bitmap to all 1s
    for (i = 0; i < modified_pages_to_send.num_bits; i++) {
        v3_bitmap_set(&modified_pages_to_send, i);

    while (!last_modpage_iteration) {
        PrintDebug(vm, VCORE_NONE, "Modified memory page iteration %d\n", i++);

        start_time = v3_get_host_time(&(vm->cores[0].time_state));

        // We will pause the VM for a short while
        // so that we can collect the set of changed pages
        if (v3_pause_vm(vm) == -1) {
            PrintError(vm, VCORE_NONE, "Could not pause VM\n");

            // special case: we already have the pages to send (all of them);
            // they are already in modified_pages_to_send

            // normally, we are in the middle of a round;
            // we need to copy from the current tracking bitmap
            // to our send bitmap
            v3_bitmap_copy(&modified_pages_to_send, &(mm_state->modified_pages));
            // and now we need to remove our tracking
            stop_page_tracking(mm_state);

        // are we done? (note that we are still paused)
        num_mod_pages = v3_bitmap_count(&modified_pages_to_send);
        if (num_mod_pages < MOD_THRESHOLD || iter > ITER_THRESHOLD) {
            // we are done, so we will not restart page tracking;
            // the VM is paused, and so we should be able
            // to just send the data
            PrintDebug(vm, VCORE_NONE, "Last modified memory page iteration.\n");
            last_modpage_iteration = true;

            // we are not done, so we will restart page tracking
            // to prepare for a second round of pages;
            // we will resume the VM as this happens
            if (!(mm_state = start_page_tracking(vm))) {
                PrintError(vm, VCORE_NONE, "Error enabling page tracking.\n");

            if (v3_continue_vm(vm) == -1) {
                PrintError(vm, VCORE_NONE, "Error resuming the VM\n");
                stop_page_tracking(mm_state);

        stop_time = v3_get_host_time(&(vm->cores[0].time_state));
        PrintDebug(vm, VCORE_NONE, "num_mod_pages=%d\ndowntime=%llu\n", num_mod_pages, stop_time - start_time);

    // At this point, we are either paused and about to copy
    // the last chunk, or we are running, and will copy the last
    // round in parallel with current execution
    if (num_mod_pages > 0) {
        if (save_inc_memory(vm, &modified_pages_to_send, chkpt) == -1) {
            PrintError(vm, VCORE_NONE, "Error sending incremental memory.\n");

    } // we don't want to copy an empty bitmap here

    if (v3_bitmap_reset(&modified_pages_to_send) == -1) {
        PrintError(vm, VCORE_NONE, "Error resetting bitmap.\n");

    // send bitmap of 0s to signal end of modpages
    if (save_inc_memory(vm, &modified_pages_to_send, chkpt) == -1) {
        PrintError(vm, VCORE_NONE, "Error sending incremental memory.\n");

    // save the non-memory state
    if (!(opts & V3_CHKPT_OPT_SKIP_DEVS)) {
        if ((ret = v3_save_vm_devices(vm, chkpt)) == -1) {
            PrintError(vm, VCORE_NONE, "Unable to save devices\n");

    if ((ret = save_header(vm, chkpt)) == -1) {
        PrintError(vm, VCORE_NONE, "Unable to save header\n");

    if (!(opts & V3_CHKPT_OPT_SKIP_CORES)) {
        for (i = 0; i < vm->num_cores; i++) {
            if ((ret = save_core(&(vm->cores[i]), chkpt, opts)) == -1) {
                PrintError(vm, VCORE_NONE, "chkpt of core %d failed\n", i);

    if (!(opts & V3_CHKPT_OPT_SKIP_MEM)) {
        stop_time = v3_get_host_time(&(vm->cores[0].time_state));
        PrintDebug(vm, VCORE_NONE, "num_mod_pages=%d\ndowntime=%llu\n", num_mod_pages, stop_time - start_time);
        PrintDebug(vm, VCORE_NONE, "Done sending VM!\n");

        v3_bitmap_deinit(&modified_pages_to_send);

int v3_chkpt_receive_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
    struct v3_chkpt * chkpt = NULL;

    struct v3_bitmap mod_pgs;

    // Currently this will work only for shadow paging
    for (i = 0; i < vm->num_cores; i++) {
        if (vm->cores[i].shdw_pg_mode != SHADOW_PAGING && !(opts & V3_CHKPT_OPT_SKIP_MEM)) {
            PrintError(vm, VCORE_NONE, "Cannot currently handle nested paging\n");

    chkpt = chkpt_open(vm, store, url, LOAD);

    if (chkpt == NULL) {
        PrintError(vm, VCORE_NONE, "Error creating checkpoint store\n");

    if (opts & V3_CHKPT_OPT_SKIP_MEM) {

    if (v3_bitmap_init(&mod_pgs, vm->mem_size >> 12) == -1) {

        PrintError(vm, VCORE_NONE, "Could not initialize bitmap.\n");

    /* If this guest is running, we need to block it while the checkpoint occurs */
    if (vm->run_state == VM_RUNNING) {
        while (v3_raise_barrier(vm, NULL) == -1);

        // 1. Receive copy of bitmap
        PrintDebug(vm, VCORE_NONE, "Memory page iteration %d\n", i++);
        int retval = load_inc_memory(vm, &mod_pgs, chkpt);

            // end of receiving memory pages
        } else if (retval == -1) {
            PrintError(vm, VCORE_NONE, "Error receiving incremental memory.\n");

    if (!(opts & V3_CHKPT_OPT_SKIP_DEVS)) {
        if ((ret = v3_load_vm_devices(vm, chkpt)) == -1) {
            PrintError(vm, VCORE_NONE, "Unable to load devices\n");

    if ((ret = load_header(vm, chkpt)) == -1) {
        PrintError(vm, VCORE_NONE, "Unable to load header\n");

    if (!(opts & V3_CHKPT_OPT_SKIP_CORES)) {
        for (i = 0; i < vm->num_cores; i++) {
            if ((ret = load_core(&(vm->cores[i]), chkpt, opts)) == -1) {
                PrintError(vm, VCORE_NONE, "Error loading core state (core=%d)\n", i);

    PrintError(vm, VCORE_NONE, "Unable to receive VM\n");

    PrintDebug(vm, VCORE_NONE, "Done receiving the VM\n");

    /* Resume the guest if it was running and we didn't just trash the state */
    if (vm->run_state == VM_RUNNING) {

        PrintError(vm, VCORE_NONE, "VM was previously running. It is now borked. Pausing it.\n");
        vm->run_state = VM_STOPPED;

    /* We check the run state of the VM after every barrier,
       so this will immediately halt the VM */
    v3_lower_barrier(vm);

    if (!(opts & V3_CHKPT_OPT_SKIP_MEM)) {
        v3_bitmap_deinit(&mod_pgs);