2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2011, Madhav Suresh <madhav@u.northwestern.edu>
11 * Copyright (c) 2011, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Madhav Suresh <madhav@u.northwestern.edu>
15 * Arefin Huq <fig@arefin.net>
18 * This is free software. You are permitted to use,
19 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
22 #include <palacios/vmm.h>
23 #include <palacios/vmm_sprintf.h>
24 #include <palacios/vm_guest.h>
25 #include <palacios/svm.h>
26 #include <palacios/vmx.h>
27 #include <palacios/vmm_checkpoint.h>
28 #include <palacios/vmm_hashtable.h>
29 #include <palacios/vmm_direct_paging.h>
30 #include <palacios/vmm_debug.h>
32 #include <palacios/vmm_dev_mgr.h>
34 #ifdef V3_CONFIG_LIVE_MIGRATION
35 #include <palacios/vmm_time.h>
36 #include <palacios/vm_guest_mem.h>
37 #include <palacios/vmm_shadow_paging.h>
40 #ifndef V3_CONFIG_DEBUG_CHECKPOINT
42 #define PrintDebug(fmt, args...)
/* Global registry mapping a backing-store name (C string) to its
 * struct chkpt_interface *.  Populated by V3_init_checkpoint(). */
46 static struct hashtable * store_table = NULL;
/* Direction of a checkpoint operation: SAVE writes state out, LOAD reads it back. */
50 typedef enum {SAVE, LOAD} chkpt_mode_t;
/* Pluggable checkpoint backing-store interface.  A store implements open/close
 * of a whole checkpoint (keyed by url), open/close of named sub-contexts, and
 * tagged save/load of raw byte buffers.
 * NOTE(review): this listing elides lines of the struct, including the store's
 * name field ((*tmp_store)->name is used elsewhere) and the closing brace. */
52 struct chkpt_interface {
/* Open/close the whole checkpoint at 'url' in the given mode; returns opaque store_data. */
54 void * (*open_chkpt)(char * url, chkpt_mode_t mode);
55 int (*close_chkpt)(void * store_data);
/* Open/close a named context, optionally nested under parent_ctx. */
57 void * (*open_ctx)(void * store_data, void * parent_ctx, char * name);
58 int (*close_ctx)(void * store_data, void * ctx);
/* Tagged bulk transfer of 'len' bytes to/from 'buf' within a context. */
60 int (*save)(void * store_data, void * ctx, char * tag, uint64_t len, void * buf);
61 int (*load)(void * store_data, void * ctx, char * tag, uint64_t len, void * buf);
/* NOTE(review): these appear to be fields of the checkpoint handle
 * (presumably struct v3_chkpt — its opening line and other members,
 * e.g. store_data used throughout, are elided from this listing). */
66 struct v3_vm_info * vm;
68 struct chkpt_interface * interface;
/* Hashtable hash function for the store registry: the key is a
 * NUL-terminated store name, hashed over its strlen() bytes. */
76 static uint_t store_hash_fn(addr_t key) {
77 char * name = (char *)key;
78 return v3_hash_buffer((uint8_t *)name, strlen(name));
/* Hashtable equality function for the store registry: keys match when
 * the two store names compare equal as C strings. */
81 static int store_eq_fn(addr_t key1, addr_t key2) {
82 char * name1 = (char *)key1;
83 char * name2 = (char *)key2;
85 return (strcmp(name1, name2) == 0);
90 #include "vmm_chkpt_stores.h"
/* Initialize the checkpoint subsystem: create the store registry and
 * register every backing store the linker placed in the
 * _v3_chkpt_stores section (bounded by __start/__stop symbols). */
93 int V3_init_checkpoint() {
94 extern struct chkpt_interface * __start__v3_chkpt_stores[];
95 extern struct chkpt_interface * __stop__v3_chkpt_stores[];
96 struct chkpt_interface ** tmp_store = __start__v3_chkpt_stores;
99 store_table = v3_create_htable(0, store_hash_fn, store_eq_fn);
/* Walk the section entry by entry until the stop sentinel. */
101 while (tmp_store != __stop__v3_chkpt_stores) {
102 V3_Print("Registering Checkpoint Backing Store (%s)\n", (*tmp_store)->name);
/* Reject duplicate registrations under the same store name. */
104 if (v3_htable_search(store_table, (addr_t)((*tmp_store)->name))) {
105 PrintError("Multiple instances of Checkpoint backing Store (%s)\n", (*tmp_store)->name);
/* v3_htable_insert returns 0 on failure. */
109 if (v3_htable_insert(store_table, (addr_t)((*tmp_store)->name), (addr_t)(*tmp_store)) == 0) {
110 PrintError("Could not register Checkpoint backing store (%s)\n", (*tmp_store)->name);
/* Advance via index 'i' (declared in elided lines) rather than tmp_store++. */
114 tmp_store = &(__start__v3_chkpt_stores[++i]);
/* Tear down the checkpoint subsystem.  The (0,0) flags ask
 * v3_free_htable not to free keys/values — the interface structs
 * live in the linker section and their names are static. */
120 int V3_deinit_checkpoint() {
121 v3_free_htable(store_table, 0, 0);
/* On-disk header strings identifying the checkpoint's virtualization flavor
 * (SVM vs VMX).  The "x.x" fields look like placeholder version numbers —
 * note load_header compares against these same strings, so they must match. */
126 static char svm_chkpt_header[] = "v3vee palacios checkpoint version: x.x, SVM x.x";
127 static char vmx_chkpt_header[] = "v3vee palacios checkpoint version: x.x, VMX x.x";
/* Close a checkpoint by delegating to the backing store.
 * NOTE(review): close_chkpt's return value is ignored here; the elided
 * remainder presumably frees 'chkpt' — confirm against the full source. */
129 static int chkpt_close(struct v3_chkpt * chkpt) {
130 chkpt->interface->close_chkpt(chkpt->store_data);
/* Open a checkpoint: resolve the named backing store in store_table,
 * open the underlying url in the requested SAVE/LOAD mode, then
 * allocate and populate a struct v3_chkpt wrapping both.
 * Returns NULL (in elided error paths) on any failure. */
138 static struct v3_chkpt * chkpt_open(struct v3_vm_info * vm, char * store, char * url, chkpt_mode_t mode) {
139 struct chkpt_interface * iface = NULL;
140 struct v3_chkpt * chkpt = NULL;
141 void * store_data = NULL;
/* Registry lookup by store name; htable stores addr_t, hence the cast. */
143 iface = (void *)v3_htable_search(store_table, (addr_t)store);
146 V3_Print("Error: Could not locate Checkpoint interface for store (%s)\n", store);
150 store_data = iface->open_chkpt(url, mode);
152 if (store_data == NULL) {
153 PrintError("Could not open url (%s) for backing store (%s)\n", url, store);
158 chkpt = V3_Malloc(sizeof(struct v3_chkpt));
161 PrintError("Could not allocate checkpoint state\n");
165 chkpt->interface = iface;
167 chkpt->store_data = store_data;
/* Open a named sub-context within a checkpoint, optionally nested under
 * 'parent'.  Allocates and zeroes a ctx, records the parent link, and asks
 * the backing store for the underlying context handle.  A NULL store handle
 * only produces a warning here (callers check ctx->store_ctx themselves). */
172 struct v3_chkpt_ctx * v3_chkpt_open_ctx(struct v3_chkpt * chkpt, struct v3_chkpt_ctx * parent, char * name) {
173 struct v3_chkpt_ctx * ctx = V3_Malloc(sizeof(struct v3_chkpt_ctx));
174 void * parent_store_ctx = NULL;
178 PrintError("Unable to allocate context\n");
182 memset(ctx, 0, sizeof(struct v3_chkpt_ctx));
185 ctx->parent = parent;
/* Pass the parent's underlying store context through when nesting. */
188 parent_store_ctx = parent->store_ctx;
191 ctx->store_ctx = chkpt->interface->open_ctx(chkpt->store_data, parent_store_ctx, name);
193 if (!(ctx->store_ctx)) {
194 PrintError("Warning: opening underlying representation returned null\n");
/* Close a checkpoint context via the backing store.
 * NOTE(review): 'ret' is declared, and 'ctx' presumably freed,
 * in lines elided from this listing. */
200 int v3_chkpt_close_ctx(struct v3_chkpt_ctx * ctx) {
201 struct v3_chkpt * chkpt = ctx->chkpt;
204 ret = chkpt->interface->close_ctx(chkpt->store_data, ctx->store_ctx);
/* Write 'len' bytes from 'buf' under 'tag' into the context, delegating
 * straight to the backing store's save hook (its return is passed through). */
215 int v3_chkpt_save(struct v3_chkpt_ctx * ctx, char * tag, uint64_t len, void * buf) {
216 struct v3_chkpt * chkpt = ctx->chkpt;
218 return chkpt->interface->save(chkpt->store_data, ctx->store_ctx, tag, len, buf);
/* Read 'len' bytes into 'buf' from the tagged record in the context,
 * delegating straight to the backing store's load hook. */
223 int v3_chkpt_load(struct v3_chkpt_ctx * ctx, char * tag, uint64_t len, void * buf) {
224 struct v3_chkpt * chkpt = ctx->chkpt;
226 return chkpt->interface->load(chkpt->store_data, ctx->store_ctx, tag, len, buf);
/* Restore the guest's entire physical memory image: load vm->mem_size bytes
 * from the "memory_img" context directly into the host mapping of the base
 * memory region. */
231 static int load_memory(struct v3_vm_info * vm, struct v3_chkpt * chkpt) {
233 void * guest_mem_base = NULL;
/* Host-virtual address of the guest's base memory region. */
237 guest_mem_base = V3_VAddr((void *)vm->mem_map.base_region.host_addr);
239 ctx = v3_chkpt_open_ctx(chkpt, NULL, "memory_img");
242 PrintError("Unable to open context for memory load\n");
246 if (v3_chkpt_load(ctx, "memory_img", vm->mem_size, guest_mem_base) == -1) {
/* NOTE(review): 'ret' in this message is declared in elided lines and is not
 * the load result (the call's return was only compared to -1). */
247 PrintError("Unable to load all of memory (requested=%llu bytes, result=%llu bytes\n",(uint64_t)(vm->mem_size),ret);
248 v3_chkpt_close_ctx(ctx);
252 v3_chkpt_close_ctx(ctx);
/* Save the guest's entire physical memory image: write vm->mem_size bytes
 * from the base memory region into the "memory_img" context. */
258 static int save_memory(struct v3_vm_info * vm, struct v3_chkpt * chkpt) {
259 void * guest_mem_base = NULL;
263 guest_mem_base = V3_VAddr((void *)vm->mem_map.base_region.host_addr);
265 ctx = v3_chkpt_open_ctx(chkpt, NULL,"memory_img");
268 PrintError("Unable to open context to save memory\n");
272 if (v3_chkpt_save(ctx, "memory_img", vm->mem_size, guest_mem_base) == -1) {
/* NOTE(review): message says "load" on the save path — apparent copy/paste
 * from load_memory; 'ret' likewise comes from elided declarations. */
273 PrintError("Unable to load all of memory (requested=%llu, received=%llu)\n",(uint64_t)(vm->mem_size),ret);
274 v3_chkpt_close_ctx(ctx);
278 v3_chkpt_close_ctx(ctx);
283 #ifdef V3_CONFIG_LIVE_MIGRATION
/* Per-migration dirty-page tracking state: the VM being migrated and a
 * bitmap with one bit per 4KB guest physical page (set = written since
 * tracking started). */
285 struct mem_migration_state {
286 struct v3_vm_info *vm;
287 struct v3_bitmap modified_pages;
/* Shadow-paging event callback used while dirty-page tracking is active.
 * On the pre-implementation pass of a shadow pagefault caused by a write,
 * translate the faulting GVA to a GPA and mark that 4KB page (gpa>>12)
 * dirty in the migration bitmap.  Faults on non-physical addresses and all
 * other event types are ignored. */
290 static int paging_callback(struct guest_info *core,
291 struct v3_shdw_pg_event *event,
294 struct mem_migration_state *m = (struct mem_migration_state *)priv_data;
296 if (event->event_type==SHADOW_PAGEFAULT &&
297 event->event_order==SHADOW_PREIMPL &&
298 event->error_code.write) {
/* v3_gva_to_gpa returns 0 on success ('gpa' is declared in elided lines). */
300 if (!v3_gva_to_gpa(core,event->gva,&gpa)) {
301 // write to this page
302 v3_bitmap_set(&(m->modified_pages),gpa>>12);
304 // no worries, this isn't physical memory
307 // we don't care about other events
/* Begin dirty-page tracking for a migration round: allocate the tracking
 * state, size the bitmap at one bit per 4KB page of guest memory, register
 * the shadow-paging callback, and invalidate every core's shadow page
 * tables so subsequent guest writes fault and get recorded. */
315 static struct mem_migration_state *start_page_tracking(struct v3_vm_info *vm)
317 struct mem_migration_state *m;
320 m = (struct mem_migration_state *)V3_Malloc(sizeof(struct mem_migration_state));
323 PrintError("Cannot allocate\n");
/* mem_size >> 12 == number of 4KB pages. */
329 if (v3_bitmap_init(&(m->modified_pages),vm->mem_size >> 12) == -1) {
330 PrintError("Failed to initialize modified_pages bit vector");
334 v3_register_shadow_paging_event_callback(vm,paging_callback,m);
336 for (i=0;i<vm->num_cores;i++) {
337 v3_invalidate_shadow_pts(&(vm->cores[i]));
340 // and now we should get callbacks as writes happen
/* End dirty-page tracking: unregister the shadow-paging callback and
 * release the bitmap.  NOTE(review): 'm' itself is presumably freed in
 * lines elided from this listing — confirm against the full source. */
345 static void stop_page_tracking(struct mem_migration_state *m)
347 v3_unregister_shadow_paging_event_callback(m->vm,paging_callback,m);
349 v3_bitmap_deinit(&(m->modified_pages));
362 // zero: done with this round
/* Send one incremental-memory round: first the dirty-page bitmap itself
 * ("memory_bitmap_bits"), then each dirty 4KB page, in bitmap order, as a
 * separate "memory_page" context.  The receiver replays in the same order. */
363 static int save_inc_memory(struct v3_vm_info * vm,
364 struct v3_bitmap * mod_pgs_to_send,
365 struct v3_chkpt * chkpt) {
366 int page_size_bytes = 1 << 12; // assuming 4k pages right now
369 void * guest_mem_base = NULL;
/* Round the bitmap size up to whole bytes. */
370 int bitmap_num_bytes = (mod_pgs_to_send->num_bits / 8)
371 + ((mod_pgs_to_send->num_bits % 8) > 0);
374 guest_mem_base = V3_VAddr((void *)vm->mem_map.base_region.host_addr);
376 PrintDebug("Saving incremental memory.\n");
378 ctx = v3_chkpt_open_ctx(chkpt, NULL,"memory_bitmap_bits");
381 PrintError("Cannot open context for dirty memory bitmap\n");
386 if (v3_chkpt_save(ctx,
387 "memory_bitmap_bits",
389 mod_pgs_to_send->bits) == -1) {
390 PrintError("Unable to write all of the dirty memory bitmap\n");
391 v3_chkpt_close_ctx(ctx);
395 v3_chkpt_close_ctx(ctx);
397 PrintDebug("Sent bitmap bits.\n");
399 // Dirty memory pages are sent in bitmap order
400 for (i = 0; i < mod_pgs_to_send->num_bits; i++) {
401 if (v3_bitmap_check(mod_pgs_to_send, i)) {
402 // PrintDebug("Sending memory page %d.\n",i);
403 ctx = v3_chkpt_open_ctx(chkpt, NULL,"memory_page");
405 PrintError("Unable to open context to send memory page\n");
/* Page i lives at guest_mem_base + i*4KB in the host mapping. */
408 if (v3_chkpt_save(ctx,
411 guest_mem_base + (page_size_bytes * i)) == -1) {
412 PrintError("Unable to send a memory page\n");
413 v3_chkpt_close_ctx(ctx);
417 v3_chkpt_close_ctx(ctx);
428 // zero: ok, but not done
429 // positive: ok, and also done
/* Receive one incremental-memory round: read the dirty-page bitmap, then
 * load each flagged 4KB page in bitmap order.  An all-zero bitmap (no pages
 * follow) signals the end of incremental transfer — see empty_bitmap. */
430 static int load_inc_memory(struct v3_vm_info * vm,
431 struct v3_bitmap * mod_pgs,
432 struct v3_chkpt * chkpt) {
433 int page_size_bytes = 1 << 12; // assuming 4k pages right now
436 void * guest_mem_base = NULL;
437 bool empty_bitmap = true;
/* Round the bitmap size up to whole bytes (must mirror save_inc_memory). */
438 int bitmap_num_bytes = (mod_pgs->num_bits / 8)
439 + ((mod_pgs->num_bits % 8) > 0);
442 guest_mem_base = V3_VAddr((void *)vm->mem_map.base_region.host_addr);
444 ctx = v3_chkpt_open_ctx(chkpt, NULL,"memory_bitmap_bits");
447 PrintError("Cannot open context to receive memory bitmap\n");
451 if (v3_chkpt_load(ctx,
452 "memory_bitmap_bits",
454 mod_pgs->bits) == -1) {
455 PrintError("Did not receive all of memory bitmap\n");
456 v3_chkpt_close_ctx(ctx);
460 v3_chkpt_close_ctx(ctx);
462 // Receive also follows bitmap order
463 for (i = 0; i < mod_pgs->num_bits; i ++) {
464 if (v3_bitmap_check(mod_pgs, i)) {
465 PrintDebug("Loading page %d\n", i);
466 empty_bitmap = false;
467 ctx = v3_chkpt_open_ctx(chkpt, NULL,"memory_page");
469 PrintError("Cannot open context to receive memory page\n");
/* Write page i straight into the guest's memory at i*4KB. */
473 if (v3_chkpt_load(ctx,
476 guest_mem_base + (page_size_bytes * i)) == -1) {
477 PrintError("Did not receive all of memory page\n");
478 v3_chkpt_close_ctx(ctx);
481 v3_chkpt_close_ctx(ctx);
486 // signal end of receiving pages
487 PrintDebug("Finished receiving pages.\n");
/* Write the flavor-identifying header record ("header" context): the SVM or
 * VMX header string is chosen by the host CPU's virtualization type.  The
 * header is written WITHOUT its NUL terminator (strlen bytes), matching how
 * load_header reads it back.  Unsupported architectures fail the save. */
498 int save_header(struct v3_vm_info * vm, struct v3_chkpt * chkpt) {
499 extern v3_cpu_arch_t v3_mach_type;
502 ctx = v3_chkpt_open_ctx(chkpt, NULL, "header");
504 PrintError("Cannot open context to save header\n");
/* NOTE(review): additional case labels (other SVM/VMX variants) appear to be
 * elided from this listing between the visible cases. */
508 switch (v3_mach_type) {
510 case V3_SVM_REV3_CPU: {
511 if (v3_chkpt_save(ctx, "header", strlen(svm_chkpt_header), svm_chkpt_header) == -1) {
512 PrintError("Could not save all of SVM header\n");
513 v3_chkpt_close_ctx(ctx);
520 case V3_VMX_EPT_UG_CPU: {
521 if (v3_chkpt_save(ctx, "header", strlen(vmx_chkpt_header), vmx_chkpt_header) == -1) {
522 PrintError("Could not save all of VMX header\n");
523 v3_chkpt_close_ctx(ctx);
529 PrintError("checkpoint not supported on this architecture\n");
530 v3_chkpt_close_ctx(ctx);
534 v3_chkpt_close_ctx(ctx);
/* Read back the header record written by save_header.  Each arch case
 * loads exactly strlen(<flavor>_chkpt_header) bytes into a stack VLA and
 * NUL-terminates it.  NOTE(review): the comparison of the loaded header
 * against the expected string appears to be in elided lines — confirm. */
539 static int load_header(struct v3_vm_info * vm, struct v3_chkpt * chkpt) {
540 extern v3_cpu_arch_t v3_mach_type;
543 ctx = v3_chkpt_open_ctx(chkpt, NULL, "header");
545 switch (v3_mach_type) {
547 case V3_SVM_REV3_CPU: {
/* +1 leaves room for the terminator added below. */
548 char header[strlen(svm_chkpt_header) + 1];
550 if (v3_chkpt_load(ctx, "header", strlen(svm_chkpt_header), header) == -1) {
551 PrintError("Could not load all of SVM header\n");
552 v3_chkpt_close_ctx(ctx);
556 header[strlen(svm_chkpt_header)] = 0;
562 case V3_VMX_EPT_UG_CPU: {
563 char header[strlen(vmx_chkpt_header) + 1];
565 if (v3_chkpt_load(ctx, "header", strlen(vmx_chkpt_header), header) == -1) {
566 PrintError("Could not load all of VMX header\n");
567 v3_chkpt_close_ctx(ctx);
571 header[strlen(vmx_chkpt_header)] = 0;
576 PrintError("checkpoint not supported on this architecture\n");
577 v3_chkpt_close_ctx(ctx);
581 v3_chkpt_close_ctx(ctx);
/* Restore one vcpu's state from the checkpoint.  Three phases:
 *   1. generic register/segment/paging state from "guest_info<vcpu_id>";
 *   2. re-derive cpu/mem mode and re-activate shadow or passthrough page
 *      tables as appropriate;
 *   3. arch-specific VMCB/VMCS state via v3_svm_load_core / v3_vmx_load_core
 *      from "vmcb_data<vcpu_id>" / "vmcs_data<vcpu_id>". */
587 static int load_core(struct guest_info * info, struct v3_chkpt * chkpt) {
588 extern v3_cpu_arch_t v3_mach_type;
/* key_name is a 16-byte buffer declared in elided lines. */
592 memset(key_name, 0, 16);
594 snprintf(key_name, 16, "guest_info%d", info->vcpu_id);
596 ctx = v3_chkpt_open_ctx(chkpt, NULL, key_name);
599 PrintError("Could not open context to load core\n");
603 // These really need to have error checking
605 v3_chkpt_load_64(ctx, "RIP", &(info->rip));
607 V3_CHKPT_STD_LOAD(ctx, info->vm_regs);
609 V3_CHKPT_STD_LOAD(ctx, info->ctrl_regs.cr0);
610 V3_CHKPT_STD_LOAD(ctx, info->ctrl_regs.cr2);
611 V3_CHKPT_STD_LOAD(ctx, info->ctrl_regs.cr4);
612 V3_CHKPT_STD_LOAD(ctx, info->ctrl_regs.cr8);
613 V3_CHKPT_STD_LOAD(ctx, info->ctrl_regs.rflags);
614 V3_CHKPT_STD_LOAD(ctx, info->ctrl_regs.efer);
616 V3_CHKPT_STD_LOAD(ctx, info->dbg_regs);
617 V3_CHKPT_STD_LOAD(ctx, info->segments);
618 V3_CHKPT_STD_LOAD(ctx, info->shdw_pg_state.guest_cr3);
619 V3_CHKPT_STD_LOAD(ctx, info->shdw_pg_state.guest_cr0);
620 V3_CHKPT_STD_LOAD(ctx, info->shdw_pg_state.guest_efer);
622 v3_chkpt_close_ctx(ctx);
624 PrintDebug("Finished reading guest_info information\n");
/* Modes depend on the control registers just restored. */
626 info->cpu_mode = v3_get_vm_cpu_mode(info);
627 info->mem_mode = v3_get_vm_mem_mode(info);
629 if (info->shdw_pg_mode == SHADOW_PAGING) {
630 if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
631 if (v3_activate_shadow_pt(info) == -1) {
632 PrintError("Failed to activate shadow page tables\n");
636 if (v3_activate_passthrough_pt(info) == -1) {
637 PrintError("Failed to activate passthrough page tables\n");
/* NOTE(review): other arch case labels appear elided between the visible ones. */
644 switch (v3_mach_type) {
646 case V3_SVM_REV3_CPU: {
649 snprintf(key_name, 16, "vmcb_data%d", info->vcpu_id);
650 ctx = v3_chkpt_open_ctx(chkpt, NULL, key_name);
653 PrintError("Could not open context to load SVM core\n");
657 if (v3_svm_load_core(info, ctx) == -1) {
658 PrintError("Failed to patch core %d\n", info->vcpu_id);
659 v3_chkpt_close_ctx(ctx);
663 v3_chkpt_close_ctx(ctx);
669 case V3_VMX_EPT_UG_CPU: {
672 snprintf(key_name, 16, "vmcs_data%d", info->vcpu_id);
674 ctx = v3_chkpt_open_ctx(chkpt, NULL, key_name);
677 PrintError("Could not open context to load VMX core\n");
681 if (v3_vmx_load_core(info, ctx) < 0) {
682 PrintError("VMX checkpoint failed\n");
683 v3_chkpt_close_ctx(ctx);
687 v3_chkpt_close_ctx(ctx);
692 PrintError("Invalid CPU Type (%d)\n", v3_mach_type);
/* Debug dump of the freshly restored core state. */
696 v3_print_guest_state(info);
702 static int save_core(struct guest_info * info, struct v3_chkpt * chkpt) {
703 extern v3_cpu_arch_t v3_mach_type;
707 memset(key_name, 0, 16);
709 v3_print_guest_state(info);
712 snprintf(key_name, 16, "guest_info%d", info->vcpu_id);
714 ctx = v3_chkpt_open_ctx(chkpt, NULL, key_name);
717 PrintError("Unable to open context to save core\n");
722 // Error checking of all this needs to happen
723 v3_chkpt_save_64(ctx, "RIP", &(info->rip));
725 V3_CHKPT_STD_SAVE(ctx, info->vm_regs);
727 V3_CHKPT_STD_SAVE(ctx, info->ctrl_regs.cr0);
728 V3_CHKPT_STD_SAVE(ctx, info->ctrl_regs.cr2);
729 V3_CHKPT_STD_SAVE(ctx, info->ctrl_regs.cr4);
730 V3_CHKPT_STD_SAVE(ctx, info->ctrl_regs.cr8);
731 V3_CHKPT_STD_SAVE(ctx, info->ctrl_regs.rflags);
732 V3_CHKPT_STD_SAVE(ctx, info->ctrl_regs.efer);
734 V3_CHKPT_STD_SAVE(ctx, info->dbg_regs);
735 V3_CHKPT_STD_SAVE(ctx, info->segments);
736 V3_CHKPT_STD_SAVE(ctx, info->shdw_pg_state.guest_cr3);
737 V3_CHKPT_STD_SAVE(ctx, info->shdw_pg_state.guest_cr0);
738 V3_CHKPT_STD_SAVE(ctx, info->shdw_pg_state.guest_efer);
740 v3_chkpt_close_ctx(ctx);
742 //Architechture specific code
743 switch (v3_mach_type) {
745 case V3_SVM_REV3_CPU: {
749 snprintf(key_name, 16, "vmcb_data%d", info->vcpu_id);
751 ctx = v3_chkpt_open_ctx(chkpt, NULL, key_name);
754 PrintError("Could not open context to store SVM core\n");
758 if (v3_svm_save_core(info, ctx) == -1) {
759 PrintError("VMCB Unable to be written\n");
760 v3_chkpt_close_ctx(ctx);
764 v3_chkpt_close_ctx(ctx);
769 case V3_VMX_EPT_UG_CPU: {
773 snprintf(key_name, 16, "vmcs_data%d", info->vcpu_id);
775 ctx = v3_chkpt_open_ctx(chkpt, NULL, key_name);
778 PrintError("Could not open context to store VMX core\n");
782 if (v3_vmx_save_core(info, ctx) == -1) {
783 PrintError("VMX checkpoint failed\n");
784 v3_chkpt_close_ctx(ctx);
788 v3_chkpt_close_ctx(ctx);
793 PrintError("Invalid CPU Type (%d)\n", v3_mach_type);
/* Public entry point: checkpoint a whole VM to (store, url).
 * Order: open store -> barrier (if running) -> memory -> devices ->
 * header -> per-core state -> release barrier.  The barrier pauses a
 * running guest so the snapshot is consistent. */
801 int v3_chkpt_save_vm(struct v3_vm_info * vm, char * store, char * url) {
802 struct v3_chkpt * chkpt = NULL;
807 chkpt = chkpt_open(vm, store, url, SAVE);
810 PrintError("Error creating checkpoint store for url %s\n",url);
814 /* If this guest is running we need to block it while the checkpoint occurs */
815 if (vm->run_state == VM_RUNNING) {
/* Spin until the barrier is raised on all cores. */
816 while (v3_raise_barrier(vm, NULL) == -1);
819 if ((ret = save_memory(vm, chkpt)) == -1) {
820 PrintError("Unable to save memory\n");
825 if ((ret = v3_save_vm_devices(vm, chkpt)) == -1) {
826 PrintError("Unable to save devices\n");
831 if ((ret = save_header(vm, chkpt)) == -1) {
832 PrintError("Unable to save header\n");
836 for (i = 0; i < vm->num_cores; i++){
837 if ((ret = save_core(&(vm->cores[i]), chkpt)) == -1) {
838 PrintError("chkpt of core %d failed\n", i);
845 /* Resume the guest if it was running */
846 if (vm->run_state == VM_RUNNING) {
847 v3_lower_barrier(vm);
/* Public entry point: restore a whole VM from (store, url).
 * Mirrors v3_chkpt_save_vm: barrier -> memory -> devices -> header ->
 * per-core state -> release barrier.  A previously-running VM is left
 * STOPPED after a restore (its old runtime state was just replaced). */
856 int v3_chkpt_load_vm(struct v3_vm_info * vm, char * store, char * url) {
857 struct v3_chkpt * chkpt = NULL;
861 chkpt = chkpt_open(vm, store, url, LOAD);
864 PrintError("Error creating checkpoint store\n");
868 /* If this guest is running we need to block it while the checkpoint occurs */
869 if (vm->run_state == VM_RUNNING) {
870 while (v3_raise_barrier(vm, NULL) == -1);
873 if ((ret = load_memory(vm, chkpt)) == -1) {
/* NOTE(review): "save memory" message on a load path — apparent copy/paste. */
874 PrintError("Unable to save memory\n");
879 if ((ret = v3_load_vm_devices(vm, chkpt)) == -1) {
880 PrintError("Unable to load devies\n");
885 if ((ret = load_header(vm, chkpt)) == -1) {
886 PrintError("Unable to load header\n");
891 for (i = 0; i < vm->num_cores; i++) {
892 if ((ret = load_core(&(vm->cores[i]), chkpt)) == -1) {
893 PrintError("Error loading core state (core=%d)\n", i);
900 /* Resume the guest if it was running and we didn't just trash the state*/
901 if (vm->run_state == VM_RUNNING) {
904 vm->run_state = VM_STOPPED;
907 /* We check the run state of the VM after every barrier
908 So this will immediately halt the VM
910 v3_lower_barrier(vm);
920 #ifdef V3_CONFIG_LIVE_MIGRATION
922 #define MOD_THRESHOLD 200 // pages below which we declare victory
923 #define ITER_THRESHOLD 32 // iters below which we declare victory
/* Live-migration sender: iterative pre-copy of guest memory followed by the
 * remaining VM state.  Each round: briefly pause the VM, snapshot the dirty
 * bitmap, decide whether to converge (few dirty pages or too many rounds),
 * then either stop or restart tracking and resume the VM while sending the
 * round's pages.  Only shadow paging is supported — see the check below. */
927 int v3_chkpt_send_vm(struct v3_vm_info * vm, char * store, char * url) {
928 struct v3_chkpt * chkpt = NULL;
931 bool last_modpage_iteration=false;
932 struct v3_bitmap modified_pages_to_send;
936 struct mem_migration_state *mm_state;
939 // Currently will work only for shadow paging
940 for (i=0;i<vm->num_cores;i++) {
941 if (vm->cores[i].shdw_pg_mode!=SHADOW_PAGING) {
942 PrintError("Cannot currently handle nested paging\n");
948 chkpt = chkpt_open(vm, store, url, SAVE);
951 PrintError("Error creating checkpoint store\n");
956 // In a send, the memory is copied incrementally first,
957 // followed by the remainder of the state
959 if (v3_bitmap_init(&modified_pages_to_send,
960 vm->mem_size>>12 // number of pages in main region
962 PrintError("Could not intialize bitmap.\n");
966 // 0. Initialize bitmap to all 1s
/* First round sends ALL pages, so every bit starts set. */
967 for (i=0; i < modified_pages_to_send.num_bits; i++) {
968 v3_bitmap_set(&modified_pages_to_send,i);
972 while (!last_modpage_iteration) {
973 PrintDebug("Modified memory page iteration %d\n",i++);
975 start_time = v3_get_host_time(&(vm->cores[0].time_state));
977 // We will pause the VM for a short while
978 // so that we can collect the set of changed pages
979 if (v3_pause_vm(vm) == -1) {
980 PrintError("Could not pause VM\n");
986 // special case, we already have the pages to send (all of them)
987 // they are already in modified_pages_to_send
989 // normally, we are in the middle of a round
990 // We need to copy from the current tracking bitmap
991 // to our send bitmap
992 v3_bitmap_copy(&modified_pages_to_send,&(mm_state->modified_pages));
993 // and now we need to remove our tracking
994 stop_page_tracking(mm_state);
997 // are we done? (note that we are still paused)
/* Convergence test: MOD_THRESHOLD pages or ITER_THRESHOLD rounds. */
998 num_mod_pages = v3_bitmap_count(&modified_pages_to_send);
999 if (num_mod_pages<MOD_THRESHOLD || iter>ITER_THRESHOLD) {
1000 // we are done, so we will not restart page tracking
1001 // the vm is paused, and so we should be able
1002 // to just send the data
1003 PrintDebug("Last modified memory page iteration.\n");
1004 last_modpage_iteration = true;
1006 // we are not done, so we will restart page tracking
1007 // to prepare for a second round of pages
1008 // we will resume the VM as this happens
1009 if (!(mm_state=start_page_tracking(vm))) {
1010 PrintError("Error enabling page tracking.\n");
1014 if (v3_continue_vm(vm) == -1) {
1015 PrintError("Error resuming the VM\n");
1016 stop_page_tracking(mm_state);
1021 stop_time = v3_get_host_time(&(vm->cores[0].time_state));
1022 PrintDebug("num_mod_pages=%d\ndowntime=%llu\n",num_mod_pages,stop_time-start_time);
1026 // At this point, we are either paused and about to copy
1027 // the last chunk, or we are running, and will copy the last
1028 // round in parallel with current execution
1029 if (num_mod_pages>0) {
1030 if (save_inc_memory(vm, &modified_pages_to_send, chkpt) == -1) {
1031 PrintError("Error sending incremental memory.\n");
1035 } // we don't want to copy an empty bitmap here
1040 if (v3_bitmap_reset(&modified_pages_to_send) == -1) {
1041 PrintError("Error reseting bitmap.\n");
1046 // send bitmap of 0s to signal end of modpages
1047 if (save_inc_memory(vm, &modified_pages_to_send, chkpt) == -1) {
1048 PrintError("Error sending incremental memory.\n");
1053 // save the non-memory state
1054 if ((ret = v3_save_vm_devices(vm, chkpt)) == -1) {
1055 PrintError("Unable to save devices\n");
1060 if ((ret = save_header(vm, chkpt)) == -1) {
1061 PrintError("Unable to save header\n");
1065 for (i = 0; i < vm->num_cores; i++){
1066 if ((ret = save_core(&(vm->cores[i]), chkpt)) == -1) {
1067 PrintError("chkpt of core %d failed\n", i);
1072 stop_time = v3_get_host_time(&(vm->cores[0].time_state));
1073 PrintDebug("num_mod_pages=%d\ndowntime=%llu\n",num_mod_pages,stop_time-start_time);
1074 PrintDebug("Done sending VM!\n");
1076 v3_bitmap_deinit(&modified_pages_to_send);
/* Live-migration receiver: loop on load_inc_memory() rounds until the
 * sender transmits an all-zero bitmap (end of pre-copy), then load the
 * devices, header, and per-core state.  Like the sender, only shadow
 * paging is supported, and a previously-running VM ends up STOPPED. */
1083 int v3_chkpt_receive_vm(struct v3_vm_info * vm, char * store, char * url) {
1084 struct v3_chkpt * chkpt = NULL;
1087 struct v3_bitmap mod_pgs;
1089 // Currently will work only for shadow paging
1090 for (i=0;i<vm->num_cores;i++) {
1091 if (vm->cores[i].shdw_pg_mode!=SHADOW_PAGING) {
1092 PrintError("Cannot currently handle nested paging\n");
1097 chkpt = chkpt_open(vm, store, url, LOAD);
1099 if (chkpt == NULL) {
1100 PrintError("Error creating checkpoint store\n");
1105 if (v3_bitmap_init(&mod_pgs,vm->mem_size>>12) == -1) {
1107 PrintError("Could not intialize bitmap.\n");
1111 /* If this guest is running we need to block it while the checkpoint occurs */
1112 if (vm->run_state == VM_RUNNING) {
1113 while (v3_raise_barrier(vm, NULL) == -1);
1118 // 1. Receive copy of bitmap
1120 PrintDebug("Memory page iteration %d\n",i++);
/* load_inc_memory: positive = done, 0 = more rounds, -1 = error. */
1121 int retval = load_inc_memory(vm, &mod_pgs, chkpt);
1123 // end of receiving memory pages
1125 } else if (retval == -1) {
1126 PrintError("Error receiving incremental memory.\n");
1132 if ((ret = v3_load_vm_devices(vm, chkpt)) == -1) {
1133 PrintError("Unable to load devices\n");
1139 if ((ret = load_header(vm, chkpt)) == -1) {
1140 PrintError("Unable to load header\n");
1146 for (i = 0; i < vm->num_cores; i++) {
1147 if ((ret = load_core(&(vm->cores[i]), chkpt)) == -1) {
1148 PrintError("Error loading core state (core=%d)\n", i);
1155 PrintError("Unable to receive VM\n");
1157 PrintDebug("Done receving the VM\n");
1161 /* Resume the guest if it was running and we didn't just trash the state*/
1162 if (vm->run_state == VM_RUNNING) {
1164 PrintError("VM was previously running. It is now borked. Pausing it. \n");
1165 vm->run_state = VM_STOPPED;
1168 /* We check the run state of the VM after every barrier
1169 So this will immediately halt the VM
1171 v3_lower_barrier(vm);
1174 v3_bitmap_deinit(&mod_pgs);