struct v3_chkpt_info {
char store[128];
char url[256]; /* This might need to be bigger... */
+ unsigned long long opts;
+#define V3_CHKPT_OPT_NONE 0
+#define V3_CHKPT_OPT_SKIP_MEM 1 // don't write memory to store
+#define V3_CHKPT_OPT_SKIP_DEVS 2 // don't write devices to store
+#define V3_CHKPT_OPT_SKIP_CORES 4 // don't write architecture-independent core data to store
+#define V3_CHKPT_OPT_SKIP_ARCHDEP 8 // don't write architecture-dependent core data to store
} __attribute__((packed));
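For callers, these flags compose by bitwise OR (equivalently, by summing distinct powers of two). A minimal sketch using only the defines above:

    /* Skip memory and devices, keep core state: mask value 3. */
    unsigned long long opts = V3_CHKPT_OPT_SKIP_MEM | V3_CHKPT_OPT_SKIP_DEVS;

    if (opts & V3_CHKPT_OPT_SKIP_MEM) {
        /* memory records will be absent from the store */
    }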
NOTICE("Saving Guest to %s:%s\n", chkpt.store, chkpt.url);
- if (v3_save_vm(guest->v3_ctx, chkpt.store, chkpt.url) == -1) {
+ if (v3_save_vm(guest->v3_ctx, chkpt.store, chkpt.url, chkpt.opts) == -1) {
WARNING("Error checkpointing VM state\n");
return -EFAULT;
}
NOTICE("Loading Guest from %s:%s\n", chkpt.store, chkpt.url);
- if (v3_load_vm(guest->v3_ctx, chkpt.store, chkpt.url) == -1) {
+ if (v3_load_vm(guest->v3_ctx, chkpt.store, chkpt.url, chkpt.opts) == -1) {
WARNING("Error Loading VM state\n");
return -EFAULT;
}
NOTICE("Sending (live-migrating) Guest to %s:%s\n",chkpt_info.store, chkpt_info.url);
- if (v3_send_vm(guest->v3_ctx, chkpt_info.store, chkpt_info.url) == -1) {
+ if (v3_send_vm(guest->v3_ctx, chkpt_info.store, chkpt_info.url, chkpt_info.opts) == -1) {
WARNING("Error sending VM\n");
return -EFAULT;
}
NOTICE("Receiving (live-migrating) Guest to %s:%s\n",chkpt_info.store, chkpt_info.url);
- if (v3_receive_vm(guest->v3_ctx, chkpt_info.store, chkpt_info.url) == -1) {
+ if (v3_receive_vm(guest->v3_ctx, chkpt_info.store, chkpt_info.url, chkpt_info.opts) == -1) {
WARNING("Error receiving VM\n");
return -EFAULT;
}
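All four handlers above simply forward chkpt.opts from the copied-in struct. For reference, a sketch of the matching user-space call path; the V3_VM_SAVE ioctl name is assumed from the v3_save tool's context, and the store/url values are illustrative:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    /* Sketch: issue a save with an option mask (assumes V3_VM_SAVE and
     * struct v3_chkpt_info as declared in v3_ctrl.h). */
    static int save_with_opts(const char *vm_dev, unsigned long long opts) {
        struct v3_chkpt_info chkpt;
        int vm_fd;
        int rc;

        memset(&chkpt, 0, sizeof(chkpt));
        strncpy(chkpt.store, "file", sizeof(chkpt.store) - 1);   /* illustrative */
        strncpy(chkpt.url, "/tmp/ckpt", sizeof(chkpt.url) - 1);  /* illustrative */
        chkpt.opts = opts;   /* the new field rides along in the same ioctl */

        vm_fd = open(vm_dev, O_RDONLY);
        if (vm_fd == -1) {
            return -1;
        }
        rc = ioctl(vm_fd, V3_VM_SAVE, &chkpt);
        close(vm_fd);
        return (rc < 0) ? -1 : 0;
    }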
struct v3_chkpt_info {
char store[128];
char url[256]; /* This might need to be bigger... */
+ unsigned long long opts;
+#define V3_CHKPT_OPT_NONE 0
+#define V3_CHKPT_OPT_SKIP_MEM 1 // don't write memory to store
+#define V3_CHKPT_OPT_SKIP_DEVS 2 // don't write devices to store
+#define V3_CHKPT_OPT_SKIP_CORES 4 // don't write architecture-independent core data to store
+#define V3_CHKPT_OPT_SKIP_ARCHDEP 8 // don't write architecture-dependent core data to store
} __attribute__((packed));
char * vm_dev = NULL;
if (argc < 4) {
- printf("usage: v3_load <vm_device> <store> <url>\n");
+ printf("usage: v3_load <vm_device> <store> <url> [optionmask]\n");
+ printf(" optionmask consists of the sum of any of the following\n");
+ printf(" 0 none\n");
+ printf(" 1 skip memory\n");
+ printf(" 2 skip devices\n");
+ printf(" 4 skip cores\n");
+ printf(" 8 skip architecture-specific core state\n");
return -1;
}
strncpy(chkpt.url, argv[3], MAX_URL_LEN);
+ if (argc>4) {
+ chkpt.opts = atoll(argv[4]);
+ } else {
+ chkpt.opts = V3_CHKPT_OPT_NONE;
+ }
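One caveat with the parsing above: atoll() returns 0 for non-numeric input, silently turning a typo into "no options". If stricter validation is wanted, a strtoull()-based variant could replace the assignment (a sketch, not part of the patch; the 0xf mask reflects the four flags defined so far):

    #include <stdlib.h>

    char *end = NULL;
    chkpt.opts = strtoull(argv[4], &end, 0);
    if (end == argv[4] || *end != '\0' || (chkpt.opts & ~0xfULL)) {
        printf("Invalid optionmask: %s\n", argv[4]);
        return -1;
    }

Either way, an invocation like `v3_load /dev/v3-vm0 file /tmp/ckpt 3` restores everything except memory and devices.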
+
vm_fd = open(vm_dev, O_RDONLY);
if (vm_fd == -1) {
printf("Error opening VM device: %s\n", vm_dev);
char * vm_dev = NULL;
if (argc < 4) {
- printf("Usage: ./v3_receive <vm_device> <store> <url>\n");
+ printf("Usage: ./v3_receive <vm_device> <store> <url> [optionmask]\n");
+ printf(" optionmask consists of the sum of any of the following\n");
+ printf(" 0 none\n");
+ printf(" 1 skip memory\n");
+ printf(" 2 skip devices\n");
+ printf(" 4 skip cores\n");
+ printf(" 8 skip architecture-specific core state\n");
return -1;
}
strncpy(chkpt.url, argv[3], MAX_URL_LEN);
+ if (argc>4) {
+ chkpt.opts = atoll(argv[4]);
+ } else {
+ chkpt.opts = V3_CHKPT_OPT_NONE;
+ }
+
vm_fd = open(vm_dev, O_RDONLY);
if (vm_fd == -1) {
printf("Error opening VM device: %s\n", vm_dev);
int vm_fd;
char * vm_dev = NULL;
+
+
if (argc < 4) {
- printf("usage: v3_save <vm_device> <store> <url>\n");
+ printf("usage: v3_save <vm_device> <store> <url> [optionmask]\n");
+ printf(" optionmask consists of the sum of any of the following\n");
+ printf(" 0 none\n");
+ printf(" 1 skip memory\n");
+ printf(" 2 skip devices\n");
+ printf(" 4 skip cores\n");
+ printf(" 8 skip architecture-specific core state\n");
return -1;
}
strncpy(chkpt.store, argv[2], MAX_STORE_LEN);
-
if (strlen(argv[3]) >= MAX_URL_LEN) {
printf("ERROR: Checkpoint URL longer than maximum size (%d)\n", MAX_URL_LEN);
return -1;
strncpy(chkpt.url, argv[3], MAX_URL_LEN);
+ if (argc>4) {
+ chkpt.opts = atoll(argv[4]);
+ } else {
+ chkpt.opts = V3_CHKPT_OPT_NONE;
+ }
+
+
vm_fd = open(vm_dev, O_RDONLY);
if (vm_fd == -1) {
printf("Error opening VM device: %s\n", vm_dev);
char * vm_dev = NULL;
if (argc < 4) {
- printf("Usage: ./v3_send <vm_device> <store> <url>\n");
+ printf("Usage: ./v3_send <vm_device> <store> <url> [optionmask]\n");
+ printf(" optionmask consists of the sum of any of the following\n");
+ printf(" 0 none\n");
+ printf(" 1 skip memory\n");
+ printf(" 2 skip devices\n");
+ printf(" 4 skip cores\n");
+ printf(" 8 skip architecture-specific core state\n");
return -1;
}
strncpy(chkpt.url, argv[3], MAX_URL_LEN);
+ if (argc>4) {
+ chkpt.opts = atoll(argv[4]);
+ } else {
+ chkpt.opts = V3_CHKPT_OPT_NONE;
+ }
+
vm_fd = open(vm_dev, O_RDONLY);
if (vm_fd == -1) {
printf("Error opening VM device: %s\n", vm_dev);
int v3_continue_vm(struct v3_vm_info * vm);
int v3_simulate_vm(struct v3_vm_info * vm, unsigned int msecs);
-int v3_save_vm(struct v3_vm_info * vm, char * store, char * url);
-int v3_load_vm(struct v3_vm_info * vm, char * store, char * url);
+int v3_save_vm(struct v3_vm_info * vm, char * store, char * url, unsigned long long opts);
+int v3_load_vm(struct v3_vm_info * vm, char * store, char * url, unsigned long long opts);
-int v3_send_vm(struct v3_vm_info * vm, char * store, char * url);
-int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url);
+int v3_send_vm(struct v3_vm_info * vm, char * store, char * url, unsigned long long opts);
+int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url, unsigned long long opts);
int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu);
struct v3_chkpt_ctx * v3_chkpt_open_ctx(struct v3_chkpt * chkpt, char * name);
int v3_chkpt_close_ctx(struct v3_chkpt_ctx * ctx);
-int v3_chkpt_save_vm(struct v3_vm_info * vm, char * store, char * url);
-int v3_chkpt_load_vm(struct v3_vm_info * vm, char * store, char * url);
+
+typedef uint64_t v3_chkpt_options_t;
+// The options are a bitwise OR of the following
+#define V3_CHKPT_OPT_NONE 0
+#define V3_CHKPT_OPT_SKIP_MEM 1 // don't write memory to store
+#define V3_CHKPT_OPT_SKIP_DEVS 2 // don't write devices to store
+#define V3_CHKPT_OPT_SKIP_CORES 4 // don't write architecture-independent core data to store
+#define V3_CHKPT_OPT_SKIP_ARCHDEP 8 // don't write architecture-dependent core data to store
+
+int v3_chkpt_save_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts);
+int v3_chkpt_load_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts);
#ifdef V3_CONFIG_LIVE_MIGRATION
-int v3_chkpt_send_vm(struct v3_vm_info * vm, char * store, char * url);
-int v3_chkpt_receive_vm(struct v3_vm_info * vm, char * store, char * url);
+int v3_chkpt_send_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts);
+int v3_chkpt_receive_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts);
#endif
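Note that the user/kernel struct carries opts as unsigned long long while this API takes v3_chkpt_options_t (a uint64_t); the two must stay the same width for the mask to cross both boundaries intact. A compile-time guard along these lines (hypothetical, not in the patch; requires C11) would make the assumption explicit:

    _Static_assert(sizeof(unsigned long long) == sizeof(v3_chkpt_options_t),
                   "checkpoint option mask must not change width across boundaries");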
int V3_init_checkpoint();
#ifdef V3_CONFIG_CHECKPOINT
#include <palacios/vmm_checkpoint.h>
-int v3_save_vm(struct v3_vm_info * vm, char * store, char * url) {
- return v3_chkpt_save_vm(vm, store, url);
+int v3_save_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
+ return v3_chkpt_save_vm(vm, store, url, opts);
}
-int v3_load_vm(struct v3_vm_info * vm, char * store, char * url) {
- return v3_chkpt_load_vm(vm, store, url);
+int v3_load_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
+ return v3_chkpt_load_vm(vm, store, url, opts);
}
#ifdef V3_CONFIG_LIVE_MIGRATION
-int v3_send_vm(struct v3_vm_info * vm, char * store, char * url) {
- return v3_chkpt_send_vm(vm, store, url);
+int v3_send_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
+ return v3_chkpt_send_vm(vm, store, url, opts);
}
-int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url) {
- return v3_chkpt_receive_vm(vm, store, url);
+int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
+ return v3_chkpt_receive_vm(vm, store, url, opts);
}
#endif
}
-static int load_core(struct guest_info * info, struct v3_chkpt * chkpt) {
+static int load_core(struct guest_info * info, struct v3_chkpt * chkpt, v3_chkpt_options_t opts) {
extern v3_cpu_arch_t v3_mach_type;
void * ctx = NULL;
char key_name[16];
}
+ if (opts & V3_CHKPT_OPT_SKIP_ARCHDEP) {
+ goto donearch;
+ }
+
switch (v3_mach_type) {
case V3_SVM_CPU:
case V3_SVM_REV3_CPU: {
goto loadfailout;
}
+ donearch:
+
PrintDebug(info->vm_info, info, "Load of core succeeded\n");
v3_print_guest_state(info);
// GEM5 - Hypercall for initiating transfer to gem5 (checkpoint)
-static int save_core(struct guest_info * info, struct v3_chkpt * chkpt) {
+static int save_core(struct guest_info * info, struct v3_chkpt * chkpt, v3_chkpt_options_t opts) {
extern v3_cpu_arch_t v3_mach_type;
void * ctx = NULL;
char key_name[16];
v3_chkpt_close_ctx(ctx); ctx=0;
+ if (opts & V3_CHKPT_OPT_SKIP_ARCHDEP) {
+ goto donearch;
+ }
+
// Architecture-specific code
switch (v3_mach_type) {
case V3_SVM_CPU:
goto savefailout;
}
+
+ donearch:
return 0;
// GEM5 - Madhav has debug code here for printing instrucions
//
-int v3_chkpt_save_vm(struct v3_vm_info * vm, char * store, char * url) {
+int v3_chkpt_save_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
struct v3_chkpt * chkpt = NULL;
int ret = 0;
int i = 0;
while (v3_raise_barrier(vm, NULL) == -1);
}
- if ((ret = save_memory(vm, chkpt)) == -1) {
+ if (!(opts & V3_CHKPT_OPT_SKIP_MEM)) {
+ if ((ret = save_memory(vm, chkpt)) == -1) {
PrintError(vm, VCORE_NONE, "Unable to save memory\n");
goto out;
+ }
}
- if ((ret = v3_save_vm_devices(vm, chkpt)) == -1) {
+ if (!(opts & V3_CHKPT_OPT_SKIP_DEVS)) {
+ if ((ret = v3_save_vm_devices(vm, chkpt)) == -1) {
PrintError(vm, VCORE_NONE, "Unable to save devices\n");
goto out;
+ }
}
-
if ((ret = save_header(vm, chkpt)) == -1) {
PrintError(vm, VCORE_NONE, "Unable to save header\n");
goto out;
}
-
- for (i = 0; i < vm->num_cores; i++){
- if ((ret = save_core(&(vm->cores[i]), chkpt)) == -1) {
- PrintError(vm, VCORE_NONE, "chkpt of core %d failed\n", i);
- goto out;
+
+ if (!(opts & V3_CHKPT_OPT_SKIP_CORES)) {
+ for (i = 0; i < vm->num_cores; i++){
+ if ((ret = save_core(&(vm->cores[i]), chkpt, opts)) == -1) {
+ PrintError(vm, VCORE_NONE, "chkpt of core %d failed\n", i);
+ goto out;
}
+ }
}
out:
}
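With these guards in place, a selective checkpoint becomes a one-liner for in-VMM callers. A hedged example (store and url values illustrative):

    /* Sketch: capture only core state, skipping the (large) memory image
     * and device state. */
    v3_chkpt_options_t opts = V3_CHKPT_OPT_SKIP_MEM | V3_CHKPT_OPT_SKIP_DEVS;

    if (v3_chkpt_save_vm(vm, "file", "/tmp/cores-only", opts) == -1) {
        PrintError(vm, VCORE_NONE, "selective checkpoint failed\n");
    }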
-int v3_chkpt_load_vm(struct v3_vm_info * vm, char * store, char * url) {
+int v3_chkpt_load_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
struct v3_chkpt * chkpt = NULL;
int i = 0;
int ret = 0;
while (v3_raise_barrier(vm, NULL) == -1);
}
- if ((ret = load_memory(vm, chkpt)) == -1) {
+ if (!(opts & V3_CHKPT_OPT_SKIP_MEM)) {
+ if ((ret = load_memory(vm, chkpt)) == -1) {
PrintError(vm, VCORE_NONE, "Unable to load memory\n");
goto out;
+ }
}
-
- if ((ret = v3_load_vm_devices(vm, chkpt)) == -1) {
+ if (!(opts & V3_CHKPT_OPT_SKIP_DEVS)) {
+ if ((ret = v3_load_vm_devices(vm, chkpt)) == -1) {
PrintError(vm, VCORE_NONE, "Unable to load devies\n");
goto out;
+ }
}
}
//per core cloning
- for (i = 0; i < vm->num_cores; i++) {
- if ((ret = load_core(&(vm->cores[i]), chkpt)) == -1) {
- PrintError(vm, VCORE_NONE, "Error loading core state (core=%d)\n", i);
- goto out;
+ if (!(opts & V3_CHKPT_OPT_SKIP_CORES)) {
+ for (i = 0; i < vm->num_cores; i++) {
+ if ((ret = load_core(&(vm->cores[i]), chkpt, opts)) == -1) {
+ PrintError(vm, VCORE_NONE, "Error loading core state (core=%d)\n", i);
+ goto out;
}
+ }
}
out:
-int v3_chkpt_send_vm(struct v3_vm_info * vm, char * store, char * url) {
+int v3_chkpt_send_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
struct v3_chkpt * chkpt = NULL;
int ret = 0;
int iter = 0;
// Currently will work only for shadow paging
for (i=0;i<vm->num_cores;i++) {
- if (vm->cores[i].shdw_pg_mode!=SHADOW_PAGING) {
- PrintError(vm, VCORE_NONE, "Cannot currently handle nested paging\n");
- return -1;
- }
+ if (vm->cores[i].shdw_pg_mode!=SHADOW_PAGING && !(opts & V3_CHKPT_OPT_SKIP_MEM)) {
+ PrintError(vm, VCORE_NONE, "Cannot currently handle nested paging\n");
+ return -1;
+ }
}
return -1;
}
+ if (opts & V3_CHKPT_OPT_SKIP_MEM) {
+ goto memdone;
+ }
+
// In a send, the memory is copied incrementally first,
// followed by the remainder of the state
ret = -1;
goto out;
}
-
+
+ memdone:
// save the non-memory state
- if ((ret = v3_save_vm_devices(vm, chkpt)) == -1) {
+ if (!(opts & V3_CHKPT_OPT_SKIP_DEVS)) {
+ if ((ret = v3_save_vm_devices(vm, chkpt)) == -1) {
PrintError(vm, VCORE_NONE, "Unable to save devices\n");
goto out;
+ }
}
-
if ((ret = save_header(vm, chkpt)) == -1) {
PrintError(vm, VCORE_NONE, "Unable to save header\n");
goto out;
}
- for (i = 0; i < vm->num_cores; i++){
- if ((ret = save_core(&(vm->cores[i]), chkpt)) == -1) {
- PrintError(vm, VCORE_NONE, "chkpt of core %d failed\n", i);
- goto out;
+ if (!(opts & V3_CHKPT_OPT_SKIP_CORES)) {
+ for (i = 0; i < vm->num_cores; i++){
+ if ((ret = save_core(&(vm->cores[i]), chkpt, opts)) == -1) {
+ PrintError(vm, VCORE_NONE, "chkpt of core %d failed\n", i);
+ goto out;
}
+ }
}
-
- stop_time = v3_get_host_time(&(vm->cores[0].time_state));
- PrintDebug(vm, VCORE_NONE, "num_mod_pages=%d\ndowntime=%llu\n",num_mod_pages,stop_time-start_time);
- PrintDebug(vm, VCORE_NONE, "Done sending VM!\n");
- out:
- v3_bitmap_deinit(&modified_pages_to_send);
+
+  if (!(opts & V3_CHKPT_OPT_SKIP_MEM)) {
+    stop_time = v3_get_host_time(&(vm->cores[0].time_state));
+    PrintDebug(vm, VCORE_NONE, "num_mod_pages=%d\ndowntime=%llu\n",num_mod_pages,stop_time-start_time);
+    PrintDebug(vm, VCORE_NONE, "Done sending VM!\n");
+  }
+
+ out:
+  // only deinit the bitmap if it was initialized (i.e., memory was sent);
+  // placing the label inside the guarded block would let error-path gotos
+  // deinit an uninitialized bitmap when V3_CHKPT_OPT_SKIP_MEM is set
+  if (!(opts & V3_CHKPT_OPT_SKIP_MEM)) {
+    v3_bitmap_deinit(&modified_pages_to_send);
+  }
+
chkpt_close(chkpt);
return ret;
}
-int v3_chkpt_receive_vm(struct v3_vm_info * vm, char * store, char * url) {
+int v3_chkpt_receive_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
struct v3_chkpt * chkpt = NULL;
int i = 0;
int ret = 0;
// Currently will work only for shadow paging
for (i=0;i<vm->num_cores;i++) {
- if (vm->cores[i].shdw_pg_mode!=SHADOW_PAGING) {
- PrintError(vm, VCORE_NONE, "Cannot currently handle nested paging\n");
- return -1;
- }
+ if (vm->cores[i].shdw_pg_mode!=SHADOW_PAGING && !(opts & V3_CHKPT_OPT_SKIP_MEM)) {
+ PrintError(vm, VCORE_NONE, "Cannot currently handle nested paging\n");
+ return -1;
+ }
}
chkpt = chkpt_open(vm, store, url, LOAD);
return -1;
}
+
+ if (opts & V3_CHKPT_OPT_SKIP_MEM) {
+ goto memdone;
+ }
+
if (v3_bitmap_init(&mod_pgs,vm->mem_size>>12) == -1) {
chkpt_close(chkpt);
PrintError(vm, VCORE_NONE, "Could not intialize bitmap.\n");
goto out;
}
}
+
+ memdone:
- if ((ret = v3_load_vm_devices(vm, chkpt)) == -1) {
+ if (!(opts & V3_CHKPT_OPT_SKIP_DEVS)) {
+ if ((ret = v3_load_vm_devices(vm, chkpt)) == -1) {
PrintError(vm, VCORE_NONE, "Unable to load devices\n");
ret = -1;
goto out;
+ }
}
-
if ((ret = load_header(vm, chkpt)) == -1) {
PrintError(vm, VCORE_NONE, "Unable to load header\n");
ret = -1;
}
//per core cloning
- for (i = 0; i < vm->num_cores; i++) {
- if ((ret = load_core(&(vm->cores[i]), chkpt)) == -1) {
- PrintError(vm, VCORE_NONE, "Error loading core state (core=%d)\n", i);
- goto out;
+ if (!(opts & V3_CHKPT_OPT_SKIP_CORES)) {
+ for (i = 0; i < vm->num_cores; i++) {
+ if ((ret = load_core(&(vm->cores[i]), chkpt, opts)) == -1) {
+ PrintError(vm, VCORE_NONE, "Error loading core state (core=%d)\n", i);
+ goto out;
}
+ }
}
-
+
out:
if (ret==-1) {
PrintError(vm, VCORE_NONE, "Unable to receive VM\n");
v3_lower_barrier(vm);
}
- v3_bitmap_deinit(&mod_pgs);
+
+ if (!(opts & V3_CHKPT_OPT_SKIP_MEM)) {
+ v3_bitmap_deinit(&mod_pgs);
+ }
+
chkpt_close(chkpt);
return ret;
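A final operational note: the sender and receiver must be given the same option mask, since each side only writes or reads the sections its mask allows; with mismatched masks the receiver will look for records the sender never produced (or vice versa). A sketch, with illustrative store/url strings and hypothetical src_vm/dst_vm handles:

    v3_chkpt_options_t opts = V3_CHKPT_OPT_SKIP_MEM;  /* state-only migration */

    /* On the source host: */
    v3_chkpt_send_vm(src_vm, "stream", "tcp:10.0.0.2:3000", opts);

    /* On the destination host, the mask must match: */
    v3_chkpt_receive_vm(dst_vm, "stream", "tcp:0.0.0.0:3000", opts);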