void * guest_mem_base = NULL;
void * ctx = NULL;
uint64_t ret = 0;
-
- guest_mem_base = V3_VAddr((void *)vm->mem_map.base_region.host_addr);
+ int i;
+ extern uint64_t v3_mem_block_size;
ctx = v3_chkpt_open_ctx(chkpt, "memory_img");
return -1;
}
- if (v3_chkpt_load(ctx, "memory_img", vm->mem_size, guest_mem_base)) {
- PrintError(vm, VCORE_NONE, "Unable to load all of memory (requested=%llu bytes, result=%llu bytes\n",(uint64_t)(vm->mem_size),ret);
- v3_chkpt_close_ctx(ctx);
- return -1;
+
+ for (i=0;i<vm->mem_map.num_base_regions;i++) {
+ guest_mem_base = V3_VAddr((void *)vm->mem_map.base_regions[i].host_addr);
+ if (v3_chkpt_load(ctx, "memory_img", v3_mem_block_size, guest_mem_base)) {
+	PrintError(vm, VCORE_NONE, "Unable to load all of memory (region %d) (requested=%llu bytes, result=%llu bytes)\n",i,(uint64_t)(v3_mem_block_size),ret);
+ v3_chkpt_close_ctx(ctx);
+ return -1;
+ }
}
v3_chkpt_close_ctx(ctx);
void * guest_mem_base = NULL;
void * ctx = NULL;
uint64_t ret = 0;
+ extern uint64_t v3_mem_block_size;
+ int i;
- guest_mem_base = V3_VAddr((void *)vm->mem_map.base_region.host_addr);
ctx = v3_chkpt_open_ctx(chkpt, "memory_img");
return -1;
}
- if (v3_chkpt_save(ctx, "memory_img", vm->mem_size, guest_mem_base)) {
- PrintError(vm, VCORE_NONE, "Unable to save all of memory (requested=%llu, received=%llu)\n",(uint64_t)(vm->mem_size),ret);
- v3_chkpt_close_ctx(ctx);
- return -1;
+ for (i=0;i<vm->mem_map.num_base_regions;i++) {
+ guest_mem_base = V3_VAddr((void *)vm->mem_map.base_regions[i].host_addr);
+ if (v3_chkpt_save(ctx, "memory_img", v3_mem_block_size, guest_mem_base)) {
+	PrintError(vm, VCORE_NONE, "Unable to save all of memory (region %d) (requested=%llu, received=%llu)\n",i,(uint64_t)(v3_mem_block_size),ret);
+ v3_chkpt_close_ctx(ctx);
+ return -1;
+ }
}
v3_chkpt_close_ctx(ctx);
int page_size_bytes = 1 << 12; // assuming 4k pages right now
void * ctx = NULL;
int i = 0;
- void * guest_mem_base = NULL;
int bitmap_num_bytes = (mod_pgs_to_send->num_bits / 8)
+ ((mod_pgs_to_send->num_bits % 8) > 0);
- guest_mem_base = V3_VAddr((void *)vm->mem_map.base_region.host_addr);
-
PrintDebug(vm, VCORE_NONE, "Saving incremental memory.\n");
ctx = v3_chkpt_open_ctx(chkpt,"memory_bitmap_bits");
// Dirty memory pages are sent in bitmap order
for (i = 0; i < mod_pgs_to_send->num_bits; i++) {
if (v3_bitmap_check(mod_pgs_to_send, i)) {
- // PrintDebug(vm, VCORE_NONE, "Sending memory page %d.\n",i);
+ struct v3_mem_region *region = v3_get_base_region(vm,page_size_bytes * i);
+ if (!region) {
+ PrintError(vm, VCORE_NONE, "Failed to find base region for page %d\n",i);
+ return -1;
+ }
+ // PrintDebug(vm, VCORE_NONE, "Sending memory page %d.\n",i);
ctx = v3_chkpt_open_ctx(chkpt, "memory_page");
if (!ctx) {
PrintError(vm, VCORE_NONE, "Unable to open context to send memory page\n");
if (v3_chkpt_save(ctx,
"memory_page",
page_size_bytes,
- guest_mem_base + (page_size_bytes * i))) {
+ (void*)(region->host_addr + page_size_bytes * i - region->guest_start))) {
PrintError(vm, VCORE_NONE, "Unable to send a memory page\n");
v3_chkpt_close_ctx(ctx);
return -1;
int page_size_bytes = 1 << 12; // assuming 4k pages right now
void * ctx = NULL;
int i = 0;
- void * guest_mem_base = NULL;
bool empty_bitmap = true;
int bitmap_num_bytes = (mod_pgs->num_bits / 8)
+ ((mod_pgs->num_bits % 8) > 0);
- guest_mem_base = V3_VAddr((void *)vm->mem_map.base_region.host_addr);
-
ctx = v3_chkpt_open_ctx(chkpt, "memory_bitmap_bits");
if (!ctx) {
// Receive also follows bitmap order
for (i = 0; i < mod_pgs->num_bits; i ++) {
if (v3_bitmap_check(mod_pgs, i)) {
- PrintDebug(vm, VCORE_NONE, "Loading page %d\n", i);
+ struct v3_mem_region *region = v3_get_base_region(vm,page_size_bytes * i);
+ if (!region) {
+ PrintError(vm, VCORE_NONE, "Failed to find base region for page %d\n",i);
+ return -1;
+ }
+ //PrintDebug(vm, VCORE_NONE, "Loading page %d\n", i);
empty_bitmap = false;
ctx = v3_chkpt_open_ctx(chkpt, "memory_page");
if (!ctx) {
if (v3_chkpt_load(ctx,
"memory_page",
page_size_bytes,
- guest_mem_base + (page_size_bytes * i))) {
+ (void*)(region->host_addr + page_size_bytes * i - region->guest_start))) {
PrintError(vm, VCORE_NONE, "Did not receive all of memory page\n");
v3_chkpt_close_ctx(ctx);
return -1;
}
-static int load_core(struct guest_info * info, struct v3_chkpt * chkpt) {
+static int load_core(struct guest_info * info, struct v3_chkpt * chkpt, v3_chkpt_options_t opts) {
extern v3_cpu_arch_t v3_mach_type;
void * ctx = NULL;
char key_name[16];
// Some components of guest state captured in the shadow pager
V3_CHKPT_LOAD(ctx, "GUEST_CR3", info->shdw_pg_state.guest_cr3, loadfailout);
- V3_CHKPT_LOAD(ctx, "GUEST_CRO", info->shdw_pg_state.guest_cr0, loadfailout);
+ V3_CHKPT_LOAD(ctx, "GUEST_CR0", info->shdw_pg_state.guest_cr0, loadfailout);
V3_CHKPT_LOAD(ctx, "GUEST_EFER", info->shdw_pg_state.guest_efer, loadfailout);
v3_chkpt_close_ctx(ctx); ctx=0;
}
+ if (opts & V3_CHKPT_OPT_SKIP_ARCHDEP) {
+ goto donearch;
+ }
+
switch (v3_mach_type) {
case V3_SVM_CPU:
case V3_SVM_REV3_CPU: {
goto loadfailout;
}
+ donearch:
+
PrintDebug(info->vm_info, info, "Load of core succeeded\n");
v3_print_guest_state(info);
// GEM5 - Hypercall for initiating transfer to gem5 (checkpoint)
-static int save_core(struct guest_info * info, struct v3_chkpt * chkpt) {
+static int save_core(struct guest_info * info, struct v3_chkpt * chkpt, v3_chkpt_options_t opts) {
extern v3_cpu_arch_t v3_mach_type;
void * ctx = NULL;
char key_name[16];
// Some components of guest state captured in the shadow pager
V3_CHKPT_SAVE(ctx, "GUEST_CR3", info->shdw_pg_state.guest_cr3, savefailout);
- V3_CHKPT_SAVE(ctx, "GUEST_CRO", info->shdw_pg_state.guest_cr0, savefailout);
+ V3_CHKPT_SAVE(ctx, "GUEST_CR0", info->shdw_pg_state.guest_cr0, savefailout);
V3_CHKPT_SAVE(ctx, "GUEST_EFER", info->shdw_pg_state.guest_efer, savefailout);
v3_chkpt_close_ctx(ctx); ctx=0;
+ if (opts & V3_CHKPT_OPT_SKIP_ARCHDEP) {
+ goto donearch;
+ }
+
//Architechture specific code
switch (v3_mach_type) {
case V3_SVM_CPU:
goto savefailout;
}
+
+ donearch:
return 0;
// GEM5 - Madhav has debug code here for printing instrucions
//
-int v3_chkpt_save_vm(struct v3_vm_info * vm, char * store, char * url) {
+int v3_chkpt_save_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
struct v3_chkpt * chkpt = NULL;
int ret = 0;;
int i = 0;
while (v3_raise_barrier(vm, NULL) == -1);
}
- if ((ret = save_memory(vm, chkpt)) == -1) {
+ if (!(opts & V3_CHKPT_OPT_SKIP_MEM)) {
+ if ((ret = save_memory(vm, chkpt)) == -1) {
PrintError(vm, VCORE_NONE, "Unable to save memory\n");
goto out;
+ }
}
- if ((ret = v3_save_vm_devices(vm, chkpt)) == -1) {
+ if (!(opts & V3_CHKPT_OPT_SKIP_DEVS)) {
+ if ((ret = v3_save_vm_devices(vm, chkpt)) == -1) {
PrintError(vm, VCORE_NONE, "Unable to save devices\n");
goto out;
+ }
}
-
if ((ret = save_header(vm, chkpt)) == -1) {
PrintError(vm, VCORE_NONE, "Unable to save header\n");
goto out;
}
-
- for (i = 0; i < vm->num_cores; i++){
- if ((ret = save_core(&(vm->cores[i]), chkpt)) == -1) {
- PrintError(vm, VCORE_NONE, "chkpt of core %d failed\n", i);
- goto out;
+
+ if (!(opts & V3_CHKPT_OPT_SKIP_CORES)) {
+ for (i = 0; i < vm->num_cores; i++){
+ if ((ret = save_core(&(vm->cores[i]), chkpt, opts)) == -1) {
+ PrintError(vm, VCORE_NONE, "chkpt of core %d failed\n", i);
+ goto out;
}
+ }
}
out:
}
-int v3_chkpt_load_vm(struct v3_vm_info * vm, char * store, char * url) {
+int v3_chkpt_load_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
struct v3_chkpt * chkpt = NULL;
int i = 0;
int ret = 0;
while (v3_raise_barrier(vm, NULL) == -1);
}
- if ((ret = load_memory(vm, chkpt)) == -1) {
+ if (!(opts & V3_CHKPT_OPT_SKIP_MEM)) {
+ if ((ret = load_memory(vm, chkpt)) == -1) {
PrintError(vm, VCORE_NONE, "Unable to load memory\n");
goto out;
+ }
}
-
- if ((ret = v3_load_vm_devices(vm, chkpt)) == -1) {
+ if (!(opts & V3_CHKPT_OPT_SKIP_DEVS)) {
+ if ((ret = v3_load_vm_devices(vm, chkpt)) == -1) {
PrintError(vm, VCORE_NONE, "Unable to load devies\n");
goto out;
+ }
}
}
//per core cloning
- for (i = 0; i < vm->num_cores; i++) {
- if ((ret = load_core(&(vm->cores[i]), chkpt)) == -1) {
- PrintError(vm, VCORE_NONE, "Error loading core state (core=%d)\n", i);
- goto out;
+ if (!(opts & V3_CHKPT_OPT_SKIP_CORES)) {
+ for (i = 0; i < vm->num_cores; i++) {
+ if ((ret = load_core(&(vm->cores[i]), chkpt, opts)) == -1) {
+ PrintError(vm, VCORE_NONE, "Error loading core state (core=%d)\n", i);
+ goto out;
}
+ }
}
out:
-int v3_chkpt_send_vm(struct v3_vm_info * vm, char * store, char * url) {
+int v3_chkpt_send_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
struct v3_chkpt * chkpt = NULL;
int ret = 0;;
int iter = 0;
// Currently will work only for shadow paging
for (i=0;i<vm->num_cores;i++) {
- if (vm->cores[i].shdw_pg_mode!=SHADOW_PAGING) {
- PrintError(vm, VCORE_NONE, "Cannot currently handle nested paging\n");
- return -1;
- }
+ if (vm->cores[i].shdw_pg_mode!=SHADOW_PAGING && !(opts & V3_CHKPT_OPT_SKIP_MEM)) {
+ PrintError(vm, VCORE_NONE, "Cannot currently handle nested paging\n");
+ return -1;
+ }
}
return -1;
}
+ if (opts & V3_CHKPT_OPT_SKIP_MEM) {
+ goto memdone;
+ }
+
// In a send, the memory is copied incrementally first,
// followed by the remainder of the state
ret = -1;
goto out;
}
-
+
+ memdone:
// save the non-memory state
- if ((ret = v3_save_vm_devices(vm, chkpt)) == -1) {
+ if (!(opts & V3_CHKPT_OPT_SKIP_DEVS)) {
+ if ((ret = v3_save_vm_devices(vm, chkpt)) == -1) {
PrintError(vm, VCORE_NONE, "Unable to save devices\n");
goto out;
+ }
}
-
if ((ret = save_header(vm, chkpt)) == -1) {
PrintError(vm, VCORE_NONE, "Unable to save header\n");
goto out;
}
- for (i = 0; i < vm->num_cores; i++){
- if ((ret = save_core(&(vm->cores[i]), chkpt)) == -1) {
- PrintError(vm, VCORE_NONE, "chkpt of core %d failed\n", i);
- goto out;
+ if (!(opts & V3_CHKPT_OPT_SKIP_CORES)) {
+ for (i = 0; i < vm->num_cores; i++){
+ if ((ret = save_core(&(vm->cores[i]), chkpt, opts)) == -1) {
+ PrintError(vm, VCORE_NONE, "chkpt of core %d failed\n", i);
+ goto out;
}
+ }
}
-
-    stop_time = v3_get_host_time(&(vm->cores[0].time_state));
-    PrintDebug(vm, VCORE_NONE, "num_mod_pages=%d\ndowntime=%llu\n",num_mod_pages,stop_time-start_time);
-    PrintDebug(vm, VCORE_NONE, "Done sending VM!\n");
- out:
-    v3_bitmap_deinit(&modified_pages_to_send);
+
+    if (!(opts & V3_CHKPT_OPT_SKIP_MEM)) {
+	stop_time = v3_get_host_time(&(vm->cores[0].time_state));
+	PrintDebug(vm, VCORE_NONE, "num_mod_pages=%d\ndowntime=%llu\n",num_mod_pages,stop_time-start_time);
+	PrintDebug(vm, VCORE_NONE, "Done sending VM!\n");
+    }
+
+ out:
+    if (!(opts & V3_CHKPT_OPT_SKIP_MEM)) {
+	v3_bitmap_deinit(&modified_pages_to_send);
+    }
+
chkpt_close(chkpt);
return ret;
}
-int v3_chkpt_receive_vm(struct v3_vm_info * vm, char * store, char * url) {
+int v3_chkpt_receive_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
struct v3_chkpt * chkpt = NULL;
int i = 0;
int ret = 0;
// Currently will work only for shadow paging
for (i=0;i<vm->num_cores;i++) {
- if (vm->cores[i].shdw_pg_mode!=SHADOW_PAGING) {
- PrintError(vm, VCORE_NONE, "Cannot currently handle nested paging\n");
- return -1;
- }
+ if (vm->cores[i].shdw_pg_mode!=SHADOW_PAGING && !(opts & V3_CHKPT_OPT_SKIP_MEM)) {
+ PrintError(vm, VCORE_NONE, "Cannot currently handle nested paging\n");
+ return -1;
+ }
}
chkpt = chkpt_open(vm, store, url, LOAD);
return -1;
}
+
+ if (opts & V3_CHKPT_OPT_SKIP_MEM) {
+ goto memdone;
+ }
+
if (v3_bitmap_init(&mod_pgs,vm->mem_size>>12) == -1) {
chkpt_close(chkpt);
PrintError(vm, VCORE_NONE, "Could not intialize bitmap.\n");
goto out;
}
}
+
+ memdone:
- if ((ret = v3_load_vm_devices(vm, chkpt)) == -1) {
+ if (!(opts & V3_CHKPT_OPT_SKIP_DEVS)) {
+ if ((ret = v3_load_vm_devices(vm, chkpt)) == -1) {
PrintError(vm, VCORE_NONE, "Unable to load devices\n");
ret = -1;
goto out;
+ }
}
-
if ((ret = load_header(vm, chkpt)) == -1) {
PrintError(vm, VCORE_NONE, "Unable to load header\n");
ret = -1;
}
//per core cloning
- for (i = 0; i < vm->num_cores; i++) {
- if ((ret = load_core(&(vm->cores[i]), chkpt)) == -1) {
- PrintError(vm, VCORE_NONE, "Error loading core state (core=%d)\n", i);
- goto out;
+ if (!(opts & V3_CHKPT_OPT_SKIP_CORES)) {
+ for (i = 0; i < vm->num_cores; i++) {
+ if ((ret = load_core(&(vm->cores[i]), chkpt, opts)) == -1) {
+ PrintError(vm, VCORE_NONE, "Error loading core state (core=%d)\n", i);
+ goto out;
}
+ }
}
-
+
out:
if (ret==-1) {
PrintError(vm, VCORE_NONE, "Unable to receive VM\n");
v3_lower_barrier(vm);
}
- v3_bitmap_deinit(&mod_pgs);
+
+ if (!(opts & V3_CHKPT_OPT_SKIP_MEM)) {
+ v3_bitmap_deinit(&mod_pgs);
+ }
+
chkpt_close(chkpt);
return ret;