}
-#define MAX_VCORES 32
+#define MAX_VCORES 256
+#define MAX_REGIONS 256
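+// Upper bounds on how many vcores and memory regions we report per VM via /proc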
static int read_guests(char * buf, char ** start, off_t off, int count,
int * eof, void * data)
{
int len = 0;
unsigned int i = 0;
+ struct v3_vm_base_state *base=0;
+ struct v3_vm_core_state *core=0;
+ struct v3_vm_mem_state *mem=0;
- struct v3_vm_state *s =palacios_alloc(sizeof(struct v3_vm_state)+MAX_VCORES*sizeof(struct v3_vcore_state));
+ base = palacios_alloc(sizeof(struct v3_vm_base_state));
- if (!s) {
- ERROR("No space for state structure\n");
+ if (!base) {
+ ERROR("No space for base state structure\n");
goto out;
}
+
+ core = palacios_alloc(sizeof(struct v3_vm_core_state) + MAX_VCORES*sizeof(struct v3_vm_vcore_state));
+
+ if (!core) {
+ ERROR("No space for core state structure\n");
+ goto out;
+ }
+
+ mem = palacios_alloc(sizeof(struct v3_vm_mem_state) + MAX_REGIONS*sizeof(struct v3_vm_mem_region));
+ if (!mem) {
+ ERROR("No space for memory state structure\n");
+ goto out;
+ }
+
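+ // Collect base, core, and memory state for each active guest and format it into the proc buffer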
for(i = 0; i < MAX_VMS; i++) {
if (guest_map[i] != NULL) {
if (len>=count) {
goto out;
} else {
// Get extended data
- s->num_vcores=MAX_VCORES; // max we can handle
- if (v3_get_state_vm(guest_map[i]->v3_ctx, s)) {
+ core->num_vcores=MAX_VCORES; // max we can handle
+ mem->num_regions=MAX_REGIONS; // max we can handle
+ if (v3_get_state_vm(guest_map[i]->v3_ctx, base, core, mem)) {
ERROR("Cannot get VM info\n");
*(buf+len-1)='\n';
goto out;
unsigned long j;
len+=snprintf(buf+len, count-len,
- "%s [0x%p-0x%p] %lu vcores ",
- s->state==V3_VM_INVALID ? "INVALID" :
- s->state==V3_VM_RUNNING ? "running" :
- s->state==V3_VM_STOPPED ? "stopped" :
- s->state==V3_VM_PAUSED ? "paused" :
- s->state==V3_VM_ERROR ? "ERROR" :
- s->state==V3_VM_SIMULATING ? "simulating" : "UNKNOWN",
- s->mem_base_paddr, s->mem_base_paddr+s->mem_size-1,
- s->num_vcores);
+ "%s %lu regions [ ",
+ base->state==V3_VM_INVALID ? "INVALID" :
+ base->state==V3_VM_RUNNING ? "running" :
+ base->state==V3_VM_STOPPED ? "stopped" :
+ base->state==V3_VM_PAUSED ? "paused" :
+ base->state==V3_VM_ERROR ? "ERROR" :
+ base->state==V3_VM_SIMULATING ? "simulating" : "UNKNOWN",
+ mem->num_regions);
+
if (len>=count) {
*(buf+len-1)='\n';
goto out;
}
- for (j=0;j<s->num_vcores;j++) {
+
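+ // Report each guest memory region as a range of host physical addresses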
+ for (j=0;j<mem->num_regions;j++) {
+ len+=snprintf(buf+len, count-len,
+ "(region %lu 0x%p-0x%p) ",
+ j, mem->region[j].host_paddr, mem->region[j].host_paddr+mem->region[j].size-1);
+ if (len>=count) {
+ *(buf+len-1)='\n';
+ goto out;
+ }
+ }
+
+ len+=snprintf(buf+len, count-len,
+ "] %lu vcores [ ",
+ core->num_vcores);
+
+ if (len>=count) {
+ *(buf+len-1)='\n';
+ goto out;
+ }
+
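+ // Report per-vcore status: run state, physical core, exit count, last RIP, CPU mode, memory mode, and shadow/nested paging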
+ for (j=0;j<core->num_vcores;j++) {
len+=snprintf(buf+len, count-len,
- "[vcore %lu %s on pcore %lu %llu exits rip=0x%p %s %s %s] ",
+ "(vcore %lu %s on pcore %lu %llu exits rip=0x%p %s %s %s) ",
j,
- s->vcore[j].state==V3_VCORE_INVALID ? "INVALID" :
- s->vcore[j].state==V3_VCORE_RUNNING ? "running" :
- s->vcore[j].state==V3_VCORE_STOPPED ? "stopped" : "UNKNOWN",
- s->vcore[j].pcore,
- s->vcore[j].num_exits,
- s->vcore[j].last_rip,
- s->vcore[j].cpu_mode==V3_VCORE_CPU_REAL ? "real" :
- s->vcore[j].cpu_mode==V3_VCORE_CPU_PROTECTED ? "protected" :
- s->vcore[j].cpu_mode==V3_VCORE_CPU_PROTECTED_PAE ? "protectedpae" :
- s->vcore[j].cpu_mode==V3_VCORE_CPU_LONG ? "long" :
- s->vcore[j].cpu_mode==V3_VCORE_CPU_LONG_32_COMPAT ? "long32" :
- s->vcore[j].cpu_mode==V3_VCORE_CPU_LONG_16_COMPAT ? "long16" : "UNKNOWN",
- s->vcore[j].mem_mode==V3_VCORE_MEM_MODE_PHYSICAL ? "physical" :
- s->vcore[j].mem_mode==V3_VCORE_MEM_MODE_VIRTUAL ? "virtual" : "UNKNOWN",
- s->vcore[j].mem_state==V3_VCORE_MEM_STATE_SHADOW ? "shadow" :
- s->vcore[j].mem_state==V3_VCORE_MEM_STATE_NESTED ? "nested" : "UNKNOWN");
+ core->vcore[j].state==V3_VCORE_INVALID ? "INVALID" :
+ core->vcore[j].state==V3_VCORE_RUNNING ? "running" :
+ core->vcore[j].state==V3_VCORE_STOPPED ? "stopped" : "UNKNOWN",
+ core->vcore[j].pcore,
+ core->vcore[j].num_exits,
+ core->vcore[j].last_rip,
+ core->vcore[j].cpu_mode==V3_VCORE_CPU_REAL ? "real" :
+ core->vcore[j].cpu_mode==V3_VCORE_CPU_PROTECTED ? "protected" :
+ core->vcore[j].cpu_mode==V3_VCORE_CPU_PROTECTED_PAE ? "protectedpae" :
+ core->vcore[j].cpu_mode==V3_VCORE_CPU_LONG ? "long" :
+ core->vcore[j].cpu_mode==V3_VCORE_CPU_LONG_32_COMPAT ? "long32" :
+ core->vcore[j].cpu_mode==V3_VCORE_CPU_LONG_16_COMPAT ? "long16" : "UNKNOWN",
+ core->vcore[j].mem_mode==V3_VCORE_MEM_MODE_PHYSICAL ? "physical" :
+ core->vcore[j].mem_mode==V3_VCORE_MEM_MODE_VIRTUAL ? "virtual" : "UNKNOWN",
+ core->vcore[j].mem_state==V3_VCORE_MEM_STATE_SHADOW ? "shadow" :
+ core->vcore[j].mem_state==V3_VCORE_MEM_STATE_NESTED ? "nested" : "UNKNOWN");
if (len>=count) {
- *(buf+len-1)='\n';
- goto out;
+ *(buf+len-1)='\n';
+ goto out;
}
}
+ len+=snprintf(buf+len, count-len,
+ "] ");
+
+ if (len>=count) {
+ *(buf+len-1)='\n';
+ goto out;
+ }
+
*(buf+len-1)='\n';
}
}
out:
- if (s) { palacios_free(s); }
+ if (mem) { palacios_free(mem); }
+ if (core) { palacios_free(core); }
+ if (base) { palacios_free(base); }
return len;
}
void * guest_mem_base = NULL;
void * ctx = NULL;
uint64_t ret = 0;
-
- guest_mem_base = V3_VAddr((void *)vm->mem_map.base_region.host_addr);
+ int i;
ctx = v3_chkpt_open_ctx(chkpt, "memory_img");
return -1;
}
- if (v3_chkpt_load(ctx, "memory_img", vm->mem_size, guest_mem_base)) {
- PrintError(vm, VCORE_NONE, "Unable to load all of memory (requested=%llu bytes, result=%llu bytes\n",(uint64_t)(vm->mem_size),ret);
- v3_chkpt_close_ctx(ctx);
- return -1;
+
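+ // Guest memory now consists of multiple base regions; each is loaded as one V3_CONFIG_MEM_BLOCK_SIZE block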
+ for (i=0;i<vm->mem_map.num_base_regions;i++) {
+ guest_mem_base = V3_VAddr((void *)vm->mem_map.base_regions[i].host_addr);
+ if (v3_chkpt_load(ctx, "memory_img", V3_CONFIG_MEM_BLOCK_SIZE, guest_mem_base)) {
+ PrintError(vm, VCORE_NONE, "Unable to load all of memory (region %d) (requested=%llu bytes, result=%llu bytes\n",i,(uint64_t)(vm->mem_size),ret);
+ v3_chkpt_close_ctx(ctx);
+ return -1;
+ }
}
v3_chkpt_close_ctx(ctx);
void * guest_mem_base = NULL;
void * ctx = NULL;
uint64_t ret = 0;
+ int i;
- guest_mem_base = V3_VAddr((void *)vm->mem_map.base_region.host_addr);
ctx = v3_chkpt_open_ctx(chkpt, "memory_img");
return -1;
}
- if (v3_chkpt_save(ctx, "memory_img", vm->mem_size, guest_mem_base)) {
- PrintError(vm, VCORE_NONE, "Unable to save all of memory (requested=%llu, received=%llu)\n",(uint64_t)(vm->mem_size),ret);
- v3_chkpt_close_ctx(ctx);
- return -1;
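+ // Mirror of the load path: save each base region as one V3_CONFIG_MEM_BLOCK_SIZE block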
+ for (i=0;i<vm->mem_map.num_base_regions;i++) {
+ guest_mem_base = V3_VAddr((void *)vm->mem_map.base_regions[i].host_addr);
+ if (v3_chkpt_save(ctx, "memory_img", V3_CONFIG_MEM_BLOCK_SIZE, guest_mem_base)) {
+ PrintError(vm, VCORE_NONE, "Unable to save all of memory (region %d) (requested=%llu, received=%llu)\n",i,(uint64_t)(vm->mem_size),ret);
+ v3_chkpt_close_ctx(ctx);
+ return -1;
+ }
}
v3_chkpt_close_ctx(ctx);
int page_size_bytes = 1 << 12; // assuming 4k pages right now
void * ctx = NULL;
int i = 0;
- void * guest_mem_base = NULL;
int bitmap_num_bytes = (mod_pgs_to_send->num_bits / 8)
+ ((mod_pgs_to_send->num_bits % 8) > 0);
- guest_mem_base = V3_VAddr((void *)vm->mem_map.base_region.host_addr);
-
PrintDebug(vm, VCORE_NONE, "Saving incremental memory.\n");
ctx = v3_chkpt_open_ctx(chkpt,"memory_bitmap_bits");
// Dirty memory pages are sent in bitmap order
for (i = 0; i < mod_pgs_to_send->num_bits; i++) {
if (v3_bitmap_check(mod_pgs_to_send, i)) {
- // PrintDebug(vm, VCORE_NONE, "Sending memory page %d.\n",i);
+ struct v3_mem_region *region = v3_get_base_region(vm,page_size_bytes * i);
+ if (!region) {
+ PrintError(vm, VCORE_NONE, "Failed to find base region for page %d\n",i);
+ return -1;
+ }
+ // PrintDebug(vm, VCORE_NONE, "Sending memory page %d.\n",i);
ctx = v3_chkpt_open_ctx(chkpt, "memory_page");
if (!ctx) {
PrintError(vm, VCORE_NONE, "Unable to open context to send memory page\n");
if (v3_chkpt_save(ctx,
"memory_page",
page_size_bytes,
- guest_mem_base + (page_size_bytes * i))) {
+ V3_VAddr((void*)(region->host_addr + page_size_bytes * i - region->guest_start)))) {
PrintError(vm, VCORE_NONE, "Unable to send a memory page\n");
v3_chkpt_close_ctx(ctx);
return -1;
int page_size_bytes = 1 << 12; // assuming 4k pages right now
void * ctx = NULL;
int i = 0;
- void * guest_mem_base = NULL;
bool empty_bitmap = true;
int bitmap_num_bytes = (mod_pgs->num_bits / 8)
+ ((mod_pgs->num_bits % 8) > 0);
- guest_mem_base = V3_VAddr((void *)vm->mem_map.base_region.host_addr);
-
ctx = v3_chkpt_open_ctx(chkpt, "memory_bitmap_bits");
if (!ctx) {
// Receive also follows bitmap order
for (i = 0; i < mod_pgs->num_bits; i ++) {
if (v3_bitmap_check(mod_pgs, i)) {
- PrintDebug(vm, VCORE_NONE, "Loading page %d\n", i);
+ struct v3_mem_region *region = v3_get_base_region(vm,page_size_bytes * i);
+ if (!region) {
+ PrintError(vm, VCORE_NONE, "Failed to find base region for page %d\n",i);
+ return -1;
+ }
+ //PrintDebug(vm, VCORE_NONE, "Loading page %d\n", i);
empty_bitmap = false;
ctx = v3_chkpt_open_ctx(chkpt, "memory_page");
if (!ctx) {
if (v3_chkpt_load(ctx,
"memory_page",
page_size_bytes,
- guest_mem_base + (page_size_bytes * i))) {
+ V3_VAddr((void*)(region->host_addr + page_size_bytes * i - region->guest_start)))) {
PrintError(vm, VCORE_NONE, "Did not receive all of memory page\n");
v3_chkpt_close_ctx(ctx);
return -1;