{
unsigned int i = 0;
unsigned int j = 0;
+ uint64_t num_vcores, num_regions;
struct v3_vm_base_state *base=0;
struct v3_vm_core_state *core=0;
struct v3_vm_mem_state *mem=0;
goto out;
}
- core = palacios_alloc(sizeof(struct v3_vm_core_state) + MAX_VCORES*sizeof(struct v3_vm_vcore_state));
-
- if (!core) {
- ERROR("No space for core state structure\n");
- goto out;
- }
+ for(i = 0; i < MAX_VMS; i++) {
+
+ if (guest_map[i] != NULL) {
+
+ v3_get_state_sizes_vm(guest_map[i]->v3_ctx,&num_vcores,&num_regions);
+
+ core = palacios_alloc(sizeof(struct v3_vm_core_state) + num_vcores*sizeof(struct v3_vm_vcore_state));
+
+ if (!core) {
+ ERROR("No space for core state structure\n");
+ goto out;
+ }
- mem = palacios_alloc(sizeof(struct v3_vm_mem_state) + MAX_REGIONS*sizeof(struct v3_vm_mem_region));
+ mem = palacios_alloc(sizeof(struct v3_vm_mem_state) + num_regions*sizeof(struct v3_vm_mem_region));
- if (!mem) {
- ERROR("No space for memory state structure\n");
- goto out;
- }
+ if (!mem) {
+ ERROR("No space for memory state structure\n");
+ goto out;
+ }
- for(i = 0; i < MAX_VMS; i++) {
- if (guest_map[i] != NULL) {
seq_printf(s,
"---------------------------------------------------------------------------------------\n");
seq_printf(s,
i,guest_map[i]->name, i);
// Get extended data
- core->num_vcores=MAX_VCORES; // max we can handle
- mem->num_regions=MAX_REGIONS; // max we can handle
+ core->num_vcores=num_vcores;
+ mem->num_regions=num_regions;
if (v3_get_state_vm(guest_map[i]->v3_ctx, base, core, mem)) {
ERROR("Cannot get VM info\n");
seq_printf(s, "<unable to get data for this VM>\n");
} else {
seq_printf(s,
+ "Type: %s\n"
"State: %s\n"
- "Cores: %lu\n"
- "Regions: %lu\n\n",
+ "Cores: %llu\n"
+ "Regions: %llu\n"
+ "Memsize: %llu (%llu ROS)\n\n",
+ base->vm_type==V3_VM_GENERAL ? "general" :
+ base->vm_type==V3_VM_HVM ? "HVM" : "UNKNOWN",
base->state==V3_VM_INVALID ? "INVALID" :
base->state==V3_VM_RUNNING ? "running" :
base->state==V3_VM_STOPPED ? "stopped" :
base->state==V3_VM_SIMULATING ? "simulating" :
base->state==V3_VM_RESETTING ? "resetting" : "UNKNOWN",
core->num_vcores,
- mem->num_regions);
+ mem->num_regions,
+ mem->mem_size,
+ mem->ros_mem_size);
+
seq_printf(s, "Core States\n");
for (j=0;j<core->num_vcores;j++) {
seq_printf(s,
- " vcore %u %s on pcore %lu %llu exits rip=0x%p %s %s %s\n",
+ " vcore %u %s on pcore %lu %llu exits rip=0x%p %s %s %s %s\n",
j,
core->vcore[j].state==V3_VCORE_INVALID ? "INVALID" :
core->vcore[j].state==V3_VCORE_RUNNING ? "running" :
core->vcore[j].mem_mode==V3_VCORE_MEM_MODE_PHYSICAL ? "physical" :
core->vcore[j].mem_mode==V3_VCORE_MEM_MODE_VIRTUAL ? "virtual" : "UNKNOWN",
core->vcore[j].mem_state==V3_VCORE_MEM_STATE_SHADOW ? "shadow" :
- core->vcore[j].mem_state==V3_VCORE_MEM_STATE_NESTED ? "nested" : "UNKNOWN");
+ core->vcore[j].mem_state==V3_VCORE_MEM_STATE_NESTED ? "nested" : "UNKNOWN",
+ core->vcore[j].vcore_type==V3_VCORE_GENERAL ? "" :
+ core->vcore[j].vcore_type==V3_VCORE_ROS ? "ros" :
+ core->vcore[j].vcore_type==V3_VCORE_HRT ? "hrt" : "UNKNOWN");
}
seq_printf(s, "\nMemory Regions\n");
for (j=0;j<mem->num_regions;j++) {
- seq_printf(s," region %u has HPAs 0x%p-0x%p (node %d) %s %s\n",
- j, mem->region[j].host_paddr, mem->region[j].host_paddr+mem->region[j].size,
+ seq_printf(s," region %u has HPAs 0x%016llx-0x%016llx (node %d) GPA 0x%016llx %s %s\n",
+ j, (uint64_t)mem->region[j].host_paddr, (uint64_t)mem->region[j].host_paddr+mem->region[j].size,
numa_addr_to_node((uintptr_t)(mem->region[j].host_paddr)),
+ (uint64_t)mem->region[j].guest_paddr,
mem->region[j].swapped ? "swapped" : "",
mem->region[j].pinned ? "pinned" : "");
}
}
seq_printf(s,
"---------------------------------------------------------------------------------------\n");
+
+ palacios_free(mem); mem=0;
+ palacios_free(core); core=0;
+
}
+
}
ERROR("Cannot get VM info\n");
seq_printf(s, "\t<unable to get data for this VM>\n");
} else {
- seq_printf(s,"\t%s\t%lu vcores\t%lu regions\n",
+ seq_printf(s,"\t%s\t%llu vcores\t%llu regions\t%llu mem\t%s\n",
base->state==V3_VM_INVALID ? "INVALID" :
base->state==V3_VM_RUNNING ? "running" :
base->state==V3_VM_STOPPED ? "stopped" :
base->state==V3_VM_ERROR ? "ERROR" :
base->state==V3_VM_SIMULATING ? "simulating" : "UNKNOWN",
core->num_vcores,
- mem->num_regions);
+ mem->num_regions,
+ mem->mem_size,
+ base->vm_type == V3_VM_GENERAL ? "general" :
+ base->vm_type == V3_VM_HVM ? "hvm" : "UNKNOWN");
}
}
}
/* How a vcore's guest memory is virtualized: shadow vs nested paging. */
typedef enum {V3_VCORE_MEM_STATE_UNKNOWN, V3_VCORE_MEM_STATE_SHADOW, V3_VCORE_MEM_STATE_NESTED} v3_vcore_mem_state_t;
/* Current guest paging mode on a vcore: physical (paging off) vs virtual. */
typedef enum {V3_VCORE_MEM_MODE_UNKNOWN, V3_VCORE_MEM_MODE_PHYSICAL, V3_VCORE_MEM_MODE_VIRTUAL} v3_vcore_mem_mode_t;
/* VM flavor: general-purpose, or HVM (reported when vm->hvm_state.is_hvm is set). */
typedef enum {V3_VM_GENERAL, V3_VM_HVM} v3_vm_type_t;
/* Role of a vcore inside an HVM guest: ROS or HRT (classified via
 * v3_is_hvm_ros_core()); V3_VCORE_GENERAL for non-HVM guests. */
typedef enum {V3_VCORE_GENERAL, V3_VCORE_ROS, V3_VCORE_HRT} v3_vcore_type_t;
/* Common externally-visible VM state filled in by v3_get_state_vm(). */
struct v3_vm_base_state {
    v3_vm_state_t state;    /* run state: invalid/running/stopped/simulating/... */
    v3_vm_type_t vm_type;   /* V3_VM_GENERAL, or V3_VM_HVM when hvm_state.is_hvm */
};
/* Per-virtual-core state snapshot filled in by v3_get_state_vm(). */
struct v3_vm_vcore_state {
    v3_vcore_cpu_mode_t cpu_mode;
    v3_vcore_mem_state_t mem_state;   /* shadow vs nested paging */
    v3_vcore_mem_mode_t mem_mode;     /* physical vs virtual paging mode */
    v3_vcore_type_t vcore_type;       /* general, or ROS/HRT role for HVM guests */
    unsigned long pcore;              /* physical core mapping (copied from pcpu_id) */
    void * last_rip;                  /* last guest RIP observed for this vcore */
    unsigned long long num_exits;     /* cumulative VM-exit count */
};
/* Variable-length core summary; allocate with room for num_vcores entries
 * (size via v3_get_state_sizes_vm()). */
struct v3_vm_core_state {
    unsigned long long num_vcores;     /* number of valid entries in vcore[] */
    struct v3_vm_vcore_state vcore[];  /* flexible array member, one per vcore */
};
/* One base memory region of a guest: its guest-physical range and the
 * host-physical memory backing it. */
struct v3_vm_mem_region {
    void *guest_paddr;          /* guest physical start address of the region */
    void *host_paddr;           /* host physical start address backing it */
    unsigned long long size;    /* region size in bytes */
    /* Flag bit-fields must be unsigned: a signed int:1 can represent only
     * 0 and -1, so assigning 1 is implementation-defined (CERT INT12-C)
     * and `flag == 1` comparisons would fail on common ABIs. */
    unsigned int swapped:1;     /* region is currently swapped out */
    unsigned int pinned:1;      /* region is pinned in host memory */
};
/* Variable-length memory map summary; allocate with room for num_regions
 * entries (size via v3_get_state_sizes_vm()). */
struct v3_vm_mem_state {
    unsigned long long mem_size;       /* total guest memory size (vm->mem_size) */
    unsigned long long ros_mem_size;   /* ROS-visible portion for HVM guests; equals mem_size otherwise */
    unsigned long long num_regions;    /* number of valid entries in region[] */
    struct v3_vm_mem_region region[];  /* flexible array member, one per base region */
};
int v3_free_vm(struct v3_vm_info * vm);
/* Report the vcore and base-region counts of a VM so callers can size the
 * core/mem buffers passed to v3_get_state_vm(). Returns 0 on success,
 * -1 on any NULL argument.
 * NOTE(review): the proc-show caller declares these outputs as uint64_t,
 * which only matches unsigned long long * on LL64 targets — confirm. */
int v3_get_state_sizes_vm(struct v3_vm_info *vm,
			  unsigned long long *num_vcores,
			  unsigned long long *num_regions);

int v3_get_state_vm(struct v3_vm_info *vm,
struct v3_vm_base_state *base,
struct v3_vm_core_state *core,
uint32_t numcores;
uint32_t numregions;
extern uint64_t v3_mem_block_size;
+ void *cur_gpa;
if (!vm || !base || !core || !mem) {
- PrintError(VM_NONE, VCORE_NONE, "Invalid rquest to v3_get_state_vm\n");
+ PrintError(VM_NONE, VCORE_NONE, "Invalid request to v3_get_state_vm\n");
return -1;
}
default: base->state = V3_VM_UNKNOWN; break;
}
+ base->vm_type = V3_VM_GENERAL;
+
+#ifdef V3_CONFIG_HVM
+ if (vm->hvm_state.is_hvm) {
+ base->vm_type = V3_VM_HVM;
+ }
+#endif
+
for (i=0;i<numcores;i++) {
switch (vm->cores[i].core_run_state) {
case CORE_INVALID: core->vcore[i].state = V3_VCORE_INVALID; break;
case VIRTUAL_MEM: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_VIRTUAL; break;
default: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_UNKNOWN; break;
}
+
+ core->vcore[i].vcore_type = V3_VCORE_GENERAL;
+
+#ifdef V3_CONFIG_HVM
+ if (vm->hvm_state.is_hvm) {
+ if (v3_is_hvm_ros_core(&vm->cores[i])) {
+ core->vcore[i].vcore_type = V3_VCORE_ROS;
+ } else {
+ core->vcore[i].vcore_type = V3_VCORE_HRT;
+ }
+ }
+#endif
core->vcore[i].pcore=vm->cores[i].pcpu_id;
core->vcore[i].last_rip=(void*)(vm->cores[i].rip);
core->num_vcores=numcores;
+ cur_gpa=0;
+
for (i=0;i<numregions;i++) {
+ mem->region[i].guest_paddr = cur_gpa;
mem->region[i].host_paddr = (void*)(vm->mem_map.base_regions[i].host_addr);
mem->region[i].size = v3_mem_block_size;
#ifdef V3_CONFIG_SWAPPING
mem->region[i].pinned = 0;
#endif
+ cur_gpa += mem->region[i].size;
}
mem->num_regions=numregions;
+
+
+ mem->mem_size=vm->mem_size;
+ mem->ros_mem_size=vm->mem_size;
+
+#ifdef V3_CONFIG_HVM
+ if (vm->hvm_state.is_hvm) {
+ mem->ros_mem_size=v3_get_hvm_ros_memsize(vm);
+ }
+#endif
+
+ return 0;
+}
+
+int v3_get_state_sizes_vm(struct v3_vm_info *vm,
+ unsigned long long *num_vcores,
+ unsigned long long *num_regions)
+{
+ if (!vm || !num_vcores || !num_regions) {
+ PrintError(VM_NONE, VCORE_NONE, "Invalid request to v3_get_state_sizes\n");
+ return -1;
+ }
+
+ *num_vcores = vm->num_cores;
+ *num_regions = vm->mem_map.num_base_regions;
return 0;
}