X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm.c;h=9337cdd6b1d41296d797abcb45d1badaeca72cf9;hb=e94507c7055f81abcf6a95132cb7ad90f1b5e6ca;hp=6e32e88a8d982583cfdc1e69665a28288297d8ec;hpb=a19f42cc45ff4c4a07bf917a78a2a422319a78bb;p=palacios.git

diff --git a/palacios/src/palacios/vmm.c b/palacios/src/palacios/vmm.c
index 6e32e88..9337cdd 100644
--- a/palacios/src/palacios/vmm.c
+++ b/palacios/src/palacios/vmm.c
@@ -18,6 +18,7 @@
  */
 #include
+#include
 #include
 #include
 #include
@@ -27,6 +28,7 @@
 #include
 #include
 #include
+#include
 
 #ifdef V3_CONFIG_SVM
 #include
@@ -48,7 +50,6 @@
 
 int v3_dbg_enable = 0;
 
-
 static void init_cpu(void * arg) {
     uint32_t cpu_id = (uint32_t)(addr_t)arg;
 
@@ -106,9 +107,14 @@ void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus, char *op
 
     V3_Print(VM_NONE, VCORE_NONE, "V3 Print statement to fix a Kitten page fault bug\n");
 
+    // Set global variables.
     os_hooks = hooks;
 
+    if (num_cpus>V3_CONFIG_MAX_CPUS) {
+        PrintError(VM_NONE,VCORE_NONE, "Requesting as many as %d cpus, but Palacios is compiled for a maximum of %d. Only the first %d cpus will be considered\n", num_cpus, V3_CONFIG_MAX_CPUS, V3_CONFIG_MAX_CPUS);
+    }
+
     // Determine the global machine type
     v3_mach_type = V3_INVALID_CPU;
 
@@ -119,18 +125,27 @@ void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus, char *op
     // Parse host-os defined options into an easily-accessed format.
     v3_parse_options(options);
 
+    // Memory manager initialization
+    v3_init_mem();
+
     // Register all the possible device types
     V3_init_devices();
 
     // Register all shadow paging handlers
     V3_init_shdw_paging();
 
+    // Initialize the cpu_mapper framework (must be before extensions)
+    V3_init_cpu_mapper();
+
     // Initialize the scheduler framework (must be before extensions)
     V3_init_scheduling();
 
     // Register all extensions
     V3_init_extensions();
 
+    // Enabling cpu_mapper
+    V3_enable_cpu_mapper();
+
     // Enabling scheduler
     V3_enable_scheduler();
 
@@ -145,7 +160,7 @@ void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus, char *op
 
     if ((hooks) && (hooks->call_on_cpu)) {
 
-        for (i = 0; i < num_cpus; i++) {
+        for (i = 0; i < num_cpus && i < V3_CONFIG_MAX_CPUS; i++) {
             major = i / 8;
             minor = i % 8;
 
@@ -166,19 +181,9 @@ void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus, char *op
 void Shutdown_V3() {
     int i;
 
-    V3_deinit_devices();
-    V3_deinit_shdw_paging();
-
-    V3_deinit_extensions();
-
-#ifdef V3_CONFIG_SYMMOD
-    V3_deinit_symmod();
-#endif
-
-#ifdef V3_CONFIG_CHECKPOINT
-    V3_deinit_checkpoint();
-#endif
+    // Reverse order of Init_V3
 
+    // bring down CPUs
     if ((os_hooks) && (os_hooks->call_on_cpu)) {
         for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
 
@@ -189,6 +194,33 @@ void Shutdown_V3() {
         }
     }
 
+#ifdef V3_CONFIG_CHECKPOINT
+    V3_deinit_checkpoint();
+#endif
+
+#ifdef V3_CONFIG_SYMMOD
+    V3_deinit_symmod();
+#endif
+
+    V3_disable_scheduler();
+
+    V3_disable_cpu_mapper();
+
+    V3_deinit_extensions();
+
+    V3_deinit_scheduling();
+
+    V3_deinit_cpu_mapper();
+
+    V3_deinit_shdw_paging();
+
+    V3_deinit_devices();
+
+    v3_deinit_mem();
+
+    v3_deinit_options();
+
+
 }
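The reworked Shutdown_V3() above tears the VMM down in the reverse of the order in which Init_V3() brings it up: CPUs first, then each subsystem, newest first, so nothing is deinitialized while a later-initialized subsystem may still depend on it. A minimal standalone sketch of that LIFO discipline (the subsystem names are illustrative, not Palacios APIs):

    /* LIFO bring-up / tear-down, as a self-contained illustration. */
    #include <stdio.h>

    static const char *subsys[] = { "mem", "devices", "paging", "cpu_mapper", "scheduler" };
    #define NSUB (sizeof(subsys) / sizeof(subsys[0]))

    int main(void) {
        size_t i;
        for (i = 0; i < NSUB; i++)          /* init in dependency order */
            printf("init   %s\n", subsys[i]);
        for (i = NSUB; i-- > 0; )           /* deinit in reverse (LIFO) order */
            printf("deinit %s\n", subsys[i]);
        return 0;
    }
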
@@ -264,16 +296,13 @@ static int start_core(void * p)
     return 0;
 }
 
-
-// For the moment very ugly. Eventually we will shift the cpu_mask to an arbitrary sized type...
-#define MAX_CORES 32
-
-
 int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
-    uint32_t i;
+
+    uint32_t i,j;
     uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier
     uint32_t avail_cores = 0;
     int vcore_id = 0;
+    extern uint64_t v3_mem_block_size;
 
     if (vm->run_state != VM_STOPPED) {
 
@@ -285,18 +314,18 @@ int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
 
     // Do not run if any core is using shadow paging and we are out of 4 GB bounds
     for (i=0;i<vm->num_cores;i++) {
         if (vm->cores[i].shdw_pg_mode == SHADOW_PAGING) {
-            if ((vm->mem_map.base_region.host_addr + vm->mem_size ) >= 0x100000000ULL) {
-                PrintError(vm, VCORE_NONE, "Base memory region exceeds 4 GB boundary with shadow paging enabled on core %d.\n",i);
-                PrintError(vm, VCORE_NONE, "Any use of non-64 bit mode in the guest is likely to fail in this configuration.\n");
-                PrintError(vm, VCORE_NONE, "If you would like to proceed anyway, remove this check and recompile Palacios.\n");
-                PrintError(vm, VCORE_NONE, "Alternatively, change this VM to use nested paging.\n");
-                return -1;
+            for (j=0;j<vm->mem_map.num_base_regions;j++) {
+                if ((vm->mem_map.base_regions[j].host_addr + v3_mem_block_size) >= 0x100000000ULL) {
+                    PrintError(vm, VCORE_NONE, "Base memory region %d exceeds 4 GB boundary with shadow paging enabled on core %d.\n",j, i);
+                    PrintError(vm, VCORE_NONE, "Any use of non-64 bit mode in the guest is likely to fail in this configuration.\n");
+                    PrintError(vm, VCORE_NONE, "If you would like to proceed anyway, remove this check and recompile Palacios.\n");
+                    PrintError(vm, VCORE_NONE, "Alternatively, change this VM to use nested paging.\n");
+                    return -1;
+                }
             }
         }
     }
-
-
-
+
     /// CHECK IF WE ARE MULTICORE ENABLED....
 
     V3_Print(vm, VCORE_NONE, "V3 -- Starting VM (%u cores)\n", vm->num_cores);
 
@@ -304,7 +333,7 @@ int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
 
     // Check that enough cores are present in the mask to handle vcores
-    for (i = 0; i < MAX_CORES; i++) {
+    for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
         int major = i / 8;
         int minor = i % 8;
 
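Both the loop above and the one in Init_V3() address cpu_mask as a byte array: logical CPU i lives at byte i/8, bit i%8. A self-contained sketch of that addressing (cpu_in_mask is not a Palacios function; the byte indexing assumes a little-endian host, which holds on the x86 machines Palacios targets):

    /* cpu_mask bit addressing: byte i/8, bit i%8 */
    #include <stdint.h>
    #include <stdio.h>

    static int cpu_in_mask(uint32_t cpu_mask, int i) {
        uint8_t * core_mask = (uint8_t *)&cpu_mask;  /* same cast as v3_start_vm */
        int major = i / 8;                           /* which byte of the mask  */
        int minor = i % 8;                           /* which bit in that byte  */
        return (core_mask[major] & (0x1 << minor)) != 0;
    }

    int main(void) {
        uint32_t mask = 0x00000105;   /* CPUs 0, 2, and 8 available */
        int i;
        for (i = 0; i < 10; i++)
            printf("cpu %d: %s\n", i, cpu_in_mask(mask, i) ? "available" : "masked off");
        return 0;
    }
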
@@ -317,80 +346,46 @@ int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
         }
     }
 
-
     vm->avail_cores = avail_cores;
 
     if (v3_scheduler_admit_vm(vm) != 0){
-	PrintError(vm, VCORE_NONE,"Error admitting VM %s for scheduling", vm->name);
+        PrintError(vm, VCORE_NONE,"Error admitting VM %s for scheduling", vm->name);
     }
 
-    vm->run_state = VM_RUNNING;
+    if (v3_cpu_mapper_admit_vm(vm) != 0){
+        PrintError(vm, VCORE_NONE,"Error admitting VM %s for mapping", vm->name);
+    }
 
-    // Spawn off threads for each core.
-    // We work backwards, so that core 0 is always started last.
-    for (i = 0, vcore_id = vm->num_cores - 1; (i < MAX_CORES) && (vcore_id >= 0); i++) {
-        int major = 0;
-        int minor = 0;
-        struct guest_info * core = &(vm->cores[vcore_id]);
-        char * specified_cpu = v3_cfg_val(core->core_cfg_data, "target_cpu");
-        uint32_t core_idx = 0;
-
-        if (specified_cpu != NULL) {
-            core_idx = atoi(specified_cpu);
-
-            if ((core_idx < 0) || (core_idx >= MAX_CORES)) {
-                PrintError(vm, VCORE_NONE, "Target CPU out of bounds (%d) (MAX_CORES=%d)\n", core_idx, MAX_CORES);
-            }
+    vm->run_state = VM_RUNNING;
 
-            i--; // We reset the logical core idx. Not strictly necessary I guess...
-        } else {
-            core_idx = i;
-        }
+    if(v3_cpu_mapper_register_vm(vm,cpu_mask) == -1) {
 
-        major = core_idx / 8;
-        minor = core_idx % 8;
+        PrintError(vm, VCORE_NONE,"Error registering VM with cpu_mapper\n");
+    }
 
-        if ((core_mask[major] & (0x1 << minor)) == 0) {
-            PrintError(vm, VCORE_NONE, "Logical CPU %d not available for virtual core %d; not started\n",
-                       core_idx, vcore_id);
-
-            if (specified_cpu != NULL) {
-                PrintError(vm, VCORE_NONE, "CPU was specified explicitly (%d). HARD ERROR\n", core_idx);
-                v3_stop_vm(vm);
-                return -1;
-            }
+    for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {
 
-            continue;
-        }
+        struct guest_info * core = &(vm->cores[vcore_id]);
 
         PrintDebug(vm, VCORE_NONE, "Starting virtual core %u on logical core %u\n",
-                   vcore_id, core_idx);
+                   vcore_id, core->pcpu_id);
 
         sprintf(core->exec_name, "%s-%u", vm->name, vcore_id);
 
-        PrintDebug(vm, VCORE_NONE, "run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
-                   core_idx, start_core, core, core->exec_name);
+        PrintDebug(vm, VCORE_NONE, "run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
+                   core->pcpu_id, start_core, core, core->exec_name);
 
         core->core_run_state = CORE_STOPPED;  // core zero will turn itself on
-        core->pcpu_id = core_idx;
 
-        core->core_thread = V3_CREATE_THREAD_ON_CPU(core_idx, start_core, core, core->exec_name);
+        core->core_thread = V3_CREATE_THREAD_ON_CPU(core->pcpu_id, start_core, core, core->exec_name);
 
         if (core->core_thread == NULL) {
             PrintError(vm, VCORE_NONE, "Thread launch failed\n");
             v3_stop_vm(vm);
             return -1;
         }
-
-        vcore_id--;
     }
 
-    if (vcore_id >= 0) {
-        PrintError(vm, VCORE_NONE, "Error starting VM: Not enough available CPU cores\n");
-        v3_stop_vm(vm);
-        return -1;
-    }
-
-
     return 0;
 }
 
@@ -454,6 +449,11 @@ int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
 
     V3_Print(vm, core, "Moving Core\n");
 
+    if(v3_cpu_mapper_admit_core(vm, vcore_id, target_cpu) == -1){
+        PrintError(vm, core, "Core %d can not be admitted in cpu %d\n",vcore_id, target_cpu);
+        return -1;
+    }
+
 #ifdef V3_CONFIG_VMX
 
     switch (v3_cpu_types[core->pcpu_id]) {
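With the admission check above, v3_move_vm_core() now fails fast instead of migrating a vcore that the cpu_mapper will not accept on the target physical CPU. A hypothetical call site (try_migrate is not a Palacios function; the sketch assumes only the signatures visible in this file plus the palacios/vmm.h header):

    #include <palacios/vmm.h>

    /* Hypothetical helper: request a migration and surface the new
     * admission failure to the caller. */
    static int try_migrate(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
        if (v3_move_vm_core(vm, vcore_id, target_cpu) == -1) {
            /* Covers the case where v3_cpu_mapper_admit_core() rejected the move */
            PrintError(vm, VCORE_NONE, "vcore %d not admitted on pcpu %d\n",
                       vcore_id, target_cpu);
            return -1;
        }
        return 0;
    }
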
@@ -661,82 +661,91 @@ int v3_simulate_vm(struct v3_vm_info * vm, unsigned int msecs) {
 }
 
-int v3_get_state_vm(struct v3_vm_info *vm, struct v3_vm_state *s)
+int v3_get_state_vm(struct v3_vm_info *vm,
+                    struct v3_vm_base_state *base,
+                    struct v3_vm_core_state *core,
+                    struct v3_vm_mem_state *mem)
 {
-    uint32_t i;
-    uint32_t numcores = s->num_vcores > vm->num_cores ? vm->num_cores : s->num_vcores;
-
-    switch (vm->run_state) {
-        case VM_INVALID: s->state = V3_VM_INVALID; break;
-        case VM_RUNNING: s->state = V3_VM_RUNNING; break;
-        case VM_STOPPED: s->state = V3_VM_STOPPED; break;
-        case VM_PAUSED: s->state = V3_VM_PAUSED; break;
-        case VM_ERROR: s->state = V3_VM_ERROR; break;
-        case VM_SIMULATING: s->state = V3_VM_SIMULATING; break;
-        default: s->state = V3_VM_UNKNOWN; break;
-    }
+    uint32_t i;
+    uint32_t numcores = core->num_vcores > vm->num_cores ? vm->num_cores : core->num_vcores;
+    uint32_t numregions = mem->num_regions > vm->mem_map.num_base_regions ? vm->mem_map.num_base_regions : mem->num_regions;
+    extern uint64_t v3_mem_block_size;
+
+    switch (vm->run_state) {
+        case VM_INVALID: base->state = V3_VM_INVALID; break;
+        case VM_RUNNING: base->state = V3_VM_RUNNING; break;
+        case VM_STOPPED: base->state = V3_VM_STOPPED; break;
+        case VM_PAUSED: base->state = V3_VM_PAUSED; break;
+        case VM_ERROR: base->state = V3_VM_ERROR; break;
+        case VM_SIMULATING: base->state = V3_VM_SIMULATING; break;
+        default: base->state = V3_VM_UNKNOWN; break;
+    }
+
+    for (i=0;i<numcores;i++) {
+        switch (vm->cores[i].core_run_state) {
+            case CORE_INVALID: core->vcore[i].state = V3_VCORE_INVALID; break;
+            case CORE_RUNNING: core->vcore[i].state = V3_VCORE_RUNNING; break;
+            case CORE_STOPPED: core->vcore[i].state = V3_VCORE_STOPPED; break;
+            default: core->vcore[i].state = V3_VCORE_UNKNOWN; break;
+        }
+        switch (vm->cores[i].cpu_mode) {
+            case REAL: core->vcore[i].cpu_mode = V3_VCORE_CPU_REAL; break;
+            case PROTECTED: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED; break;
+            case PROTECTED_PAE: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED_PAE; break;
+            case LONG: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG; break;
+            case LONG_32_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_32_COMPAT; break;
+            case LONG_16_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_16_COMPAT; break;
+            default: core->vcore[i].cpu_mode = V3_VCORE_CPU_UNKNOWN; break;
+        }
+        switch (vm->cores[i].shdw_pg_mode) {
+            case SHADOW_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_SHADOW; break;
+            case NESTED_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_NESTED; break;
+            default: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_UNKNOWN; break;
+        }
+        switch (vm->cores[i].mem_mode) {
+            case PHYSICAL_MEM: core->vcore[i].mem_mode = V3_VCORE_MEM_MODE_PHYSICAL; break;
+            case VIRTUAL_MEM: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_VIRTUAL; break;
+            default: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_UNKNOWN; break;
+        }
+
+        core->vcore[i].pcore=vm->cores[i].pcpu_id;
+        core->vcore[i].last_rip=(void*)(vm->cores[i].rip);
+        core->vcore[i].num_exits=vm->cores[i].num_exits;
+    }
+
+    core->num_vcores=numcores;
 
-    s->mem_base_paddr = (void*)(vm->mem_map.base_region.host_addr);
-    s->mem_size = vm->mem_size;
-
-    s->num_vcores = numcores;
-
-    for (i=0;i<numcores;i++) {
-        switch (vm->cores[i].core_run_state) {
-            case CORE_INVALID: s->vcore[i].state = V3_VCORE_INVALID; break;
-            case CORE_RUNNING: s->vcore[i].state = V3_VCORE_RUNNING; break;
-            case CORE_STOPPED: s->vcore[i].state = V3_VCORE_STOPPED; break;
-            default: s->vcore[i].state = V3_VCORE_UNKNOWN; break;
-        }
-        switch (vm->cores[i].cpu_mode) {
-            case REAL: s->vcore[i].cpu_mode = V3_VCORE_CPU_REAL; break;
-            case PROTECTED: s->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED; break;
-            case PROTECTED_PAE: s->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED_PAE; break;
-            case LONG: s->vcore[i].cpu_mode = V3_VCORE_CPU_LONG; break;
-            case LONG_32_COMPAT: s->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_32_COMPAT; break;
-            case LONG_16_COMPAT: s->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_16_COMPAT; break;
-            default: s->vcore[i].cpu_mode = V3_VCORE_CPU_UNKNOWN; break;
-        }
-        switch (vm->cores[i].shdw_pg_mode) {
-            case SHADOW_PAGING: s->vcore[i].mem_state = V3_VCORE_MEM_STATE_SHADOW; break;
-            case NESTED_PAGING: s->vcore[i].mem_state = V3_VCORE_MEM_STATE_NESTED; break;
-            default: s->vcore[i].mem_state = V3_VCORE_MEM_STATE_UNKNOWN; break;
-        }
-        switch (vm->cores[i].mem_mode) {
-            case PHYSICAL_MEM: s->vcore[i].mem_mode = V3_VCORE_MEM_MODE_PHYSICAL; break;
-            case VIRTUAL_MEM: s->vcore[i].mem_mode=V3_VCORE_MEM_MODE_VIRTUAL; break;
-            default: s->vcore[i].mem_mode=V3_VCORE_MEM_MODE_UNKNOWN; break;
-        }
-
-        s->vcore[i].pcore=vm->cores[i].pcpu_id;
-        s->vcore[i].last_rip=(void*)(vm->cores[i].rip);
-        s->vcore[i].num_exits=vm->cores[i].num_exits;
-    }
+    for (i=0;i<numregions;i++) {
+        mem->region[i].host_paddr = (void*)(vm->mem_map.base_regions[i].host_addr);
+        mem->region[i].size = v3_mem_block_size;
+    }
 
-    return 0;
+    mem->num_regions=numregions;
+
+    return 0;
 }
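The split of the old v3_vm_state into base/core/mem structures means v3_get_state_vm() clamps what it reports to the smaller of the caller's capacity (num_vcores and num_regions on entry) and what the VM actually has. A hypothetical call, assuming the v3_vm_*_state structures expose their array capacity through those count fields, as the clamping logic implies; QUERY_VCORES and QUERY_REGIONS are illustrative constants, not Palacios names:

    struct v3_vm_base_state base;
    struct v3_vm_core_state cores;
    struct v3_vm_mem_state  mem;
    uint32_t i;

    cores.num_vcores = QUERY_VCORES;    /* capacity in, clamped count out */
    mem.num_regions  = QUERY_REGIONS;   /* capacity in, clamped count out */

    if (v3_get_state_vm(vm, &base, &cores, &mem) == 0) {
        for (i = 0; i < cores.num_vcores; i++) {
            /* cores.vcore[i].state, .cpu_mode, .pcore, .last_rip, .num_exits */
        }
        for (i = 0; i < mem.num_regions; i++) {
            /* mem.region[i].host_paddr and .size describe one base region */
        }
    }
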
 
#ifdef V3_CONFIG_CHECKPOINT
#include
 
-int v3_save_vm(struct v3_vm_info * vm, char * store, char * url) {
-    return v3_chkpt_save_vm(vm, store, url);
+int v3_save_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
+    return v3_chkpt_save_vm(vm, store, url, opts);
 }
 
-int v3_load_vm(struct v3_vm_info * vm, char * store, char * url) {
-    return v3_chkpt_load_vm(vm, store, url);
+int v3_load_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
+    return v3_chkpt_load_vm(vm, store, url, opts);
 }
 
#ifdef V3_CONFIG_LIVE_MIGRATION
-int v3_send_vm(struct v3_vm_info * vm, char * store, char * url) {
-    return v3_chkpt_send_vm(vm, store, url);
+int v3_send_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
+    return v3_chkpt_send_vm(vm, store, url, opts);
 }
 
-int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url) {
-    return v3_chkpt_receive_vm(vm, store, url);
+int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
+    return v3_chkpt_receive_vm(vm, store, url, opts);
 }
#endif
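Each checkpoint entry point above gains a trailing v3_chkpt_options_t that is threaded straight through to the corresponding v3_chkpt_* implementation. A hypothetical call site; the concrete option flags are defined by the checkpoint subsystem rather than in this file, so treating 0 as "no options" is an assumption here, as are the store name and URL:

    v3_chkpt_options_t opts = 0;   /* assumed: zero selects default behavior */

    if (v3_save_vm(vm, "dir", "/tmp/vm1-ckpt", opts) == -1) {   /* placeholder store/url */
        PrintError(vm, VCORE_NONE, "checkpoint save failed\n");
    }

    if (v3_load_vm(vm, "dir", "/tmp/vm1-ckpt", opts) == -1) {
        PrintError(vm, VCORE_NONE, "checkpoint restore failed\n");
    }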