diff --git a/palacios/src/palacios/vmm.c b/palacios/src/palacios/vmm.c
index 148b86e..a162da5 100644
--- a/palacios/src/palacios/vmm.c
+++ b/palacios/src/palacios/vmm.c
@@ -26,7 +26,8 @@
 #include
 #include
 #include
-
+#include <palacios/vmm_cpu_mapper.h>
+#include <palacios/vmm_scheduler.h>
 #ifdef V3_CONFIG_SVM
 #include <palacios/svm.h>
@@ -99,8 +100,7 @@ static void deinit_cpu(void * arg) {
     }
 }
 
-
-void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus) {
+void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus, char * options) {
     int i = 0;
     int minor = 0;
     int major = 0;
@@ -117,15 +117,30 @@ void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus) {
 	v3_cpu_types[i] = V3_INVALID_CPU;
     }
 
+    // Parse host-OS-defined options into an easily accessed format.
+    v3_parse_options(options);
+
     // Register all the possible device types
     V3_init_devices();
 
     // Register all shadow paging handlers
     V3_init_shdw_paging();
 
+    // Initialize the cpu_mapper framework (must be before extensions)
+    V3_init_cpu_mapper();
+
+    // Initialize the scheduler framework (must be before extensions)
+    V3_init_scheduling();
+
     // Register all extensions
     V3_init_extensions();
 
+    // Enable the cpu_mapper
+    V3_enable_cpu_mapper();
+
+    // Enable the scheduler
+    V3_enable_scheduler();
+
 #ifdef V3_CONFIG_SYMMOD
     V3_init_symmod();
@@ -208,6 +223,15 @@ struct v3_vm_info * v3_create_vm(void * cfg, void * priv_data, char * name) {
     memset(vm->name, 0, 128);
     strncpy(vm->name, name, 127);
 
+    /*
+     * Register this VM with the Palacios scheduler. It will ask for admission
+     * prior to launch.
+     */
+    if (v3_scheduler_register_vm(vm) == -1) {
+	PrintError(vm, VCORE_NONE, "Error registering VM with scheduler\n");
+    }
+
     return vm;
 }
@@ -218,6 +242,9 @@ static int start_core(void * p)
 {
     struct guest_info * core = (struct guest_info *)p;
 
+    if (v3_scheduler_register_core(core) == -1) {
+	PrintError(core->vm_info, core, "Error initializing scheduling in core %d\n", core->vcpu_id);
+    }
     PrintDebug(core->vm_info, core, "virtual core %u (on logical core %u): in start_core (RIP=%p)\n",
 	       core->vcpu_id, core->pcpu_id, (void *)(addr_t)core->rip);
@@ -244,12 +271,8 @@ static int start_core(void * p)
     return 0;
 }
 
-
-// For the moment very ugly. Eventually we will shift the cpu_mask to an arbitrary sized type...
-#define MAX_CORES 32
-
-
 int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
+
     uint32_t i;
     uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier
     uint32_t avail_cores = 0;
@@ -275,8 +298,6 @@ int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
 	}
     }
 
-
-    /// CHECK IF WE ARE MULTICORE ENABLED....
 
     V3_Print(vm, VCORE_NONE, "V3 -- Starting VM (%u cores)\n", vm->num_cores);
 
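The hunks above change Palacios' embedding interface: Init_V3() now takes a fourth "options" argument, which v3_parse_options() digests before the cpu_mapper and scheduler frameworks are initialized and enabled. A minimal sketch of a host-side call site follows; the hooks structure, the all-ones CPU mask, and the empty option string are illustrative assumptions, not part of this commit.

/* Hypothetical host bring-up sketch (not from this commit). Assumes the
 * host glue has filled in a v3_os_hooks structure; the option string
 * format is whatever v3_parse_options() accepts. */
#include <string.h>
#include <palacios/vmm.h>

extern struct v3_os_hooks my_hooks;   /* host-provided callbacks (assumed) */

void host_bringup(int num_cpus)
{
    char cpu_mask[8];
    char * options = "";   /* placeholder; real hosts pass scheduler/mapper knobs */

    /* One bit per logical CPU, byte-granular, matching the
     * core_mask[major] & (0x1 << minor) test in vmm.c. */
    memset(cpu_mask, 0xff, sizeof(cpu_mask));

    /* The options string is parsed before V3_init_cpu_mapper() and
     * V3_init_scheduling() run, so those frameworks see their settings. */
    Init_V3(&my_hooks, cpu_mask, num_cpus, options);
}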
@@ -284,7 +305,7 @@
 
     // Check that enough cores are present in the mask to handle vcores
-    for (i = 0; i < MAX_CORES; i++) {
+    for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
 	int major = i / 8;
 	int minor = i % 8;
 
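The availability check above walks the mask one byte at a time: major selects the byte, minor the bit within it. The same arithmetic in a standalone, compilable form; the mask value is made up for illustration.

/* Standalone illustration of the core-mask test in v3_start_vm().
 * Aliasing the uint32_t as bytes assumes a little-endian host, as on x86. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t cpu_mask = 0x0000000b;              /* CPUs 0, 1, and 3 set */
    uint8_t * core_mask = (uint8_t *)&cpu_mask;  /* same aliasing as vmm.c */
    uint32_t i;

    for (i = 0; i < 32; i++) {
        int major = i / 8;   /* which byte of the mask */
        int minor = i % 8;   /* which bit within that byte */

        if (core_mask[major] & (0x1 << minor)) {
            printf("logical CPU %u available\n", i);
        }
    }

    return 0;
}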
@@ -297,80 +318,46 @@
+    vm->avail_cores = avail_cores;
+
+    if (v3_scheduler_admit_vm(vm) != 0) {
+	PrintError(vm, VCORE_NONE, "Error admitting VM %s for scheduling\n", vm->name);
+    }
 
-    if (vm->num_cores > avail_cores) {
-	PrintError(vm, VCORE_NONE, "Attempted to start a VM with too many cores (vm->num_cores = %d, avail_cores = %d, MAX=%d)\n",
-		   vm->num_cores, avail_cores, MAX_CORES);
-	return -1;
+    if (v3_cpu_mapper_admit_vm(vm) != 0) {
+	PrintError(vm, VCORE_NONE, "Error admitting VM %s for mapping\n", vm->name);
     }
 
     vm->run_state = VM_RUNNING;
 
+    if (v3_cpu_mapper_register_vm(vm, cpu_mask) == -1) {
+	PrintError(vm, VCORE_NONE, "Error registering VM with cpu_mapper\n");
+    }
 
-    // Spawn off threads for each core.
-    // We work backwards, so that core 0 is always started last.
-    for (i = 0, vcore_id = vm->num_cores - 1; (i < MAX_CORES) && (vcore_id >= 0); i++) {
-	int major = 0;
-	int minor = 0;
-	struct guest_info * core = &(vm->cores[vcore_id]);
-	char * specified_cpu = v3_cfg_val(core->core_cfg_data, "target_cpu");
-	uint32_t core_idx = 0;
-
-	if (specified_cpu != NULL) {
-	    core_idx = atoi(specified_cpu);
-
-	    if ((core_idx < 0) || (core_idx >= MAX_CORES)) {
-		PrintError(vm, VCORE_NONE, "Target CPU out of bounds (%d) (MAX_CORES=%d)\n", core_idx, MAX_CORES);
-	    }
-
-	    i--; // We reset the logical core idx. Not strictly necessary I guess...
-	} else {
-	    core_idx = i;
-	}
-
-	major = core_idx / 8;
-	minor = core_idx % 8;
-
-	if ((core_mask[major] & (0x1 << minor)) == 0) {
-	    PrintError(vm, VCORE_NONE, "Logical CPU %d not available for virtual core %d; not started\n",
-		       core_idx, vcore_id);
-
-	    if (specified_cpu != NULL) {
-		PrintError(vm, VCORE_NONE, "CPU was specified explicitly (%d). HARD ERROR\n", core_idx);
-		v3_stop_vm(vm);
-		return -1;
-	    }
-
-	    continue;
-	}
+    for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {
+
+	struct guest_info * core = &(vm->cores[vcore_id]);
 
 	PrintDebug(vm, VCORE_NONE, "Starting virtual core %u on logical core %u\n",
-		   vcore_id, core_idx);
+		   vcore_id, core->pcpu_id);
 
 	sprintf(core->exec_name, "%s-%u", vm->name, vcore_id);
 
-	PrintDebug(vm, VCORE_NONE, "run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
-		   core_idx, start_core, core, core->exec_name);
+	PrintDebug(vm, VCORE_NONE, "run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
+		   core->pcpu_id, start_core, core, core->exec_name);
 
 	core->core_run_state = CORE_STOPPED;  // core zero will turn itself on
-	core->pcpu_id = core_idx;
 
-	core->core_thread = V3_CREATE_THREAD_ON_CPU(core_idx, start_core, core, core->exec_name);
+	core->core_thread = V3_CREATE_THREAD_ON_CPU(core->pcpu_id, start_core, core, core->exec_name);
 
 	if (core->core_thread == NULL) {
 	    PrintError(vm, VCORE_NONE, "Thread launch failed\n");
 	    v3_stop_vm(vm);
 	    return -1;
 	}
-
-	vcore_id--;
     }
 
-    if (vcore_id >= 0) {
-	PrintError(vm, VCORE_NONE, "Error starting VM: Not enough available CPU cores\n");
-	v3_stop_vm(vm);
-	return -1;
-    }
-
     return 0;
 }
@@ -434,6 +421,11 @@ int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
 
     V3_Print(vm, core, "Moving Core\n");
 
+    if (v3_cpu_mapper_admit_core(vm, vcore_id, target_cpu) == -1) {
+	PrintError(vm, core, "Core %d cannot be admitted on CPU %d\n", vcore_id, target_cpu);
+	return -1;
+    }
+
 #ifdef V3_CONFIG_VMX
     switch (v3_cpu_types[core->pcpu_id]) {
@@ -700,23 +692,23 @@ int v3_get_state_vm(struct v3_vm_info *vm, struct v3_vm_state *s)
 
 #ifdef V3_CONFIG_CHECKPOINT
 #include <palacios/vmm_checkpoint.h>
 
-int v3_save_vm(struct v3_vm_info * vm, char * store, char * url) {
-    return v3_chkpt_save_vm(vm, store, url);
+int v3_save_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
+    return v3_chkpt_save_vm(vm, store, url, opts);
 }
 
-int v3_load_vm(struct v3_vm_info * vm, char * store, char * url) {
-    return v3_chkpt_load_vm(vm, store, url);
+int v3_load_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
+    return v3_chkpt_load_vm(vm, store, url, opts);
 }
 
 #ifdef V3_CONFIG_LIVE_MIGRATION
 
-int v3_send_vm(struct v3_vm_info * vm, char * store, char * url) {
-    return v3_chkpt_send_vm(vm, store, url);
+int v3_send_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
+    return v3_chkpt_send_vm(vm, store, url, opts);
 }
 
-int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url) {
-    return v3_chkpt_receive_vm(vm, store, url);
+int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
+    return v3_chkpt_receive_vm(vm, store, url, opts);
 }
 
 #endif
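The checkpoint wrappers above now thread a v3_chkpt_options_t value through to the vmm_checkpoint implementation. A hedged sketch of a host-side save call; treating a zeroed options value as "defaults", and the store name and URL, are assumptions for illustration only.

/* Hypothetical host-side checkpoint call (sketch, not from this commit).
 * Assumes a zeroed v3_chkpt_options_t means default behavior; the real
 * option bits are defined alongside vmm_checkpoint. */
#include <string.h>
#include <palacios/vmm.h>

int host_checkpoint_vm(struct v3_vm_info * vm)
{
    v3_chkpt_options_t opts;

    memset(&opts, 0, sizeof(opts));   /* assumed "no special options" */

    /* store selects a checkpoint backend, url names the destination;
     * both strings here are illustrative. */
    return v3_save_vm(vm, "filestore", "/tmp/vm-ckpt", opts);
}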
@@ -780,53 +772,6 @@ v3_cpu_mode_t v3_get_host_cpu_mode() {
 
 #endif
 
-
-
-
-
-void v3_yield_cond(struct guest_info * info, int usec) {
-    uint64_t cur_cycle;
-    cur_cycle = v3_get_host_time(&info->time_state);
-
-    if (cur_cycle > (info->yield_start_cycle + info->vm_info->yield_cycle_period)) {
-	//PrintDebug(info->vm_info, info, "Conditional Yield (cur_cyle=%p, start_cycle=%p, period=%p)\n",
-	//           (void *)cur_cycle, (void *)info->yield_start_cycle,
-	//           (void *)info->yield_cycle_period);
-
-	if (usec < 0) {
-	    V3_Yield();
-	} else {
-	    V3_Sleep(usec);
-	}
-
-	info->yield_start_cycle += info->vm_info->yield_cycle_period;
-    }
-}
-
-
-/*
- * unconditional cpu yield
- * if the yielding thread is a guest context, the guest quantum is reset on resumption
- * Non guest context threads should call this function with a NULL argument
- *
- * usec <0  => the non-timed yield is used
- * usec >=0 => the timed yield is used, which also usually implies interruptible
- */
-void v3_yield(struct guest_info * info, int usec) {
-    if (usec < 0) {
-	V3_Yield();
-    } else {
-	V3_Sleep(usec);
-    }
-
-    if (info) {
-	info->yield_start_cycle += info->vm_info->yield_cycle_period;
-    }
-}
-
-
-
-
 void v3_print_cond(const char * fmt, ...) {
     if (v3_dbg_enable == 1) {
 	char buf[2048];
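v3_print_cond(), cut off above, formats into a fixed 2048-byte buffer and emits the result only when v3_dbg_enable is set. A self-contained userspace analogue of the same pattern; the names here are illustrative, not Palacios API.

/* Userspace sketch of the v3_print_cond() pattern: format into a fixed
 * buffer, print only when a debug flag is on. Illustrative only. */
#include <stdarg.h>
#include <stdio.h>

static int dbg_enable = 1;   /* stand-in for v3_dbg_enable */

static void print_cond(const char * fmt, ...)
{
    if (dbg_enable == 1) {
        char buf[2048];
        va_list ap;

        va_start(ap, fmt);
        vsnprintf(buf, sizeof(buf), fmt, ap);   /* bounded, like buf[2048] */
        va_end(ap);

        fputs(buf, stdout);
    }
}

int main(void)
{
    print_cond("core %d: debug printing enabled\n", 0);
    return 0;
}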