X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm.c;h=a162da5ab3dc644a5bf3178f0dfa137c12a1f75b;hb=013d95f63ad584b8307ca90a82f8649b0bd7a818;hp=c5a7f001e056f758ae4f86f7dee260257b6c8d3d;hpb=298a05652b5704f9881af0683e3f16fc4cd03959;p=palacios.git diff --git a/palacios/src/palacios/vmm.c b/palacios/src/palacios/vmm.c index c5a7f00..a162da5 100644 --- a/palacios/src/palacios/vmm.c +++ b/palacios/src/palacios/vmm.c @@ -26,7 +26,8 @@ #include #include #include - +#include +#include #ifdef V3_CONFIG_SVM #include @@ -99,83 +100,6 @@ static void deinit_cpu(void * arg) { } } -/* Options are space-separated values of the form "X=Y", for example - * scheduler=EDF CPUs=1,2,3,4 - * THe following code pushes them into a hashtable for each of access - * by other code. Storage is allocated for keys and values as part - * of this process. XXX Need a way to deallocate this storage if the - * module is removed XXX - */ -static char *option_storage; -static struct hashtable *option_table; -static char *truevalue = "true"; - -static uint_t option_hash_fn(addr_t key) { - char * name = (char *)key; - return v3_hash_buffer((uint8_t *)name, strlen(name)); -} -static int option_eq_fn(addr_t key1, addr_t key2) { - char * name1 = (char *)key1; - char * name2 = (char *)key2; - - return (strcmp(name1, name2) == 0); -} - -void V3_parse_options(char *options) -{ - char *currKey = NULL, *currVal = NULL; - int parseKey = 1; - int len = strlen(options); - char *c; - - option_storage = V3_Malloc(len + 1); - strcpy(option_storage, options); - c = option_storage; - - option_table = v3_create_htable(0, option_hash_fn, option_eq_fn); - while (c && *c) { - /* Skip whitespace */ - if (*c == ' ') { - *c = 0; - if (currKey) { - if (!currVal) { - currVal = truevalue; - } - v3_htable_insert(option_table, (addr_t)currKey, (addr_t)currVal); - parseKey = 1; - currKey = NULL; - currVal = NULL; - } - c++; - } else if (parseKey) { - if (!currKey) { - currKey = c; - } - if (*c == '=') { - parseKey = 0; - *c = 0; - } - c++; - } else /* !parseKey */ { - if (!currVal) { - currVal = c; - } - c++; - } - } - if (currKey) { - if (!currVal) { - currVal = truevalue; - } - v3_htable_insert(option_table, (addr_t)currKey, (addr_t)currVal); - } - return; -} - -char *v3_lookup_option(char *key) { - return (char *)v3_htable_search(option_table, (addr_t)(key)); -} - void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus, char *options) { int i = 0; int minor = 0; @@ -194,7 +118,7 @@ void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus, char *op } // Parse host-os defined options into an easily-accessed format. - V3_parse_options(options); + v3_parse_options(options); // Register all the possible device types V3_init_devices(); @@ -202,12 +126,18 @@ void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus, char *op // Register all shadow paging handlers V3_init_shdw_paging(); + // Initialize the cpu_mapper framework (must be before extensions) + V3_init_cpu_mapper(); + // Initialize the scheduler framework (must be before extensions) V3_init_scheduling(); // Register all extensions V3_init_extensions(); + // Enabling cpu_mapper + V3_enable_cpu_mapper(); + // Enabling scheduler V3_enable_scheduler(); @@ -297,7 +227,7 @@ struct v3_vm_info * v3_create_vm(void * cfg, void * priv_data, char * name) { * Register this VM with the palacios scheduler. It will ask for admission * prior to launch. 
 */
-    if(v3_scheduler_register_vm(vm) != -1) {
+    if(v3_scheduler_register_vm(vm) == -1) {
 	PrintError(vm, VCORE_NONE,"Error registering VM with scheduler\n");
     }
 
@@ -341,12 +271,8 @@ static int start_core(void * p)
     return 0;
 }
 
-
-// For the moment very ugly. Eventually we will shift the cpu_mask to an arbitrary sized type...
-#define MAX_CORES 32
-
-
 int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
+
     uint32_t i;
     uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier
     uint32_t avail_cores = 0;
@@ -372,8 +298,6 @@ int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
 	}
     }
 
-
-
     /// CHECK IF WE ARE MULTICORE ENABLED....
 
     V3_Print(vm, VCORE_NONE, "V3 -- Starting VM (%u cores)\n", vm->num_cores);
 
@@ -381,7 +305,7 @@ int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
 
     // Check that enough cores are present in the mask to handle vcores
 
-    for (i = 0; i < MAX_CORES; i++) {
+    for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
 	int major = i / 8;
 	int minor = i % 8;
 
@@ -394,80 +318,46 @@ int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
 
     }
 
-
     vm->avail_cores = avail_cores;
 
     if (v3_scheduler_admit_vm(vm) != 0){
-	PrintError(vm, VCORE_NONE,"Error admitting VM %s for scheduling", vm->name);
+       PrintError(vm, VCORE_NONE,"Error admitting VM %s for scheduling", vm->name);
     }
 
-    vm->run_state = VM_RUNNING;
+    if (v3_cpu_mapper_admit_vm(vm) != 0){
+        PrintError(vm, VCORE_NONE,"Error admitting VM %s for mapping", vm->name);
+    }
 
-    // Spawn off threads for each core.
-    // We work backwards, so that core 0 is always started last.
-    for (i = 0, vcore_id = vm->num_cores - 1; (i < MAX_CORES) && (vcore_id >= 0); i++) {
-	int major = 0;
-	int minor = 0;
-	struct guest_info * core = &(vm->cores[vcore_id]);
-	char * specified_cpu = v3_cfg_val(core->core_cfg_data, "target_cpu");
-	uint32_t core_idx = 0;
-
-	if (specified_cpu != NULL) {
-	    core_idx = atoi(specified_cpu);
-
-	    if ((core_idx < 0) || (core_idx >= MAX_CORES)) {
-		PrintError(vm, VCORE_NONE, "Target CPU out of bounds (%d) (MAX_CORES=%d)\n", core_idx, MAX_CORES);
-	    }
+	vm->run_state = VM_RUNNING;
 
-	    i--; // We reset the logical core idx. Not strictly necessary I guess...
-	} else {
-	    core_idx = i;
-	}
+    if(v3_cpu_mapper_register_vm(vm,cpu_mask) == -1) {
 
-	major = core_idx / 8;
-	minor = core_idx % 8;
+        PrintError(vm, VCORE_NONE,"Error registering VM with cpu_mapper\n");
     }
 
-	if ((core_mask[major] & (0x1 << minor)) == 0) {
-	    PrintError(vm, VCORE_NONE, "Logical CPU %d not available for virtual core %d; not started\n",
-		       core_idx, vcore_id);
 
-	    if (specified_cpu != NULL) {
-		PrintError(vm, VCORE_NONE, "CPU was specified explicitly (%d). HARD ERROR\n", core_idx);
-		v3_stop_vm(vm);
-		return -1;
-	    }
+    for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {
 
-	    continue;
-	}
+	struct guest_info * core = &(vm->cores[vcore_id]);
 
 	PrintDebug(vm, VCORE_NONE, "Starting virtual core %u on logical core %u\n",
-		   vcore_id, core_idx);
+		   vcore_id, core->pcpu_id);
 
 	sprintf(core->exec_name, "%s-%u", vm->name, vcore_id);
 
-	PrintDebug(vm, VCORE_NONE, "run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
-		   core_idx, start_core, core, core->exec_name);
+	PrintDebug(vm, VCORE_NONE, "run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
+		   core->pcpu_id, start_core, core, core->exec_name);
 
 	core->core_run_state = CORE_STOPPED;  // core zero will turn itself on
-	core->pcpu_id = core_idx;
-	core->core_thread = V3_CREATE_THREAD_ON_CPU(core_idx, start_core, core, core->exec_name);
+	core->core_thread = V3_CREATE_THREAD_ON_CPU(core->pcpu_id, start_core, core, core->exec_name);
 
 	if (core->core_thread == NULL) {
 	    PrintError(vm, VCORE_NONE, "Thread launch failed\n");
 	    v3_stop_vm(vm);
 	    return -1;
 	}
-
-	vcore_id--;
     }
 
-    if (vcore_id >= 0) {
-	PrintError(vm, VCORE_NONE, "Error starting VM: Not enough available CPU cores\n");
-	v3_stop_vm(vm);
-	return -1;
-    }
-
-
     return 0;
 }
 
@@ -531,6 +421,11 @@ int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
 
     V3_Print(vm, core, "Moving Core\n");
 
+    if(v3_cpu_mapper_admit_core(vm, vcore_id, target_cpu) == -1){
+	PrintError(vm, core, "Core %d can not be admitted in cpu %d\n",vcore_id, target_cpu);
+	return -1;
+    }
+
 #ifdef V3_CONFIG_VMX
     switch (v3_cpu_types[core->pcpu_id]) {
 
@@ -797,23 +692,23 @@ int v3_get_state_vm(struct v3_vm_info *vm, struct v3_vm_state *s)
 
 #ifdef V3_CONFIG_CHECKPOINT
 #include 
 
-int v3_save_vm(struct v3_vm_info * vm, char * store, char * url) {
-  return v3_chkpt_save_vm(vm, store, url);
+int v3_save_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
+  return v3_chkpt_save_vm(vm, store, url, opts);
 }
 
-int v3_load_vm(struct v3_vm_info * vm, char * store, char * url) {
-  return v3_chkpt_load_vm(vm, store, url);
+int v3_load_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
+  return v3_chkpt_load_vm(vm, store, url, opts);
 }
 
 #ifdef V3_CONFIG_LIVE_MIGRATION
-int v3_send_vm(struct v3_vm_info * vm, char * store, char * url) {
-  return v3_chkpt_send_vm(vm, store, url);
+int v3_send_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
+  return v3_chkpt_send_vm(vm, store, url, opts);
 }
 
-int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url) {
-  return v3_chkpt_receive_vm(vm, store, url);
+int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
+  return v3_chkpt_receive_vm(vm, store, url, opts);
 }
 #endif
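
The option-parsing block removed from vmm.c above (V3_parse_options / v3_lookup_option) treats the host-supplied option string as space-separated "KEY=VALUE" pairs and records a bare "KEY" with the value "true"; Init_V3() now calls v3_parse_options() for the same job. The standalone C sketch below illustrates only those string semantics; it is not Palacios code, and the bare "debug" key is a made-up example (only "scheduler=EDF CPUs=1,2,3,4" appears in the removed comment).

#include <stdio.h>
#include <string.h>

int main(void) {
    char options[] = "scheduler=EDF CPUs=1,2,3,4 debug";   /* example option string */
    char *tok;

    for (tok = strtok(options, " "); tok != NULL; tok = strtok(NULL, " ")) {
        char *eq = strchr(tok, '=');
        const char *key = tok;
        const char *val = "true";      /* bare keys default to "true", as in the removed parser */

        if (eq != NULL) {
            *eq = '\0';                /* split "KEY=VALUE" in place */
            val = eq + 1;
        }

        /* In Palacios, v3_lookup_option(key) would return the stored value. */
        printf("%s -> %s\n", key, val);
    }

    return 0;
}

Compiled and run, this prints "scheduler -> EDF", "CPUs -> 1,2,3,4", and "debug -> true", mirroring what the removed hashtable-based parser stores.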
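
The checkpoint wrappers at the bottom of the diff (v3_save_vm, v3_load_vm, v3_send_vm, v3_receive_vm) gain a v3_chkpt_options_t parameter that they pass straight through to the corresponding v3_chkpt_* functions. A minimal host-side sketch of the new calling convention follows; the store name "FILE", the URL, and the zero-initialized options value are placeholder assumptions for illustration, not details taken from this diff.

/* Hedged sketch: assumes <palacios/vmm.h> declares v3_save_vm() with the new
 * four-argument signature and that a zero-initialized v3_chkpt_options_t
 * means "no special options requested". */
#include <palacios/vmm.h>

int host_checkpoint_vm(struct v3_vm_info * vm) {
    v3_chkpt_options_t opts = {0};   /* assumed default: no options set */

    /* Previously: v3_save_vm(vm, "FILE", "/tmp/vm.ckpt");
     * Now the options value is threaded through to v3_chkpt_save_vm(). */
    return v3_save_vm(vm, "FILE", "/tmp/vm.ckpt", opts);
}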