From: Oscar Mondragon
Date: Mon, 6 May 2013 20:51:47 +0000 (-0600)
Subject: Add CPU mapper extension and fix to scheduler for time dilation
X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=commitdiff_plain;h=2fff50d3e72abf29655326449ed4dc5cf6e8f429;p=palacios.releases.git

Add CPU mapper extension and fix to scheduler for time dilation
---
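The scheduler fix rests on one observation: when a VM runs under time dilation with factor TDF (td_denom in struct v3_time), a virtual core with EDF parameters (slice, period) needs only slice/(period*TDF) of a physical CPU, so admission control must scale each core's utilization share by 1/TDF. Below is a minimal standalone sketch of that admission test, with simplified stand-in types rather than the Palacios structs the patch touches:

    /* Minimal sketch of TDF-aware EDF admission control; the struct and
     * field names here are illustrative, not the Palacios definitions. */
    #include <stdio.h>
    #include <stdbool.h>

    struct edf_core { int slice; int period; };       /* budget per period */
    struct edf_rq   { int cpu_u; int cpu_percent; };  /* used % and cap %  */

    /* A core dilated by `tdf` consumes slice/(period*tdf) of the
     * physical CPU, so its utilization share is scaled by 1/tdf. */
    static bool is_admissible(struct edf_core *c, struct edf_rq *rq, int tdf) {
        int new_u = rq->cpu_u + (100 * c->slice) / (c->period * tdf);
        return new_u <= rq->cpu_percent;
    }

    int main(void) {
        struct edf_rq rq  = { .cpu_u = 80, .cpu_percent = 100 };
        struct edf_core c = { .slice = 10, .period = 25 };  /* 40% undilated */

        /* Undilated the core would push the runqueue to 120% and is
         * rejected; at TDF=2 it needs only 20% and fits exactly. */
        printf("tdf=1: %s\n", is_admissible(&c, &rq, 1) ? "admit" : "reject");
        printf("tdf=2: %s\n", is_admissible(&c, &rq, 2) ? "admit" : "reject");
        return 0;
    }

The sketch divides once at the end; the patch itself computes (100/tdf) * slice / period, which truncates 100/tdf first (see the note after the ext_sched_edf.c diff).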
diff --git a/Kconfig b/Kconfig
index c44bc12..b2479b3 100644
--- a/Kconfig
+++ b/Kconfig
@@ -372,6 +372,13 @@ config DEBUG_SCHEDULER
     help
       This turns on debugging for scheduler
 
+config DEBUG_CPU_MAPPER
+    bool "CPU Mapper"
+    default n
+    depends on DEBUG_ON
+    help
+      This turns on debugging for the CPU Mapper
+
 config DEBUG_IO
     bool "IO"
     default n
diff --git a/palacios/src/extensions/Kconfig b/palacios/src/extensions/Kconfig
index 000d99f..eb646bc 100644
--- a/palacios/src/extensions/Kconfig
+++ b/palacios/src/extensions/Kconfig
@@ -42,4 +42,15 @@ config DEBUG_EXT_SCHED_EDF
     default n
     depends on DEBUG_ON && EXT_SCHED_EDF
 
+config EXT_CPU_MAPPER_EDF
+    bool "CPU Mapper for EDF Scheduler"
+    default n
+    help
+      Provides a CPU Mapper for the EDF scheduler
+
+config DEBUG_EXT_CPU_MAPPER_EDF
+    bool "Debugging for EDF CPU Mapper"
+    default n
+    depends on DEBUG_ON && EXT_CPU_MAPPER_EDF
+
 endmenu
diff --git a/palacios/src/extensions/Makefile b/palacios/src/extensions/Makefile
index 7f4b5cb..1c66658 100644
--- a/palacios/src/extensions/Makefile
+++ b/palacios/src/extensions/Makefile
@@ -6,3 +6,4 @@ obj-$(V3_CONFIG_EXT_INSPECTOR) += ext_inspector.o
 obj-$(V3_CONFIG_EXT_MACH_CHECK) += ext_mcheck.o
 obj-$(V3_CONFIG_EXT_VMWARE) += ext_vmware.o
 obj-$(V3_CONFIG_EXT_SCHED_EDF) += ext_sched_edf.o
+obj-$(V3_CONFIG_EXT_CPU_MAPPER_EDF) += ext_cpu_mapper_edf.o
diff --git a/palacios/src/extensions/ext_sched_edf.c b/palacios/src/extensions/ext_sched_edf.c
index 45f29b7..6cc7319 100644
--- a/palacios/src/extensions/ext_sched_edf.c
+++ b/palacios/src/extensions/ext_sched_edf.c
@@ -29,7 +29,7 @@
 #include
 
-#ifndef V3_CONFIG_DEBUG_EXT_EDF_SCHED
+#ifndef V3_CONFIG_DEBUG_EXT_SCHED_EDF
 #undef PrintDebug
 #define PrintDebug(fmt, args...)
 #endif
@@ -184,8 +184,13 @@ priv_data_init(struct v3_vm_info *vm){
 static bool
 is_admissible_core(struct vm_core_edf_sched * new_sched_core, struct vm_edf_rq *runqueue){
 
+    struct v3_vm_info * vm = new_sched_core->info->vm_info;
+
+    struct v3_time *vm_ts = &(vm->time_state);
+    int tdf = vm_ts->td_denom;
+
     int curr_utilization = runqueue->cpu_u;
-    int new_utilization = curr_utilization + (100 * new_sched_core->slice / new_sched_core->period);
+    int new_utilization = curr_utilization + ((100/tdf) * new_sched_core->slice / new_sched_core->period);
     int cpu_percent = (runqueue->edf_config).cpu_percent;
 
     if (new_utilization <= cpu_percent)
@@ -339,6 +344,11 @@ wakeup_core(struct guest_info *info){
 static void
 activate_core(struct vm_core_edf_sched * core, struct vm_edf_rq *runqueue){
+
+    struct v3_vm_info * vm = core->info->vm_info;
+
+    struct v3_time *vm_ts = &(vm->time_state);
+    int tdf = vm_ts->td_denom;
 
     if (is_admissible_core(core, runqueue)){
@@ -359,7 +369,7 @@ activate_core(struct vm_core_edf_sched * core, struct vm_edf_rq *runqueue){
            ins = insert_core_edf(core, runqueue);
        }
 
-       runqueue->cpu_u += 100 * core->slice / core->period;
+       runqueue->cpu_u += (100/tdf) * core->slice / core->period;
        runqueue->nr_vCPU ++;
 
        /*
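A caveat on the arithmetic in is_admissible_core() and activate_core() above: (100/tdf) is C integer division, so any TDF that does not divide 100 evenly truncates the per-core share before slice/period is applied, systematically under-counting utilization. A small demonstration with illustrative numbers:

    /* Compares the patched expression with a single final division;
     * the slice/period values are illustrative only. */
    #include <stdio.h>

    int main(void) {
        int slice = 30, period = 100;   /* a 30% core, undilated */
        int tdf;

        for (tdf = 1; tdf <= 4; tdf++) {
            int patched = (100 / tdf) * slice / period;    /* truncates 100/tdf first  */
            int single  = (100 * slice) / (period * tdf);  /* divides once, at the end */
            printf("tdf=%d: patched=%d%% single-division=%d%%\n", tdf, patched, single);
        }
        return 0;   /* at tdf=3 the patched form yields 9%, the single division 10% */
    }

Deferring the division, e.g. 100 * slice / (period * tdf), keeps full integer precision.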
diff --git a/palacios/src/palacios/Makefile b/palacios/src/palacios/Makefile
index 777d8a8..e274ee1 100644
--- a/palacios/src/palacios/Makefile
+++ b/palacios/src/palacios/Makefile
@@ -3,6 +3,7 @@ obj-y := \
     vm_guest_mem.o \
     vmm.o \
     vmm_config.o \
+    vmm_cpu_mapper.o \
     vmm_ctrl_regs.o \
     vmm_debug.o \
     vmm_decoder.o \
diff --git a/palacios/src/palacios/vmm.c b/palacios/src/palacios/vmm.c
index 61aa245..a162da5 100644
--- a/palacios/src/palacios/vmm.c
+++ b/palacios/src/palacios/vmm.c
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include <palacios/vmm_cpu_mapper.h>
 
 #ifdef V3_CONFIG_SVM
 #include
@@ -125,12 +126,18 @@ void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus, char *op
 
     // Register all shadow paging handlers
     V3_init_shdw_paging();
 
+    // Initialize the cpu_mapper framework (must be before extensions)
+    V3_init_cpu_mapper();
+
     // Initialize the scheduler framework (must be before extensions)
     V3_init_scheduling();
 
     // Register all extensions
     V3_init_extensions();
 
+    // Enabling cpu_mapper
+    V3_enable_cpu_mapper();
+
     // Enabling scheduler
     V3_enable_scheduler();
@@ -264,12 +271,8 @@ static int start_core(void * p)
     return 0;
 }
 
-
-// For the moment very ugly. Eventually we will shift the cpu_mask to an arbitrary sized type...
-#define MAX_CORES 32
-
-
 int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
+
     uint32_t i;
     uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier
     uint32_t avail_cores = 0;
@@ -295,8 +298,6 @@ int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
        }
     }
 
-
-
     /// CHECK IF WE ARE MULTICORE ENABLED....
 
     V3_Print(vm, VCORE_NONE, "V3 -- Starting VM (%u cores)\n", vm->num_cores);
@@ -304,7 +305,7 @@ int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
 
     // Check that enough cores are present in the mask to handle vcores
 
-    for (i = 0; i < MAX_CORES; i++) {
+    for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
        int major = i / 8;
        int minor = i % 8;
@@ -317,80 +318,46 @@ int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
        }
     }
 
-
     vm->avail_cores = avail_cores;
 
     if (v3_scheduler_admit_vm(vm) != 0){
-       PrintError(vm, VCORE_NONE,"Error admitting VM %s for scheduling", vm->name);
+       PrintError(vm, VCORE_NONE,"Error admitting VM %s for scheduling", vm->name);
+    }
+
+    if (v3_cpu_mapper_admit_vm(vm) != 0){
+       PrintError(vm, VCORE_NONE,"Error admitting VM %s for mapping", vm->name);
     }
 
     vm->run_state = VM_RUNNING;
 
-    // Spawn off threads for each core.
-    // We work backwards, so that core 0 is always started last.
-    for (i = 0, vcore_id = vm->num_cores - 1; (i < MAX_CORES) && (vcore_id >= 0); i++) {
-       int major = 0;
-       int minor = 0;
-       struct guest_info * core = &(vm->cores[vcore_id]);
-       char * specified_cpu = v3_cfg_val(core->core_cfg_data, "target_cpu");
-       uint32_t core_idx = 0;
-
-       if (specified_cpu != NULL) {
-           core_idx = atoi(specified_cpu);
-
-           if ((core_idx < 0) || (core_idx >= MAX_CORES)) {
-               PrintError(vm, VCORE_NONE, "Target CPU out of bounds (%d) (MAX_CORES=%d)\n", core_idx, MAX_CORES);
-           }
+    if(v3_cpu_mapper_register_vm(vm,cpu_mask) == -1) {
 
-           i--; // We reset the logical core idx. Not strictly necessary I guess...
-       } else {
-           core_idx = i;
-       }
+       PrintError(vm, VCORE_NONE,"Error registering VM with cpu_mapper\n");
+    }
 
-       major = core_idx / 8;
-       minor = core_idx % 8;
-
-       if ((core_mask[major] & (0x1 << minor)) == 0) {
-           PrintError(vm, VCORE_NONE, "Logical CPU %d not available for virtual core %d; not started\n",
-                      core_idx, vcore_id);
+    for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {
 
-           if (specified_cpu != NULL) {
-               PrintError(vm, VCORE_NONE, "CPU was specified explicitly (%d). HARD ERROR\n", core_idx);
-               v3_stop_vm(vm);
-               return -1;
-           }
-
-           continue;
-       }
+       struct guest_info * core = &(vm->cores[vcore_id]);
 
        PrintDebug(vm, VCORE_NONE, "Starting virtual core %u on logical core %u\n",
-                  vcore_id, core_idx);
+                  vcore_id, core->pcpu_id);
 
        sprintf(core->exec_name, "%s-%u", vm->name, vcore_id);
 
-       PrintDebug(vm, VCORE_NONE, "run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
-                  core_idx, start_core, core, core->exec_name);
+       PrintDebug(vm, VCORE_NONE, "run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
+                  core->pcpu_id, start_core, core, core->exec_name);
 
        core->core_run_state = CORE_STOPPED;  // core zero will turn itself on
-       core->pcpu_id = core_idx;
 
-       core->core_thread = V3_CREATE_THREAD_ON_CPU(core_idx, start_core, core, core->exec_name);
+       core->core_thread = V3_CREATE_THREAD_ON_CPU(core->pcpu_id, start_core, core, core->exec_name);
 
        if (core->core_thread == NULL) {
            PrintError(vm, VCORE_NONE, "Thread launch failed\n");
            v3_stop_vm(vm);
            return -1;
        }
-
-       vcore_id--;
     }
 
-    if (vcore_id >= 0) {
-       PrintError(vm, VCORE_NONE, "Error starting VM: Not enough available CPU cores\n");
-       v3_stop_vm(vm);
-       return -1;
-    }
-
-
     return 0;
 }
@@ -454,6 +421,11 @@ int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
 
     V3_Print(vm, core, "Moving Core\n");
 
+    if(v3_cpu_mapper_admit_core(vm, vcore_id, target_cpu) == -1){
+       PrintError(vm, core, "Core %d cannot be admitted on cpu %d\n",vcore_id, target_cpu);
+       return -1;
+    }
+
 #ifdef V3_CONFIG_VMX
     switch (v3_cpu_types[core->pcpu_id]) {
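The v3_start_vm() rewrite above moves placement policy out of the start path entirely: v3_cpu_mapper_admit_vm() performs admission against the physical CPUs, v3_cpu_mapper_register_vm() takes over the work of the deleted in-line logic (the target_cpu parsing, the MAX_CORES bounds and mask checks, and the backwards-walking thread loop) by assigning each virtual core its pcpu_id, and the remaining loop simply spawns one thread per vcore on the assigned core. A compressed sketch of the resulting control flow, using illustrative callbacks rather than the real Palacios entry points:

    /* Sketch of the restructured start path; the function-pointer
     * parameters stand in for the cpu_mapper and threading APIs. */
    struct vcore {
        int pcpu_id;    /* physical CPU, filled in by the mapper */
    };

    int start_vm_sketch(struct vcore *cores, int num_cores, unsigned int cpu_mask,
                        int (*mapper_register)(struct vcore *, int, unsigned int),
                        int (*spawn_on_cpu)(int, struct vcore *))
    {
        int i;

        /* Placement lives in the mapper: it checks the mask and fills
         * in pcpu_id for every vcore, or refuses the whole VM. */
        if (mapper_register(cores, num_cores, cpu_mask) == -1) {
            return -1;
        }

        /* The start loop no longer decides placement; it only creates
         * one thread per vcore on the pre-assigned physical core. */
        for (i = 0; i < num_cores; i++) {
            if (spawn_on_cpu(cores[i].pcpu_id, &cores[i]) == -1) {
                return -1;
            }
        }

        return 0;
    }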
diff --git a/palacios/src/palacios/vmm_scheduler.c b/palacios/src/palacios/vmm_scheduler.c
index f4366cd..25a84fc 100644
--- a/palacios/src/palacios/vmm_scheduler.c
+++ b/palacios/src/palacios/vmm_scheduler.c
@@ -94,12 +94,13 @@ int V3_enable_scheduler() {
        scheduler = v3_scheduler_lookup(default_strategy);
     }
 
-    PrintDebug(VM_NONE, VCORE_NONE,"Scheduler %s found",scheduler->name);
-
     if (!scheduler) {
        PrintError(VM_NONE, VCORE_NONE,"Specified Palacios scheduler \"%s\" not found.\n", default_strategy);
        return -1;
     }
+
+    PrintDebug(VM_NONE, VCORE_NONE,"Scheduler %s found",scheduler->name);
+
     if (scheduler->init) {
        return scheduler->init();
     } else {
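The vmm_scheduler.c change is an order-of-operations fix: V3_enable_scheduler() logged scheduler->name before testing whether the lookup had returned NULL, so a missing scheduler could dereference a NULL pointer in the debug print instead of reaching the "not found" error path. The corrected pattern, sketched with illustrative types:

    /* Check a lookup result before dereferencing it; the struct and
     * function names are stand-ins for the Palacios equivalents. */
    struct strategy {
        const char *name;
        int (*init)(void);
    };

    int enable_sketch(struct strategy *(*lookup)(const char *), const char *want) {
        struct strategy *s = lookup(want);

        if (!s) {
            return -1;          /* report failure first ... */
        }

        /* ... and only then touch s->name or s->init. */
        return s->init ? s->init() : 0;
    }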