--- /dev/null
+/*
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National
+ * Science Foundation and the Department of Energy.
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico. You can find out more at
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2013, Oscar Mondragon <omondrag@cs.unm.edu>
+ * Copyright (c) 2013, Patrick G. Bridges <bridges@cs.unm.edu>
+ * Copyright (c) 2013, The V3VEE Project <http://www.v3vee.org>
+ * All rights reserved.
+ *
+ * Author: Oscar Mondragon <omondrag@cs.unm.edu>
+ * Patrick G. Bridges <bridges@cs.unm.edu>
+ *
+ * This is free software. You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
+
+#ifndef __VMM_CPU_MAPPER_H__
+#define __VMM_CPU_MAPPER_H__
+
+/*
+ * A cpu_mapper strategy: decides which physical CPU each virtual core of a
+ * VM runs on.  Implementations register themselves by name via
+ * v3_register_cpu_mapper().  Any hook may be NULL; the framework treats a
+ * NULL hook as a no-op that succeeds.  Hooks return 0 on success, -1 on
+ * failure.
+ */
+struct vm_cpu_mapper_impl {
+	char *name;     /* unique registry key, e.g. "default" or "edf" */
+	int (*init)();  /* global init, run when this mapper is enabled */
+	int (*deinit)(); /* global teardown */
+	int (*vm_init)(struct v3_vm_info *vm, unsigned int cpu_mask); /* compute the vcore->pcpu mapping for one VM */
+	int (*vm_deinit)(struct v3_vm_info *vm); /* release per-VM mapper state */
+	int (*admit_core)(struct v3_vm_info * vm, int vcore_id, int target_cpu); /* admission test: one vcore on target_cpu */
+	int (*admit)(struct v3_vm_info *vm); /* admission test: the whole VM */
+
+};
+
+/* Framework entry points dispatching to the active cpu_mapper. */
+int v3_cpu_mapper_register_vm(struct v3_vm_info *vm, unsigned int cpu_mask); /* run the active mapper's vm_init hook */
+int v3_cpu_mapper_admit_vm(struct v3_vm_info *vm); /* run the active mapper's admit hook */
+int v3_cpu_mapper_admit_core(struct v3_vm_info * vm, int vcore_id, int target_cpu); /* run the active mapper's admit_core hook */
+int V3_init_cpu_mapper(); /* create the mapper registry and register the default mapper */
+int v3_register_cpu_mapper(struct vm_cpu_mapper_impl *vm); /* add a mapper implementation to the registry */
+struct vm_cpu_mapper_impl *v3_cpu_mapper_lookup(char *name); /* find a registered mapper by name (NULL if absent) */
+int V3_enable_cpu_mapper(); /* select the active mapper ("cpu_mapper" boot option, else "default") */
+
+#endif /* __VMM_CPU_MAPPER_H__ */
--- /dev/null
+/*
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National
+ * Science Foundation and the Department of Energy.
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico. You can find out more at
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2013, Oscar Mondragon <omondrag@cs.unm.edu>
+ * Copyright (c) 2013, The V3VEE Project <http://www.v3vee.org>
+ * All rights reserved.
+ *
+ * Author: Oscar Mondragon <omondrag@cs.unm.edu>
+ *
+ * This is free software. You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
+
+
+#include <palacios/vmm.h>
+#include <palacios/vm_guest.h>
+#include <palacios/vmm_extensions.h>
+#include <palacios/vmm_cpu_mapper.h>
+
+
+#ifndef V3_CONFIG_DEBUG_EXT_CPU_MAPPER_EDF
+#undef PrintDebug
+#define PrintDebug(fmt, args...)
+#endif
+
+
+/* Overview
+ *
+ * cpu_mapper for EDF Scheduling
+ *
+ */
+
+#define MAX_TDF 20
+#define MIN_TDF 1
+
+
+// First Next Fit heuristic implementation
+
+/*
+ * First Next Fit heuristic: pack the VM's virtual cores onto the available
+ * logical cores under a time dilation factor (tdf).  Each virtual core's
+ * utilization (100 * slice / period, in percent) is divided by tdf and
+ * placed on the current logical core if it still fits under 100%.
+ *
+ * save == 0  -> commit the mapping (pcpu_id per vcore) and tdf to the VM
+ * save != 0  -> dry run: only test whether a mapping exists
+ *
+ * Returns 0 if every virtual core could be placed, -1 otherwise.
+ */
+int firstNextFit(int save, int tdf,struct v3_vm_info *vm){
+
+    V3_Print(vm, VCORE_NONE,"firstNextFit for tdf %d \n", tdf);
+
+    int V = vm->num_cores;   // Number of virtual cores
+    int L = vm->avail_cores; // Number of Logical cores
+    int ULCores[L];          // Utilization (percent) per logical core
+    int uVCores[V];          // Utilization (percent) per virtual core
+    int mapping[V];          // mapping[vc] = logical core assigned to vc
+    int vc=0;                // virtual core id
+    int lc=0;                // logical core id
+
+    int i=0;
+
+    // Degenerate inputs: nothing to map onto, or a divisor of zero below.
+    if ((V <= 0) || (L <= 0) || (tdf <= 0)) {
+        return -1;
+    }
+
+    for(i=0;i<L;i++){
+        ULCores[i] = 0;
+    }
+
+    for(i=0;i<V;i++){
+        uVCores[i] = 0;
+        mapping[i] = 0;
+    }
+
+    // Initialize Virtual cores utilization vector from the VM configuration
+    v3_cfg_tree_t * cfg_tree = vm->cfg_data->cfg;
+    v3_cfg_tree_t * core = v3_cfg_subtree(v3_cfg_subtree(cfg_tree, "cores"), "core");
+
+    // Bound by V as well: the config may list more <core> nodes than
+    // num_cores, which previously overran uVCores[].
+    while (core && (vc < V)){
+
+        char *period = v3_cfg_val(core, "period");
+        char *slice = v3_cfg_val(core, "slice");
+
+        // Missing attributes or a zero period would crash / divide by zero.
+        if ((period == NULL) || (slice == NULL)) {
+            return -1;
+        }
+
+        uint64_t p = atoi(period);
+        uint64_t s = atoi(slice);
+
+        if (p == 0) {
+            return -1;
+        }
+
+        uVCores[vc]= 100 * s / p;
+        vc++;
+        core = v3_cfg_next_branch(core);
+    }
+
+    vc = 0;
+
+    // TODO Control Target CPU case
+
+    while(vc < V){
+
+        if( ULCores[lc] + (uVCores[vc])/tdf <= 100 ){
+            // Fits on the current logical core
+            ULCores[lc] = ULCores[lc] + (uVCores[vc])/tdf;
+            mapping[vc] = lc;
+            vc = vc + 1;
+        }
+        else if ((lc+1) < L){
+            // Advance to the next logical core and RE-TEST this virtual
+            // core against it.  (The original assigned it unconditionally,
+            // which could overload the new core past 100%.)
+            lc = lc + 1;
+        }
+        else{
+            return -1; // Could not map
+        }
+    }
+
+    if(save ==0){
+
+        // Assign computed TDF
+        struct v3_time *vm_ts = &(vm->time_state);
+        vm_ts->td_denom = tdf;
+
+        // Commit the virtual-to-logical core mapping
+        for (vc = 0; vc < vm->num_cores; vc++) {
+            struct guest_info * core = &(vm->cores[vc]);
+            core->pcpu_id = mapping[vc];
+        }
+
+        int x = 0;
+        for(x=0;x<V;x++){
+            PrintDebug(vm, VCORE_NONE,"%d ",mapping[x]);
+        }
+
+        for(x=0;x<L; x++){
+            PrintDebug(vm, VCORE_NONE,"%d ",ULCores[x]);
+        }
+        PrintDebug(vm, VCORE_NONE,"\n");
+
+    }
+
+    return 0;
+}
+
+
+/*
+ * Initialize the EDF cpu_mapper for a VM: binary-search the smallest time
+ * dilation factor (TDF) in [MIN_TDF, MAX_TDF] under which all virtual cores
+ * can be packed onto the available logical cores, then commit that mapping.
+ *
+ * Returns 0 on success, -1 if no feasible mapping exists even at MAX_TDF.
+ * (The cpu_mask parameter is currently unused by this strategy.)
+ */
+int edf_mapper_vm_init(struct v3_vm_info *vm, unsigned int cpu_mask){
+
+    V3_Print(vm, VCORE_NONE,"mapper. Initializing edf cpu_mapper");
+
+    int min_tdf = MIN_TDF;
+    int max_tdf = MAX_TDF;
+    int tdf;  // Time dilation factor
+
+    // Fail early if the VM is not mappable even at maximum dilation.
+    // (The original search could oscillate forever between two TDF values
+    // and ignored the result of the final, committing call.)
+    if (firstNextFit(-1, max_tdf, vm) == -1) {
+        return -1;
+    }
+
+    // Standard lower-bound binary search for the smallest feasible TDF.
+    // Invariant: max_tdf is always feasible.
+    while (min_tdf < max_tdf) {
+
+        int mid = (min_tdf + max_tdf) / 2;
+
+        if (firstNextFit(-1, mid, vm) != -1) {
+            max_tdf = mid;       // feasible: try a smaller dilation
+        } else {
+            min_tdf = mid + 1;   // infeasible: need more dilation
+        }
+    }
+
+    tdf = max_tdf;
+
+    // Commit the mapping (save == 0) and propagate any failure.
+    if (firstNextFit(0, tdf, vm) == -1) {
+        return -1;
+    }
+
+    return 0;
+}
+
+
+/* Whole-VM admission test for the EDF mapper. Not yet implemented: every
+ * VM is admitted unconditionally. */
+int edf_mapper_admit(struct v3_vm_info *vm){
+    // TODO: perform a real schedulability test
+    PrintDebug(vm, VCORE_NONE,"mapper. Edf cpu_mapper admit");
+    return 0;
+}
+
+/* Per-core admission test for the EDF mapper. Not yet implemented: every
+ * core is admitted unconditionally. */
+int edf_mapper_admit_core(struct v3_vm_info * vm, int vcore_id, int target_cpu){
+    // TODO: perform a real per-core schedulability test
+    PrintDebug(vm, VCORE_NONE,"mapper. Edf cpu_mapper admit core");
+    return 0;
+}
+
+
+/* EDF cpu_mapper strategy descriptor, registered under the name "edf".
+ * Unused hooks are left NULL (treated as no-ops by the framework). */
+static struct vm_cpu_mapper_impl edf_mapper = {
+    .name       = "edf",
+    .init       = NULL,
+    .deinit     = NULL,
+    .vm_init    = edf_mapper_vm_init,
+    .vm_deinit  = NULL,
+    .admit_core = edf_mapper_admit_core,
+    .admit      = edf_mapper_admit
+};
+
+
+/* Extension init hook: registers the EDF cpu_mapper with the framework. */
+static int
+ext_mapper_edf_init() {
+    struct vm_cpu_mapper_impl *impl = &edf_mapper;
+
+    PrintDebug(VM_NONE, VCORE_NONE,"mapper. Creating (%s) cpu_mapper\n", impl->name);
+    return v3_register_cpu_mapper(impl);
+}
+
+/* Per-VM extension init hook: nothing to do here — the actual mapping
+ * happens through the registered cpu_mapper's vm_init. */
+static int
+ext_mapper_edf_vm_init() { return 0; }
+
+/* Extension descriptor wiring the EDF cpu_mapper into Palacios' extension
+ * framework; only the global and per-VM init hooks are used. */
+static struct v3_extension_impl mapper_edf_impl = {
+    .name        = "cpu_mapper for EDF Scheduler",
+    .init        = ext_mapper_edf_init,
+    .vm_init     = ext_mapper_edf_vm_init,
+    .vm_deinit   = NULL,
+    .core_init   = NULL,
+    .core_deinit = NULL,
+    .on_entry    = NULL,
+    .on_exit     = NULL
+};
+
+register_extension(&mapper_edf_impl);
--- /dev/null
+/*
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National
+ * Science Foundation and the Department of Energy.
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico. You can find out more at
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2013, Oscar Mondragon <omondrag@cs.unm.edu>
+ * Copyright (c) 2013, Patrick G. Bridges <bridges@cs.unm.edu>
+ * Copyright (c) 2013, The V3VEE Project <http://www.v3vee.org>
+ * All rights reserved.
+ *
+ * Author: Oscar Mondragon <omondrag@cs.unm.edu>
+ * Patrick G. Bridges <bridges@cs.unm.edu>
+ *
+ * This is free software. You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
+
+#include <palacios/vmm.h>
+#include <palacios/vm_guest.h>
+#include <palacios/vmm_cpu_mapper.h>
+#include <palacios/vmm_hashtable.h>
+
+#ifndef V3_CONFIG_DEBUG_CPU_MAPPER
+#undef PrintDebug
+#define PrintDebug(fmt, args...)
+#endif
+
+static char default_strategy[] = "default";
+static struct hashtable * master_cpu_mapper_table = NULL;
+static int create_default_cpu_mapper();
+
+static struct vm_cpu_mapper_impl *cpu_mapper = NULL;
+
+/* Hash function for the mapper registry: the key is a mapper name
+ * (a char* smuggled through addr_t); hash its bytes. */
+static uint_t cpu_mapper_hash_fn(addr_t key) {
+    char *str = (char *)key;
+    return v3_hash_buffer((uint8_t *)str, strlen(str));
+}
+
+/* Key equality for the mapper registry: two keys match when their
+ * name strings compare equal. */
+static int cpu_mapper_eq_fn(addr_t key1, addr_t key2) {
+    return strcmp((char *)key1, (char *)key2) == 0;
+}
+
+/*
+ * Initialize the cpu_mapper framework: create the registry hashtable and
+ * register the built-in "default" mapper.
+ * Returns 0 on success, -1 on failure.
+ */
+int V3_init_cpu_mapper() {
+
+    PrintDebug(VM_NONE, VCORE_NONE,"Initializing cpu_mapper");
+
+    master_cpu_mapper_table = v3_create_htable(0, cpu_mapper_hash_fn, cpu_mapper_eq_fn);
+
+    // v3_create_htable can fail; the original dereferenced the table later
+    // without ever checking.
+    if (master_cpu_mapper_table == NULL) {
+        PrintError(VM_NONE, VCORE_NONE, "Could not create cpu_mapper hashtable\n");
+        return -1;
+    }
+
+    return create_default_cpu_mapper();
+}
+
+
+/*
+ * Register a cpu_mapper implementation under its name.
+ * Returns 0 on success, -1 if the argument is invalid, the name is already
+ * taken, or the hashtable insertion fails.
+ */
+int v3_register_cpu_mapper(struct vm_cpu_mapper_impl *s) {
+
+    // Guard against a NULL impl or NULL name: the hash function below would
+    // otherwise call strlen(NULL).
+    if ((s == NULL) || (s->name == NULL)) {
+        PrintError(VM_NONE, VCORE_NONE, "Attempt to register an invalid cpu_mapper\n");
+        return -1;
+    }
+
+    PrintDebug(VM_NONE, VCORE_NONE,"Registering cpu_mapper (%s)\n", s->name);
+
+    if (v3_htable_search(master_cpu_mapper_table, (addr_t)(s->name))) {
+        PrintError(VM_NONE, VCORE_NONE, "Multiple instances of cpu_mapper (%s)\n", s->name);
+        return -1;
+    }
+
+    if (v3_htable_insert(master_cpu_mapper_table,
+                         (addr_t)(s->name),
+                         (addr_t)(s)) == 0) {
+        PrintError(VM_NONE, VCORE_NONE, "Could not register cpu_mapper (%s)\n", s->name);
+        return -1;
+    }
+
+    return 0;
+}
+
+/* Look up a registered cpu_mapper by name; returns NULL when not found. */
+struct vm_cpu_mapper_impl *v3_cpu_mapper_lookup(char *name)
+{
+    addr_t entry = v3_htable_search(master_cpu_mapper_table, (addr_t)(name));
+
+    return (struct vm_cpu_mapper_impl *)entry;
+}
+
+/*
+ * Select and activate the global cpu_mapper.  The "cpu_mapper" boot option
+ * names the strategy; if it is absent or unknown we fall back to the
+ * built-in "default" mapper.
+ * Returns the mapper's init() result (or 0 when it has no init hook);
+ * -1 if no usable mapper could be found.
+ */
+int V3_enable_cpu_mapper() {
+    char *mapper_name;
+
+    cpu_mapper = NULL;
+    mapper_name = v3_lookup_option("cpu_mapper");
+
+    if (mapper_name) {
+        cpu_mapper = v3_cpu_mapper_lookup(mapper_name);
+
+        if (!cpu_mapper) {
+            // Report the mapper the user actually asked for before falling
+            // back (the original message only ever named "default").
+            PrintError(VM_NONE, VCORE_NONE,"Specified Palacios cpu_mapper \"%s\" not found, trying default\n", mapper_name);
+        }
+    }
+
+    if (!cpu_mapper) {
+        cpu_mapper = v3_cpu_mapper_lookup(default_strategy);
+    }
+
+    if (!cpu_mapper) {
+        PrintError(VM_NONE, VCORE_NONE,"Specified Palacios cpu_mapper \"%s\" not found.\n", default_strategy);
+        return -1;
+    }
+
+    PrintDebug(VM_NONE, VCORE_NONE,"cpu_mapper %s found\n",cpu_mapper->name);
+
+    if (cpu_mapper->init) {
+        return cpu_mapper->init();
+    } else {
+        return 0;
+    }
+}
+
+/*
+ * Dispatch per-VM mapping to the active cpu_mapper's vm_init hook.
+ * Returns the hook's result, 0 when the mapper has no hook, or -1 when no
+ * mapper is active.
+ */
+int v3_cpu_mapper_register_vm(struct v3_vm_info *vm,unsigned int cpu_mask) {
+
+    // V3_enable_cpu_mapper() may not have run (or may have failed);
+    // the original dereferenced cpu_mapper unconditionally.
+    if (cpu_mapper == NULL) {
+        PrintError(VM_NONE, VCORE_NONE, "No cpu_mapper enabled\n");
+        return -1;
+    }
+
+    if (cpu_mapper->vm_init) {
+        return cpu_mapper->vm_init(vm,cpu_mask);
+    } else {
+        return 0;
+    }
+}
+
+/*
+ * Dispatch whole-VM admission control to the active cpu_mapper.
+ * Returns the hook's result, 0 when the mapper has no admit hook, or -1
+ * when no mapper is active.
+ */
+int v3_cpu_mapper_admit_vm(struct v3_vm_info *vm) {
+
+    // Guard against dispatch before V3_enable_cpu_mapper() succeeded.
+    if (cpu_mapper == NULL) {
+        PrintError(VM_NONE, VCORE_NONE, "No cpu_mapper enabled\n");
+        return -1;
+    }
+
+    if (cpu_mapper->admit) {
+        return cpu_mapper->admit(vm);
+    } else {
+        return 0;
+    }
+}
+
+
+/*
+ * Dispatch per-core admission control to the active cpu_mapper.
+ * Returns the hook's result, 0 when the mapper has no admit_core hook, or
+ * -1 when no mapper is active.
+ */
+int v3_cpu_mapper_admit_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
+
+    // Guard against dispatch before V3_enable_cpu_mapper() succeeded.
+    if (cpu_mapper == NULL) {
+        PrintError(VM_NONE, VCORE_NONE, "No cpu_mapper enabled\n");
+        return -1;
+    }
+
+    if (cpu_mapper->admit_core) {
+        return cpu_mapper->admit_core(vm,vcore_id,target_cpu);
+    } else {
+        return 0;
+    }
+}
+
+
+
+/*
+ * Default mapping policy: assign virtual cores round-robin to the logical
+ * CPUs enabled in cpu_mask, honoring any per-core "target_cpu" override in
+ * the VM configuration.  Virtual cores are filled from the highest vcore_id
+ * down.
+ *
+ * Returns 0 on success; on failure the VM is stopped and -1 is returned.
+ */
+int default_mapper_vm_init(struct v3_vm_info *vm, unsigned int cpu_mask)
+{
+
+    PrintDebug(vm, VCORE_NONE,"mapper. default_mapper_init\n");
+
+    uint32_t i;
+    int vcore_id = 0;
+    uint8_t * core_mask = (uint8_t *)&cpu_mask;
+    const uint32_t num_mask_bits = sizeof(cpu_mask) * 8; // CPUs representable in the mask
+
+    for (i = 0, vcore_id = vm->num_cores - 1; vcore_id >= 0; i++) {
+
+	int major = 0;
+	int minor = 0;
+	struct guest_info * core = &(vm->cores[vcore_id]);
+	char * specified_cpu = v3_cfg_val(core->core_cfg_data, "target_cpu");
+	int core_idx = 0; // signed: atoi() can legitimately return a negative value
+
+	if (specified_cpu != NULL) {
+	    core_idx = atoi(specified_cpu);
+
+	    // The original declared core_idx as uint32_t, so its "< 0"
+	    // check could never fire; also bound it by the mask width so
+	    // the core_mask[] read below stays inside cpu_mask.
+	    if ((core_idx < 0) || ((uint32_t)core_idx >= num_mask_bits)) {
+		PrintError(vm, VCORE_NONE, "Target CPU out of bounds (%d) \n", core_idx);
+		v3_stop_vm(vm);
+		return -1;
+	    }
+
+	    i--; // We reset the logical core idx. Not strictly necessary I guess...
+	} else {
+	    if (i >= num_mask_bits) {
+		// Exhausted the mask: previously this walked past the end
+		// of cpu_mask (out-of-bounds read) and could loop forever.
+		break;
+	    }
+	    core_idx = i;
+	}
+
+	major = core_idx / 8;
+	minor = core_idx % 8;
+
+	if ((core_mask[major] & (0x1 << minor)) == 0) {
+	    PrintError(vm, VCORE_NONE, "Logical CPU %d not available for virtual core %d; not started\n",
+		       core_idx, vcore_id);
+
+	    if (specified_cpu != NULL) {
+		PrintError(vm, VCORE_NONE, "CPU was specified explicitly (%d). HARD ERROR\n", core_idx);
+		v3_stop_vm(vm);
+		return -1;
+	    }
+
+	    continue;
+
+	}
+
+	core->pcpu_id = core_idx;
+	vcore_id--;
+    }
+
+    // Some virtual cores could not be placed.
+    if (vcore_id >= 0) {
+	v3_stop_vm(vm);
+	return -1;
+    }
+
+    return 0;
+
+}
+
+/* Per-core admission for the default mapper: every core is admitted. */
+int default_mapper_admit_core(struct v3_vm_info * vm, int vcore_id, int target_cpu){
+    return 0;
+}
+
+
+/* Whole-VM admission for the default mapper: every VM is admitted. */
+int default_mapper_admit(struct v3_vm_info *vm){
+    return 0;
+}
+
+
+/* Built-in "default" cpu_mapper descriptor; unused hooks stay NULL
+ * (the framework treats NULL hooks as no-ops). */
+static struct vm_cpu_mapper_impl default_mapper_impl = {
+    .name       = "default",
+    .init       = NULL,
+    .deinit     = NULL,
+    .vm_init    = default_mapper_vm_init,
+    .vm_deinit  = NULL,
+    .admit      = default_mapper_admit,
+    .admit_core = default_mapper_admit_core
+};
+
+/*
+ * Register the built-in "default" cpu_mapper.
+ * Propagates the registration result (the original discarded it and
+ * always reported success, masking registry failures).
+ */
+static int create_default_cpu_mapper()
+{
+    return v3_register_cpu_mapper(&default_mapper_impl);
+}