-/*
- * This file is part of the Palacios Virtual Machine Monitor developed
- * by the V3VEE Project with funding from the United States National
- * Science Foundation and the Department of Energy.
- *
- * The V3VEE Project is a joint project between Northwestern University
- * and the University of New Mexico. You can find out more at
- * http://www.v3vee.org
- *
- * Copyright (c) 2013, Oscar Mondragon <omondrag@cs.unm.edu>
- * Copyright (c) 2013, The V3VEE Project <http://www.v3vee.org>
- * All rights reserved.
- *
- * Author: Oscar Mondragon <omondrag@cs.unm.edu>
- * Patrick G. Bridges <bridges@cs.unm.edu>
- *
- * This is free software. You are permitted to use,
- * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
- */
-
-#ifndef __VMM_SCHEDULER_H__
-#define __VMM_SCHEDULER_H__
-
-struct vm_scheduler_impl {
- char *name;
- int (*init)();
- int (*deinit)();
- int (*vm_init)(struct v3_vm_info *vm);
- int (*vm_deinit)(struct v3_vm_info *vm);
- int (*core_init)(struct guest_info *vm);
- int (*core_deinit)(struct guest_info *vm);
- void (*schedule)(struct guest_info *vm);
- void (*yield)(struct guest_info *vm, int usec);
- int (*admit)(struct v3_vm_info *vm);
- int (*remap)(struct v3_vm_info *vm);
- int (*dvfs)(struct v3_vm_info *vm);
-};
-
-struct vm_sched_state {
- struct vm_scheduler *sched;
- void *priv_data;
-};
-
-struct vm_core_sched_state {
- struct vm_scheduler *sched;
- void *priv_data;
-};
-
-void v3_schedule(struct guest_info *core);
-void v3_yield(struct guest_info *core, int usec);
-
-int v3_scheduler_register_vm(struct v3_vm_info *vm);
-int v3_scheduler_register_core(struct guest_info *vm); /* ? */
-int v3_scheduler_admit_vm(struct v3_vm_info *vm);
-
-void v3_scheduler_remap_notify(struct v3_vm_info *vm);
-void v3_scheduler_dvfs_notify(struct v3_vm_info *vm);
-
-int V3_init_scheduling();
-int v3_register_scheduler(struct vm_scheduler_impl *vm);
-struct vm_scheduler_impl *v3_scheduler_lookup(char *name);
-int V3_enable_scheduler();
-
-#endif /* __VMM_SCHEDULER_H__ */
+/*\r
+ * This file is part of the Palacios Virtual Machine Monitor developed\r
+ * by the V3VEE Project with funding from the United States National \r
+ * Science Foundation and the Department of Energy. \r
+ *\r
+ * The V3VEE Project is a joint project between Northwestern University\r
+ * and the University of New Mexico. You can find out more at \r
+ * http://www.v3vee.org\r
+ *\r
+ * Copyright (c) 2013, Oscar Mondragon <omondrag@cs.unm.edu>\r
+ * Copyright (c) 2013, Patrick G. Bridges <bridges@cs.unm.edu>\r
+ * Copyright (c) 2013, The V3VEE Project <http://www.v3vee.org> \r
+ * All rights reserved.\r
+ *\r
+ * Author: Oscar Mondragon <omondrag@cs.unm.edu>\r
+ * Patrick G. Bridges <bridges@cs.unm.edu>\r
+ *\r
+ * This is free software. You are permitted to use,\r
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".\r
+ */\r
+\r
+#ifndef __VMM_SCHEDULER_H__\r
+#define __VMM_SCHEDULER_H__\r
+\r
+/*\r
+ * Interface that a Palacios scheduler implementation provides.\r
+ * Unused hooks may be NULL; the generic dispatch layer in\r
+ * vmm_scheduler.c checks each pointer before calling it.\r
+ */\r
+struct vm_scheduler_impl {\r
+	char *name;                              // Unique name; used as the key in the scheduler hashtable\r
+	int (*init)();                           // Global init, invoked from V3_enable_scheduler()\r
+	int (*deinit)();\r
+	int (*vm_init)(struct v3_vm_info *vm);   // Per-VM registration hook\r
+	int (*vm_deinit)(struct v3_vm_info *vm);\r
+	int (*core_init)(struct guest_info *vm);   // Per-core registration hook\r
+	int (*core_deinit)(struct guest_info *vm);\r
+	void (*schedule)(struct guest_info *vm);   // Called at each scheduling decision point\r
+	void (*yield)(struct guest_info *vm, int usec);  // usec < 0 => untimed yield (see host_sched_yield)\r
+	int (*admit)(struct v3_vm_info *vm);     // Admission control for a VM\r
+	int (*remap)(struct v3_vm_info *vm);\r
+	int (*dvfs)(struct v3_vm_info *vm);\r
+};\r
+\r
+//struct vm_sched_state {\r
+// struct vm_scheduler *sched;\r
+// void *priv_data;\r
+//};\r
+\r
+//struct vm_core_sched_state {\r
+// struct vm_scheduler *sched;\r
+// void *priv_data;\r
+//};\r
+\r
+/* Generic entry points: dispatch to the currently enabled scheduler. */\r
+void v3_schedule(struct guest_info *core);\r
+void v3_yield(struct guest_info *core, int usec);\r
+\r
+int v3_scheduler_register_vm(struct v3_vm_info *vm);\r
+int v3_scheduler_register_core(struct guest_info *core);\r
+int v3_scheduler_admit_vm(struct v3_vm_info *vm);\r
+\r
+/* Renamed (and retyped) to match the definitions in vmm_scheduler.c,\r
+ * which are v3_scheduler_notify_remap/v3_scheduler_notify_dvfs and\r
+ * return int (0 on success). The previous declarations\r
+ * (v3_scheduler_remap_notify/v3_scheduler_dvfs_notify, void) had no\r
+ * matching definitions. */\r
+int v3_scheduler_notify_remap(struct v3_vm_info *vm);\r
+int v3_scheduler_notify_dvfs(struct v3_vm_info *vm);\r
+\r
+/* Framework setup: registration table creation, lookup, and activation. */\r
+int V3_init_scheduling();\r
+int v3_register_scheduler(struct vm_scheduler_impl *s);\r
+struct vm_scheduler_impl *v3_scheduler_lookup(char *name);\r
+int V3_enable_scheduler();\r
+\r
+#endif /* __VMM_SCHEDULER_H__ */\r
* http://www.v3vee.org\r
*\r
* Copyright (c) 2013, Oscar Mondragon <omondrag@cs.unm.edu>\r
+ * Copyright (c) 2013, Patrick G. Bridges <bridges@cs.unm.edu>\r
* Copyright (c) 2013, The V3VEE Project <http://www.v3vee.org> \r
* All rights reserved.\r
*\r
#include <palacios/vmm_hashtable.h>\r
#include <palacios/vmm_config.h>\r
#include <palacios/vmm_extensions.h>\r
-#include <palacios/vmm_edf_sched.h>\r
-\r
+#include <palacios/vmm_rbtree.h>\r
\r
\r
#ifndef V3_CONFIG_DEBUG_EDF_SCHED\r
#define MAX_SLICE 1000000000\r
#define MIN_SLICE 10000\r
#define CPU_PERCENT 100\r
+typedef uint64_t time_us;\r
+\r
+/* \r
+ * Per-core EDF Scheduling information \r
+ */\r
+\r
+struct vm_core_edf_sched {\r
+    struct guest_info *info;   // Core struct\r
+    struct rb_node node;       // red-black tree node\r
+    time_us period;            // Amount of time (us) during which the core may receive a CPU allocation\r
+    time_us slice;             // Minimum amount of time (us) received for the core during each period\r
+    time_us current_deadline;  // Time (us) at which current core period ends\r
+    time_us used_time;         // Amount of time (us) of the slice used within the current period\r
+    time_us last_wakeup_time;  // Time at which the last wakeup started for this core\r
+    time_us remaining_time;    // Remaining time (us) before current core period ends (before current deadline)\r
+    bool extra_time;           // Specifies if the virtual core is eligible to receive extra CPU time\r
+    int miss_deadline;         // Number of times the core has missed its deadline\r
+    time_us total_time;        // Total scheduled time for this core. For now used for debugging purposes\r
+    int slice_overuse;         // Statistical purposes\r
+    time_us extra_time_given;  // Statistical\r
+};\r
+\r
+/* \r
+ * Scheduler configuration\r
+ */\r
+\r
+struct vm_edf_sched_config {\r
+    time_us min_slice;   // Minimum allowed slice\r
+    time_us max_slice;   // Maximum allowed slice\r
+    time_us min_period;  // Minimum allowed period\r
+    time_us max_period;  // Maximum allowed period\r
+    int cpu_percent;     // Percentage of CPU utilization for the scheduler in each physical CPU (100 or less)\r
+    \r
+};\r
+\r
+/* \r
+ * Run queue structure. Per-logical core data structure used to keep the runnable virtual cores (threads) allocated to that logical core \r
+ * Contains a pointer to the red black tree, the structure of configuration options and other info\r
+ */\r
+\r
+struct vm_edf_rq{\r
+    \r
+    //int cpu_id;                            // Physical CPU id\r
+    int cpu_u;                               // CPU utilization (must be less than or equal to the cpu_percent in vm_edf_sched_config)\r
+    struct rb_root vCPUs_tree;               // Red-Black Tree of runnable virtual cores\r
+    struct vm_edf_sched_config edf_config;   // Scheduling configuration structure\r
+    int nr_vCPU;                             // Number of cores in the runqueue\r
+    struct vm_core_edf_sched *curr_vCPU;     // Currently running vCPU\r
+    struct rb_node *rb_leftmost;             // vCPU with the earliest deadline (leftmost in the tree)\r
+    time_us last_sched_time;                 // statistical purposes\r
+};\r
+\r
+/* \r
+ * Basic functions for scheduling \r
+ */\r
+\r
+int v3_init_edf_scheduling();\r
+\r
+\r
\r
\r
/*\r
\r
\r
/*\r
- * edf_sched_init: Initialize the run queue\r
+ * priv_data_init: Initialize the run queue\r
*/\r
\r
int \r
-edf_sched_init(struct v3_vm_info *vm){\r
+priv_data_init(struct v3_vm_info *vm){\r
\r
- PrintDebug(vm, VCORE_NONE,"EDF Sched. Initializing vm %s\n", vm->name);\r
+ PrintDebug(vm, VCORE_NONE,"EDF Sched. Initializing EDF Scheduling \n");\r
\r
- struct vm_sched_state *sched_state = &vm->sched; \r
- sched_state->priv_data = V3_Malloc( vm->avail_cores * sizeof(struct vm_edf_rq));\r
+ vm->sched_priv_data = V3_Malloc( vm->avail_cores * sizeof(struct vm_edf_rq));\r
\r
- if (!sched_state->priv_data) {\r
- PrintError(vm, VCORE_NONE,"Cannot allocate in priv_data in edf_sched_init\n");\r
+ if (!vm->sched_priv_data) {\r
+ PrintError(vm, VCORE_NONE,"Cannot allocate in priv_data in priv_data_init\n");\r
return -1;\r
}\r
\r
int lcore = 0;\r
\r
- PrintDebug(vm, VCORE_NONE,"EDF Sched. edf_sched_init. Available cores %d\n", vm->avail_cores);\r
+ PrintDebug(vm, VCORE_NONE,"EDF Sched. priv_data_init. Available cores %d\n", vm->avail_cores);\r
\r
for(lcore = 0; lcore < vm->avail_cores ; lcore++){\r
\r
- PrintDebug(vm, VCORE_NONE,"EDF Sched. edf_sched_init. Initializing logical core %d\n", lcore);\r
+ PrintDebug(vm, VCORE_NONE,"EDF Sched. priv_data_init. Initializing logical core %d\n", lcore);\r
\r
- struct vm_edf_rq * edf_rq_list = (struct vm_edf_rq *) sched_state->priv_data;\r
+ struct vm_edf_rq * edf_rq_list = (struct vm_edf_rq *)vm->sched_priv_data;\r
struct vm_edf_rq * edf_rq = &edf_rq_list[lcore];\r
\r
edf_rq->vCPUs_tree = RB_ROOT;\r
\r
}\r
\r
-\r
/*\r
* is_admissible_core: Decides if a core is admited to the red black tree according with \r
* the admisibility formula.\r
else\r
return false; \r
\r
+return true;\r
}\r
\r
\r
\r
struct vm_edf_rq * get_runqueue(struct guest_info *info){\r
\r
- struct vm_edf_rq *runqueue_list = (struct vm_edf_rq *) info->vm_info->sched.priv_data;\r
+ struct vm_edf_rq *runqueue_list = (struct vm_edf_rq *) info->vm_info->sched_priv_data;\r
struct vm_edf_rq *runqueue = &runqueue_list[info->pcpu_id]; \r
return runqueue;\r
}\r
static void \r
wakeup_core(struct guest_info *info){\r
\r
- struct vm_core_edf_sched *core = info->core_sched.priv_data;\r
+ struct vm_core_edf_sched *core = info->sched_priv_data;\r
struct vm_edf_rq *runqueue = get_runqueue(info);\r
\r
if (!info->core_thread) {\r
PrintError(info->vm_info, info,"Cannot allocate private_data in edf_sched_core_init\n");\r
return -1;\r
}\r
- info->core_sched.priv_data = core_edf;\r
+ info->sched_priv_data = core_edf;\r
\r
// Default configuration if not specified in configuration file \r
\r
static void \r
adjust_slice(struct guest_info * info, int used_time, int extra_time)\r
{\r
- struct vm_core_edf_sched *core = info->core_sched.priv_data;\r
+ struct vm_core_edf_sched *core = info->sched_priv_data;\r
struct vm_edf_rq *runqueue = get_runqueue(info);\r
\r
core->used_time = used_time;\r
static void \r
run_next_core(struct guest_info *info, int used_time, int usec)\r
{\r
- struct vm_core_edf_sched *core = info->core_sched.priv_data;\r
+ struct vm_core_edf_sched *core = info->sched_priv_data;\r
struct vm_core_edf_sched *next_core;\r
struct vm_edf_rq *runqueue = get_runqueue(info);\r
\r
\r
uint64_t host_time = get_curr_host_time(&info->time_state);\r
struct vm_edf_rq *runqueue = get_runqueue(info); \r
- struct vm_core_edf_sched *core = (struct vm_core_edf_sched *) info->core_sched.priv_data;\r
+ struct vm_core_edf_sched *core = (struct vm_core_edf_sched *) info->sched_priv_data;\r
\r
uint64_t used_time = 0;\r
if(core->last_wakeup_time != 0) \r
int \r
edf_sched_deinit(struct v3_vm_info *vm)\r
{\r
-\r
- struct vm_scheduler * sched = vm->sched.sched;\r
- void *priv_data = vm->sched.priv_data;\r
+ void *priv_data = vm->sched_priv_data;\r
\r
- if (sched) \r
- V3_Free(sched); \r
-\r
if (priv_data) \r
V3_Free(priv_data);\r
\r
int \r
edf_sched_core_deinit(struct guest_info *core)\r
{\r
-\r
- struct vm_scheduler * sched = core->core_sched.sched;\r
- void *priv_data = core->core_sched.priv_data;\r
+ void *priv_data = core->sched_priv_data;\r
\r
- if (sched) \r
- V3_Free(sched); \r
-\r
if (priv_data) \r
V3_Free(priv_data);\r
\r
return 0;\r
}\r
\r
+/* Per-VM init hook for EDF: nothing to do here; the per-core run queues\r
+ * are allocated later, in edf_sched_admit() via priv_data_init(). */\r
+int edf_sched_vm_init(struct v3_vm_info *vm){\r
+    return 0;\r
+}\r
+\r
+int edf_sched_admit(struct v3_vm_info *vm){\r
+\r
+    /*\r
+     * Initialize priv_data for the vm: \r
+     * For EDF this is done here because we need the parameter\r
+     * avail_core which is set in v3_start_vm before the\r
+     * v3_scheduler_admit_vm function is called.\r
+     */\r
+    \r
+    /* NOTE(review): priv_data_init() returns -1 when allocation of the\r
+     * per-core run queues fails, but its result is ignored here --\r
+     * consider propagating the failure so admission is rejected. */\r
+    priv_data_init(vm);\r
+\r
+    // TODO Admission\r
+    \r
+    return 0;\r
+}\r
+\r
+\r
static struct vm_scheduler_impl edf_sched = {\r
- .name = "edf",\r
- .init = edf_sched_init,\r
- .deinit = edf_sched_deinit,\r
- .core_init = edf_sched_core_init,\r
- .core_deinit = edf_sched_core_deinit,\r
- .schedule = edf_sched_schedule,\r
- .yield = edf_sched_yield\r
+\r
+ .name = "edf",\r
+ .init = NULL,\r
+ .deinit = NULL,\r
+ .vm_init = edf_sched_vm_init,\r
+ .vm_deinit = NULL,\r
+ .core_init = edf_sched_core_init,\r
+ .core_deinit = edf_sched_core_deinit,\r
+ .schedule = edf_sched_schedule,\r
+ .yield = edf_sched_yield,\r
+ .admit = edf_sched_admit,\r
+ .remap = NULL,\r
+ .dvfs=NULL\r
};\r
\r
static int \r
ext_sched_edf_init() {\r
- \r
PrintDebug(VM_NONE, VCORE_NONE,"Sched. Creating (%s) scheduler\n",edf_sched.name);\r
return v3_register_scheduler(&edf_sched);\r
}\r
-/*
- * This file is part of the Palacios Virtual Machine Monitor developed
- * by the V3VEE Project with funding from the United States National
- * Science Foundation and the Department of Energy.
- *
- * The V3VEE Project is a joint project between Northwestern University
- * and the University of New Mexico. You can find out more at
- * http://www.v3vee.org
- *
- * Copyright (c) 2013, Oscar Mondragon <omondrag@cs.unm.edu>
- * Copyright (c) 2013, The V3VEE Project <http://www.v3vee.org>
- * All rights reserved.
- *
- * Author: Oscar Mondragon <omondrag@cs.unm.edu>
- * Patrick G. Bridges <bridges@cs.unm.edu>
- *
- * This is free software. You are permitted to use,
- * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
- */
-
-#include <palacios/vmm.h>
-#include <palacios/vm_guest.h>
-#include <palacios/vmm_scheduler.h>
-#include <palacios/vmm_hashtable.h>
-
-#ifndef V3_CONFIG_DEBUG_SCHEDULER
-#undef PrintDebug
-#define PrintDebug(fmt, args...)
-#endif
-
-static char default_strategy[] = "host";
-static struct hashtable * master_scheduler_table = NULL;
-static int create_host_scheduler();
-
-static struct vm_scheduler_impl *scheduler = NULL;
-
-static uint_t scheduler_hash_fn(addr_t key) {
- char * name = (char *)key;
- return v3_hash_buffer((uint8_t *)name, strlen(name));
-}
-
-static int scheduler_eq_fn(addr_t key1, addr_t key2) {
- char * name1 = (char *)key1;
- char * name2 = (char *)key2;
-
- return (strcmp(name1, name2) == 0);
-}
-
-int V3_init_scheduling() {
-
- PrintDebug(VM_NONE, VCORE_NONE,"Initializing scheduler");
-
- master_scheduler_table = v3_create_htable(0, scheduler_hash_fn, scheduler_eq_fn);
- return create_host_scheduler();
-}
-
-
-int v3_register_scheduler(struct vm_scheduler_impl *s) {
-
- PrintDebug(VM_NONE, VCORE_NONE,"Registering Scheduler (%s)\n", s->name);
-
- if (v3_htable_search(master_scheduler_table, (addr_t)(s->name))) {
- PrintError(VM_NONE, VCORE_NONE, "Multiple instances of scheduler (%s)\n", s->name);
- return -1;
- }
- PrintDebug(VM_NONE, VCORE_NONE,"Registering Scheduler (%s) 2\n", s->name);
-
-
- if (v3_htable_insert(master_scheduler_table,
- (addr_t)(s->name),
- (addr_t)(s)) == 0) {
- PrintError(VM_NONE, VCORE_NONE, "Could not register scheduler (%s)\n", s->name);
- return -1;
- }
-
- PrintDebug(VM_NONE, VCORE_NONE,"Scheduler registered\n");
- return 0;
-}
-
-struct vm_scheduler_impl *v3_scheduler_lookup(char *name)
-{
- return (struct vm_scheduler_impl *)v3_htable_search(master_scheduler_table, (addr_t)(name));
-}
-
-int V3_enable_scheduler() {
- /* XXX Lookup the specified scheduler to use for palacios and use it */
- scheduler = v3_scheduler_lookup(default_strategy);
- if (!scheduler) {
- PrintError(VM_NONE, VCORE_NONE,"Specified Palacios scheduler \"%s\" not found.\n", default_strategy);
- return -1;
- }
- if (scheduler->init) {
- return scheduler->init();
- } else {
- return 0;
- }
-}
-
-int v3_scheduler_register_vm(struct v3_vm_info *vm) {
- if (scheduler->vm_init) {
- return scheduler->vm_init(vm);
- } else {
- return 0;
- }
-}
-int v3_scheduler_register_core(struct guest_info *core) {
- if (scheduler->core_init) {
- return scheduler->core_init(core);
- } else {
- return 0;
- }
-}
-int v3_scheduler_admit_vm(struct v3_vm_info *vm) {
- if (scheduler->admit) {
- return scheduler->admit(vm);
- } else {
- return 0;
- }
-}
-int v3_scheduler_notify_remap(struct v3_vm_info *vm) {
- if (scheduler->remap) {
- return scheduler->remap(vm);
- } else {
- return 0;
- }
-}
-int v3_scheduler_notify_dvfs(struct v3_vm_info *vm) {
- if (scheduler->dvfs) {
- return scheduler->dvfs(vm);
- } else {
- return 0;
- }
-}
-void v3_schedule(struct guest_info *core) {
- if (scheduler->schedule) {
- scheduler->schedule(core);
- }
- return;
-}
-void v3_yield(struct guest_info *core, int usec) {
- if (scheduler->yield) {
- scheduler->yield(core, usec);
- }
- return;
-}
-
-int host_sched_vm_init(struct v3_vm_info *vm)
-{
-
- PrintDebug(vm, VCORE_NONE,"Sched. host_sched_init\n");
-
- char * schedule_hz_str = v3_cfg_val(vm->cfg_data->cfg, "schedule_hz");
- uint32_t sched_hz = 100;
-
-
- if (schedule_hz_str) {
- sched_hz = atoi(schedule_hz_str);
- }
-
- PrintDebug(vm, VCORE_NONE,"CPU_KHZ = %d, schedule_freq=%p\n", V3_CPU_KHZ(),
- (void *)(addr_t)sched_hz);
-
- uint64_t yield_cycle_period = (V3_CPU_KHZ() * 1000) / sched_hz;
- vm->sched_priv_data = (void *)yield_cycle_period;
-
- return 0;
-}
-
-int host_sched_core_init(struct guest_info *core)
-{
- PrintDebug(core->vm_info, core,"Sched. host_sched_core_init\n");
-
- uint64_t t = v3_get_host_time(&core->time_state);
- core->sched_priv_data = (void *)t;
-
- return 0;
-}
-
-void host_sched_schedule(struct guest_info *core)
-{
- uint64_t cur_cycle;
- cur_cycle = v3_get_host_time(&core->time_state);
-
- if (cur_cycle > ( (uint64_t)core->sched_priv_data + (uint64_t)core->vm_info->sched_priv_data)) {
-
- V3_Yield();
-
- uint64_t yield_start_cycle = (uint64_t) core->sched_priv_data;
- yield_start_cycle += (uint64_t)core->vm_info->sched_priv_data;
- core->sched_priv_data = (void *)yield_start_cycle;
-
- }
-}
-
-/*
- * unconditional cpu yield
- * if the yielding thread is a guest context, the guest quantum is reset on resumption
- * Non guest context threads should call this function with a NULL argument
- *
- * usec <0 => the non-timed yield is used
- * usec >=0 => the timed yield is used, which also usually implies interruptible
- */
-void host_sched_yield(struct guest_info * core, int usec) {
- uint64_t yield_start_cycle;
- if (usec < 0) {
- V3_Yield();
- } else {
- V3_Sleep(usec);
- }
- yield_start_cycle = (uint64_t) core->sched_priv_data
- + (uint64_t)core->vm_info->sched_priv_data;
- core->sched_priv_data = (void *)yield_start_cycle;
-}
-
-
-int host_sched_admit(struct v3_vm_info *vm){
- return 0;
-}
-
-static struct vm_scheduler_impl host_sched_impl = {
- .name = "host",
- .init = NULL,
- .deinit = NULL,
- .vm_init = host_sched_vm_init,
- .vm_deinit = NULL,
- .core_init = host_sched_core_init,
- .core_deinit = NULL,
- .schedule = host_sched_schedule,
- .yield = host_sched_yield,
- .admit = host_sched_admit,
- .remap = NULL,
- .dvfs=NULL
-};
-
-static int create_host_scheduler()
-{
- v3_register_scheduler(&host_sched_impl);
- return 0;
-}
+/* \r
+ * This file is part of the Palacios Virtual Machine Monitor developed\r
+ * by the V3VEE Project with funding from the United States National \r
+ * Science Foundation and the Department of Energy. \r
+ *\r
+ * The V3VEE Project is a joint project between Northwestern University\r
+ * and the University of New Mexico. You can find out more at \r
+ * http://www.v3vee.org\r
+ *\r
+ * Copyright (c) 2013, Oscar Mondragon <omondrag@cs.unm.edu> \r
+ * Copyright (c) 2013, Patrick G. Bridges <bridges@cs.unm.edu>\r
+ * Copyright (c) 2013, The V3VEE Project <http://www.v3vee.org> \r
+ * All rights reserved.\r
+ *\r
+ * Author: Oscar Mondragon <omondrag@cs.unm.edu>\r
+ * Patrick G. Bridges <bridges@cs.unm.edu>\r
+ *\r
+ * This is free software. You are permitted to use,\r
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".\r
+ */\r
+\r
+#include <palacios/vmm.h>\r
+#include <palacios/vm_guest.h>\r
+#include <palacios/vmm_scheduler.h>\r
+#include <palacios/vmm_hashtable.h>\r
+\r
+#ifndef V3_CONFIG_DEBUG_SCHEDULER\r
+#undef PrintDebug\r
+#define PrintDebug(fmt, args...)\r
+#endif\r
+\r
+static char default_strategy[] = "host";\r
+static struct hashtable * master_scheduler_table = NULL;\r
+static int create_host_scheduler();\r
+\r
+static struct vm_scheduler_impl *scheduler = NULL;\r
+\r
+/* Hash function for the scheduler name table: hashes the NUL-terminated name string. */\r
+static uint_t scheduler_hash_fn(addr_t key) {\r
+    char * name = (char *)key;\r
+    return v3_hash_buffer((uint8_t *)name, strlen(name));\r
+}\r
+\r
+/* Key-equality function for the scheduler name table: string comparison of names. */\r
+static int scheduler_eq_fn(addr_t key1, addr_t key2) {\r
+    char * name1 = (char *)key1;\r
+    char * name2 = (char *)key2;\r
+\r
+    return (strcmp(name1, name2) == 0);\r
+}\r
+\r
+/*\r
+ * Create the name -> vm_scheduler_impl registration table and register\r
+ * the built-in "host" scheduler. Returns 0 on success, -1 on failure.\r
+ */\r
+int V3_init_scheduling() {\r
+    \r
+    PrintDebug(VM_NONE, VCORE_NONE,"Initializing scheduler\n");\r
+\r
+    master_scheduler_table = v3_create_htable(0, scheduler_hash_fn, scheduler_eq_fn);\r
+\r
+    /* Fail loudly instead of letting v3_register_scheduler deref a NULL table */\r
+    if (!master_scheduler_table) {\r
+	PrintError(VM_NONE, VCORE_NONE, "Could not create scheduler hashtable\n");\r
+	return -1;\r
+    }\r
+\r
+    return create_host_scheduler();\r
+}\r
+\r
+\r
+/*\r
+ * Register a scheduler implementation under s->name.\r
+ * Returns 0 on success, -1 if a scheduler with the same name is already\r
+ * registered or the hashtable insert fails.\r
+ * NOTE(review): only the pointer is stored -- s must outlive the table.\r
+ */\r
+int v3_register_scheduler(struct vm_scheduler_impl *s) {\r
+\r
+    PrintDebug(VM_NONE, VCORE_NONE,"Registering Scheduler (%s)\n", s->name);\r
+\r
+    // Reject duplicate registrations by name\r
+    if (v3_htable_search(master_scheduler_table, (addr_t)(s->name))) {\r
+	PrintError(VM_NONE, VCORE_NONE, "Multiple instances of scheduler (%s)\n", s->name);\r
+	return -1;\r
+    }\r
+    \r
+    if (v3_htable_insert(master_scheduler_table,\r
+			 (addr_t)(s->name),\r
+			 (addr_t)(s)) == 0) {\r
+	PrintError(VM_NONE, VCORE_NONE, "Could not register scheduler (%s)\n", s->name);\r
+	return -1;\r
+    }\r
+\r
+    return 0;\r
+}\r
+\r
+/* Look up a registered scheduler by name; returns NULL if none is registered under that name. */\r
+struct vm_scheduler_impl *v3_scheduler_lookup(char *name)\r
+{\r
+    return (struct vm_scheduler_impl *)v3_htable_search(master_scheduler_table, (addr_t)(name));\r
+}\r
+\r
+/*\r
+ * Select and activate the Palacios scheduler (currently the compiled-in\r
+ * default_strategy). Returns the scheduler's init() result, 0 if it has\r
+ * no init hook, or -1 if the strategy is not registered.\r
+ */\r
+int V3_enable_scheduler() {\r
+    /* XXX Lookup the specified scheduler to use for palacios and use it */\r
+    \r
+    scheduler = v3_scheduler_lookup(default_strategy);\r
+\r
+    if (!scheduler) {\r
+	PrintError(VM_NONE, VCORE_NONE,"Specified Palacios scheduler \"%s\" not found.\n", default_strategy);\r
+	return -1;\r
+    }\r
+\r
+    /* BUGFIX: previously dereferenced scheduler->name BEFORE the NULL\r
+     * check above, crashing when the lookup failed. */\r
+    PrintDebug(VM_NONE, VCORE_NONE,"Sched. Scheduler %s found\n",scheduler->name);\r
+    \r
+    if (scheduler->init) {\r
+	return scheduler->init();\r
+    } else {\r
+	return 0;\r
+    }\r
+}\r
+\r
+/*\r
+ * Generic dispatch wrappers: each forwards to the active scheduler's\r
+ * corresponding hook when it is non-NULL, and otherwise succeeds (or\r
+ * does nothing) trivially. All of them assume V3_enable_scheduler()\r
+ * has already set 'scheduler'.\r
+ */\r
+int v3_scheduler_register_vm(struct v3_vm_info *vm) {\r
+    if (scheduler->vm_init) {\r
+	return scheduler->vm_init(vm);\r
+    } else {\r
+	return 0;\r
+    }\r
+}\r
+int v3_scheduler_register_core(struct guest_info *core) {\r
+    if (scheduler->core_init) {\r
+	return scheduler->core_init(core);\r
+    } else {\r
+	return 0;\r
+    }\r
+}\r
+int v3_scheduler_admit_vm(struct v3_vm_info *vm) {\r
+    if (scheduler->admit) {\r
+	return scheduler->admit(vm);\r
+    } else {\r
+	return 0;\r
+    }\r
+}\r
+int v3_scheduler_notify_remap(struct v3_vm_info *vm) {\r
+    if (scheduler->remap) {\r
+	return scheduler->remap(vm);\r
+    } else {\r
+	return 0;\r
+    }\r
+}\r
+int v3_scheduler_notify_dvfs(struct v3_vm_info *vm) {\r
+    if (scheduler->dvfs) {\r
+	return scheduler->dvfs(vm);\r
+    } else {\r
+	return 0;\r
+    }\r
+}\r
+void v3_schedule(struct guest_info *core) {\r
+    if (scheduler->schedule) {\r
+	scheduler->schedule(core);\r
+    }\r
+    return;\r
+}\r
+void v3_yield(struct guest_info *core, int usec) {\r
+    if (scheduler->yield) {\r
+	scheduler->yield(core, usec);\r
+    } \r
+    return;\r
+}\r
+\r
+/*\r
+ * Host ("pass-through") scheduler per-VM init: derive the yield period\r
+ * in CPU cycles from the optional "schedule_hz" config value (default\r
+ * 100 Hz) and stash it, cast to a pointer, in vm->sched_priv_data.\r
+ */\r
+int host_sched_vm_init(struct v3_vm_info *vm)\r
+{\r
+\r
+    PrintDebug(vm, VCORE_NONE,"Sched. host_sched_init\n"); \r
+\r
+    char * schedule_hz_str = v3_cfg_val(vm->cfg_data->cfg, "schedule_hz");\r
+    uint32_t sched_hz = 100;   // default scheduling frequency\r
+\r
+\r
+    // NOTE(review): atoi() silently yields 0 on malformed input, which\r
+    // would divide by zero below -- consider strtol() with validation.\r
+    if (schedule_hz_str) {\r
+	sched_hz = atoi(schedule_hz_str);\r
+    }\r
+\r
+    PrintDebug(vm, VCORE_NONE,"CPU_KHZ = %d, schedule_freq=%p\n", V3_CPU_KHZ(), \r
+	       (void *)(addr_t)sched_hz);\r
+\r
+    // Cycles between forced yields for this VM's cores\r
+    uint64_t yield_cycle_period = (V3_CPU_KHZ() * 1000) / sched_hz;\r
+    vm->sched_priv_data = (void *)yield_cycle_period; \r
+\r
+    return 0;\r
+}\r
+\r
+/* Host scheduler per-core init: record the current host time (cycles)\r
+ * in core->sched_priv_data as the start of the core's quantum. */\r
+int host_sched_core_init(struct guest_info *core)\r
+{\r
+    PrintDebug(core->vm_info, core,"Sched. host_sched_core_init\n"); \r
+\r
+    uint64_t t = v3_get_host_time(&core->time_state); \r
+    core->sched_priv_data = (void *)t;\r
+\r
+    return 0;\r
+}\r
+\r
+/*\r
+ * Host scheduler decision point: yield the physical CPU once the core\r
+ * has run at least one yield period (vm->sched_priv_data, in cycles)\r
+ * past the last yield start time (core->sched_priv_data), then advance\r
+ * the quantum start by exactly one period.\r
+ */\r
+void host_sched_schedule(struct guest_info *core)\r
+{\r
+    uint64_t cur_cycle;\r
+    cur_cycle = v3_get_host_time(&core->time_state);\r
+\r
+    if (cur_cycle > ( (uint64_t)core->sched_priv_data + (uint64_t)core->vm_info->sched_priv_data)) {\r
+	\r
+	V3_Yield();\r
+	\r
+	uint64_t yield_start_cycle = (uint64_t) core->sched_priv_data;\r
+	yield_start_cycle += (uint64_t)core->vm_info->sched_priv_data;\r
+	core->sched_priv_data = (void *)yield_start_cycle;\r
+	\r
+    }\r
+}\r
+\r
+/* \r
+ * unconditional cpu yield \r
+ * if the yielding thread is a guest context, the guest quantum is reset on resumption \r
+ * Non guest context threads should call this function with a NULL argument\r
+ *\r
+ * usec <0 => the non-timed yield is used\r
+ * usec >=0 => the timed yield is used, which also usually implies interruptible\r
+ */\r
+/* See contract in the comment block above: core may be NULL when the\r
+ * caller is not a guest context; usec < 0 selects the untimed yield. */\r
+void host_sched_yield(struct guest_info * core, int usec) {\r
+    uint64_t yield_start_cycle;\r
+    if (usec < 0) {\r
+	V3_Yield();\r
+    } else {\r
+	V3_Sleep(usec);\r
+    }\r
+    /* BUGFIX: only reset the guest quantum when we actually have a\r
+     * core; previously a NULL core (explicitly permitted by the\r
+     * documented contract) was dereferenced unconditionally. */\r
+    if (core) {\r
+	yield_start_cycle = (uint64_t) core->sched_priv_data\r
+	    + (uint64_t)core->vm_info->sched_priv_data;\r
+	core->sched_priv_data = (void *)yield_start_cycle;\r
+    }\r
+}\r
+\r
+\r
+/* Host scheduler admission control: admit every VM unconditionally. */\r
+int host_sched_admit(struct v3_vm_info *vm){\r
+    return 0;\r
+}\r
+\r
+/* Default "host" scheduler: defers actual scheduling to the host OS\r
+ * via V3_Yield()/V3_Sleep() at a configurable frequency. Registered at\r
+ * init time and selected by default_strategy. */\r
+static struct vm_scheduler_impl host_sched_impl = {\r
+    .name = "host",\r
+    .init = NULL,\r
+    .deinit = NULL,\r
+    .vm_init = host_sched_vm_init,\r
+    .vm_deinit = NULL,\r
+    .core_init = host_sched_core_init,\r
+    .core_deinit = NULL,\r
+    .schedule = host_sched_schedule,\r
+    .yield = host_sched_yield,\r
+    .admit = host_sched_admit,\r
+    .remap = NULL,\r
+    .dvfs=NULL\r
+};\r
+\r
+/* Register the built-in host scheduler. Propagates the registration\r
+ * result (previously discarded) so V3_init_scheduling() can fail. */\r
+static int create_host_scheduler()\r
+{\r
+    return v3_register_scheduler(&host_sched_impl);\r
+}\r