From: Oscar Mondragon
Date: Mon, 11 Feb 2013 21:19:30 +0000 (-0700)
Subject: Update of scheduling infrastructure and newest version of EDF scheduler
X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?p=palacios.git;a=commitdiff_plain;h=194c3ac8da18934654586e121a0facfb36b4bc1f

Update of scheduling infrastructure and newest version of EDF scheduler
---

diff --git a/Kconfig b/Kconfig
index 1d21516..5edd454 100644
--- a/Kconfig
+++ b/Kconfig
@@ -357,6 +357,13 @@ config DEBUG_TIME
 	help
 	  This turns on debugging of system time virtualization
 
+config DEBUG_SCHEDULER
+	bool "Scheduler"
+	default n
+	depends on DEBUG_ON
+	help
+	  This turns on debugging for the scheduler
+
 config DEBUG_IO
 	bool "IO"
 	default n
diff --git a/palacios/include/palacios/vmm_scheduler.h b/palacios/include/palacios/vmm_scheduler.h
index 3effce5..6f53010 100644
--- a/palacios/include/palacios/vmm_scheduler.h
+++ b/palacios/include/palacios/vmm_scheduler.h
@@ -1,64 +1,65 @@
-/*
- * This file is part of the Palacios Virtual Machine Monitor developed
- * by the V3VEE Project with funding from the United States National
- * Science Foundation and the Department of Energy.
- *
- * The V3VEE Project is a joint project between Northwestern University
- * and the University of New Mexico. You can find out more at
- * http://www.v3vee.org
- *
- * Copyright (c) 2013, Oscar Mondragon
- * Copyright (c) 2013, The V3VEE Project
- * All rights reserved.
- *
- * Author: Oscar Mondragon
- *         Patrick G. Bridges
- *
- * This is free software. You are permitted to use,
- * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
- */
-
-#ifndef __VMM_SCHEDULER_H__
-#define __VMM_SCHEDULER_H__
-
-struct vm_scheduler_impl {
-    char *name;
-    int (*init)();
-    int (*deinit)();
-    int (*vm_init)(struct v3_vm_info *vm);
-    int (*vm_deinit)(struct v3_vm_info *vm);
-    int (*core_init)(struct guest_info *vm);
-    int (*core_deinit)(struct guest_info *vm);
-    void (*schedule)(struct guest_info *vm);
-    void (*yield)(struct guest_info *vm, int usec);
-    int (*admit)(struct v3_vm_info *vm);
-    int (*remap)(struct v3_vm_info *vm);
-    int (*dvfs)(struct v3_vm_info *vm);
-};
-
-struct vm_sched_state {
-    struct vm_scheduler *sched;
-    void *priv_data;
-};
-
-struct vm_core_sched_state {
-    struct vm_scheduler *sched;
-    void *priv_data;
-};
-
-void v3_schedule(struct guest_info *core);
-void v3_yield(struct guest_info *core, int usec);
-
-int v3_scheduler_register_vm(struct v3_vm_info *vm);
-int v3_scheduler_register_core(struct guest_info *vm); /* ? */
-int v3_scheduler_admit_vm(struct v3_vm_info *vm);
-
-void v3_scheduler_remap_notify(struct v3_vm_info *vm);
-void v3_scheduler_dvfs_notify(struct v3_vm_info *vm);
-
-int V3_init_scheduling();
-int v3_register_scheduler(struct vm_scheduler_impl *vm);
-struct vm_scheduler_impl *v3_scheduler_lookup(char *name);
-int V3_enable_scheduler();
-
-#endif /* __VMM_SCHEDULER_H__ */
+/*
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National
+ * Science Foundation and the Department of Energy.
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico. You can find out more at
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2013, Oscar Mondragon
+ * Copyright (c) 2013, Patrick G. Bridges
+ * Copyright (c) 2013, The V3VEE Project
+ * All rights reserved.
+ *
+ * Author: Oscar Mondragon
+ *         Patrick G. Bridges
+ *
+ * This is free software. You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
+
+#ifndef __VMM_SCHEDULER_H__
+#define __VMM_SCHEDULER_H__
+
+struct vm_scheduler_impl {
+    char *name;
+    int (*init)();
+    int (*deinit)();
+    int (*vm_init)(struct v3_vm_info *vm);
+    int (*vm_deinit)(struct v3_vm_info *vm);
+    int (*core_init)(struct guest_info *vm);
+    int (*core_deinit)(struct guest_info *vm);
+    void (*schedule)(struct guest_info *vm);
+    void (*yield)(struct guest_info *vm, int usec);
+    int (*admit)(struct v3_vm_info *vm);
+    int (*remap)(struct v3_vm_info *vm);
+    int (*dvfs)(struct v3_vm_info *vm);
+};
+
+//struct vm_sched_state {
+//    struct vm_scheduler *sched;
+//    void *priv_data;
+//};
+
+//struct vm_core_sched_state {
+//    struct vm_scheduler *sched;
+//    void *priv_data;
+//};
+
+void v3_schedule(struct guest_info *core);
+void v3_yield(struct guest_info *core, int usec);
+
+int v3_scheduler_register_vm(struct v3_vm_info *vm);
+int v3_scheduler_register_core(struct guest_info *vm); /* ? */
+int v3_scheduler_admit_vm(struct v3_vm_info *vm);
+
+void v3_scheduler_remap_notify(struct v3_vm_info *vm);
+void v3_scheduler_dvfs_notify(struct v3_vm_info *vm);
+
+int V3_init_scheduling();
+int v3_register_scheduler(struct vm_scheduler_impl *vm);
+struct vm_scheduler_impl *v3_scheduler_lookup(char *name);
+int V3_enable_scheduler();
+
+#endif /* __VMM_SCHEDULER_H__ */
diff --git a/palacios/src/extensions/Kconfig b/palacios/src/extensions/Kconfig
index a8beae6..000d99f 100644
--- a/palacios/src/extensions/Kconfig
+++ b/palacios/src/extensions/Kconfig
@@ -12,7 +12,7 @@ config EXT_MACH_CHECK
 	help
 	  Provides a virtualized machine-check architecture
 
-config DEGUB_EXT_MACH_CHECK
+config DEBUG_EXT_MACH_CHECK
 	bool "Debug machine-check functionality"
 	default n
 	depends on EXT_MACH_CHECK
@@ -37,4 +37,9 @@ config EXT_SCHED_EDF
 	help
 	  Provides a full real-time EDF scheduler for VM cores
 
+config DEBUG_EXT_SCHED_EDF
+	bool "Debugging for EDF Real-time Scheduler"
+	default n
+	depends on DEBUG_ON && EXT_SCHED_EDF
+
 endmenu
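The header above reduces a scheduler to a named table of callbacks: an extension fills in a struct vm_scheduler_impl and registers it by name. As a rough sketch of the pattern (a hypothetical "noop" policy, not part of this patch; the EDF and host schedulers later in the patch do the same thing with real callbacks, and the dispatch code treats NULL entries as no-ops):

    #include <palacios/vmm_scheduler.h>   /* the interface shown above */

    static void noop_schedule(struct guest_info *core) {
        /* Policy decision would go here; this placeholder never preempts. */
    }

    static struct vm_scheduler_impl noop_sched = {
        .name     = "noop",          /* key used by v3_scheduler_lookup() */
        .schedule = noop_schedule,
        /* remaining callbacks left NULL; the dispatchers skip them */
    };

    static int ext_sched_noop_init() {
        return v3_register_scheduler(&noop_sched);
    }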
 *
@@ -25,8 +26,7 @@
 #include
 #include
 #include
-#include
-
+#include
 
 #ifndef V3_CONFIG_DEBUG_EDF_SCHED
@@ -59,6 +59,65 @@
 #define MAX_SLICE 1000000000
 #define MIN_SLICE 10000
 #define CPU_PERCENT 100
+typedef uint64_t time_us;
+
+/*
+ * Per-core EDF Scheduling information
+ */
+
+struct vm_core_edf_sched {
+    struct guest_info *info;     // Core struct
+    struct rb_node node;         // red-black tree node
+    time_us period;              // Amount of time (us) during which the core may receive a CPU allocation
+    time_us slice;               // Minimum amount of time (us) received by the core during each period
+    time_us current_deadline;    // Time (us) at which current core period ends
+    time_us used_time;           // Amount of time (us) of the slice used within the current period
+    time_us last_wakeup_time;    // Time at which the last wakeup started for this core
+    time_us remaining_time;      // Remaining time (us) before current core period ends (before current deadline)
+    bool extra_time;             // Specifies if the virtual core is eligible to receive extra CPU time
+    int miss_deadline;           // Number of times the core has missed its deadline
+    time_us total_time;          // Total scheduled time for this core. For now used for debugging purposes
+    int slice_overuse;           // Statistical purposes
+    time_us extra_time_given;    // Statistical
+};
+
+/*
+ * Scheduler configuration
+ */
+
+struct vm_edf_sched_config {
+    time_us min_slice;           // Minimum allowed slice
+    time_us max_slice;           // Maximum allowed slice
+    time_us min_period;          // Minimum allowed period
+    time_us max_period;          // Maximum allowed period
+    int cpu_percent;             // Percentage of CPU utilization for the scheduler in each physical CPU (100 or less)
+
+};
+
+/*
+ * Run queue structure. Per-logical-core data structure used to keep the runnable virtual cores (threads) allocated to that logical core.
+ * Contains a pointer to the red-black tree, the structure of configuration options, and other info.
+ */
+
+struct vm_edf_rq {
+
+    //int cpu_id;                          // Physical CPU id
+    int cpu_u;                             // CPU utilization (must be less than or equal to the cpu_percent in vm_edf_sched_config)
+    struct rb_root vCPUs_tree;             // Red-Black Tree
+    struct vm_edf_sched_config edf_config; // Scheduling configuration structure
+    int nr_vCPU;                           // Number of cores in the runqueue
+    struct vm_core_edf_sched *curr_vCPU;   // Currently running vCPU
+    struct rb_node *rb_leftmost;           // vCPU with the earliest deadline (leftmost in the tree)
+    time_us last_sched_time;               // statistical purposes
+};
+
+/*
+ * Basic functions for scheduling
+ */
+
+int v3_init_edf_scheduling();
+
 
 /*
@@ -77,31 +136,30 @@ init_edf_config(struct vm_edf_sched_config *edf_config){
 
 /*
- * edf_sched_init: Initialize the run queue
+ * priv_data_init: Initialize the run queue
  */
 
 int
-edf_sched_init(struct v3_vm_info *vm){
+priv_data_init(struct v3_vm_info *vm){
 
-    PrintDebug(vm, VCORE_NONE,"EDF Sched. Initializing vm %s\n", vm->name);
+    PrintDebug(vm, VCORE_NONE,"EDF Sched. Initializing EDF Scheduling \n");
 
-    struct vm_sched_state *sched_state = &vm->sched;
-    sched_state->priv_data = V3_Malloc( vm->avail_cores * sizeof(struct vm_edf_rq));
+    vm->sched_priv_data = V3_Malloc( vm->avail_cores * sizeof(struct vm_edf_rq));
 
-    if (!sched_state->priv_data) {
-	PrintError(vm, VCORE_NONE,"Cannot allocate priv_data in edf_sched_init\n");
+    if (!vm->sched_priv_data) {
+	PrintError(vm, VCORE_NONE,"Cannot allocate priv_data in priv_data_init\n");
 	return -1;
     }
 
     int lcore = 0;
 
-    PrintDebug(vm, VCORE_NONE,"EDF Sched. edf_sched_init. Available cores %d\n", vm->avail_cores);
+    PrintDebug(vm, VCORE_NONE,"EDF Sched. priv_data_init. Available cores %d\n", vm->avail_cores);
 
     for(lcore = 0; lcore < vm->avail_cores ; lcore++){
 
-        PrintDebug(vm, VCORE_NONE,"EDF Sched. edf_sched_init. Initializing logical core %d\n", lcore);
+        PrintDebug(vm, VCORE_NONE,"EDF Sched. priv_data_init. Initializing logical core %d\n", lcore);
 
-        struct vm_edf_rq * edf_rq_list = (struct vm_edf_rq *) sched_state->priv_data;
+        struct vm_edf_rq * edf_rq_list = (struct vm_edf_rq *)vm->sched_priv_data;
         struct vm_edf_rq * edf_rq = &edf_rq_list[lcore];
 
         edf_rq->vCPUs_tree = RB_ROOT;
@@ -118,7 +176,6 @@ edf_sched_init(struct v3_vm_info *vm){
 
 }
 
-
 /*
  * is_admissible_core: Decides if a core is admitted to the red-black tree according to
  * the admissibility formula.
@@ -136,6 +193,7 @@ is_admissible_core(struct vm_core_edf_sched * new_sched_core, struct vm_edf_rq *
     else
 	return false;
 
+return true;
 }
@@ -235,7 +293,7 @@ next_start_period(uint64_t curr_time_us, uint64_t period_us){
 
 struct vm_edf_rq * get_runqueue(struct guest_info *info){
 
-    struct vm_edf_rq *runqueue_list = (struct vm_edf_rq *) info->vm_info->sched.priv_data;
+    struct vm_edf_rq *runqueue_list = (struct vm_edf_rq *) info->vm_info->sched_priv_data;
     struct vm_edf_rq *runqueue = &runqueue_list[info->pcpu_id];
     return runqueue;
 }
@@ -248,7 +306,7 @@ struct vm_edf_rq * get_runqueue(struct guest_info *info){
 static void
 wakeup_core(struct guest_info *info){
 
-    struct vm_core_edf_sched *core = info->core_sched.priv_data;
+    struct vm_core_edf_sched *core = info->sched_priv_data;
     struct vm_edf_rq *runqueue = get_runqueue(info);
 
     if (!info->core_thread) {
@@ -350,7 +408,7 @@ edf_sched_core_init(struct guest_info * info){
 	PrintError(info->vm_info, info,"Cannot allocate private_data in edf_sched_core_init\n");
 	return -1;
     }
-    info->core_sched.priv_data = core_edf;
+    info->sched_priv_data = core_edf;
 
     // Default configuration if not specified in configuration file
@@ -509,7 +567,7 @@ pick_next_core(struct vm_edf_rq *runqueue){
 static void
 adjust_slice(struct guest_info * info, int used_time, int extra_time)
 {
-    struct vm_core_edf_sched *core = info->core_sched.priv_data;
+    struct vm_core_edf_sched *core = info->sched_priv_data;
     struct vm_edf_rq *runqueue = get_runqueue(info);
 
     core->used_time = used_time;
@@ -532,7 +590,7 @@ adjust_slice(struct guest_info * info, int used_time, int extra_time)
 static void
 run_next_core(struct guest_info *info, int used_time, int usec)
 {
-    struct vm_core_edf_sched *core = info->core_sched.priv_data;
+    struct vm_core_edf_sched *core = info->sched_priv_data;
     struct vm_core_edf_sched *next_core;
     struct vm_edf_rq *runqueue = get_runqueue(info);
@@ -571,7 +629,7 @@ edf_schedule(struct guest_info * info, int usec){
 
     uint64_t host_time = get_curr_host_time(&info->time_state);
     struct vm_edf_rq *runqueue = get_runqueue(info);
-    struct vm_core_edf_sched *core = (struct vm_core_edf_sched *) info->core_sched.priv_data;
+    struct vm_core_edf_sched *core = (struct vm_core_edf_sched *) info->sched_priv_data;
 
     uint64_t used_time = 0;
     if(core->last_wakeup_time != 0)
@@ -625,13 +683,8 @@ edf_sched_yield(struct guest_info * info, int usec){
 int
 edf_sched_deinit(struct v3_vm_info *vm)
 {
-
-    struct vm_scheduler * sched = vm->sched.sched;
-    void *priv_data = vm->sched.priv_data;
+    void *priv_data = vm->sched_priv_data;
 
-    if (sched)
-	V3_Free(sched);
-
     if (priv_data)
 	V3_Free(priv_data);
@@ -646,32 +699,53 @@ edf_sched_deinit(struct v3_vm_info *vm)
 int
 edf_sched_core_deinit(struct guest_info *core)
 {
-
-    struct vm_scheduler * sched = core->core_sched.sched;
-    void *priv_data = core->core_sched.priv_data;
+    void *priv_data = core->sched_priv_data;
 
-    if (sched)
-	V3_Free(sched);
-
     if (priv_data)
 	V3_Free(priv_data);
 
     return 0;
 }
 
+int edf_sched_vm_init(struct v3_vm_info *vm){
+    return 0;
+}
+
+int edf_sched_admit(struct v3_vm_info *vm){
+
+    /*
+     * Initialize priv_data for the vm:
+     * For EDF this is done here because we need the parameter
+     * avail_cores, which is set in v3_start_vm before the
+     * v3_scheduler_admit_vm function is called.
+     */
+
+    priv_data_init(vm);
+
+    // TODO Admission
+
+    return 0;
+}
+
+
 static struct vm_scheduler_impl edf_sched = {
-    .name = "edf",
-    .init = edf_sched_init,
-    .deinit = edf_sched_deinit,
-    .core_init = edf_sched_core_init,
-    .core_deinit = edf_sched_core_deinit,
-    .schedule = edf_sched_schedule,
-    .yield = edf_sched_yield
+
+    .name = "edf",
+    .init = NULL,
+    .deinit = NULL,
+    .vm_init = edf_sched_vm_init,
+    .vm_deinit = NULL,
+    .core_init = edf_sched_core_init,
+    .core_deinit = edf_sched_core_deinit,
+    .schedule = edf_sched_schedule,
+    .yield = edf_sched_yield,
+    .admit = edf_sched_admit,
+    .remap = NULL,
+    .dvfs = NULL
 };
 
 static int
 ext_sched_edf_init() {
-
     PrintDebug(VM_NONE, VCORE_NONE,"Sched. Creating (%s) scheduler\n",edf_sched.name);
     return v3_register_scheduler(&edf_sched);
 }
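Note that edf_sched_admit above leaves the actual admission test as a TODO. The fields defined in this file point at the standard EDF utilization bound: a set of vCPUs sharing a physical core is schedulable as long as the sum of their slice/period utilizations stays within cpu_percent. A sketch of what that check could look like, using the structures above (edf_can_admit is a hypothetical helper, not part of this patch):

    /* Admit a new vCPU only if total utilization stays within the
     * configured budget. Example: two vCPUs, each with slice = 10000 us
     * and period = 100000 us, use 10% + 10% = 20% of the physical core. */
    static bool edf_can_admit(struct vm_edf_rq *runqueue,
                              time_us slice, time_us period) {
        int new_u = runqueue->cpu_u + (int)((100 * slice) / period);
        return new_u <= runqueue->edf_config.cpu_percent;
    }

diff --git a/palacios/src/palacios/vmm_scheduler.c b/palacios/src/palacios/vmm_scheduler.c
index 0739575..e459244 100644
--- a/palacios/src/palacios/vmm_scheduler.c
+++ b/palacios/src/palacios/vmm_scheduler.c
@@ -1,239 +1,240 @@
-/*
- * This file is part of the Palacios Virtual Machine Monitor developed
- * by the V3VEE Project with funding from the United States National
- * Science Foundation and the Department of Energy.
- *
- * The V3VEE Project is a joint project between Northwestern University
- * and the University of New Mexico. You can find out more at
- * http://www.v3vee.org
- *
- * Copyright (c) 2013, Oscar Mondragon
- * Copyright (c) 2013, The V3VEE Project
- * All rights reserved.
- *
- * Author: Oscar Mondragon
- *         Patrick G. Bridges
- *
- * This is free software. You are permitted to use,
- * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
- */
-
-#include
-#include
-#include
-#include
-
-#ifndef V3_CONFIG_DEBUG_SCHEDULER
-#undef PrintDebug
-#define PrintDebug(fmt, args...)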
-#endif
-
-static char default_strategy[] = "host";
-static struct hashtable * master_scheduler_table = NULL;
-static int create_host_scheduler();
-
-static struct vm_scheduler_impl *scheduler = NULL;
-
-static uint_t scheduler_hash_fn(addr_t key) {
-    char * name = (char *)key;
-    return v3_hash_buffer((uint8_t *)name, strlen(name));
-}
-
-static int scheduler_eq_fn(addr_t key1, addr_t key2) {
-    char * name1 = (char *)key1;
-    char * name2 = (char *)key2;
-
-    return (strcmp(name1, name2) == 0);
-}
-
-int V3_init_scheduling() {
-
-    PrintDebug(VM_NONE, VCORE_NONE,"Initializing scheduler");
-
-    master_scheduler_table = v3_create_htable(0, scheduler_hash_fn, scheduler_eq_fn);
-    return create_host_scheduler();
-}
-
-
-int v3_register_scheduler(struct vm_scheduler_impl *s) {
-
-    PrintDebug(VM_NONE, VCORE_NONE,"Registering Scheduler (%s)\n", s->name);
-
-    if (v3_htable_search(master_scheduler_table, (addr_t)(s->name))) {
-	PrintError(VM_NONE, VCORE_NONE, "Multiple instances of scheduler (%s)\n", s->name);
-	return -1;
-    }
-    PrintDebug(VM_NONE, VCORE_NONE,"Registering Scheduler (%s) 2\n", s->name);
-
-
-    if (v3_htable_insert(master_scheduler_table,
-			 (addr_t)(s->name),
-			 (addr_t)(s)) == 0) {
-	PrintError(VM_NONE, VCORE_NONE, "Could not register scheduler (%s)\n", s->name);
-	return -1;
-    }
-
-    PrintDebug(VM_NONE, VCORE_NONE,"Scheduler registered\n");
-    return 0;
-}
-
-struct vm_scheduler_impl *v3_scheduler_lookup(char *name)
-{
-    return (struct vm_scheduler_impl *)v3_htable_search(master_scheduler_table, (addr_t)(name));
-}
-
-int V3_enable_scheduler() {
-    /* XXX Lookup the specified scheduler to use for palacios and use it */
-    scheduler = v3_scheduler_lookup(default_strategy);
-    if (!scheduler) {
-	PrintError(VM_NONE, VCORE_NONE,"Specified Palacios scheduler \"%s\" not found.\n", default_strategy);
-	return -1;
-    }
-    if (scheduler->init) {
-	return scheduler->init();
-    } else {
-	return 0;
-    }
-}
-
-int v3_scheduler_register_vm(struct v3_vm_info *vm) {
-    if (scheduler->vm_init) {
-	return scheduler->vm_init(vm);
-    } else {
-	return 0;
-    }
-}
-int v3_scheduler_register_core(struct guest_info *core) {
-    if (scheduler->core_init) {
-	return scheduler->core_init(core);
-    } else {
-	return 0;
-    }
-}
-int v3_scheduler_admit_vm(struct v3_vm_info *vm) {
-    if (scheduler->admit) {
-	return scheduler->admit(vm);
-    } else {
-	return 0;
-    }
-}
-int v3_scheduler_notify_remap(struct v3_vm_info *vm) {
-    if (scheduler->remap) {
-	return scheduler->remap(vm);
-    } else {
-	return 0;
-    }
-}
-int v3_scheduler_notify_dvfs(struct v3_vm_info *vm) {
-    if (scheduler->dvfs) {
-	return scheduler->dvfs(vm);
-    } else {
-	return 0;
-    }
-}
-void v3_schedule(struct guest_info *core) {
-    if (scheduler->schedule) {
-	scheduler->schedule(core);
-    }
-    return;
-}
-void v3_yield(struct guest_info *core, int usec) {
-    if (scheduler->yield) {
-	scheduler->yield(core, usec);
-    }
-    return;
-}
-
-int host_sched_vm_init(struct v3_vm_info *vm)
-{
-
-    PrintDebug(vm, VCORE_NONE,"Sched. host_sched_init\n");
-
-    char * schedule_hz_str = v3_cfg_val(vm->cfg_data->cfg, "schedule_hz");
-    uint32_t sched_hz = 100;
-
-
-    if (schedule_hz_str) {
-	sched_hz = atoi(schedule_hz_str);
-    }
-
-    PrintDebug(vm, VCORE_NONE,"CPU_KHZ = %d, schedule_freq=%p\n", V3_CPU_KHZ(),
-	       (void *)(addr_t)sched_hz);
-
-    uint64_t yield_cycle_period = (V3_CPU_KHZ() * 1000) / sched_hz;
-    vm->sched_priv_data = (void *)yield_cycle_period;
-
-    return 0;
-}
-
-int host_sched_core_init(struct guest_info *core)
-{
-    PrintDebug(core->vm_info, core,"Sched. host_sched_core_init\n");
-
-    uint64_t t = v3_get_host_time(&core->time_state);
-    core->sched_priv_data = (void *)t;
-
-    return 0;
-}
-
-void host_sched_schedule(struct guest_info *core)
-{
-    uint64_t cur_cycle;
-    cur_cycle = v3_get_host_time(&core->time_state);
-
-    if (cur_cycle > ( (uint64_t)core->sched_priv_data + (uint64_t)core->vm_info->sched_priv_data)) {
-
-	V3_Yield();
-
-	uint64_t yield_start_cycle = (uint64_t) core->sched_priv_data;
-	yield_start_cycle += (uint64_t)core->vm_info->sched_priv_data;
-	core->sched_priv_data = (void *)yield_start_cycle;
-
-    }
-}
-
-/*
- * unconditional cpu yield
- * if the yielding thread is a guest context, the guest quantum is reset on resumption
- * Non guest context threads should call this function with a NULL argument
- *
- * usec <0  => the non-timed yield is used
- * usec >=0 => the timed yield is used, which also usually implies interruptible
- */
-void host_sched_yield(struct guest_info * core, int usec) {
-    uint64_t yield_start_cycle;
-    if (usec < 0) {
-	V3_Yield();
-    } else {
-	V3_Sleep(usec);
-    }
-    yield_start_cycle = (uint64_t) core->sched_priv_data
-	+ (uint64_t)core->vm_info->sched_priv_data;
-    core->sched_priv_data = (void *)yield_start_cycle;
-}
-
-
-int host_sched_admit(struct v3_vm_info *vm){
-    return 0;
-}
-
-static struct vm_scheduler_impl host_sched_impl = {
-    .name = "host",
-    .init = NULL,
-    .deinit = NULL,
-    .vm_init = host_sched_vm_init,
-    .vm_deinit = NULL,
-    .core_init = host_sched_core_init,
-    .core_deinit = NULL,
-    .schedule = host_sched_schedule,
-    .yield = host_sched_yield,
-    .admit = host_sched_admit,
-    .remap = NULL,
-    .dvfs = NULL
-};
-
-static int create_host_scheduler()
-{
-    v3_register_scheduler(&host_sched_impl);
-    return 0;
-}
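The rewritten file below keeps the same registration machinery the deleted version had: schedulers live in master_scheduler_table keyed by name, and V3_enable_scheduler looks up the compiled-in default_strategy ("host"). Selecting a different policy would be a one-line change at the lookup site; a condensed sketch (falling back to "host" is an assumption, not behavior this patch implements):

    struct vm_scheduler_impl *s = v3_scheduler_lookup("edf");
    if (!s) {
        s = v3_scheduler_lookup("host");  /* built-in fallback */
    }
    if (s && s->init) {
        s->init();                        /* global init, if the policy has one */
    }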
+/*
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National
+ * Science Foundation and the Department of Energy.
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico. You can find out more at
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2013, Oscar Mondragon
+ * Copyright (c) 2013, Patrick G. Bridges
+ * Copyright (c) 2013, The V3VEE Project
+ * All rights reserved.
+ *
+ * Author: Oscar Mondragon
+ *         Patrick G. Bridges
+ *
+ * This is free software. You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
+
+#include
+#include
+#include
+#include
+
+#ifndef V3_CONFIG_DEBUG_SCHEDULER
+#undef PrintDebug
+#define PrintDebug(fmt, args...)
+#endif
+
+static char default_strategy[] = "host";
+static struct hashtable * master_scheduler_table = NULL;
+static int create_host_scheduler();
+
+static struct vm_scheduler_impl *scheduler = NULL;
+
+static uint_t scheduler_hash_fn(addr_t key) {
+    char * name = (char *)key;
+    return v3_hash_buffer((uint8_t *)name, strlen(name));
+}
+
+static int scheduler_eq_fn(addr_t key1, addr_t key2) {
+    char * name1 = (char *)key1;
+    char * name2 = (char *)key2;
+
+    return (strcmp(name1, name2) == 0);
+}
+
+int V3_init_scheduling() {
+
+    PrintDebug(VM_NONE, VCORE_NONE,"Initializing scheduler");
+
+    master_scheduler_table = v3_create_htable(0, scheduler_hash_fn, scheduler_eq_fn);
+    return create_host_scheduler();
+}
+
+
+int v3_register_scheduler(struct vm_scheduler_impl *s) {
+
+    PrintDebug(VM_NONE, VCORE_NONE,"Registering Scheduler (%s)\n", s->name);
+
+    if (v3_htable_search(master_scheduler_table, (addr_t)(s->name))) {
+	PrintError(VM_NONE, VCORE_NONE, "Multiple instances of scheduler (%s)\n", s->name);
+	return -1;
+    }
+
+    if (v3_htable_insert(master_scheduler_table,
+			 (addr_t)(s->name),
+			 (addr_t)(s)) == 0) {
+	PrintError(VM_NONE, VCORE_NONE, "Could not register scheduler (%s)\n", s->name);
+	return -1;
+    }
+
+    return 0;
+}
+
+struct vm_scheduler_impl *v3_scheduler_lookup(char *name)
+{
+    return (struct vm_scheduler_impl *)v3_htable_search(master_scheduler_table, (addr_t)(name));
+}
+
+int V3_enable_scheduler() {
+    /* XXX Lookup the specified scheduler to use for palacios and use it */
+
+    scheduler = v3_scheduler_lookup(default_strategy);
+
+    if (!scheduler) {
+	PrintError(VM_NONE, VCORE_NONE,"Specified Palacios scheduler \"%s\" not found.\n", default_strategy);
+	return -1;
+    }
+
+    PrintDebug(VM_NONE, VCORE_NONE,"Sched. Scheduler %s found\n", scheduler->name);
+
+    if (scheduler->init) {
+	return scheduler->init();
+    } else {
+	return 0;
+    }
+}
+
+int v3_scheduler_register_vm(struct v3_vm_info *vm) {
+    if (scheduler->vm_init) {
+	return scheduler->vm_init(vm);
+    } else {
+	return 0;
+    }
+}
+int v3_scheduler_register_core(struct guest_info *core) {
+    if (scheduler->core_init) {
+	return scheduler->core_init(core);
+    } else {
+	return 0;
+    }
+}
+int v3_scheduler_admit_vm(struct v3_vm_info *vm) {
+    if (scheduler->admit) {
+	return scheduler->admit(vm);
+    } else {
+	return 0;
+    }
+}
+int v3_scheduler_notify_remap(struct v3_vm_info *vm) {
+    if (scheduler->remap) {
+	return scheduler->remap(vm);
+    } else {
+	return 0;
+    }
+}
+int v3_scheduler_notify_dvfs(struct v3_vm_info *vm) {
+    if (scheduler->dvfs) {
+	return scheduler->dvfs(vm);
+    } else {
+	return 0;
+    }
+}
+void v3_schedule(struct guest_info *core) {
+    if (scheduler->schedule) {
+	scheduler->schedule(core);
+    }
+    return;
+}
+void v3_yield(struct guest_info *core, int usec) {
+    if (scheduler->yield) {
+	scheduler->yield(core, usec);
+    }
+    return;
+}
+
+int host_sched_vm_init(struct v3_vm_info *vm)
+{
+
+    PrintDebug(vm, VCORE_NONE,"Sched. host_sched_init\n");
+
+    char * schedule_hz_str = v3_cfg_val(vm->cfg_data->cfg, "schedule_hz");
+    uint32_t sched_hz = 100;
+
+
+    if (schedule_hz_str) {
+	sched_hz = atoi(schedule_hz_str);
+    }
+
+    PrintDebug(vm, VCORE_NONE,"CPU_KHZ = %d, schedule_freq=%p\n", V3_CPU_KHZ(),
+	       (void *)(addr_t)sched_hz);
+
+    uint64_t yield_cycle_period = (V3_CPU_KHZ() * 1000) / sched_hz;
+    vm->sched_priv_data = (void *)yield_cycle_period;
+
+    return 0;
+}
+
+int host_sched_core_init(struct guest_info *core)
+{
+    PrintDebug(core->vm_info, core,"Sched. host_sched_core_init\n");
+
+    uint64_t t = v3_get_host_time(&core->time_state);
+    core->sched_priv_data = (void *)t;
+
+    return 0;
+}
+
+void host_sched_schedule(struct guest_info *core)
+{
+    uint64_t cur_cycle;
+    cur_cycle = v3_get_host_time(&core->time_state);
+
+    if (cur_cycle > ( (uint64_t)core->sched_priv_data + (uint64_t)core->vm_info->sched_priv_data)) {
+
+	V3_Yield();
+
+	uint64_t yield_start_cycle = (uint64_t) core->sched_priv_data;
+	yield_start_cycle += (uint64_t)core->vm_info->sched_priv_data;
+	core->sched_priv_data = (void *)yield_start_cycle;
+
+    }
+}
+
+/*
+ * unconditional cpu yield
+ * if the yielding thread is a guest context, the guest quantum is reset on resumption
+ * Non guest context threads should call this function with a NULL argument
+ *
+ * usec <0  => the non-timed yield is used
+ * usec >=0 => the timed yield is used, which also usually implies interruptible
+ */
+void host_sched_yield(struct guest_info * core, int usec) {
+    uint64_t yield_start_cycle;
+    if (usec < 0) {
+	V3_Yield();
+    } else {
+	V3_Sleep(usec);
+    }
+    if (core) {
+	yield_start_cycle = (uint64_t) core->sched_priv_data
+	    + (uint64_t)core->vm_info->sched_priv_data;
+	core->sched_priv_data = (void *)yield_start_cycle;
+    }
+}
+
+
+int host_sched_admit(struct v3_vm_info *vm){
+    return 0;
+}
+
+static struct vm_scheduler_impl host_sched_impl = {
+    .name = "host",
+    .init = NULL,
+    .deinit = NULL,
+    .vm_init = host_sched_vm_init,
+    .vm_deinit = NULL,
+    .core_init = host_sched_core_init,
+    .core_deinit = NULL,
+    .schedule = host_sched_schedule,
+    .yield = host_sched_yield,
+    .admit = host_sched_admit,
+    .remap = NULL,
+    .dvfs = NULL
+};
+
+static int create_host_scheduler()
+{
+    v3_register_scheduler(&host_sched_impl);
+    return 0;
+}
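For reference, the host scheduler stores its per-VM quantum directly in the sched_priv_data pointer as a cycle count. With a hypothetical 2 GHz part (V3_CPU_KHZ() == 2000000) and the default schedule_hz of 100, host_sched_vm_init computes yield_cycle_period = (2000000 * 1000) / 100 = 20,000,000 cycles, so host_sched_schedule forces a yield roughly every 10 ms of guest execution. The same arithmetic as a standalone check:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint64_t cpu_khz  = 2000000;  /* assumed 2 GHz host */
        uint32_t sched_hz = 100;      /* default in host_sched_vm_init */
        uint64_t yield_cycle_period = (cpu_khz * 1000) / sched_hz;
        /* 20000000 cycles / 2e9 cycles per second = 10 ms between yields */
        printf("%llu cycles\n", (unsigned long long)yield_cycle_period);
        return 0;
    }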