2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2013, Oscar Mondragon <omondrag@cs.unm.edu>
11 * Copyright (c) 2013, Patrick G. Bridges <bridges@cs.unm.edu>
12 * Copyright (c) 2013, The V3VEE Project <http://www.v3vee.org>
13 * All rights reserved.
15 * Author: Oscar Mondragon <omondrag@cs.unm.edu>
16 * Patrick G. Bridges <bridges@cs.unm.edu>
18 * This is free software. You are permitted to use,
19 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
23 #include <palacios/vmm.h>
24 #include <palacios/vmm_time.h>
25 #include <palacios/vm_guest.h>
26 #include <palacios/vmm_hashtable.h>
27 #include <palacios/vmm_config.h>
28 #include <palacios/vmm_extensions.h>
29 #include <palacios/vmm_rbtree.h>
/* Compile PrintDebug out entirely when EDF-scheduler debugging is not
 * enabled in the build configuration (variadic args are simply dropped). */
32 #ifndef V3_CONFIG_DEBUG_EXT_SCHED_EDF
34 #define PrintDebug(fmt, args...)
41 * The EDF scheduler uses a dynamically calculated priority as the scheduling criterion to choose
42 * which thread will be scheduled. That priority is calculated according to the relative
43 * deadline of the threads that are ready to run in the runqueue. This runqueue is a per-logical
44 * core data structure used to keep the runnable virtual cores (threads) allocated to that
45 * logical core. The threads with less time before their deadline receive better priorities.
46 * The runqueue is sorted each time that a vCPU becomes runnable or when its period is over.
47 * At that time the vCPU is enqueued. A new scheduling decision is taken each time that an interrupt
48 * is received. At that time, if the earliest-deadline core is different from the current core,
49 * the current core goes to sleep and the earliest-deadline core is woken up. Each time a vCPU is scheduled,
50 * the parameter "used_time" is set to zero and the current deadline is calculated using its period.
51 * One vCPU uses at least "slice" microseconds of CPU. Some extra time can be allocated to that virtual core after
52 * all the other virtual cores consume their slices.
55 // Default configuration values for the EDF Scheduler
56 // time parameters in microseconds
58 #define MAX_PERIOD 1000000000
59 #define MIN_PERIOD 50000
60 #define MAX_SLICE 1000000000
61 #define MIN_SLICE 50000
62 #define CPU_PERCENT 80 // Max CPU utilization (%) the scheduler may admit per physical CPU
63 #define DEADLINE_INTERVAL 30000000 // Period in which the missed deadline ratio is checked
// Time value in microseconds; all period/slice/deadline fields below use this type.
65 typedef uint64_t time_us;
68 * Per-core EDF Scheduling information
71 struct vm_core_edf_sched {
72 struct guest_info *info; // Core data struct
73 struct rb_node node; // red-black tree node (keyed by current_deadline)
74 time_us period; // Amount of time (us) during which the virtual core may receive a CPU allocation
75 time_us slice; // Minimum amount of time (us) received for the virtual core during each period
76 time_us current_deadline; // Time (us) at which current virtual core period ends
77 time_us used_time; // Amount of time (us) used within the current period
78 time_us last_wakeup_time; // Time at which the last wakeup started for this virtual core
79 time_us remaining_time; // Remaining time (us) to reach the expected time allocated to the virtual core
80 bool extra_time; // Specifies if the virtual core is eligible to receive extra CPU time (For now it is true always)
81 uint64_t deadline; // Total number of deadlines
82 int deadline_percentage; // Percentage of total missed deadlines
83 uint64_t miss_deadline; // Number of times the core has missed deadlines
84 uint64_t deadline_interval; // Total number of deadlines in the latest interval
85 int deadline_percentage_interval; // Percentage of missed deadlines in the latest interval
86 uint64_t miss_deadline_interval; // Number of times the core has missed deadlines in the last interval
87 time_us print_deadline_interval; // Last time missed deadline ratio was printed
88 time_us total_time; // Total scheduled time for this virtual core.
89 int slice_overuse; // Number of times a core consumes extra time
90 time_us extra_time_given; // Total extra time given to a virtual core
91 time_us start_time; // Time at which this virtual core started to be scheduled
92 time_us expected_time; // Minimum CPU time expected to be allocated to this virtual core
97 * Scheduler configuration
100 struct vm_edf_sched_config {
101 time_us min_slice; // Minimum allowed slice
102 time_us max_slice; // Maximum allowed slice
103 time_us min_period; // Minimum allowed period
104 time_us max_period; // Maximum allowed period
105 int cpu_percent; // Percentage of CPU utilization for the scheduler in each physical CPU (100 or less)
110 * Run queue structure. Per-logical core data structure used to keep the runnable virtual cores (threads) allocated to that logical core
111 * Contains a pointer to the red black tree, the data structure of configuration options, and other info
116 int cpu_u; // CPU utilization (must be less or equal to the cpu_percent in vm_edf_sched_config)
117 struct rb_root vCPUs_tree; // Red-Black Tree, ordered by current_deadline
118 struct vm_edf_sched_config edf_config; // Scheduling configuration structure
119 int nr_vCPU; // Number of cores in the runqueue
120 struct vm_core_edf_sched *curr_vCPU; // Current running CPU
121 time_us last_sched_time; // Last time a virtual core in this runqueue was scheduled
122 time_us check_deadline_time; // Last time in which the deadlines were recalculated
123 time_us smallest_period; // Smallest period of the virtual cores in the runqueue, used to control the period of deadlines recalculation
124 time_us print_time; // Last time of debugging print
125 time_us start_time; // Time at which first core started running in this runqueue
126 int sched_low; // Incremented when the time between interruptions is large
127 bool yielded; // CPU yielded to palacios (No core running)
131 * Basic functions for scheduling
134 int v3_init_edf_scheduling();
/*
 * print_parameters: Debug dump of per-core accounting (elapsed time,
 * usage, deadline statistics) for this runqueue, and record the host
 * time of this print in runqueue->print_time.
 */
141 print_parameters(time_us host_time, struct vm_edf_rq * runqueue, struct vm_core_edf_sched * core){
143 time_us elapsed_time = 0;
// Elapsed time only meaningful once the runqueue has actually started
144 if(runqueue->start_time != 0)
145 elapsed_time = host_time - runqueue->start_time;
147 PrintDebug(core->info->vm_info, core->info,"Test: %llu %llu %d %d %llu %llu %llu %d\n",
155 core->deadline_percentage);
157 runqueue->print_time = host_time;
162 * init_edf_config: Initialize scheduler configuration
/* Fills the per-runqueue configuration with the compile-time defaults
 * (MIN/MAX period and slice, CPU_PERCENT admission limit). */
166 init_edf_config(struct vm_edf_sched_config *edf_config){
168 edf_config->min_slice = MIN_SLICE;
169 edf_config->max_slice = MAX_SLICE;
170 edf_config->min_period = MIN_PERIOD;
171 edf_config->max_period = MAX_PERIOD;
172 edf_config->cpu_percent = CPU_PERCENT;
177 * priv_data_init: Initialize the run queue
/* Allocates one vm_edf_rq per available logical core and zero/default
 * initializes each runqueue's fields and its configuration. */
181 priv_data_init(struct v3_vm_info *vm){
183 PrintDebug(vm, VCORE_NONE,"EDF Sched. Initializing EDF Scheduling \n");
// One runqueue per available logical core, stored as a flat array
185 vm->sched_priv_data = V3_Malloc( vm->avail_cores * sizeof(struct vm_edf_rq));
187 if (!vm->sched_priv_data) {
188 PrintError(vm, VCORE_NONE,"Cannot allocate in priv_data in priv_data_init\n");
194 PrintDebug(vm, VCORE_NONE,"EDF Sched. priv_data_init. Available cores %d\n", vm->avail_cores);
196 for(lcore = 0; lcore < vm->avail_cores ; lcore++){
198 PrintDebug(vm, VCORE_NONE,"EDF Sched. priv_data_init. Initializing logical core %d\n", lcore);
200 struct vm_edf_rq * edf_rq_list = (struct vm_edf_rq *)vm->sched_priv_data;
201 struct vm_edf_rq * edf_rq = &edf_rq_list[lcore];
203 edf_rq->vCPUs_tree = RB_ROOT;
206 edf_rq->curr_vCPU=NULL;
207 edf_rq->last_sched_time=0;
208 edf_rq->check_deadline_time=0;
210 edf_rq->smallest_period=0;
211 edf_rq->start_time = 0;
212 edf_rq->yielded=false;
213 init_edf_config(&edf_rq->edf_config);
222 * is_admissible_core: Decides if a core is admitted to the red black tree according to
223 * the admissibility formula.
/* Utilization-based admission test: the new core's utilization
 * (100 * slice / period, integer percent) added to the runqueue's current
 * utilization must not exceed the configured cpu_percent limit. */
227 is_admissible_core(struct vm_core_edf_sched * new_sched_core, struct vm_edf_rq *runqueue){
229 int curr_utilization = runqueue->cpu_u;
230 int new_utilization = curr_utilization + (100 * new_sched_core->slice / new_sched_core->period);
231 int cpu_percent = (runqueue->edf_config).cpu_percent;
233 if (new_utilization <= cpu_percent)
244 * insert_core_edf: Finds a place in the tree for a newly activated core, adds the node
245 * and rebalances the tree
/* Standard red-black tree insertion keyed on current_deadline; duplicate
 * deadlines are rejected (caller retries after bumping the deadline). */
249 insert_core_edf(struct vm_core_edf_sched *core, struct vm_edf_rq *runqueue){
251 struct rb_node **new_core = &(runqueue->vCPUs_tree.rb_node);
252 struct rb_node *parent = NULL;
253 struct vm_core_edf_sched *curr_core;
255 // Find out place in the tree for the new core
258 curr_core = container_of(*new_core, struct vm_core_edf_sched, node);
261 if (core->current_deadline < curr_core->current_deadline)
262 new_core = &((*new_core)->rb_left);
263 else if (core->current_deadline > curr_core->current_deadline)
264 new_core = &((*new_core)->rb_right);
265 else // Is Possible to have same current deadlines in both cores!
268 // Add new node and rebalance tree.
269 rb_link_node(&core->node, parent, new_core);
270 v3_rb_insert_color(&core->node, &runqueue->vCPUs_tree);
277 * get_curr_host_time: Calculates the current host time (microseconds)
/* Converts host cycle counter to microseconds: cycles * 1000 / khz. */
281 get_curr_host_time(struct vm_core_time *core_time){
283 uint64_t cur_cycle = v3_get_host_time(core_time);
284 uint64_t cpu_khz = core_time->host_cpu_freq;
285 uint64_t curr_time_us = 1000 * cur_cycle / cpu_khz;
293 * count_cores: Function useful to count the number of cores in a runqueue (Not used for now)
/* In-order walk of the red-black tree, printing per-core accounting as a
 * side effect; returns the number of nodes visited. Debug-only helper. */
298 static int count_cores(struct vm_edf_rq *runqueue){
300 struct rb_node *node = v3_rb_first(&runqueue->vCPUs_tree);
301 struct vm_core_edf_sched *curr_core;
302 int number_cores = 0;
307 curr_core = container_of(node, struct vm_core_edf_sched, node);
308 time_us host_time = get_curr_host_time(&curr_core->info->time_state);
309 PrintDebug(VM_NONE,VCORE_NONE,"Count %d. core %d, used time %llu, deadline %llu, host time %llu extra_time %llu\n",
311 curr_core->info->vcpu_id,
312 curr_core->used_time,
313 curr_core->current_deadline,
315 curr_core->extra_time_given);
316 node = v3_rb_next(node);
325 * next_start_period: Given the current host time and the period of a given vCPU,
326 * calculates the time in which its next period starts.
/* Rounds curr_time_us up to the next multiple of period_us. Note: if
 * curr_time_us is already on a period boundary this returns the boundary
 * one full period later (remaining == period). */
331 next_start_period(uint64_t curr_time_us, uint64_t period_us){
333 uint64_t time_period_us = curr_time_us % period_us;
334 uint64_t remaining_time_us = period_us - time_period_us;
335 uint64_t next_start_us = curr_time_us + remaining_time_us;
337 return next_start_us;
342 * get_runqueue: Get the runqueue assigned to a virtual core.
/* Indexes the per-VM runqueue array by the PHYSICAL cpu id the vcore is
 * mapped to (runqueues are per-logical/physical core, not per-vcore). */
345 struct vm_edf_rq * get_runqueue(struct guest_info *info){
347 struct vm_edf_rq *runqueue_list = (struct vm_edf_rq *) info->vm_info->sched_priv_data;
348 struct vm_edf_rq *runqueue = &runqueue_list[info->pcpu_id];
354 * wakeup_core: Wakeup a given vCPU thread
/* Wakes the host thread backing this vcore, records the wakeup time for
 * later slice accounting, and marks it as the runqueue's current vCPU.
 * Errors out if the core thread does not exist. */
358 wakeup_core(struct guest_info *info){
360 struct vm_core_edf_sched *core = info->sched_priv_data;
361 struct vm_edf_rq *runqueue = get_runqueue(info);
362 time_us host_time = get_curr_host_time(&core->info->time_state);
364 if (!info->core_thread) {
365 PrintError(info->vm_info, info,"ERROR: Tried to wakeup non-existent core thread vCPU_id %d \n",info->vcpu_id);
369 V3_Wakeup(info->core_thread);
// First wakeup ever for this core: record its scheduling start time
370 if(core->start_time == 0){
371 core->start_time = host_time;
372 print_parameters(host_time, runqueue, core);
374 core->last_wakeup_time = host_time;
375 runqueue->curr_vCPU = core;
383 * activate_core - Moves a core to the red-black tree.
384 * used time is set to zero and current deadline is calculated
/* Admits the core (utilization test), computes its next deadline from the
 * current host time, inserts it in the deadline-ordered tree (bumping the
 * deadline by 1us on key collision), updates runqueue utilization and
 * bookkeeping, and bootstraps the very first core to run. */
388 activate_core(struct vm_core_edf_sched * core, struct vm_edf_rq *runqueue){
391 if (is_admissible_core(core, runqueue)){
393 uint64_t curr_time_us = get_curr_host_time(&core->info->time_state);
394 uint64_t curr_deadline = next_start_period(curr_time_us, core->period);
396 core->current_deadline = curr_deadline;
399 bool ins = insert_core_edf(core, runqueue);
401 * If not inserted is possible that there is other core with the same deadline.
402 * Then, the deadline is modified and try again
405 core->current_deadline ++;
406 ins = insert_core_edf(core, runqueue);
409 runqueue->cpu_u += 100 * core->slice / core->period;
410 runqueue->nr_vCPU ++;
// Track the smallest period in the runqueue (drives deadline-recheck cadence)
412 if(runqueue->nr_vCPU == 1){
413 runqueue->smallest_period = core->period;
416 else if(core->period < runqueue->smallest_period){
417 runqueue->smallest_period = core->period;
422 * If this is the first time to be activated pick first earliest deadline core to wakeup.
425 if(!runqueue->curr_vCPU){
427 struct vm_core_edf_sched *next_core;
428 // Pick first earliest deadline core
430 struct rb_node *node = v3_rb_first(&runqueue->vCPUs_tree);
431 next_core = container_of(node, struct vm_core_edf_sched, node);
433 PrintDebug(VM_NONE, VCORE_NONE,"EDF Sched. First time activation, core %d, time %llu \n", next_core->info->vcpu_id, curr_time_us);
435 // Wakeup next_core (It is not neccessary to wakeup since in this case this is the first core launched by Palacios
436 // however wakeup_core is called to initialize some needed parameters.
438 wakeup_core(next_core->info);
439 next_core->start_time = get_curr_host_time(&next_core->info->time_state);
440 print_parameters(curr_time_us, runqueue, next_core);
441 runqueue->start_time = next_core->start_time;
448 PrintError(core->info->vm_info, core->info,"EDF Sched. activate_core. CPU cannot activate the core. It is not admissible");
// Integer ceiling division: (x + y - 1) / y without overflow on x + y.
454 #define CEIL_DIV(x,y) (((x)/(y)) + !!((x)%(y)))
// NOTE(review): both macros evaluate their arguments more than once —
// do not pass expressions with side effects (e.g. MAX(i++, j)).
455 #define MAX(x,y) ((x)>(y) ? (x) : (y))
458 * edf_sched_core_init: Initializes per core data structure and
459 * calls activate function.
/* Allocates the per-vcore EDF state, fills it with defaults, then walks
 * the VM's XML configuration tree looking for the <core> entry matching
 * this vcpu_id to override period/slice/extra_time/khz, and finally
 * activates the core in its runqueue. Returns activate_core's result. */
464 edf_sched_core_init(struct guest_info * info){
466 struct vm_edf_rq *runqueue = get_runqueue(info);
467 struct vm_core_edf_sched *core_edf;
468 struct v3_time *vm_ts = &(info->vm_info->time_state);
469 uint32_t tdf = vm_ts->td_denom;
470 uint_t cpu_khz = V3_CPU_KHZ();
472 PrintDebug(info->vm_info, info,"EDF Sched. Initializing vcore %d, tdf %d\n", info->vcpu_id,tdf);
474 core_edf = (struct vm_core_edf_sched *) V3_Malloc(sizeof (struct vm_core_edf_sched));
476 PrintError(info->vm_info, info,"Cannot allocate private_data in edf_sched_core_init\n");
479 info->sched_priv_data = core_edf;
481 // Default configuration if not specified in configuration file
483 core_edf->info = info;
484 core_edf->period = MIN_PERIOD;
485 core_edf->slice = MIN_SLICE;
486 core_edf->used_time = 0;
487 core_edf->last_wakeup_time = 0;
488 core_edf->remaining_time = 0;
489 core_edf->deadline = 0;
490 core_edf->miss_deadline = 0;
491 core_edf->extra_time = true;
492 core_edf->total_time = 0;
493 core_edf->slice_overuse = 0;
494 core_edf->extra_time_given = 0;
495 core_edf->expected_time = 0;
496 core_edf->start_time = 0;
497 core_edf->deadline_interval = 0;
498 core_edf->deadline_percentage_interval = 0;
499 core_edf->miss_deadline_interval = 0;
500 core_edf->print_deadline_interval = 0;
// Walk the per-core configuration subtree to find this vcore's entry
503 v3_cfg_tree_t * cfg_tree = core_edf->info->vm_info->cfg_data->cfg;
504 v3_cfg_tree_t * core = v3_cfg_subtree(v3_cfg_subtree(cfg_tree, "cores"), "core");
507 char *id = v3_cfg_val(core, "vcpu_id");
508 char *period = v3_cfg_val(core, "period");
509 char *slice = v3_cfg_val(core, "slice");
510 char *extra_time = v3_cfg_val(core, "extra_time");
511 char *speed = v3_cfg_val(core, "khz");
512 uint_t speed_khz = cpu_khz;
514 if (atoi(id) == core_edf->info->vcpu_id){
517 speed_khz = atoi(speed);
// Configured slice, clamped from below to MIN_SLICE
521 core_edf->slice = atoi(slice);
522 core_edf->slice = MAX(MIN_SLICE,core_edf->slice);
525 core_edf->slice = MIN_SLICE;
529 core_edf->period = atoi(period);
530 core_edf->period = MAX(MIN_PERIOD,core_edf->period);
// No period configured: derive one from the slice, cycle frequency and tdf
533 core_edf->period = (core_edf->slice * cpu_khz * tdf)/speed_khz;
534 // WTF is this floating point doing here?!
535 // core_edf->period += 0.3*(100*core_edf->slice/core_edf->period); // Give faster vcores a little more bigger periods.
536 core_edf->period += CEIL_DIV(100*core_edf->slice/core_edf->period,3); // Give faster vcores a little more bigger periods.
539 PrintDebug(info->vm_info,info,"EDF_SCHED. Vcore %d, Pcore %d, cpu_khz %u, Period %llu Speed %d, Utilization %d, tdf %d %llu \n",
540 core_edf->info->vcpu_id,
541 core_edf->info->pcpu_id,
545 (int)(100*core_edf->slice/core_edf->period),
547 runqueue->smallest_period);
// extra_time attribute: "true" (case-insensitive) enables extra CPU time
550 if (strcasecmp(extra_time, "true") == 0)
551 core_edf->extra_time = true;
553 core_edf->extra_time = false;
556 core_edf->extra_time = false;
560 core = v3_cfg_next_branch(core);
563 return activate_core(core_edf,runqueue);
569 * search_core_edf: Searches a core in the red-black tree by using its current_deadline
/* Standard binary-search descent keyed on current_deadline. Deadlines are
 * unique in the tree (activate_core bumps duplicates), so the key
 * identifies at most one core. */
571 static struct vm_core_edf_sched *
572 search_core_edf(time_us current_deadline, struct vm_edf_rq *runqueue){
574 struct rb_node *node = runqueue->vCPUs_tree.rb_node;
577 struct vm_core_edf_sched *core = container_of(node, struct vm_core_edf_sched, node);
579 if (current_deadline < core->current_deadline)
580 node = node->rb_left;
581 else if (current_deadline > core->current_deadline)
582 node = node->rb_right;
591 * delete_core_edf: Deletes a core from the red black tree, generally when it has
592 * consumed its time slice within the current period.
/* Looks the core up by deadline first; erases it from the tree if found,
 * otherwise reports the error. */
596 delete_core_edf( struct vm_core_edf_sched *core_edf , struct vm_edf_rq *runqueue){
598 struct vm_core_edf_sched *core = search_core_edf(core_edf->current_deadline, runqueue);
601 v3_rb_erase(&core->node, &runqueue->vCPUs_tree);
605 PrintError(VM_NONE,VCORE_NONE,"EDF Sched. delete_core_edf.Attempted to erase unexisting core");
612 * deactivate_core - Removes a core from the red-black tree.
/* On successful deletion, gives back the core's utilization share and
 * decrements the runqueue's vCPU count. */
616 deactivate_core(struct vm_core_edf_sched * core, struct vm_edf_rq *runqueue){
618 if(delete_core_edf(core, runqueue)){
619 runqueue->cpu_u -= 100 * core->slice / core->period;
620 runqueue->nr_vCPU -- ;
625 * adjust_slice - Adjust vcore parameters values
/* Charges the time elapsed since the last wakeup to the core's used/total
 * time, recomputes the minimum CPU time the core should have received so
 * far (expected_time) and the shortfall (remaining_time), and counts
 * slice overruns. Skipped entirely before the first wakeup. */
628 static void adjust_slice(struct vm_core_edf_sched *core){
630 time_us host_time = get_curr_host_time(&core->info->time_state);
631 time_us used_time = host_time - core->last_wakeup_time;
633 if(core->last_wakeup_time != 0){
635 core->used_time += used_time;
636 core->total_time += used_time;
637 if(core->start_time != 0){
// Completed periods so far, times the guaranteed slice per period
638 core->expected_time = ((host_time - core->start_time)/core->period)*core->slice;
640 if(core->total_time <= core->expected_time){
641 core->remaining_time = core->expected_time - core->total_time;
644 core->remaining_time =0;
647 if (core->used_time > core->slice){
648 core->slice_overuse++;
655 * Check_deadlines - Check virtual cores, and re-insert in the runqueue the ones whose deadline is over
/* Walks the deadline-ordered tree collecting every core whose current
 * deadline has passed, updates its deadline/miss statistics, then
 * deactivates and re-activates each collected core so it gets a fresh
 * deadline and tree position. Records the host time of this pass. */
659 static void check_deadlines(struct vm_edf_rq *runqueue){
661 struct rb_node *node = v3_rb_first(&runqueue->vCPUs_tree);
662 struct vm_core_edf_sched *next_core = container_of(node, struct vm_core_edf_sched, node);
663 time_us host_time = get_curr_host_time(&next_core->info->time_state);
664 struct vm_core_edf_sched * deadline_cores[runqueue->nr_vCPU];
/* BUG FIX: the size passed to memset is a BYTE count; the old code zeroed
 * only nr_vCPU bytes of an array of nr_vCPU pointers, leaving most slots
 * uninitialized. Zero the whole array. */
669 memset(deadline_cores, 0, runqueue->nr_vCPU * sizeof(deadline_cores[0]));
673 next_core = container_of(node, struct vm_core_edf_sched, node);
674 if(next_core->current_deadline < host_time){
675 next_core->deadline++;
676 next_core->deadline_interval++;
677 deadline_cores[nr_dcores++]= next_core;
// A deadline is "missed" when the core did not get its full slice in time
678 if(next_core->used_time < next_core->slice){
679 next_core->miss_deadline++;
680 next_core->miss_deadline_interval++;
682 /*PrintError(VM_NONE,VCORE_NONE,"Test: Core %d miss_deadlines %d, used time %llu, slice %llu, deadline %llu, host time %llu \n",
683 next_core->info->vcpu_id,
684 next_core->miss_deadline,
685 next_core->used_time,
687 next_core->current_deadline,
692 next_core->extra_time_given += (next_core->used_time - next_core->slice);
694 /* PrintError(VM_NONE,VCORE_NONE,"Test: Extra time, core %d, core used time %llu, slice %llu, extra time %llu, Total extra time %llu \n",
695 next_core->info->vcpu_id,
696 next_core->used_time,
698 (next_core->used_time - next_core->slice),
699 next_core->extra_time_given);*/
707 node = v3_rb_next(node);
// Re-insert expired cores (reverse order) so each gets a fresh deadline
711 for(i=0;i<nr_dcores;i++){
713 next_core = deadline_cores[nr_dcores-1-i];
715 deactivate_core(next_core,runqueue);
716 activate_core(next_core,runqueue);
719 runqueue->check_deadline_time = host_time;
725 * pick_next_core: Returns the next core to be scheduled from the red black tree
/* Refreshes deadlines if enough time (smallest period) has elapsed, then
 * scans the tree in deadline order. Preference order: a core with unused
 * slice; otherwise the earliest-deadline core marked for extra time;
 * otherwise the core with the most remaining (owed) time. */
728 static struct vm_core_edf_sched *
729 pick_next_core(struct vm_edf_rq *runqueue){
732 * Pick first earliest deadline core
734 struct rb_node *node=NULL;
735 struct vm_core_edf_sched *next_core=NULL;
736 time_us core_deadline=0; // Deadline of the core with more time remaining
737 time_us core_extra_deadline=0; // Deadline of the core with extra time enable and earliest deadline
738 time_us max_remaining=0;
739 time_us host_time = get_curr_host_time(&runqueue->curr_vCPU->info->time_state);
// Recompute expired deadlines once per smallest period in the runqueue
741 if(host_time - runqueue->check_deadline_time >= runqueue->smallest_period ){
742 check_deadlines(runqueue);
745 node = v3_rb_first(&runqueue->vCPUs_tree);
746 next_core = container_of(node, struct vm_core_edf_sched, node);
748 if (next_core->used_time < next_core->slice){
752 if(next_core->total_time < next_core->expected_time){
753 max_remaining = next_core->remaining_time;
754 core_deadline = next_core->current_deadline;
757 if(next_core->extra_time){
758 core_extra_deadline = next_core->current_deadline;
761 // Pick the next core that has not used its whole slice
764 next_core = container_of(node, struct vm_core_edf_sched, node);
766 if(next_core->remaining_time > max_remaining){
/* BUG FIX: the running maximum must track the quantity being compared
 * (remaining_time); the old code assigned extra_time_given here, making
 * the comparison meaningless after the first iteration. */
767 max_remaining = next_core->remaining_time;
768 core_deadline = next_core->current_deadline;
769 if(core_extra_deadline !=0 && next_core->extra_time){
770 core_extra_deadline = next_core->current_deadline;
774 if(next_core->used_time < next_core->slice){
779 node = v3_rb_next(node);
782 if(core_extra_deadline !=0){
783 next_core = search_core_edf(core_extra_deadline, runqueue);
786 else if (core_deadline != 0){
787 next_core = search_core_edf(core_deadline, runqueue);
795 * run_next_core: Pick next core to be scheduled and wakeup it
/* Main dispatch step: picks the next core (unless the runqueue has
 * yielded), maintains the per-interval missed-deadline statistics, and if
 * the winner differs from the current core, wakes it up. Also refreshes
 * last_wakeup_time and periodically prints accounting. */
799 run_next_core(struct guest_info *info, int usec)
801 struct vm_core_edf_sched *core = info->sched_priv_data;
802 struct vm_core_edf_sched *next_core;
803 struct vm_edf_rq *runqueue = get_runqueue(info);
804 time_us host_time = get_curr_host_time(&info->time_state);
806 /* The next core to be scheduled is choosen from the tree (Function pick_next_core).
807 * The selected core is the one with the earliest deadline and with available time
808 * to use within the current period (used_time < slice)
810 if(!runqueue->yielded){
813 next_core = pick_next_core(runqueue); // Pick next core to schedule
// Every DEADLINE_INTERVAL us, publish and reset interval statistics
815 if(host_time - core->print_deadline_interval >= DEADLINE_INTERVAL){
817 if(core->deadline_interval != 0){
818 core->deadline_percentage_interval = (int)100*core->miss_deadline_interval/core->deadline_interval;
821 PrintDebug(info->vm_info,info,"deadline: %d %llu %llu %d",
823 core->deadline_interval,
824 core->miss_deadline_interval,
825 core->deadline_percentage_interval);
827 core->print_deadline_interval = host_time;
828 core->deadline_interval=0;
829 core->miss_deadline_interval = 0;
830 core->deadline_percentage_interval = 0;
834 runqueue->yielded=true;
838 runqueue->yielded=false;
// Keep the cumulative missed-deadline percentage up to date
841 if(core->deadline != 0)
842 core->deadline_percentage = (int)100*core->miss_deadline/core->deadline;
844 if (core != next_core){
846 print_parameters(host_time, runqueue,core);
847 wakeup_core(next_core->info);
852 // Necessary to update last_wakeup_time to adjust slice properly later
853 next_core->last_wakeup_time = get_curr_host_time(&next_core->info->time_state);
855 if(host_time - runqueue->print_time >= runqueue->smallest_period ){
856 print_parameters(host_time, runqueue,core);
864 * edf_schedule: Scheduling function
/* Records the time of this scheduling event in the runqueue and delegates
 * the actual core selection/dispatch to run_next_core. */
868 edf_schedule(struct guest_info * info, int usec){
870 uint64_t host_time = get_curr_host_time(&info->time_state);
871 struct vm_edf_rq *runqueue = get_runqueue(info);
873 /*PrintDebug(info->vm_info,info,"Test PCORE. %d host time: %llu Last sched_time: %llu, difference: %llu\n",
874 info->pcpu_id, host_time, runqueue->last_sched_time, host_time-runqueue->last_sched_time); */
876 /* if( (runqueue->last_sched_time !=0) && ((host_time - runqueue->last_sched_time) > 10000)){
877 PrintDebug(info->vm_info,info,"%d Test PCORE. %d LOW SCHED FREQUENCY host time: %llu Last sched_time: %llu, difference: %llu\n",
878 ++runqueue->sched_low,info->pcpu_id, host_time, runqueue->last_sched_time, host_time-runqueue->last_sched_time);
881 runqueue->last_sched_time = host_time;
882 run_next_core(info, usec);
888 * edf_sched_schedule: Main scheduling function. Called each time that an interruption occurs.
/* Thin wrapper: schedule with no requested yield time (usec == 0). */
893 edf_sched_schedule(struct guest_info * info){
895 edf_schedule(info, 0);
900 * edf_sched_yield: Called when yielding the logical cpu for usec is needed
/* Thin wrapper over edf_schedule passing the requested yield duration. */
904 edf_sched_yield(struct guest_info * info, int usec){
905 edf_schedule(info, usec);
910 * edf_sched_core_stop. Stops virtual machine. All the virtual cores yield the CPU
/* Walks every core in this runqueue's tree, waking each one so its thread
 * can observe the stop and yield. */
914 edf_sched_core_stop(struct guest_info * info){
916 struct vm_edf_rq * runqueue = get_runqueue(info);
917 struct rb_node *node = v3_rb_first(&runqueue->vCPUs_tree);
918 struct vm_core_edf_sched *curr_core;
922 curr_core = container_of(node, struct vm_core_edf_sched, node);
923 wakeup_core(curr_core->info);
924 PrintDebug(VM_NONE,VCORE_NONE,"Waking up core %d, thread (%p)\n",
925 curr_core->info->vcpu_id,
926 (struct task_struct *)info->core_thread);
928 PrintDebug(VM_NONE,VCORE_NONE,"Yielding Thread %p\n",(struct task_struct *)info->core_thread);
929 node = v3_rb_next(node);
937 * edf_sched_deinit: Frees edf scheduler data structures
/* Releases the per-VM runqueue array allocated in priv_data_init. */
942 edf_sched_deinit(struct v3_vm_info *vm)
944 PrintDebug(VM_NONE,VCORE_NONE,"Freeing vm\n");
945 void *priv_data = vm->sched_priv_data;
955 * edf_sched_core_deinit: Frees virtual core data structures
/* Releases the per-vcore EDF state allocated in edf_sched_core_init. */
959 edf_sched_core_deinit(struct guest_info *core)
961 PrintDebug(VM_NONE,VCORE_NONE,"Freeing core\n");
962 void *priv_data = core->sched_priv_data;
971 * edf_sched_vm_init. Called when the VM starts
974 int edf_sched_vm_init(struct v3_vm_info *vm){
/* Admission entry point; also performs the deferred runqueue allocation
 * (see comment below for why it cannot happen in vm_init). */
978 int edf_sched_admit(struct v3_vm_info *vm){
981 * Initialize priv_data for the vm:
982 * For EDF this is done here because we need the parameter
983 * avail_core which is set in v3_start_vm before the
984 * v3_scheduler_admit_vm function is called.
996 * vm_scheduler_impl. Functions that implement vmm_scheduler interface
// Callback table registered with the generic Palacios scheduler framework
999 static struct vm_scheduler_impl edf_sched = {
1004 .vm_init = edf_sched_vm_init,
1005 .vm_deinit = edf_sched_deinit,
1006 .core_init = edf_sched_core_init,
1007 .core_stop = edf_sched_core_stop,
1008 .core_deinit = edf_sched_core_deinit,
1009 .schedule = edf_sched_schedule,
1010 .yield = edf_sched_yield,
1011 .admit = edf_sched_admit,
1018 * ext_sched_edf_init. Creates and registers the EDF scheduler.
1022 ext_sched_edf_init() {
1023 PrintDebug(VM_NONE, VCORE_NONE,"Sched. Creating (%s) scheduler\n",edf_sched.name);
1024 return v3_register_scheduler(&edf_sched);
1028 * ext_sched_edf_vm_int. Called when the VM starts
1032 ext_sched_edf_vm_init() {
1038 * v3_extension_impl. EDF extension functions
// Extension descriptor registered with the Palacios extension framework
1042 static struct v3_extension_impl sched_edf_impl = {
1043 .name = "EDF Scheduler",
1044 .init = ext_sched_edf_init,
1045 .vm_init = ext_sched_edf_vm_init,
1048 .core_deinit = NULL,
1053 register_extension(&sched_edf_impl);