2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2013, Oscar Mondragon <omondrag@cs.unm.edu>
11 * Copyright (c) 2013, Patrick G. Bridges <bridges@cs.unm.edu>
12 * Copyright (c) 2013, The V3VEE Project <http://www.v3vee.org>
13 * All rights reserved.
15 * Author: Oscar Mondragon <omondrag@cs.unm.edu>
16 * Patrick G. Bridges <bridges@cs.unm.edu>
18 * This is free software. You are permitted to use,
19 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
23 #include <palacios/vmm.h>
24 #include <palacios/vmm_time.h>
25 #include <palacios/vm_guest.h>
26 #include <palacios/vmm_hashtable.h>
27 #include <palacios/vmm_config.h>
28 #include <palacios/vmm_extensions.h>
29 #include <palacios/vmm_rbtree.h>
// Compile out scheduler debug output unless EDF debugging is configured.
#ifndef V3_CONFIG_DEBUG_EXT_SCHED_EDF
#define PrintDebug(fmt, args...)
 * The EDF scheduler uses a dynamically calculated priority as the scheduling criterion to choose
 * which thread will be scheduled. That priority is calculated according to the relative
 * deadlines of the threads that are ready to run in the runqueue. This runqueue is a per-logical-core
 * data structure used to keep the runnable virtual cores (threads) allocated to that
 * logical core. The threads with less time before their deadline receive better priorities.
 * The runqueue is sorted each time that a vCPU becomes runnable or when its period is over.
 * At that time the vCPU is enqueued. A new scheduling decision is taken each time that an interrupt
 * is received. At that time, if the earliest-deadline core is different from the current core,
 * the current core goes to sleep and the earliest-deadline core is woken up. Each time a vCPU is scheduled,
 * the parameter "used_time" is set to zero and the current deadline is calculated using its period.
 * One vCPU uses at least "slice" microseconds of CPU. Some extra time can be allocated to that virtual core after
 * all the other virtual cores consume their slices.
// Default configuration values for the EDF Scheduler
// time parameters in microseconds
#define MAX_PERIOD 1000000000
#define MIN_PERIOD 50000
#define MAX_SLICE 1000000000
#define MIN_SLICE 50000
#define CPU_PERCENT 80
#define DEADLINE_INTERVAL 30000000 // Period in which the missed deadline ratio is checked
// All scheduler time values are carried as 64-bit microsecond counts.
typedef uint64_t time_us;
68 * Per-core EDF Scheduling information
// Per-virtual-core EDF scheduling state; one instance hangs off each
// guest_info's sched_priv_data and doubles as a red-black tree node
// keyed by current_deadline.
struct vm_core_edf_sched {
    struct guest_info *info;              // Core data struct
    struct rb_node node;                  // red-black tree node
    time_us period;                       // Amount of time (us) during which the virtual core may received a CPU allocation
    time_us slice;                        // Minimum amount of time (us) received for the virtual core during each period
    time_us current_deadline;             // Time (us) at which current virtual core period ends
    time_us used_time;                    // Amount of time (us) used within the current period
    time_us last_wakeup_time;             // Time at which the last wakeup started for this virtual core
    time_us remaining_time;               // Remaining time (us) to reach the expected time allocated to the virtual core
    bool extra_time;                      // Specifies if the virtual core is eligible to receive extra CPU time (For now it is true always)
    uint64_t deadline;                    // Total number of deadlines
    int deadline_percentage;              // Percentage of total missed deadlines
    uint64_t miss_deadline;               // Number of times the core has missed deadlines
    uint64_t deadline_interval;           // Total number of deadlines in the latest interval
    int deadline_percentage_interval;     // Percentage of missed deadlines in the latest interval
    uint64_t miss_deadline_interval;      // Number of times the core has missed deadlines in the last interval
    time_us print_deadline_interval;      // Last time missed deadline ratio was printed
    time_us total_time;                   // Total scheduled time for this virtual core.
    int slice_overuse;                    // Number of times a core consumes extra time
    time_us extra_time_given;             // Total extra time given to a virtual core
    time_us start_time;                   // Time at which this virtual core start to be scheduled
    time_us expected_time;                // Minimum CPU time expected to be allocated to this virtual core
97 * Scheduler configuration
// Per-runqueue admission-control limits; populated from the compile-time
// defaults by init_edf_config().
struct vm_edf_sched_config {
    time_us min_slice;       // Minimum allowed slice
    time_us max_slice;       // Maximum allowed slice
    time_us min_period;      // Minimum allowed period
    time_us max_period;      // Maximum allowed period
    int cpu_percent;         // Percentage of CPU utilization for the scheduler in each physical CPU (100 or less)
110 * Run queue structure. Per-logical core data structure used to keep the runnable virtual cores (threads) allocated to that logical core
111 * Contains a pointer to the red black tree, the data structure of configuration options, and other info
    // NOTE(review): the "struct vm_edf_rq {" opening line is not visible in this chunk.
    int cpu_u;                            // CPU utilization (must be less or equal to the cpu_percent in vm_edf_sched_config)
    struct rb_root vCPUs_tree;            // Red-Black Tree of runnable vCPUs ordered by current_deadline
    struct vm_edf_sched_config edf_config; // Scheduling configuration structure
    int nr_vCPU;                          // Number of cores in the runqueue
    struct vm_core_edf_sched *curr_vCPU;  // Current running CPU
    time_us last_sched_time;              // Last time a virtual core in this runqueue was scheduled
    time_us check_deadline_time;          // Last time in which the deadlines were recalculated
    time_us smallest_period;              // Smallest period of the virtual cores in the runqueue, used to control the period of deadlines recalculation
    time_us print_time;                   // Last time of debugging print
    time_us start_time;                   // Time at which first core started running in this runqueue
    int sched_low;                        // Incremented when the time between interruptions is large
    bool yielded;                         // CPU yielded to palacios (No core running)
131 * Basic functions for scheduling
134 int v3_init_edf_scheduling();
/*
 * print_parameters: Emit a debug line with the runqueue's elapsed time and the
 * given core's accounting statistics, then record host_time as the last print time.
 * NOTE(review): several argument lines of the PrintDebug call are not visible in
 * this chunk; the format string expects more values than are shown here.
 */
print_parameters(time_us host_time, struct vm_edf_rq * runqueue, struct vm_core_edf_sched * core){

    time_us elapsed_time = 0;
    // Elapsed time only meaningful once the runqueue has started running a core
    if(runqueue->start_time != 0)
        elapsed_time = host_time - runqueue->start_time;

    PrintDebug(core->info->vm_info, core->info,"Test: %llu %llu %d %d %llu %llu %llu %d\n",
        core->deadline_percentage);

    runqueue->print_time = host_time;
162 * init_edf_config: Initialize scheduler configuration
166 init_edf_config(struct vm_edf_sched_config *edf_config){
168 edf_config->min_slice = MIN_SLICE;
169 edf_config->max_slice = MAX_SLICE;
170 edf_config->min_period = MIN_PERIOD;
171 edf_config->max_period = MAX_PERIOD;
172 edf_config->cpu_percent = CPU_PERCENT;
177 * priv_data_init: Initialize the run queue
/*
 * priv_data_init: Allocate and initialize one EDF runqueue per available
 * logical core; the array is stored in vm->sched_priv_data.
 * NOTE(review): body truncated in this chunk (allocation-failure return,
 * loop-variable declaration, and final return are not visible).
 */
priv_data_init(struct v3_vm_info *vm){

    PrintDebug(vm, VCORE_NONE,"EDF Sched. Initializing EDF Scheduling \n");

    // One vm_edf_rq per available logical core
    vm->sched_priv_data = V3_Malloc( vm->avail_cores * sizeof(struct vm_edf_rq));

    if (!vm->sched_priv_data) {
        PrintError(vm, VCORE_NONE,"Cannot allocate in priv_data in priv_data_init\n");

    PrintDebug(vm, VCORE_NONE,"EDF Sched. priv_data_init. Available cores %d\n", vm->avail_cores);

    for(lcore = 0; lcore < vm->avail_cores ; lcore++){

        PrintDebug(vm, VCORE_NONE,"EDF Sched. priv_data_init. Initializing logical core %d\n", lcore);

        struct vm_edf_rq * edf_rq_list = (struct vm_edf_rq *)vm->sched_priv_data;
        struct vm_edf_rq * edf_rq = &edf_rq_list[lcore];

        // Empty tree, no current vCPU, all timestamps reset
        edf_rq->vCPUs_tree = RB_ROOT;
        edf_rq->curr_vCPU=NULL;
        edf_rq->last_sched_time=0;
        edf_rq->check_deadline_time=0;
        edf_rq->smallest_period=0;
        edf_rq->start_time = 0;
        edf_rq->yielded=false;
        init_edf_config(&edf_rq->edf_config);
222 * is_admissible_core: Decides if a core is admited to the red black tree according with
223 * the admisibility formula.
/*
 * is_admissible_core: Utilization-based admission test — the new core is
 * admitted only if total runqueue utilization stays within the configured
 * cpu_percent cap. NOTE(review): the return statements are not visible in
 * this chunk.
 */
is_admissible_core(struct vm_core_edf_sched * new_sched_core, struct vm_edf_rq *runqueue){

    int curr_utilization = runqueue->cpu_u;
    // New core's contribution in whole percent (integer division truncates)
    int new_utilization = curr_utilization + (100 * new_sched_core->slice / new_sched_core->period);
    int cpu_percent = (runqueue->edf_config).cpu_percent;

    if (new_utilization <= cpu_percent)
244 * insert_core_edf: Finds a place in the tree for a newly activated core, adds the node
245 * and rebalaces the tree
/*
 * insert_core_edf: Standard red-black tree insertion keyed on
 * current_deadline. Fails (caller retries with deadline+1) when another
 * core already holds the identical deadline.
 * NOTE(review): the descent loop header and the failure return path are not
 * visible in this chunk.
 */
insert_core_edf(struct vm_core_edf_sched *core, struct vm_edf_rq *runqueue){

    struct rb_node **new_core = &(runqueue->vCPUs_tree.rb_node);
    struct rb_node *parent = NULL;
    struct vm_core_edf_sched *curr_core;

    // Find out place in the tree for the new core
        curr_core = container_of(*new_core, struct vm_core_edf_sched, node);

        if (core->current_deadline < curr_core->current_deadline)
            new_core = &((*new_core)->rb_left);
        else if (core->current_deadline > curr_core->current_deadline)
            new_core = &((*new_core)->rb_right);
        else // Is Possible to have same current deadlines in both cores!

    // Add new node and rebalance tree.
    rb_link_node(&core->node, parent, new_core);
    v3_rb_insert_color(&core->node, &runqueue->vCPUs_tree);
277 * get_curr_host_time: Calculates the current host time (microseconds)
/*
 * get_curr_host_time: Convert the host cycle counter to microseconds using
 * the host CPU frequency (kHz): us = 1000 * cycles / khz.
 * NOTE(review): the return statement is not visible in this chunk.
 */
get_curr_host_time(struct vm_core_time *core_time){

    uint64_t cur_cycle = v3_get_host_time(core_time);
    uint64_t cpu_khz = core_time->host_cpu_freq;
    uint64_t curr_time_us = 1000 * cur_cycle / cpu_khz;
293 * count_cores: Function useful to count the number of cores in a runqueue (Not used for now)
/*
 * count_cores: Debug helper — walks the red-black tree in order, printing
 * each core's accounting, and counts the nodes (Not used for now).
 * NOTE(review): the loop header, counter increment, and return are not
 * visible in this chunk.
 */
static int count_cores(struct vm_edf_rq *runqueue){

    struct rb_node *node = v3_rb_first(&runqueue->vCPUs_tree);
    struct vm_core_edf_sched *curr_core;
    int number_cores = 0;

        curr_core = container_of(node, struct vm_core_edf_sched, node);
        time_us host_time = get_curr_host_time(&curr_core->info->time_state);
        PrintDebug(VM_NONE,VCORE_NONE,"Count %d. core %d, used time %llu, deadline %llu, host time %llu extra_time %llu\n",
            curr_core->info->vcpu_id,
            curr_core->used_time,
            curr_core->current_deadline,
            curr_core->extra_time_given);
        node = v3_rb_next(node);
325 * next_start_period: Given the current host time and the period of a given vCPU,
326 * calculates the time in which its next period starts.
/*
 * next_start_period: Given the current host time and a vCPU's period (both
 * in microseconds), return the absolute time at which the vCPU's next
 * period boundary occurs. Periods are aligned to multiples of period_us,
 * so the result is curr_time_us rounded up to the next multiple (a time
 * exactly on a boundary maps to the following boundary).
 */
static uint64_t
next_start_period(uint64_t curr_time_us, uint64_t period_us){

    uint64_t into_period_us = curr_time_us % period_us;
    return curr_time_us + (period_us - into_period_us);
}
342 * get_runqueue: Get the runqueue assigned to a virtual core.
/*
 * get_runqueue: Return the EDF runqueue for the physical CPU this virtual
 * core is pinned to (indexed by pcpu_id into the per-VM runqueue array).
 * NOTE(review): the return statement is not visible in this chunk.
 */
struct vm_edf_rq * get_runqueue(struct guest_info *info){

    struct vm_edf_rq *runqueue_list = (struct vm_edf_rq *) info->vm_info->sched_priv_data;
    struct vm_edf_rq *runqueue = &runqueue_list[info->pcpu_id];
354 * wakeup_core: Wakeup a given vCPU thread
/*
 * wakeup_core: Wake the host thread backing this vCPU, stamp its wakeup
 * time, and make it the runqueue's current vCPU. On the very first wakeup
 * (start_time still 0) the core's start time is recorded and its parameters
 * printed. NOTE(review): the error-path return and closing braces are not
 * visible in this chunk.
 */
wakeup_core(struct guest_info *info){

    struct vm_core_edf_sched *core = info->sched_priv_data;
    struct vm_edf_rq *runqueue = get_runqueue(info);
    time_us host_time = get_curr_host_time(&core->info->time_state);

    if (!info->core_thread) {
        PrintError(info->vm_info, info,"ERROR: Tried to wakeup non-existent core thread vCPU_id %d \n",info->vcpu_id);

    V3_Wakeup(info->core_thread);
    if(core->start_time == 0){
        core->start_time = host_time;
        print_parameters(host_time, runqueue, core);
    core->last_wakeup_time = host_time;
    runqueue->curr_vCPU = core;
383 * activate_core - Moves a core to the red-black tree.
384 * used time is set to zero and current deadline is calculated
/*
 * activate_core: Admit a core into the red-black tree. Computes the core's
 * next deadline from the current host time, inserts it (bumping the
 * deadline by 1us on a key collision and retrying), updates runqueue
 * utilization/bookkeeping, and — if no core is running yet — wakes the
 * earliest-deadline core to bootstrap scheduling on this runqueue.
 * NOTE(review): body truncated in this chunk (return statements and several
 * braces are not visible).
 */
activate_core(struct vm_core_edf_sched * core, struct vm_edf_rq *runqueue){

    if (is_admissible_core(core, runqueue)){

        uint64_t curr_time_us = get_curr_host_time(&core->info->time_state);
        uint64_t curr_deadline = next_start_period(curr_time_us, core->period);

        core->current_deadline = curr_deadline;

        bool ins = insert_core_edf(core, runqueue);
        /*
         * If not inserted is possible that there is other core with the same deadline.
         * Then, the deadline is modified and try again
         */
            core->current_deadline ++;
            ins = insert_core_edf(core, runqueue);

        runqueue->cpu_u += 100 * core->slice / core->period;
        runqueue->nr_vCPU ++;

        // Track the smallest period in the runqueue; it paces deadline rechecks
        if(runqueue->nr_vCPU == 1){
            runqueue->smallest_period = core->period;
        else if(core->period < runqueue->smallest_period){
            runqueue->smallest_period = core->period;

        /*
         * If this is the first time to be activated pick first earliest deadline core to wakeup.
         */
        if(!runqueue->curr_vCPU){

            struct vm_core_edf_sched *next_core;
            // Pick first earliest deadline core
            struct rb_node *node = v3_rb_first(&runqueue->vCPUs_tree);
            next_core = container_of(node, struct vm_core_edf_sched, node);

            PrintDebug(VM_NONE, VCORE_NONE,"EDF Sched. First time activation, core %d, time %llu \n", next_core->info->vcpu_id, curr_time_us);

            // Wakeup next_core (It is not neccessary to wakeup since in this case this is the first core launched by Palacios
            // however wakeup_core is called to initialize some needed parameters.
            wakeup_core(next_core->info);
            next_core->start_time = get_curr_host_time(&next_core->info->time_state);
            print_parameters(curr_time_us, runqueue, next_core);
            runqueue->start_time = next_core->start_time;

    PrintError(core->info->vm_info, core->info,"EDF Sched. activate_core. CPU cannot activate the core. It is not admissible");
455 * edf_sched_core_init: Initializes per core data structure and
456 * calls activate function.
/*
 * edf_sched_core_init: Allocate and initialize the per-core EDF state,
 * override defaults with per-core values from the VM configuration tree
 * (period, slice, extra_time, khz), and activate the core on its runqueue.
 * Returns the result of activate_core().
 * NOTE(review): body truncated in this chunk (allocation-failure branch,
 * several if/else headers, and closing braces are not visible).
 */
edf_sched_core_init(struct guest_info * info){

    struct vm_edf_rq *runqueue = get_runqueue(info);
    struct vm_core_edf_sched *core_edf;
    struct v3_time *vm_ts = &(info->vm_info->time_state);
    uint32_t tdf = vm_ts->td_denom;    // time-dilation factor denominator
    uint_t cpu_khz = V3_CPU_KHZ();

    PrintDebug(info->vm_info, info,"EDF Sched. Initializing vcore %d, tdf %d\n", info->vcpu_id,tdf);

    core_edf = (struct vm_core_edf_sched *) V3_Malloc(sizeof (struct vm_core_edf_sched));
        PrintError(info->vm_info, info,"Cannot allocate private_data in edf_sched_core_init\n");
    info->sched_priv_data = core_edf;

    // Default configuration if not specified in configuration file
    core_edf->info = info;
    core_edf->period = MIN_PERIOD;
    core_edf->slice = MIN_SLICE;
    core_edf->used_time = 0;
    core_edf->last_wakeup_time = 0;
    core_edf->remaining_time = 0;
    core_edf->deadline = 0;
    core_edf->miss_deadline = 0;
    core_edf->extra_time = true;
    core_edf->total_time = 0;
    core_edf->slice_overuse = 0;
    core_edf->extra_time_given = 0;
    core_edf->expected_time = 0;
    core_edf->start_time = 0;
    core_edf->deadline_interval = 0;
    core_edf->deadline_percentage_interval = 0;
    core_edf->miss_deadline_interval = 0;
    core_edf->print_deadline_interval = 0;

    // Walk the <cores><core> configuration subtree looking for this vcpu's entry
    v3_cfg_tree_t * cfg_tree = core_edf->info->vm_info->cfg_data->cfg;
    v3_cfg_tree_t * core = v3_cfg_subtree(v3_cfg_subtree(cfg_tree, "cores"), "core");

        char *id = v3_cfg_val(core, "vcpu_id");
        char *period = v3_cfg_val(core, "period");
        char *slice = v3_cfg_val(core, "slice");
        char *extra_time = v3_cfg_val(core, "extra_time");
        char *speed = v3_cfg_val(core, "khz");
        uint_t speed_khz = cpu_khz;

        // NOTE(review): atoi() gives no error detection for malformed config
        // values; strtol() would be safer — confirm against config guarantees.
        if (atoi(id) == core_edf->info->vcpu_id){
                speed_khz = atoi(speed);
                core_edf->slice = atoi(slice);
                core_edf->slice = MIN_SLICE;
                core_edf->period = atoi(period);
                // Derive the period from slice, host/guest speed ratio and tdf
                core_edf->period = (core_edf->slice * cpu_khz * tdf)/speed_khz;
                core_edf->period += 0.3*(100*core_edf->slice/core_edf->period); // Give faster vcores slightly bigger periods.

            PrintDebug(info->vm_info,info,"EDF_SCHED. Vcore %d, Pcore %d, cpu_khz %u, Period %llu Speed %d, Utilization %d, tdf %d %llu \n",
                core_edf->info->vcpu_id,
                core_edf->info->pcpu_id,
                (int)(100*core_edf->slice/core_edf->period),
                runqueue->smallest_period);

                if (strcasecmp(extra_time, "true") == 0)
                    core_edf->extra_time = true;
                    core_edf->extra_time = false;
                core_edf->extra_time = false;

        core = v3_cfg_next_branch(core);

    return activate_core(core_edf,runqueue);
562 * search_core_edf: Searches a core in the red-black tree by using its current_deadline
/*
 * search_core_edf: Binary search of the red-black tree for the core whose
 * current_deadline equals the given key; used by delete and by
 * pick_next_core to recover a core from a remembered deadline.
 * NOTE(review): the loop header, equality-match return, and not-found
 * return are not visible in this chunk.
 */
static struct vm_core_edf_sched *
search_core_edf(time_us current_deadline, struct vm_edf_rq *runqueue){

    struct rb_node *node = runqueue->vCPUs_tree.rb_node;

        struct vm_core_edf_sched *core = container_of(node, struct vm_core_edf_sched, node);

        if (current_deadline < core->current_deadline)
            node = node->rb_left;
        else if (current_deadline > core->current_deadline)
            node = node->rb_right;
584 * delete_core_edf: Deletes a core from the red black tree, generally when it has
585 * consumed its time slice within the current period.
/*
 * delete_core_edf: Remove a core from the red-black tree by looking it up
 * via its current_deadline; logs an error if the core is not found.
 * NOTE(review): the found/not-found branch structure and return values are
 * not visible in this chunk.
 */
delete_core_edf( struct vm_core_edf_sched *core_edf , struct vm_edf_rq *runqueue){

    struct vm_core_edf_sched *core = search_core_edf(core_edf->current_deadline, runqueue);
        v3_rb_erase(&core->node, &runqueue->vCPUs_tree);
        PrintError(VM_NONE,VCORE_NONE,"EDF Sched. delete_core_edf.Attempted to erase unexisting core");
605 * deactivate_core - Removes a core from the red-black tree.
/*
 * deactivate_core: Remove a core from the red-black tree and, on success,
 * release its utilization share and decrement the runqueue's vCPU count
 * (mirrors the bookkeeping done in activate_core).
 */
deactivate_core(struct vm_core_edf_sched * core, struct vm_edf_rq *runqueue){

     if(delete_core_edf(core, runqueue)){
         runqueue->cpu_u -= 100 * core->slice / core->period;
         runqueue->nr_vCPU -- ;
618 * adjust_slice - Adjust vcore parameters values
/*
 * adjust_slice: Account the CPU time consumed since the core's last wakeup:
 * adds it to used_time/total_time, recomputes the minimum expected
 * allocation ((elapsed / period) * slice) and the remaining shortfall, and
 * counts slice overruns. NOTE(review): closing braces of the nested
 * conditionals are not visible in this chunk.
 */
static void adjust_slice(struct vm_core_edf_sched *core){

    time_us host_time = get_curr_host_time(&core->info->time_state);
    time_us used_time = host_time - core->last_wakeup_time;

    // last_wakeup_time == 0 means the core has never actually run yet
    if(core->last_wakeup_time != 0){

        core->used_time += used_time;
        core->total_time += used_time;
        if(core->start_time != 0){
            core->expected_time = ((host_time - core->start_time)/core->period)*core->slice;
        if(core->total_time <= core->expected_time){
            core->remaining_time = core->expected_time - core->total_time;
            core->remaining_time =0;

        if (core->used_time > core->slice){
            core->slice_overuse++;
648 * Check_deadlines - Check virtual cores, and re-insert in the runqueue the ones which deadline is over
/*
 * check_deadlines: Walk the tree, collect every core whose current deadline
 * has passed (updating per-core deadline/miss counters and extra-time
 * accounting), then deactivate/re-activate each collected core so it gets a
 * fresh deadline and position in the tree. Records host_time as the last
 * recheck time. NOTE(review): the traversal loop header and some branch
 * structure are not visible in this chunk.
 */
static void check_deadlines(struct vm_edf_rq *runqueue){

    struct rb_node *node = v3_rb_first(&runqueue->vCPUs_tree);
    struct vm_core_edf_sched *next_core = container_of(node, struct vm_core_edf_sched, node);
    time_us host_time = get_curr_host_time(&next_core->info->time_state);
    // VLA sized by the live vCPU count; holds cores whose deadline expired
    struct vm_core_edf_sched * deadline_cores[runqueue->nr_vCPU];

    // BUG(review): this zeroes only nr_vCPU *bytes*, not nr_vCPU pointers;
    // should be memset(deadline_cores, 0, sizeof(deadline_cores)).
    memset(deadline_cores, 0, runqueue->nr_vCPU);

        next_core = container_of(node, struct vm_core_edf_sched, node);
        if(next_core->current_deadline < host_time){
            next_core->deadline++;
            next_core->deadline_interval++;
            deadline_cores[nr_dcores++]= next_core;
            // Missed deadline: the period ended before the core used its slice
            if(next_core->used_time < next_core->slice){
                next_core->miss_deadline++;
                next_core->miss_deadline_interval++;

                /*PrintError(VM_NONE,VCORE_NONE,"Test: Core %d miss_deadlines %d, used time %llu, slice %llu, deadline %llu, host time %llu \n",
                  next_core->info->vcpu_id,
                  next_core->miss_deadline,
                  next_core->used_time,
                  next_core->current_deadline,

                // Core overran its slice: record the surplus as extra time given
                next_core->extra_time_given += (next_core->used_time - next_core->slice);

                /* PrintError(VM_NONE,VCORE_NONE,"Test: Extra time, core %d, core used time %llu, slice %llu, extra time %llu, Total extra time %llu \n",
                   next_core->info->vcpu_id,
                   next_core->used_time,
                   (next_core->used_time - next_core->slice),
                   next_core->extra_time_given);*/

        node = v3_rb_next(node);

    // Re-insert expired cores (reverse collection order) with fresh deadlines
    for(i=0;i<nr_dcores;i++){
        next_core = deadline_cores[nr_dcores-1-i];
        deactivate_core(next_core,runqueue);
        activate_core(next_core,runqueue);

    runqueue->check_deadline_time = host_time;
718 * pick_next_core: Returns the next core to be scheduled from the red black tree
/*
 * pick_next_core: Select the next vCPU to run. First triggers a deadline
 * recheck if at least smallest_period has elapsed, then scans the tree
 * (earliest deadline first) preferring cores that have not exhausted their
 * slice, falling back to extra-time-eligible cores or the core with the
 * largest remaining shortfall. NOTE(review): the scan loop header, early
 * returns, and closing braces are not visible in this chunk.
 */
static struct vm_core_edf_sched *
pick_next_core(struct vm_edf_rq *runqueue){

    /*
     * Pick first earliest deadline core
     */
    struct rb_node *node=NULL;
    struct vm_core_edf_sched *next_core=NULL;
    time_us core_deadline=0;        // Deadline of the core with more time remaining
    time_us core_extra_deadline=0;  // Deadline of the core with extra time enable and earliest deadline
    time_us max_remaining=0;
    // NOTE(review): dereferences runqueue->curr_vCPU unconditionally —
    // assumes a current vCPU is always set when this is called; confirm.
    time_us host_time = get_curr_host_time(&runqueue->curr_vCPU->info->time_state);

    if(host_time - runqueue->check_deadline_time >= runqueue->smallest_period ){
        check_deadlines(runqueue);

    node = v3_rb_first(&runqueue->vCPUs_tree);
    next_core = container_of(node, struct vm_core_edf_sched, node);

    if (next_core->used_time < next_core->slice){

    if(next_core->total_time < next_core->expected_time){
        max_remaining = next_core->remaining_time;
        core_deadline = next_core->current_deadline;

    if(next_core->extra_time){
        core_extra_deadline = next_core->current_deadline;

    // Pick the next core that has not used its whole slice
        next_core = container_of(node, struct vm_core_edf_sched, node);

        if(next_core->remaining_time > max_remaining){
            // NOTE(review): assigning extra_time_given here looks wrong —
            // the comparison is on remaining_time, so this was likely meant
            // to be max_remaining = next_core->remaining_time; confirm.
            max_remaining = next_core->extra_time_given;
            core_deadline = next_core->current_deadline;
            if(core_extra_deadline !=0 && next_core->extra_time){
                core_extra_deadline = next_core->current_deadline;

        if(next_core->used_time < next_core->slice){

        node = v3_rb_next(node);

    if(core_extra_deadline !=0){
        next_core = search_core_edf(core_extra_deadline, runqueue);

    else if (core_deadline != 0){
        next_core = search_core_edf(core_deadline, runqueue);
788 * run_next_core: Pick next core to be scheduled and wakeup it
/*
 * run_next_core: Top-level per-interrupt scheduling step. Picks the next
 * core (unless the runqueue had yielded), periodically reports the
 * missed-deadline ratio for the current interval, refreshes the overall
 * missed-deadline percentage, and — if the winner differs from the current
 * core — wakes the winner (the current core then sleeps). Also refreshes
 * last_wakeup_time so adjust_slice accounts correctly later.
 * NOTE(review): several branch bodies and braces are not visible in this
 * chunk; the usec parameter's use is not visible here.
 */
run_next_core(struct guest_info *info, int usec)
    struct vm_core_edf_sched *core = info->sched_priv_data;
    struct vm_core_edf_sched *next_core;
    struct vm_edf_rq *runqueue = get_runqueue(info);
    time_us host_time = get_curr_host_time(&info->time_state);

    /* The next core to be scheduled is choosen from the tree (Function pick_next_core).
     * The selected core is the one with the earliest deadline and with available time
     * to use within the current period (used_time < slice)
     */
    if(!runqueue->yielded){

        next_core = pick_next_core(runqueue); // Pick next core to schedule

        // Periodic report of the missed-deadline ratio over the last interval
        if(host_time - core->print_deadline_interval >= DEADLINE_INTERVAL){

            if(core->deadline_interval != 0){
                core->deadline_percentage_interval = (int)100*core->miss_deadline_interval/core->deadline_interval;

            PrintDebug(info->vm_info,info,"deadline: %d %llu %llu %d",
                core->deadline_interval,
                core->miss_deadline_interval,
                core->deadline_percentage_interval);

            core->print_deadline_interval = host_time;
            core->deadline_interval=0;
            core->miss_deadline_interval = 0;
            core->deadline_percentage_interval = 0;

        runqueue->yielded=true;
        runqueue->yielded=false;

    // Refresh the lifetime missed-deadline percentage
    if(core->deadline != 0)
        core->deadline_percentage = (int)100*core->miss_deadline/core->deadline;

    if (core != next_core){

        print_parameters(host_time, runqueue,core);
        wakeup_core(next_core->info);

    // Necessary to update last_wakeup_time to adjust slice properly later
    next_core->last_wakeup_time = get_curr_host_time(&next_core->info->time_state);

    if(host_time - runqueue->print_time >= runqueue->smallest_period ){
        print_parameters(host_time, runqueue,core);
857 * edf_schedule: Scheduling function
/*
 * edf_schedule: Common entry for both the interrupt-driven schedule hook
 * and the yield hook — stamps last_sched_time and delegates to
 * run_next_core. The commented-out blocks were low-frequency-scheduling
 * diagnostics.
 */
edf_schedule(struct guest_info * info, int usec){

    uint64_t host_time = get_curr_host_time(&info->time_state);
    struct vm_edf_rq *runqueue = get_runqueue(info);

    /*PrintDebug(info->vm_info,info,"Test PCORE. %d host time: %llu Last sched_time: %llu, difference: %llu\n",
      info->pcpu_id, host_time, runqueue->last_sched_time, host_time-runqueue->last_sched_time); */

    /* if( (runqueue->last_sched_time !=0) && ((host_time - runqueue->last_sched_time) > 10000)){
       PrintDebug(info->vm_info,info,"%d Test PCORE. %d LOW SCHED FREQUENCY host time: %llu Last sched_time: %llu, difference: %llu\n",
       ++runqueue->sched_low,info->pcpu_id, host_time, runqueue->last_sched_time, host_time-runqueue->last_sched_time);

    runqueue->last_sched_time = host_time;
    run_next_core(info, usec);
881 * edf_sched_schedule: Main scheduling function. Called each time that an interruption occurs.
/*
 * edf_sched_schedule: Main scheduling hook, called on each interrupt;
 * delegates to edf_schedule with no yield duration (usec = 0).
 */
edf_sched_schedule(struct guest_info * info){

    edf_schedule(info, 0);
893 * edf_sched_yield: Called when yielding the logical cpu for usec is needed
/*
 * edf_sched_yield: Yield hook — relinquish the logical CPU for usec
 * microseconds by running a scheduling pass.
 */
edf_sched_yield(struct guest_info * info, int usec){
    edf_schedule(info, usec);
903 * edf_sched_core_stop. Stops virtual machine. All the virtual cores yield the CPU
/*
 * edf_sched_core_stop: VM-stop hook — walks the runqueue's tree waking
 * every vCPU thread so each can yield the physical CPU.
 * NOTE(review): the traversal loop header, yield call, and return are not
 * visible in this chunk.
 */
edf_sched_core_stop(struct guest_info * info){

    struct vm_edf_rq * runqueue = get_runqueue(info);
    struct rb_node *node = v3_rb_first(&runqueue->vCPUs_tree);
    struct vm_core_edf_sched *curr_core;

        curr_core = container_of(node, struct vm_core_edf_sched, node);
        wakeup_core(curr_core->info);
        PrintDebug(VM_NONE,VCORE_NONE,"Waking up core %d, thread (%p)\n",
            curr_core->info->vcpu_id,
            (struct task_struct *)info->core_thread);
        PrintDebug(VM_NONE,VCORE_NONE,"Yielding Thread %p\n",(struct task_struct *)info->core_thread);
        node = v3_rb_next(node);
930 * edf_sched_deinit: Frees edf scheduler data structures
/*
 * edf_sched_deinit: Free the per-VM scheduler state (the runqueue array in
 * sched_priv_data). NOTE(review): the free call and return are not visible
 * in this chunk.
 */
edf_sched_deinit(struct v3_vm_info *vm)
    PrintDebug(VM_NONE,VCORE_NONE,"Freeing vm\n");
    void *priv_data = vm->sched_priv_data;
948 * edf_sched_deinit: Frees virtual core data structures
/*
 * edf_sched_core_deinit: Free the per-virtual-core EDF state
 * (sched_priv_data allocated in edf_sched_core_init).
 * NOTE(review): the free call and return are not visible in this chunk.
 */
edf_sched_core_deinit(struct guest_info *core)
    PrintDebug(VM_NONE,VCORE_NONE,"Freeing core\n");
    void *priv_data = core->sched_priv_data;
964 * edf_sched_vm_int. Called when the VM starts
// edf_sched_vm_init: VM-start hook; body not visible in this chunk.
int edf_sched_vm_init(struct v3_vm_info *vm){
// edf_sched_admit: Admission hook; the runqueue array is allocated here
// (rather than at vm_init) because avail_cores is only known after
// v3_start_vm. Body truncated in this chunk.
int edf_sched_admit(struct v3_vm_info *vm){

    /*
     * Initialize priv_data for the vm:
     * For EDF this is done here because we need the parameter
     * avail_core which is set in v3_start_vm before the
     * v3_scheduler_admit_vm function is called.
     */
989 * vm_scheduler_impl. Functions that implement vmm_scheduler interface
// vm_scheduler_impl vtable wiring the EDF hooks into the vmm_scheduler
// interface. NOTE(review): the .name initializer (read later by
// ext_sched_edf_init) is not visible in this chunk.
static struct vm_scheduler_impl edf_sched = {
    .vm_init = edf_sched_vm_init,
    .vm_deinit = edf_sched_deinit,
    .core_init = edf_sched_core_init,
    .core_stop = edf_sched_core_stop,
    .core_deinit = edf_sched_core_deinit,
    .schedule = edf_sched_schedule,
    .yield = edf_sched_yield,
    .admit = edf_sched_admit,
1011 * ext_sched_edf_init. Creates an register the EDF scheduler.
// ext_sched_edf_init: Extension init hook — registers the EDF scheduler
// implementation with the scheduler framework; returns its result.
ext_sched_edf_init() {
    PrintDebug(VM_NONE, VCORE_NONE,"Sched. Creating (%s) scheduler\n",edf_sched.name);
    return v3_register_scheduler(&edf_sched);
1021 * ext_sched_edf_vm_int. Called when the VM starts
// ext_sched_edf_vm_init: Extension per-VM init hook; body not visible in this chunk.
ext_sched_edf_vm_init() {
1031 * v3_extension_impl. EDF extension functions
// v3_extension_impl table exposing the EDF scheduler as a Palacios
// extension; registered below via register_extension().
static struct v3_extension_impl sched_edf_impl = {
    .name = "EDF Scheduler",
    .init = ext_sched_edf_init,
    .vm_init = ext_sched_edf_vm_init,
    .core_deinit = NULL,

register_extension(&sched_edf_impl);