Palacios Public Git Repository

To checkout Palacios execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


Cleanup and sanity-checking of divide-by-zero and floating point use bugs (Coverity...
[palacios.git] / palacios / src / extensions / ext_sched_edf.c
1 /*
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National
4  * Science Foundation and the Department of Energy.
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2013, Oscar Mondragon <omondrag@cs.unm.edu>
11  * Copyright (c) 2013, Patrick G. Bridges <bridges@cs.unm.edu>
12  * Copyright (c) 2013, The V3VEE Project <http://www.v3vee.org>
13  * All rights reserved.
14  *
15  * Author: Oscar Mondragon <omondrag@cs.unm.edu>
16  *         Patrick G. Bridges <bridges@cs.unm.edu>
17  *
18  * This is free software.  You are permitted to use,
19  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20  */
21
22
23 #include <palacios/vmm.h>
24 #include <palacios/vmm_time.h>
25 #include <palacios/vm_guest.h>
26 #include <palacios/vmm_hashtable.h>
27 #include <palacios/vmm_config.h>
28 #include <palacios/vmm_extensions.h>
29 #include <palacios/vmm_rbtree.h>
30
31
32 #ifndef V3_CONFIG_DEBUG_EXT_SCHED_EDF
33 #undef PrintDebug
34 #define PrintDebug(fmt, args...)
35 #endif
36
/* Overview
 *
 * EDF Scheduling
 *
 * The EDF scheduler uses a dynamically calculated priority as the scheduling criterion to choose
 * which thread will be scheduled. That priority is calculated according to the relative
 * deadlines of the threads that are ready to run in the runqueue. This runqueue is a per-logical-core
 * data structure used to keep the runnable virtual cores (threads) allocated to that
 * logical core. The threads with less time before their deadline receive better priorities.
 * The runqueue is sorted each time that a vCPU becomes runnable or when its period is over.
 * At that time the vCPU is enqueued. A new scheduling decision is taken each time that an interrupt
 * is received. At that time, if the earliest-deadline core is different from the current core,
 * the current core goes to sleep and the earliest-deadline core is woken up. Each time a vCPU is scheduled,
 * the parameter "used_time" is set to zero and the current deadline is calculated using its period.
 * One vCPU uses at least "slice" microseconds of CPU. Some extra time can be allocated to that virtual core after
 * all the other virtual cores consume their slices.
 */
54
55 // Default configuration values for the EDF Scheduler
56 // time parameters in microseconds
57
58 #define MAX_PERIOD 1000000000
59 #define MIN_PERIOD 50000
60 #define MAX_SLICE 1000000000
61 #define MIN_SLICE 50000
62 #define CPU_PERCENT 80
63 #define DEADLINE_INTERVAL 30000000 // Period in which the missed deadline ratio is checked
64
65 typedef uint64_t time_us;
66
67 /*
68  * Per-core EDF Scheduling information
69  */
70
/* Per-virtual-core EDF scheduling state, stored in info->sched_priv_data. */
struct vm_core_edf_sched {
    struct guest_info *info;              // Core data struct
    struct rb_node node;                  // red-black tree node, keyed by current_deadline
    time_us period;                       // Amount of time (us) during which the virtual core may receive a CPU allocation
    time_us slice;                        // Minimum amount of time (us) received for the virtual core during each period
    time_us current_deadline;             // Time (us) at which current virtual core period ends
    time_us used_time;                    // Amount of time (us) used within the current period
    time_us last_wakeup_time;             // Time at which the last wakeup started for this virtual core
    time_us remaining_time;               // Remaining time (us) to reach the expected time allocated to the virtual core
    bool extra_time;                      // Specifies if the virtual core is eligible to receive extra CPU time (for now it is always true)
    uint64_t deadline;                    // Total number of deadlines
    int deadline_percentage;              // Percentage of total missed deadlines
    uint64_t miss_deadline;               // Number of times the core has missed deadlines
    uint64_t deadline_interval;           // Total number of deadlines in the latest interval
    int deadline_percentage_interval;     // Percentage of missed deadlines in the latest interval
    uint64_t miss_deadline_interval;      // Number of times the core has missed deadlines in the last interval
    time_us print_deadline_interval;      // Last time the missed-deadline ratio was printed
    time_us total_time;                   // Total scheduled time for this virtual core.
    int slice_overuse;                    // Number of times a core consumes extra time
    time_us extra_time_given;             // Total extra time given to a virtual core
    time_us start_time;                   // Time at which this virtual core started to be scheduled
    time_us expected_time;                // Minimum CPU time expected to be allocated to this virtual core

};
95
96 /*
97  * Scheduler configuration
98  */
99
/* Per-runqueue scheduler configuration limits. */
struct vm_edf_sched_config {
    time_us min_slice;       // Minimum allowed slice
    time_us max_slice;       // Maximum allowed slice
    time_us min_period;      // Minimum allowed period
    time_us max_period;      // Maximum allowed period
    int cpu_percent;         // Percentage of CPU utilization for the scheduler in each physical CPU (100 or less)

};
108
109 /*
110  * Run queue structure. Per-logical core data structure  used to keep the runnable virtual cores (threads) allocated to that logical core
111  * Contains a pointer to the red black tree, the data structure of configuration options, and other info
112  */
113
/* Per-logical-core run queue: holds every runnable virtual core assigned to one physical core. */
struct vm_edf_rq{

    int cpu_u;                                  // CPU utilization (must be less or equal to the cpu_percent in vm_edf_sched_config)
    struct rb_root vCPUs_tree;                  // Red-Black Tree, ordered by current_deadline
    struct vm_edf_sched_config edf_config;      // Scheduling configuration structure
    int nr_vCPU;                                // Number of cores in the runqueue
    struct vm_core_edf_sched *curr_vCPU;        // Currently running virtual core
    time_us last_sched_time;                    // Last time a virtual core in this runqueue was scheduled
    time_us check_deadline_time;                // Last time in which the deadlines were recalculated
    time_us smallest_period;                    // Smallest period of the virtual cores in the runqueue, used to control the period of deadlines recalculation
    time_us print_time;                         // Last time of debugging print
    time_us start_time;                         // Time at which first core started running in this runqueue
    int sched_low;                              // Incremented when the time between interruptions is large
    bool yielded;                               // CPU yielded to palacios (No core running)
};
129
130 /*
131  * Basic functions for scheduling
132  */
133
134 int v3_init_edf_scheduling();
135
136 /*
137  * Debugging Function
138  */
139
140 static void
141 print_parameters(time_us host_time, struct vm_edf_rq * runqueue, struct vm_core_edf_sched * core){
142
143     time_us elapsed_time = 0;
144     if(runqueue->start_time != 0)
145         elapsed_time = host_time - runqueue->start_time;
146
147     PrintDebug(core->info->vm_info, core->info,"Test: %llu %llu %d %d %llu %llu %llu %d\n",
148              host_time,
149              elapsed_time,
150              core->info->vcpu_id,
151              core->info->pcpu_id,
152              core->total_time,
153              core->expected_time,
154              core->miss_deadline,
155              core->deadline_percentage);
156
157    runqueue->print_time = host_time;
158 }
159
160
161 /*
162  * init_edf_config: Initialize scheduler configuration
163  */
164
165 static void
166 init_edf_config(struct vm_edf_sched_config *edf_config){
167
168     edf_config->min_slice = MIN_SLICE;
169     edf_config->max_slice = MAX_SLICE;
170     edf_config->min_period = MIN_PERIOD;
171     edf_config->max_period = MAX_PERIOD;
172     edf_config->cpu_percent = CPU_PERCENT;
173 }
174
175
176 /*
177  * priv_data_init: Initialize the run queue
178  */
179
180 int
181 priv_data_init(struct v3_vm_info *vm){
182
183     PrintDebug(vm, VCORE_NONE,"EDF Sched. Initializing EDF Scheduling \n");
184
185     vm->sched_priv_data = V3_Malloc( vm->avail_cores * sizeof(struct vm_edf_rq));
186
187     if (!vm->sched_priv_data) {
188         PrintError(vm, VCORE_NONE,"Cannot allocate in priv_data in priv_data_init\n");
189         return -1;
190     }
191
192     int lcore = 0;
193
194     PrintDebug(vm, VCORE_NONE,"EDF Sched. priv_data_init. Available cores %d\n", vm->avail_cores);
195
196     for(lcore = 0; lcore < vm->avail_cores ; lcore++){
197
198         PrintDebug(vm, VCORE_NONE,"EDF Sched. priv_data_init. Initializing logical core %d\n", lcore);
199
200         struct vm_edf_rq * edf_rq_list =   (struct vm_edf_rq *)vm->sched_priv_data;
201         struct vm_edf_rq * edf_rq = &edf_rq_list[lcore];
202
203         edf_rq->vCPUs_tree = RB_ROOT;
204         edf_rq->cpu_u=0;
205         edf_rq->nr_vCPU=0;
206         edf_rq->curr_vCPU=NULL;
207         edf_rq->last_sched_time=0;
208         edf_rq->check_deadline_time=0;
209         edf_rq->sched_low=0;
210         edf_rq->smallest_period=0;
211         edf_rq->start_time = 0;
212         edf_rq->yielded=false;
213         init_edf_config(&edf_rq->edf_config);
214
215     }
216
217    return 0;
218
219 }
220
/*
 * is_admissible_core: Decides if a core is admitted to the red-black tree according to
 * the admissibility formula.
 */
225
226 static bool
227 is_admissible_core(struct vm_core_edf_sched * new_sched_core, struct vm_edf_rq *runqueue){
228
229     int curr_utilization = runqueue->cpu_u;
230     int new_utilization = curr_utilization + (100 * new_sched_core->slice / new_sched_core->period);
231     int cpu_percent = (runqueue->edf_config).cpu_percent;
232
233     if (new_utilization <= cpu_percent)
234         return true;
235     else
236         return false;
237
238 return true;
239 }
240
241
242
243 /*
244  * insert_core_edf: Finds a place in the tree for a newly activated core, adds the node
245  * and rebalaces the tree
246  */
247
248 static bool
249 insert_core_edf(struct vm_core_edf_sched *core, struct vm_edf_rq *runqueue){
250
251     struct rb_node **new_core = &(runqueue->vCPUs_tree.rb_node);
252     struct rb_node *parent = NULL;
253     struct vm_core_edf_sched *curr_core;
254
255     // Find out place in the tree for the new core
256     while (*new_core) {
257
258         curr_core = container_of(*new_core, struct vm_core_edf_sched, node);
259         parent = *new_core;
260
261         if (core->current_deadline < curr_core->current_deadline)
262             new_core = &((*new_core)->rb_left);
263         else if (core->current_deadline > curr_core->current_deadline)
264             new_core = &((*new_core)->rb_right);
265         else // Is Possible to have same current deadlines in both cores!
266             return false;
267     }
268     // Add new node and rebalance tree.
269     rb_link_node(&core->node, parent, new_core);
270     v3_rb_insert_color(&core->node, &runqueue->vCPUs_tree);
271
272     return true;
273  }
274
275
276 /*
277  * get_curr_host_time: Calculates the current host time (microseconds)
278  */
279
280 static uint64_t
281 get_curr_host_time(struct vm_core_time *core_time){
282
283     uint64_t cur_cycle = v3_get_host_time(core_time);
284     uint64_t cpu_khz = core_time->host_cpu_freq;
285     uint64_t curr_time_us = 1000 * cur_cycle / cpu_khz;
286
287     return curr_time_us;
288
289 }
290
291
292 /*
293  * count_cores: Function useful to count the number of cores in a runqueue (Not used for now)
294  *
295  */
296
297 /*
298 static int count_cores(struct vm_edf_rq *runqueue){
299
300   struct rb_node *node = v3_rb_first(&runqueue->vCPUs_tree);
301   struct vm_core_edf_sched *curr_core;
302   int number_cores = 0;
303
304     while(node){
305
306         number_cores++;
307         curr_core = container_of(node, struct vm_core_edf_sched, node);
308         time_us host_time =  get_curr_host_time(&curr_core->info->time_state);
309         PrintDebug(VM_NONE,VCORE_NONE,"Count %d. core %d, used time %llu, deadline %llu, host time  %llu extra_time %llu\n",
310                    number_cores,
311                    curr_core->info->vcpu_id,
312                    curr_core->used_time,
313                    curr_core->current_deadline,
314                    host_time,
315                    curr_core->extra_time_given);
316         node = v3_rb_next(node);
317
318     }
319
320    return number_cores;
321 }
322 */
323
324 /*
325  * next_start_period: Given the current host time and the period of a given vCPU,
326  * calculates the time in which its next period starts.
327  *
328  */
329
/*
 * next_start_period: Round curr_time_us up to the next multiple of
 * period_us, i.e. the start of the vCPU's next period. When curr_time_us
 * falls exactly on a period boundary, the following boundary is returned.
 */
static uint64_t
next_start_period(uint64_t curr_time_us, uint64_t period_us){

    uint64_t elapsed_in_period = curr_time_us % period_us;

    return curr_time_us + (period_us - elapsed_in_period);
}
340
341 /*
342  * get_runqueue: Get the runqueue assigned to a virtual core.
343  */
344
345 struct vm_edf_rq * get_runqueue(struct guest_info *info){
346
347     struct vm_edf_rq *runqueue_list = (struct vm_edf_rq *) info->vm_info->sched_priv_data;
348     struct vm_edf_rq *runqueue = &runqueue_list[info->pcpu_id];
349     return runqueue;
350 }
351
352
353 /*
354  * wakeup_core: Wakeup a given vCPU thread
355  */
356
357 static void
358 wakeup_core(struct guest_info *info){
359
360     struct vm_core_edf_sched *core = info->sched_priv_data;
361     struct vm_edf_rq *runqueue = get_runqueue(info);
362     time_us host_time = get_curr_host_time(&core->info->time_state);
363
364     if (!info->core_thread) {
365               PrintError(info->vm_info, info,"ERROR: Tried to wakeup non-existent core thread vCPU_id %d \n",info->vcpu_id);
366     }
367     else {
368
369        V3_Wakeup(info->core_thread);
370        if(core->start_time == 0){
371             core->start_time = host_time;
372             print_parameters(host_time, runqueue, core);
373        }
374        core->last_wakeup_time = host_time;
375        runqueue->curr_vCPU = core;
376
377     }
378
379 }
380
381
382 /*
383  * activate_core - Moves a core to the red-black tree.
384  * used time is set to zero and current deadline is calculated
385  */
386
static int
activate_core(struct vm_core_edf_sched * core, struct vm_edf_rq *runqueue){

    /* Admission control: reject the core if it would push the runqueue
     * past its configured CPU share. */
    if (is_admissible_core(core, runqueue)){

        uint64_t curr_time_us = get_curr_host_time(&core->info->time_state);
        uint64_t curr_deadline = next_start_period(curr_time_us, core->period);

        /* A new period begins: reset accounting and set the deadline to
         * the end of the core's next period. */
        core->current_deadline = curr_deadline;
        core->used_time=0;

        bool ins = insert_core_edf(core, runqueue);
        /*
         * If not inserted, it is possible that another core has the same
         * deadline (the tree requires unique keys). Nudge the deadline by
         * 1 us and retry until insertion succeeds.
         */
        while(!ins){
            core->current_deadline ++;
            ins = insert_core_edf(core, runqueue);
        }

        /* Account this core's utilization share and population count. */
        runqueue->cpu_u += 100 * core->slice / core->period;
        runqueue->nr_vCPU ++;

        /* Track the smallest period in the queue; it drives how often
         * deadlines are recalculated (see pick_next_core). */
        if(runqueue->nr_vCPU == 1){
            runqueue->smallest_period = core->period;
        }

        else if(core->period < runqueue->smallest_period){
          runqueue->smallest_period = core->period;
        }


        /*
         * If this is the first time to be activated pick first earliest deadline core to wakeup.
         */

         if(!runqueue->curr_vCPU){

            struct vm_core_edf_sched *next_core;
           // Pick first earliest deadline core

            struct rb_node *node = v3_rb_first(&runqueue->vCPUs_tree);
            next_core = container_of(node, struct vm_core_edf_sched, node);

            PrintDebug(VM_NONE, VCORE_NONE,"EDF Sched. First time activation, core %d, time %llu \n", next_core->info->vcpu_id, curr_time_us);

            // Wakeup next_core (It is not necessary to wakeup since in this case this is the first core launched by Palacios,
            // however wakeup_core is called to initialize some needed parameters.

            wakeup_core(next_core->info);
            next_core->start_time =  get_curr_host_time(&next_core->info->time_state);
            print_parameters(curr_time_us, runqueue, next_core);
            runqueue->start_time = next_core->start_time;


        }

      }
      else
          PrintError(core->info->vm_info, core->info,"EDF Sched. activate_core. CPU cannot activate the core. It is not admissible");
    return 0;

}
452
453
454 #define CEIL_DIV(x,y) (((x)/(y)) + !!((x)%(y)))
455 #define MAX(x,y) ((x)>(y) ? (x) : (y))
456
457 /*
458  * edf_sched_core_init: Initializes per core data structure and
459  * calls activate function.
460  */
461
462
463 int
464 edf_sched_core_init(struct guest_info * info){
465
466     struct vm_edf_rq *runqueue = get_runqueue(info);
467     struct vm_core_edf_sched *core_edf;
468     struct v3_time *vm_ts = &(info->vm_info->time_state);
469     uint32_t tdf = vm_ts->td_denom;
470     uint_t cpu_khz = V3_CPU_KHZ();
471
472     PrintDebug(info->vm_info, info,"EDF Sched. Initializing vcore %d, tdf %d\n", info->vcpu_id,tdf);
473
474     core_edf = (struct vm_core_edf_sched *) V3_Malloc(sizeof (struct vm_core_edf_sched));
475     if (!core_edf) {
476         PrintError(info->vm_info, info,"Cannot allocate private_data in edf_sched_core_init\n");
477         return -1;
478     }
479     info->sched_priv_data = core_edf;
480
481     // Default configuration if not specified in configuration file
482
483     core_edf->info = info;
484     core_edf->period = MIN_PERIOD;
485     core_edf->slice = MIN_SLICE;
486     core_edf->used_time = 0;
487     core_edf->last_wakeup_time = 0;
488     core_edf->remaining_time = 0;
489     core_edf->deadline = 0;
490     core_edf->miss_deadline = 0;
491     core_edf->extra_time = true;
492     core_edf->total_time = 0;
493     core_edf->slice_overuse = 0;
494     core_edf->extra_time_given = 0;
495     core_edf->expected_time = 0;
496     core_edf->start_time = 0;
497     core_edf->deadline_interval = 0;
498     core_edf->deadline_percentage_interval = 0;
499     core_edf->miss_deadline_interval = 0;
500     core_edf->print_deadline_interval = 0;
501
502
503     v3_cfg_tree_t * cfg_tree = core_edf->info->vm_info->cfg_data->cfg;
504     v3_cfg_tree_t * core = v3_cfg_subtree(v3_cfg_subtree(cfg_tree, "cores"), "core");
505
506     while (core){
507         char *id = v3_cfg_val(core, "vcpu_id");
508         char *period = v3_cfg_val(core, "period");
509         char *slice = v3_cfg_val(core, "slice");
510         char *extra_time = v3_cfg_val(core, "extra_time");
511         char *speed = v3_cfg_val(core, "khz");
512         uint_t speed_khz = cpu_khz;
513
514         if (atoi(id) == core_edf->info->vcpu_id){
515
516             if(speed){
517                 speed_khz = atoi(speed);
518             }
519
520             if(slice){
521                 core_edf->slice = atoi(slice);
522                 core_edf->slice = MAX(MIN_SLICE,core_edf->slice);
523             }
524             else{
525                 core_edf->slice = MIN_SLICE;
526             }
527
528             if(period){
529                 core_edf->period = atoi(period);
530                 core_edf->period = MAX(MIN_PERIOD,core_edf->period);
531             }
532             else{
533                 core_edf->period = (core_edf->slice * cpu_khz * tdf)/speed_khz;
534                 // WTF is this floating point doing here?!
535                 // core_edf->period += 0.3*(100*core_edf->slice/core_edf->period); // Give faster vcores a little more bigger periods.
536                 core_edf->period += CEIL_DIV(100*core_edf->slice/core_edf->period,3); // Give faster vcores a little more bigger periods.
537             }
538
539             PrintDebug(info->vm_info,info,"EDF_SCHED. Vcore %d, Pcore %d, cpu_khz %u, Period %llu Speed %d, Utilization %d, tdf %d %llu \n",
540                    core_edf->info->vcpu_id,
541                    core_edf->info->pcpu_id,
542                    cpu_khz,
543                    core_edf->period,
544                    speed_khz,
545                    (int)(100*core_edf->slice/core_edf->period),
546                    tdf,
547                    runqueue->smallest_period);
548
549             if(extra_time){
550                 if (strcasecmp(extra_time, "true") == 0)
551                     core_edf->extra_time = true;
552                 else
553                     core_edf->extra_time = false;
554             }
555             else
556                 core_edf->extra_time = false;
557
558             break;
559         }
560         core = v3_cfg_next_branch(core);
561     }
562
563     return activate_core(core_edf,runqueue);
564
565 }
566
567
568 /*
569  * search_core_edf: Searches a core in the red-black tree by using its current_deadline
570  */
571 static struct vm_core_edf_sched *
572 search_core_edf(time_us current_deadline, struct vm_edf_rq *runqueue){
573
574     struct rb_node *node = runqueue->vCPUs_tree.rb_node;
575
576     while (node) {
577         struct vm_core_edf_sched *core = container_of(node, struct vm_core_edf_sched, node);
578
579         if (current_deadline < core->current_deadline)
580             node = node->rb_left;
581         else if (current_deadline > core->current_deadline)
582             node = node->rb_right;
583         else
584             return core;
585     }
586     return NULL;
587 }
588
589
590 /*
591  * delete_core_edf: Deletes a core from the red black tree, generally when it has
592  * consumed its time slice within the current period.
593  */
594
595 static bool
596 delete_core_edf( struct vm_core_edf_sched *core_edf  , struct vm_edf_rq *runqueue){
597
598     struct vm_core_edf_sched *core = search_core_edf(core_edf->current_deadline, runqueue);
599         if (core){
600
601             v3_rb_erase(&core->node, &runqueue->vCPUs_tree);
602             return true;
603         }
604         else{
605             PrintError(VM_NONE,VCORE_NONE,"EDF Sched. delete_core_edf.Attempted to erase unexisting core");
606             return false;
607         }
608 }
609
610
611 /*
612  * deactivate_core - Removes a core from the red-black tree.
613  */
614
615 static void
616 deactivate_core(struct vm_core_edf_sched * core, struct vm_edf_rq *runqueue){
617
618      if(delete_core_edf(core, runqueue)){
619          runqueue->cpu_u -= 100 * core->slice / core->period;
620          runqueue->nr_vCPU -- ;
621      }
622 }
623
624 /*
625  * adjust_slice - Adjust vcore parameters values
626  */
627
628 static void adjust_slice(struct vm_core_edf_sched *core){
629
630     time_us host_time = get_curr_host_time(&core->info->time_state);
631     time_us used_time =  host_time - core->last_wakeup_time;
632
633     if(core->last_wakeup_time != 0){
634
635         core->used_time += used_time;
636         core->total_time += used_time;
637         if(core->start_time != 0){
638             core->expected_time = ((host_time - core->start_time)/core->period)*core->slice;
639         }
640         if(core->total_time <= core->expected_time){
641             core->remaining_time = core->expected_time - core->total_time;
642         }
643         else{
644             core->remaining_time =0;
645         }
646
647         if (core->used_time > core->slice){
648             core->slice_overuse++;
649         }
650     }
651
652 }
653
654 /*
655  * Check_deadlines - Check virtual cores, and re-insert in the runqueue the ones which deadline is over
656  */
657
658
659 static void check_deadlines(struct vm_edf_rq *runqueue){
660
661     struct rb_node *node = v3_rb_first(&runqueue->vCPUs_tree);
662     struct vm_core_edf_sched *next_core = container_of(node, struct vm_core_edf_sched, node);
663     time_us host_time = get_curr_host_time(&next_core->info->time_state);
664     struct vm_core_edf_sched * deadline_cores[runqueue->nr_vCPU];
665
666
667     int nr_dcores=0;
668     int i=0;
669     memset(deadline_cores, 0, runqueue->nr_vCPU);
670
671     while(node){
672
673         next_core = container_of(node, struct vm_core_edf_sched, node);
674         if(next_core->current_deadline < host_time){
675             next_core->deadline++;
676             next_core->deadline_interval++;
677             deadline_cores[nr_dcores++]= next_core;
678             if(next_core->used_time < next_core->slice){
679                 next_core->miss_deadline++;
680                 next_core->miss_deadline_interval++;
681
682                 /*PrintError(VM_NONE,VCORE_NONE,"Test: Core %d miss_deadlines %d, used time %llu, slice %llu, deadline %llu, host time %llu \n",
683                        next_core->info->vcpu_id,
684                        next_core->miss_deadline,
685                        next_core->used_time,
686                        next_core->slice,
687                        next_core->current_deadline,
688                        host_time);*/
689
690             }
691             else{
692               next_core->extra_time_given += (next_core->used_time - next_core->slice);
693
694               /* PrintError(VM_NONE,VCORE_NONE,"Test: Extra time, core %d, core used time %llu, slice %llu, extra time %llu, Total extra time %llu \n",
695                  next_core->info->vcpu_id,
696                  next_core->used_time,
697                  next_core->slice,
698                  (next_core->used_time - next_core->slice),
699                  next_core->extra_time_given);*/
700
701             }
702
703         }
704         else{
705             break;
706         }
707         node = v3_rb_next(node);
708     }
709
710
711     for(i=0;i<nr_dcores;i++){
712
713         next_core = deadline_cores[nr_dcores-1-i];
714
715         deactivate_core(next_core,runqueue);
716         activate_core(next_core,runqueue);
717
718     }
719     runqueue->check_deadline_time = host_time;
720 }
721
722
723
724 /*
725  * pick_next_core: Returns the next core to be scheduled from the red black tree
726  */
727
728 static struct vm_core_edf_sched *
729 pick_next_core(struct vm_edf_rq *runqueue){
730
731     /*
732      * Pick first earliest deadline core
733      */
734     struct rb_node *node=NULL;
735     struct vm_core_edf_sched *next_core=NULL;
736     time_us core_deadline=0;         // Deadline of the core with more time remaining
737     time_us core_extra_deadline=0;   // Deadline of the core with extra time enable and earliest deadline
738     time_us max_remaining=0;
739     time_us host_time = get_curr_host_time(&runqueue->curr_vCPU->info->time_state);
740
741     if(host_time - runqueue->check_deadline_time >= runqueue->smallest_period ){
742         check_deadlines(runqueue);
743     }
744
745     node = v3_rb_first(&runqueue->vCPUs_tree);
746     next_core = container_of(node, struct vm_core_edf_sched, node);
747
748     if (next_core->used_time < next_core->slice){
749          return next_core;
750     }
751
752     if(next_core->total_time < next_core->expected_time){
753         max_remaining = next_core->remaining_time;
754         core_deadline = next_core->current_deadline;
755     }
756
757     if(next_core->extra_time){
758         core_extra_deadline = next_core->current_deadline;
759     }
760
761     // Pick the next core that has not used its whole slice
762     while(node){
763
764         next_core = container_of(node, struct vm_core_edf_sched, node);
765
766         if(next_core->remaining_time > max_remaining){
767             max_remaining = next_core->extra_time_given;
768             core_deadline = next_core->current_deadline;
769             if(core_extra_deadline !=0 && next_core->extra_time){
770                 core_extra_deadline = next_core->current_deadline;
771             }
772         }
773
774         if(next_core->used_time < next_core->slice){
775           return next_core;
776         }
777
778
779         node = v3_rb_next(node);
780     }
781
782     if(core_extra_deadline !=0){
783         next_core  = search_core_edf(core_extra_deadline, runqueue);
784     }
785
786     else if (core_deadline != 0){
787         next_core  = search_core_edf(core_deadline, runqueue);
788     }
789
790      return NULL;
791 }
792
793
794 /*
795  * run_next_core: Pick next core to be scheduled and wakeup it
796  */
797
static void
run_next_core(struct guest_info *info, int usec)
{
    struct vm_core_edf_sched *core = info->sched_priv_data;
    struct vm_core_edf_sched *next_core;
    struct vm_edf_rq *runqueue = get_runqueue(info);
    time_us host_time = get_curr_host_time(&info->time_state);

     /* The next core to be scheduled is chosen from the tree (pick_next_core).
     * The selected core is the one with the earliest deadline and with available time
     * to use within the current period (used_time < slice)
     */
    if(!runqueue->yielded){
        /* Account the CPU time the current core consumed since its wakeup */
        adjust_slice(core);
    }
    next_core = pick_next_core(runqueue); // Pick next core to schedule

    /* Periodically report this core's missed-deadline ratio and reset the
     * per-interval counters. */
    if(host_time - core->print_deadline_interval >= DEADLINE_INTERVAL){

        if(core->deadline_interval != 0){
            core->deadline_percentage_interval = (int)100*core->miss_deadline_interval/core->deadline_interval;
        }

        PrintDebug(info->vm_info,info,"deadline: %d %llu %llu %d",
            core->info->vcpu_id,
            core->deadline_interval,
            core->miss_deadline_interval,
            core->deadline_percentage_interval);

        core->print_deadline_interval = host_time;
        core->deadline_interval=0;
        core->miss_deadline_interval = 0;
        core->deadline_percentage_interval = 0;
    }

    /* No runnable core: yield the physical CPU to Palacios */
    if(!next_core){
        runqueue->yielded=true;
        V3_Yield();
        return;
    }
    runqueue->yielded=false;


    /* Refresh the all-time missed-deadline percentage of the current core */
    if(core->deadline != 0)
        core->deadline_percentage = (int)100*core->miss_deadline/core->deadline;

    if (core != next_core){

        /* A different core won the election: wake it up and put the
         * current thread to sleep for usec microseconds. */
        print_parameters(host_time, runqueue,core);
        wakeup_core(next_core->info);
        V3_Sleep(usec);
     }

    else{
        // Necessary to update last_wakeup_time to adjust slice properly later
        next_core->last_wakeup_time = get_curr_host_time(&next_core->info->time_state);

        if(host_time - runqueue->print_time >= runqueue->smallest_period ){
            print_parameters(host_time, runqueue,core);
        }
     }

}
861
862
863 /*
864  * edf_schedule: Scheduling function
865  */
866
867 static void
868 edf_schedule(struct guest_info * info, int usec){
869
870   uint64_t host_time = get_curr_host_time(&info->time_state);
871   struct vm_edf_rq *runqueue = get_runqueue(info);
872
873   /*PrintDebug(info->vm_info,info,"Test PCORE. %d host time: %llu Last sched_time: %llu, difference: %llu\n",
874     info->pcpu_id,  host_time, runqueue->last_sched_time, host_time-runqueue->last_sched_time); */
875
876   /* if( (runqueue->last_sched_time !=0) && ((host_time - runqueue->last_sched_time) > 10000)){
877          PrintDebug(info->vm_info,info,"%d Test PCORE. %d LOW SCHED FREQUENCY  host time: %llu Last sched_time: %llu, difference: %llu\n",
878                  ++runqueue->sched_low,info->pcpu_id,  host_time, runqueue->last_sched_time, host_time-runqueue->last_sched_time);
879                  }*/
880
881   runqueue->last_sched_time = host_time;
882   run_next_core(info, usec);
883     return;
884
885 }
886
887 /*
888  * edf_sched_schedule: Main scheduling function. Called each time that an interruption occurs.
889  *
890  */
891
892 void
893 edf_sched_schedule(struct guest_info * info){
894
895     edf_schedule(info, 0);
896     return;
897 }
898
899 /*
900  * edf_sched_yield: Called when yielding the logical cpu for usec is needed
901  */
902
903 void
904 edf_sched_yield(struct guest_info * info, int usec){
905     edf_schedule(info, usec);
906     return;
907 }
908
909 /*
910  * edf_sched_core_stop. Stops virtual machine. All the virtual cores yield the CPU
911  */
912
913 int
914 edf_sched_core_stop(struct guest_info * info){
915
916     struct vm_edf_rq * runqueue =  get_runqueue(info);
917     struct rb_node *node = v3_rb_first(&runqueue->vCPUs_tree);
918     struct vm_core_edf_sched *curr_core;
919
920     while(node){
921
922         curr_core = container_of(node, struct vm_core_edf_sched, node);
923         wakeup_core(curr_core->info);
924         PrintDebug(VM_NONE,VCORE_NONE,"Waking up core %d, thread (%p)\n",
925                    curr_core->info->vcpu_id,
926                  (struct task_struct *)info->core_thread);
927         V3_Yield();
928         PrintDebug(VM_NONE,VCORE_NONE,"Yielding Thread %p\n",(struct task_struct *)info->core_thread);
929         node = v3_rb_next(node);
930
931     }
932    return 0;
933 }
934
935
936 /*
937  * edf_sched_deinit: Frees edf scheduler data structures
938  */
939
940
941 int
942 edf_sched_deinit(struct v3_vm_info *vm)
943 {
944     PrintDebug(VM_NONE,VCORE_NONE,"Freeing vm\n");
945     void *priv_data = vm->sched_priv_data;
946
947     if (priv_data)
948         V3_Free(priv_data);
949
950     return 0;
951
952 }
953
954 /*
955  * edf_sched_deinit: Frees virtual core data structures
956  */
957
958 int
959 edf_sched_core_deinit(struct guest_info *core)
960 {
961     PrintDebug(VM_NONE,VCORE_NONE,"Freeing core\n");
962     void *priv_data = core->sched_priv_data;
963
964     if (priv_data)
965         V3_Free(priv_data);
966
967     return 0;
968 }
969
970 /*
971  * edf_sched_vm_int. Called when the VM starts
972  */
973
974 int edf_sched_vm_init(struct v3_vm_info *vm){
975     return 0;
976 }
977
int edf_sched_admit(struct v3_vm_info *vm){

    /*
     * Set up the VM's scheduler private data here rather than in
     * vm_init: EDF needs the avail_core parameter, which v3_start_vm
     * only sets before it calls v3_scheduler_admit_vm.
     */
    priv_data_init(vm);

    // TODO: real admission control; for now every VM is admitted.
    return 0;
}
993
994
995 /*
996  * vm_scheduler_impl. Functions that implement vmm_scheduler interface
997  */
998
999 static struct vm_scheduler_impl edf_sched = {
1000
1001     .name = "edf",
1002     .init = NULL,
1003     .deinit = NULL,
1004     .vm_init = edf_sched_vm_init,
1005     .vm_deinit = edf_sched_deinit,
1006     .core_init = edf_sched_core_init,
1007     .core_stop = edf_sched_core_stop,
1008     .core_deinit = edf_sched_core_deinit,
1009     .schedule = edf_sched_schedule,
1010     .yield = edf_sched_yield,
1011     .admit = edf_sched_admit,
1012     .remap = NULL,
1013     .dvfs=NULL
1014 };
1015
1016
1017 /*
1018  * ext_sched_edf_init. Creates an register the EDF scheduler.
1019  */
1020
1021 static int
1022 ext_sched_edf_init() {
1023     PrintDebug(VM_NONE, VCORE_NONE,"Sched. Creating (%s) scheduler\n",edf_sched.name);
1024     return v3_register_scheduler(&edf_sched);
1025 }
1026
1027 /*
1028  * ext_sched_edf_vm_int. Called when the VM starts
1029  */
1030
1031 static int
1032 ext_sched_edf_vm_init() {
1033     return 0;
1034 }
1035
1036
1037 /*
1038  * v3_extension_impl. EDF extension functions
1039  */
1040
1041
1042 static struct v3_extension_impl sched_edf_impl = {
1043         .name = "EDF Scheduler",
1044         .init = ext_sched_edf_init,
1045         .vm_init = ext_sched_edf_vm_init,
1046         .vm_deinit = NULL,
1047         .core_init = NULL,
1048         .core_deinit = NULL,
1049         .on_entry = NULL,
1050         .on_exit = NULL
1051 };
1052
1053 register_extension(&sched_edf_impl);