Palacios Public Git Repository

To checkout Palacios execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


Cleanup based on cppcheck pass (Devices and Extensions)
[palacios.git] / palacios / src / extensions / ext_sched_edf.c
1 /*
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National
4  * Science Foundation and the Department of Energy.
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2013, Oscar Mondragon <omondrag@cs.unm.edu>
11  * Copyright (c) 2013, Patrick G. Bridges <bridges@cs.unm.edu>
12  * Copyright (c) 2013, The V3VEE Project <http://www.v3vee.org>
13  * All rights reserved.
14  *
15  * Author: Oscar Mondragon <omondrag@cs.unm.edu>
16  *         Patrick G. Bridges <bridges@cs.unm.edu>
17  *
18  * This is free software.  You are permitted to use,
19  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20  */
21
22
23 #include <palacios/vmm.h>
24 #include <palacios/vmm_time.h>
25 #include <palacios/vm_guest.h>
26 #include <palacios/vmm_hashtable.h>
27 #include <palacios/vmm_config.h>
28 #include <palacios/vmm_extensions.h>
29 #include <palacios/vmm_rbtree.h>
30
31
32 #ifndef V3_CONFIG_DEBUG_EXT_SCHED_EDF
33 #undef PrintDebug
34 #define PrintDebug(fmt, args...)
35 #endif
36
/* Overview
 *
 * EDF Scheduling
 *
 * The EDF scheduler uses a dynamically calculated priority as the scheduling criterion to choose
 * which thread will be scheduled. That priority is calculated according to the relative
 * deadlines of the threads that are ready to run in the runqueue. This runqueue is a per-logical-core
 * data structure used to keep the runnable virtual cores (threads) allocated to that
 * logical core. The threads with less time before their deadlines receive better priorities.
 * The runqueue is sorted each time that a vCPU becomes runnable or when its period is over.
 * At that time the vCPU is enqueued. A new scheduling decision is taken each time that an interrupt
 * is received. At that time, if the earliest-deadline core is different from the current core,
 * the current core goes to sleep and the earliest-deadline core is woken up. Each time a vCPU is scheduled,
 * the parameter "used_time" is set to zero and the current deadline is calculated using its period.
 * One vCPU uses at least "slice" microseconds of CPU. Some extra time can be allocated to that virtual core after
 * all the other virtual cores consume their slices.
 */
54
55 // Default configuration values for the EDF Scheduler
56 // time parameters in microseconds
57
58 #define MAX_PERIOD 1000000000
59 #define MIN_PERIOD 50000
60 #define MAX_SLICE 1000000000
61 #define MIN_SLICE 50000
62 #define CPU_PERCENT 80
63 #define DEADLINE_INTERVAL 30000000 // Period in which the missed deadline ratio is checked
64
65 typedef uint64_t time_us;
66
67 /*
68  * Per-core EDF Scheduling information
69  */
70
/* Per-virtual-core EDF scheduling state; lives in info->sched_priv_data and
 * is keyed into the per-physical-core red-black tree by current_deadline. */
struct vm_core_edf_sched {
    struct guest_info *info;              // Core data struct
    struct rb_node node;                  // red-black tree node (tree is ordered by current_deadline)
    time_us period;                       // Amount of time (us) during which the virtual core may receive a CPU allocation
    time_us slice;                        // Minimum amount of time (us) received for the virtual core during each period
    time_us current_deadline;             // Time (us) at which current virtual core period ends
    time_us used_time;                    // Amount of time (us) used within the current period
    time_us last_wakeup_time;             // Time at which the last wakeup started for this virtual core
    time_us remaining_time;               // Remaining time (us) to reach the expected time allocated to the virtual core
    bool extra_time;                      // Specifies if the virtual core is eligible to receive extra CPU time (for now it is always true)
    uint64_t deadline;                    // Total number of deadlines
    int deadline_percentage;              // Percentage of total missed deadlines
    uint64_t miss_deadline;               // Number of times the core has missed deadlines
    uint64_t deadline_interval;           // Total number of deadlines in the latest interval
    int deadline_percentage_interval;     // Percentage of missed deadlines in the latest interval
    uint64_t miss_deadline_interval;      // Number of times the core has missed deadlines in the last interval
    time_us print_deadline_interval;      // Last time missed deadline ratio was printed
    time_us total_time;                   // Total scheduled time for this virtual core.
    int slice_overuse;                    // Number of times a core consumes extra time
    time_us extra_time_given;             // Total extra time given to a virtual core
    time_us start_time;                   // Time at which this virtual core started to be scheduled
    time_us expected_time;                // Minimum CPU time expected to be allocated to this virtual core

};
95
96 /*
97  * Scheduler configuration
98  */
99
/* Scheduler admission-control limits; one copy embedded in each runqueue. */
struct vm_edf_sched_config {
    time_us min_slice;       // Minimum allowed slice
    time_us max_slice;       // Maximum allowed slice
    time_us min_period;      // Minimum allowed period
    time_us max_period;      // Maximum allowed period
    int cpu_percent;         // Percentage of CPU utilization for the scheduler in each physical CPU (100 or less)

};
108
109 /*
110  * Run queue structure. Per-logical core data structure  used to keep the runnable virtual cores (threads) allocated to that logical core
111  * Contains a pointer to the red black tree, the data structure of configuration options, and other info
112  */
113
/* Per-logical-core runqueue: holds the red-black tree of runnable virtual
 * cores mapped to this physical core, plus utilization/timing bookkeeping.
 * One array element per available core, allocated in priv_data_init(). */
struct vm_edf_rq{

    int cpu_u;                                  // CPU utilization (must be less or equal to the cpu_percent in vm_edf_sched_config)
    struct rb_root vCPUs_tree;                  // Red-Black Tree, ordered by current_deadline
    struct vm_edf_sched_config edf_config;      // Scheduling configuration structure
    int nr_vCPU;                                // Number of cores in the runqueue
    struct vm_core_edf_sched *curr_vCPU;        // Current running CPU
    time_us last_sched_time;                    // Last time a virtual core in this runqueue was scheduled
    time_us check_deadline_time;                // Last time in which the deadlines were recalculated
    time_us smallest_period;                    // Smallest period of the virtual cores in the runqueue, used to control the period of deadline recalculation
    time_us print_time;                         // Last time of debugging print
    time_us start_time;                         // Time at which first core started running in this runqueue
    int sched_low;                              // Incremented when the time between interruptions is large
    bool yielded;                               // CPU yielded to palacios (No core running)
};
129
/*
 * Basic functions for scheduling
 */

/* Registers the EDF scheduler with the scheduling framework.
 * Declared with (void): an empty parameter list () in C means
 * "unspecified parameters", not "no parameters". */
int v3_init_edf_scheduling(void);
136 /*
137  * Debugging Function
138  */
139
140 static void
141 print_parameters(time_us host_time, struct vm_edf_rq * runqueue, struct vm_core_edf_sched * core){
142
143     time_us elapsed_time = 0;
144     if(runqueue->start_time != 0)
145         elapsed_time = host_time - runqueue->start_time;
146
147     PrintDebug(core->info->vm_info, core->info,"Test: %llu %llu %d %d %llu %llu %llu %d\n",
148              host_time,
149              elapsed_time,
150              core->info->vcpu_id,
151              core->info->pcpu_id,
152              core->total_time,
153              core->expected_time,
154              core->miss_deadline,
155              core->deadline_percentage);
156
157    runqueue->print_time = host_time;
158 }
159
160
161 /*
162  * init_edf_config: Initialize scheduler configuration
163  */
164
165 static void
166 init_edf_config(struct vm_edf_sched_config *edf_config){
167
168     edf_config->min_slice = MIN_SLICE;
169     edf_config->max_slice = MAX_SLICE;
170     edf_config->min_period = MIN_PERIOD;
171     edf_config->max_period = MAX_PERIOD;
172     edf_config->cpu_percent = CPU_PERCENT;
173 }
174
175
176 /*
177  * priv_data_init: Initialize the run queue
178  */
179
180 int
181 priv_data_init(struct v3_vm_info *vm){
182
183     PrintDebug(vm, VCORE_NONE,"EDF Sched. Initializing EDF Scheduling \n");
184
185     vm->sched_priv_data = V3_Malloc( vm->avail_cores * sizeof(struct vm_edf_rq));
186
187     if (!vm->sched_priv_data) {
188         PrintError(vm, VCORE_NONE,"Cannot allocate in priv_data in priv_data_init\n");
189         return -1;
190     }
191
192     int lcore = 0;
193
194     PrintDebug(vm, VCORE_NONE,"EDF Sched. priv_data_init. Available cores %d\n", vm->avail_cores);
195
196     for(lcore = 0; lcore < vm->avail_cores ; lcore++){
197
198         PrintDebug(vm, VCORE_NONE,"EDF Sched. priv_data_init. Initializing logical core %d\n", lcore);
199
200         struct vm_edf_rq * edf_rq_list =   (struct vm_edf_rq *)vm->sched_priv_data;
201         struct vm_edf_rq * edf_rq = &edf_rq_list[lcore];
202
203         edf_rq->vCPUs_tree = RB_ROOT;
204         edf_rq->cpu_u=0;
205         edf_rq->nr_vCPU=0;
206         edf_rq->curr_vCPU=NULL;
207         edf_rq->last_sched_time=0;
208         edf_rq->check_deadline_time=0;
209         edf_rq->sched_low=0;
210         edf_rq->smallest_period=0;
211         edf_rq->start_time = 0;
212         edf_rq->yielded=false;
213         init_edf_config(&edf_rq->edf_config);
214
215     }
216
217    return 0;
218
219 }
220
221 /*
222  * is_admissible_core: Decides if a core is admited to the red black tree according with
223  * the admisibility formula.
224  */
225
226 static bool
227 is_admissible_core(struct vm_core_edf_sched * new_sched_core, struct vm_edf_rq *runqueue){
228
229     int curr_utilization = runqueue->cpu_u;
230     int new_utilization = curr_utilization + (100 * new_sched_core->slice / new_sched_core->period);
231     int cpu_percent = (runqueue->edf_config).cpu_percent;
232
233     if (new_utilization <= cpu_percent)
234         return true;
235     else
236         return false;
237
238 return true;
239 }
240
241
242
243 /*
244  * insert_core_edf: Finds a place in the tree for a newly activated core, adds the node
245  * and rebalaces the tree
246  */
247
248 static bool
249 insert_core_edf(struct vm_core_edf_sched *core, struct vm_edf_rq *runqueue){
250
251     struct rb_node **new_core = &(runqueue->vCPUs_tree.rb_node);
252     struct rb_node *parent = NULL;
253     struct vm_core_edf_sched *curr_core;
254
255     // Find out place in the tree for the new core
256     while (*new_core) {
257
258         curr_core = container_of(*new_core, struct vm_core_edf_sched, node);
259         parent = *new_core;
260
261         if (core->current_deadline < curr_core->current_deadline)
262             new_core = &((*new_core)->rb_left);
263         else if (core->current_deadline > curr_core->current_deadline)
264             new_core = &((*new_core)->rb_right);
265         else // Is Possible to have same current deadlines in both cores!
266             return false;
267     }
268     // Add new node and rebalance tree.
269     rb_link_node(&core->node, parent, new_core);
270     v3_rb_insert_color(&core->node, &runqueue->vCPUs_tree);
271
272     return true;
273  }
274
275
276 /*
277  * get_curr_host_time: Calculates the current host time (microseconds)
278  */
279
280 static uint64_t
281 get_curr_host_time(struct vm_core_time *core_time){
282
283     uint64_t cur_cycle = v3_get_host_time(core_time);
284     uint64_t cpu_khz = core_time->host_cpu_freq;
285     uint64_t curr_time_us = 1000 * cur_cycle / cpu_khz;
286
287     return curr_time_us;
288
289 }
290
291
292 /*
293  * count_cores: Function useful to count the number of cores in a runqueue (Not used for now)
294  *
295  */
296
297 /*
298 static int count_cores(struct vm_edf_rq *runqueue){
299
300   struct rb_node *node = v3_rb_first(&runqueue->vCPUs_tree);
301   struct vm_core_edf_sched *curr_core;
302   int number_cores = 0;
303
304     while(node){
305
306         number_cores++;
307         curr_core = container_of(node, struct vm_core_edf_sched, node);
308         time_us host_time =  get_curr_host_time(&curr_core->info->time_state);
309         PrintDebug(VM_NONE,VCORE_NONE,"Count %d. core %d, used time %llu, deadline %llu, host time  %llu extra_time %llu\n",
310                    number_cores,
311                    curr_core->info->vcpu_id,
312                    curr_core->used_time,
313                    curr_core->current_deadline,
314                    host_time,
315                    curr_core->extra_time_given);
316         node = v3_rb_next(node);
317
318     }
319
320    return number_cores;
321 }
322 */
323
324 /*
325  * next_start_period: Given the current host time and the period of a given vCPU,
326  * calculates the time in which its next period starts.
327  *
328  */
329
/*
 * next_start_period: Round curr_time_us up to the start of the vCPU's next
 * period. Note that when curr_time_us falls exactly on a period boundary,
 * the result is one full period later (the remainder is 0, so the full
 * period_us is added) — identical to the original modulo arithmetic.
 */
static uint64_t
next_start_period(uint64_t curr_time_us, uint64_t period_us){

    uint64_t into_period = curr_time_us % period_us;

    return curr_time_us + (period_us - into_period);
}
340
341 /*
342  * get_runqueue: Get the runqueue assigned to a virtual core.
343  */
344
345 struct vm_edf_rq * get_runqueue(struct guest_info *info){
346
347     struct vm_edf_rq *runqueue_list = (struct vm_edf_rq *) info->vm_info->sched_priv_data;
348     struct vm_edf_rq *runqueue = &runqueue_list[info->pcpu_id];
349     return runqueue;
350 }
351
352
353 /*
354  * wakeup_core: Wakeup a given vCPU thread
355  */
356
357 static void
358 wakeup_core(struct guest_info *info){
359
360     struct vm_core_edf_sched *core = info->sched_priv_data;
361     struct vm_edf_rq *runqueue = get_runqueue(info);
362     time_us host_time = get_curr_host_time(&core->info->time_state);
363
364     if (!info->core_thread) {
365               PrintError(info->vm_info, info,"ERROR: Tried to wakeup non-existent core thread vCPU_id %d \n",info->vcpu_id);
366     }
367     else {
368
369        V3_Wakeup(info->core_thread);
370        if(core->start_time == 0){
371             core->start_time = host_time;
372             print_parameters(host_time, runqueue, core);
373        }
374        core->last_wakeup_time = host_time;
375        runqueue->curr_vCPU = core;
376
377     }
378
379 }
380
381
382 /*
383  * activate_core - Moves a core to the red-black tree.
384  * used time is set to zero and current deadline is calculated
385  */
386
static int
activate_core(struct vm_core_edf_sched * core, struct vm_edf_rq *runqueue){

    // Admission control: only insert the core if the runqueue's CPU share cap allows it
    if (is_admissible_core(core, runqueue)){

        uint64_t curr_time_us = get_curr_host_time(&core->info->time_state);
        uint64_t curr_deadline = next_start_period(curr_time_us, core->period);

        // Reset per-period accounting: fresh deadline, no time used yet
        core->current_deadline = curr_deadline;
        core->used_time=0;

        bool ins = insert_core_edf(core, runqueue);
        /*
         * If not inserted, it is possible that another core has the same deadline
         * (the tree key must be unique). Nudge the deadline by 1 us and retry
         * until insertion succeeds.
         */
        while(!ins){
            core->current_deadline ++;
            ins = insert_core_edf(core, runqueue);
        }

        // Account this core's utilization share (100 * slice / period percent)
        runqueue->cpu_u += 100 * core->slice / core->period;
        runqueue->nr_vCPU ++;

        // Track the smallest period in the runqueue; it sets how often
        // deadlines are recalculated in pick_next_core/check_deadlines
        if(runqueue->nr_vCPU == 1){
            runqueue->smallest_period = core->period;
        }

        else if(core->period < runqueue->smallest_period){
          runqueue->smallest_period = core->period;
        }


        /*
         * If this is the first activation on this runqueue (no current vCPU),
         * pick the earliest-deadline core and wake it up.
         */

         if(!runqueue->curr_vCPU){

            struct vm_core_edf_sched *next_core;
           // Pick first earliest deadline core (leftmost tree node)

            struct rb_node *node = v3_rb_first(&runqueue->vCPUs_tree);
            next_core = container_of(node, struct vm_core_edf_sched, node);

            PrintDebug(VM_NONE, VCORE_NONE,"EDF Sched. First time activation, core %d, time %llu \n", next_core->info->vcpu_id, curr_time_us);

            // Wakeup next_core (It is not necessary to wake it up since in this case this is the first core launched by Palacios;
            // however wakeup_core is called to initialize some needed parameters
            // (last_wakeup_time, curr_vCPU).

            wakeup_core(next_core->info);
            next_core->start_time =  get_curr_host_time(&next_core->info->time_state);
            print_parameters(curr_time_us, runqueue, next_core);
            runqueue->start_time = next_core->start_time;


        }

      }
      else
          PrintError(core->info->vm_info, core->info,"EDF Sched. activate_core. CPU cannot activate the core. It is not admissible");
    // NOTE(review): returns 0 even when the core is not admissible — callers
    // cannot distinguish failure; confirm before tightening.
    return 0;

}
452
453
454 /*
455  * edf_sched_core_init: Initializes per core data structure and
456  * calls activate function.
457  */
458
459
460 int
461 edf_sched_core_init(struct guest_info * info){
462
463     struct vm_edf_rq *runqueue = get_runqueue(info);
464     struct vm_core_edf_sched *core_edf;
465     struct v3_time *vm_ts = &(info->vm_info->time_state);
466     uint32_t tdf = vm_ts->td_denom;
467     uint_t cpu_khz = V3_CPU_KHZ();
468
469     PrintDebug(info->vm_info, info,"EDF Sched. Initializing vcore %d, tdf %d\n", info->vcpu_id,tdf);
470
471     core_edf = (struct vm_core_edf_sched *) V3_Malloc(sizeof (struct vm_core_edf_sched));
472     if (!core_edf) {
473         PrintError(info->vm_info, info,"Cannot allocate private_data in edf_sched_core_init\n");
474         return -1;
475     }
476     info->sched_priv_data = core_edf;
477
478     // Default configuration if not specified in configuration file
479
480     core_edf->info = info;
481     core_edf->period = MIN_PERIOD;
482     core_edf->slice = MIN_SLICE;
483     core_edf->used_time = 0;
484     core_edf->last_wakeup_time = 0;
485     core_edf->remaining_time = 0;
486     core_edf->deadline = 0;
487     core_edf->miss_deadline = 0;
488     core_edf->extra_time = true;
489     core_edf->total_time = 0;
490     core_edf->slice_overuse = 0;
491     core_edf->extra_time_given = 0;
492     core_edf->expected_time = 0;
493     core_edf->start_time = 0;
494     core_edf->deadline_interval = 0;
495     core_edf->deadline_percentage_interval = 0;
496     core_edf->miss_deadline_interval = 0;
497     core_edf->print_deadline_interval = 0;
498
499
500     v3_cfg_tree_t * cfg_tree = core_edf->info->vm_info->cfg_data->cfg;
501     v3_cfg_tree_t * core = v3_cfg_subtree(v3_cfg_subtree(cfg_tree, "cores"), "core");
502
503     while (core){
504         char *id = v3_cfg_val(core, "vcpu_id");
505         char *period = v3_cfg_val(core, "period");
506         char *slice = v3_cfg_val(core, "slice");
507         char *extra_time = v3_cfg_val(core, "extra_time");
508         char *speed = v3_cfg_val(core, "khz");
509         uint_t speed_khz = cpu_khz;
510
511         if (atoi(id) == core_edf->info->vcpu_id){
512
513             if(speed){
514                 speed_khz = atoi(speed);
515             }
516
517             if(slice){
518                 core_edf->slice = atoi(slice);
519             }
520             else{
521                 core_edf->slice = MIN_SLICE;
522             }
523
524             if(period){
525                 core_edf->period = atoi(period);
526             }
527             else{
528                 core_edf->period = (core_edf->slice * cpu_khz * tdf)/speed_khz;
529                 core_edf->period += 0.3*(100*core_edf->slice/core_edf->period); // Give faster vcores a little more bigger periods.
530             }
531
532             PrintDebug(info->vm_info,info,"EDF_SCHED. Vcore %d, Pcore %d, cpu_khz %u, Period %llu Speed %d, Utilization %d, tdf %d %llu \n",
533                    core_edf->info->vcpu_id,
534                    core_edf->info->pcpu_id,
535                    cpu_khz,
536                    core_edf->period,
537                    speed_khz,
538                    (int)(100*core_edf->slice/core_edf->period),
539                    tdf,
540                    runqueue->smallest_period);
541
542             if(extra_time){
543                 if (strcasecmp(extra_time, "true") == 0)
544                     core_edf->extra_time = true;
545                 else
546                     core_edf->extra_time = false;
547             }
548             else
549                 core_edf->extra_time = false;
550
551             break;
552         }
553         core = v3_cfg_next_branch(core);
554     }
555
556     return activate_core(core_edf,runqueue);
557
558 }
559
560
561 /*
562  * search_core_edf: Searches a core in the red-black tree by using its current_deadline
563  */
564 static struct vm_core_edf_sched *
565 search_core_edf(time_us current_deadline, struct vm_edf_rq *runqueue){
566
567     struct rb_node *node = runqueue->vCPUs_tree.rb_node;
568
569     while (node) {
570         struct vm_core_edf_sched *core = container_of(node, struct vm_core_edf_sched, node);
571
572         if (current_deadline < core->current_deadline)
573             node = node->rb_left;
574         else if (current_deadline > core->current_deadline)
575             node = node->rb_right;
576         else
577             return core;
578     }
579     return NULL;
580 }
581
582
583 /*
584  * delete_core_edf: Deletes a core from the red black tree, generally when it has
585  * consumed its time slice within the current period.
586  */
587
588 static bool
589 delete_core_edf( struct vm_core_edf_sched *core_edf  , struct vm_edf_rq *runqueue){
590
591     struct vm_core_edf_sched *core = search_core_edf(core_edf->current_deadline, runqueue);
592         if (core){
593
594             v3_rb_erase(&core->node, &runqueue->vCPUs_tree);
595             return true;
596         }
597         else{
598             PrintError(VM_NONE,VCORE_NONE,"EDF Sched. delete_core_edf.Attempted to erase unexisting core");
599             return false;
600         }
601 }
602
603
604 /*
605  * deactivate_core - Removes a core from the red-black tree.
606  */
607
608 static void
609 deactivate_core(struct vm_core_edf_sched * core, struct vm_edf_rq *runqueue){
610
611      if(delete_core_edf(core, runqueue)){
612          runqueue->cpu_u -= 100 * core->slice / core->period;
613          runqueue->nr_vCPU -- ;
614      }
615 }
616
617 /*
618  * adjust_slice - Adjust vcore parameters values
619  */
620
621 static void adjust_slice(struct vm_core_edf_sched *core){
622
623     time_us host_time = get_curr_host_time(&core->info->time_state);
624     time_us used_time =  host_time - core->last_wakeup_time;
625
626     if(core->last_wakeup_time != 0){
627
628         core->used_time += used_time;
629         core->total_time += used_time;
630         if(core->start_time != 0){
631             core->expected_time = ((host_time - core->start_time)/core->period)*core->slice;
632         }
633         if(core->total_time <= core->expected_time){
634             core->remaining_time = core->expected_time - core->total_time;
635         }
636         else{
637             core->remaining_time =0;
638         }
639
640         if (core->used_time > core->slice){
641             core->slice_overuse++;
642         }
643     }
644
645 }
646
647 /*
648  * Check_deadlines - Check virtual cores, and re-insert in the runqueue the ones which deadline is over
649  */
650
651
652 static void check_deadlines(struct vm_edf_rq *runqueue){
653
654     struct rb_node *node = v3_rb_first(&runqueue->vCPUs_tree);
655     struct vm_core_edf_sched *next_core = container_of(node, struct vm_core_edf_sched, node);
656     time_us host_time = get_curr_host_time(&next_core->info->time_state);
657     struct vm_core_edf_sched * deadline_cores[runqueue->nr_vCPU];
658
659
660     int nr_dcores=0;
661     int i=0;
662     memset(deadline_cores, 0, runqueue->nr_vCPU);
663
664     while(node){
665
666         next_core = container_of(node, struct vm_core_edf_sched, node);
667         if(next_core->current_deadline < host_time){
668             next_core->deadline++;
669             next_core->deadline_interval++;
670             deadline_cores[nr_dcores++]= next_core;
671             if(next_core->used_time < next_core->slice){
672                 next_core->miss_deadline++;
673                 next_core->miss_deadline_interval++;
674
675                 /*PrintError(VM_NONE,VCORE_NONE,"Test: Core %d miss_deadlines %d, used time %llu, slice %llu, deadline %llu, host time %llu \n",
676                        next_core->info->vcpu_id,
677                        next_core->miss_deadline,
678                        next_core->used_time,
679                        next_core->slice,
680                        next_core->current_deadline,
681                        host_time);*/
682
683             }
684             else{
685               next_core->extra_time_given += (next_core->used_time - next_core->slice);
686
687               /* PrintError(VM_NONE,VCORE_NONE,"Test: Extra time, core %d, core used time %llu, slice %llu, extra time %llu, Total extra time %llu \n",
688                  next_core->info->vcpu_id,
689                  next_core->used_time,
690                  next_core->slice,
691                  (next_core->used_time - next_core->slice),
692                  next_core->extra_time_given);*/
693
694             }
695
696         }
697         else{
698             break;
699         }
700         node = v3_rb_next(node);
701     }
702
703
704     for(i=0;i<nr_dcores;i++){
705
706         next_core = deadline_cores[nr_dcores-1-i];
707
708         deactivate_core(next_core,runqueue);
709         activate_core(next_core,runqueue);
710
711     }
712     runqueue->check_deadline_time = host_time;
713 }
714
715
716
717 /*
718  * pick_next_core: Returns the next core to be scheduled from the red black tree
719  */
720
721 static struct vm_core_edf_sched *
722 pick_next_core(struct vm_edf_rq *runqueue){
723
724     /*
725      * Pick first earliest deadline core
726      */
727     struct rb_node *node=NULL;
728     struct vm_core_edf_sched *next_core=NULL;
729     time_us core_deadline=0;         // Deadline of the core with more time remaining
730     time_us core_extra_deadline=0;   // Deadline of the core with extra time enable and earliest deadline
731     time_us max_remaining=0;
732     time_us host_time = get_curr_host_time(&runqueue->curr_vCPU->info->time_state);
733
734     if(host_time - runqueue->check_deadline_time >= runqueue->smallest_period ){
735         check_deadlines(runqueue);
736     }
737
738     node = v3_rb_first(&runqueue->vCPUs_tree);
739     next_core = container_of(node, struct vm_core_edf_sched, node);
740
741     if (next_core->used_time < next_core->slice){
742          return next_core;
743     }
744
745     if(next_core->total_time < next_core->expected_time){
746         max_remaining = next_core->remaining_time;
747         core_deadline = next_core->current_deadline;
748     }
749
750     if(next_core->extra_time){
751         core_extra_deadline = next_core->current_deadline;
752     }
753
754     // Pick the next core that has not used its whole slice
755     while(node){
756
757         next_core = container_of(node, struct vm_core_edf_sched, node);
758
759         if(next_core->remaining_time > max_remaining){
760             max_remaining = next_core->extra_time_given;
761             core_deadline = next_core->current_deadline;
762             if(core_extra_deadline !=0 && next_core->extra_time){
763                 core_extra_deadline = next_core->current_deadline;
764             }
765         }
766
767         if(next_core->used_time < next_core->slice){
768           return next_core;
769         }
770
771
772         node = v3_rb_next(node);
773     }
774
775     if(core_extra_deadline !=0){
776         next_core  = search_core_edf(core_extra_deadline, runqueue);
777     }
778
779     else if (core_deadline != 0){
780         next_core  = search_core_edf(core_deadline, runqueue);
781     }
782
783      return NULL;
784 }
785
786
787 /*
788  * run_next_core: Pick next core to be scheduled and wakeup it
789  */
790
/*
 * run_next_core: Account the outgoing core's CPU usage, pick the next core
 * to be scheduled, emit periodic deadline statistics, and perform the
 * sleep/wakeup handoff when the chosen core differs from the current one.
 */
static void
run_next_core(struct guest_info *info, int usec)
{
    struct vm_core_edf_sched *core = info->sched_priv_data;
    struct vm_core_edf_sched *next_core;
    struct vm_edf_rq *runqueue = get_runqueue(info);
    time_us host_time = get_curr_host_time(&info->time_state);

     /* The next core to be scheduled is chosen from the tree (Function pick_next_core).
     * The selected core is the one with the earliest deadline and with available time
     * to use within the current period (used_time < slice)
     */
    // Skip usage accounting if the CPU was yielded (no core was running)
    if(!runqueue->yielded){
        adjust_slice(core);
    }
    next_core = pick_next_core(runqueue); // Pick next core to schedule

    // Periodically report and reset the per-interval missed-deadline ratio
    if(host_time - core->print_deadline_interval >= DEADLINE_INTERVAL){

        if(core->deadline_interval != 0){
            core->deadline_percentage_interval = (int)100*core->miss_deadline_interval/core->deadline_interval;
        }

        PrintDebug(info->vm_info,info,"deadline: %d %llu %llu %d",
            core->info->vcpu_id,
            core->deadline_interval,
            core->miss_deadline_interval,
            core->deadline_percentage_interval);

        core->print_deadline_interval = host_time;
        core->deadline_interval=0;
        core->miss_deadline_interval = 0;
        core->deadline_percentage_interval = 0;
    }

    // No runnable core: yield the physical CPU back to palacios
    if(!next_core){
        runqueue->yielded=true;
        V3_Yield();
        return;
    }
    runqueue->yielded=false;

    // Refresh the lifetime missed-deadline percentage for the outgoing core
    if(core->deadline != 0)
        core->deadline_percentage = (int)100*core->miss_deadline/core->deadline;

    // Context switch: wake the chosen core and put the current one to sleep
    if (core != next_core){

        print_parameters(host_time, runqueue,core);
        wakeup_core(next_core->info);
        V3_Sleep(usec);
     }

    else{
        // Necessary to update last_wakeup_time to adjust slice properly later
        next_core->last_wakeup_time = get_curr_host_time(&next_core->info->time_state);

        if(host_time - runqueue->print_time >= runqueue->smallest_period ){
            print_parameters(host_time, runqueue,core);
        }
     }

}
854
855
856 /*
857  * edf_schedule: Scheduling function
858  */
859
860 static void
861 edf_schedule(struct guest_info * info, int usec){
862
863   uint64_t host_time = get_curr_host_time(&info->time_state);
864   struct vm_edf_rq *runqueue = get_runqueue(info);
865
866   /*PrintDebug(info->vm_info,info,"Test PCORE. %d host time: %llu Last sched_time: %llu, difference: %llu\n",
867     info->pcpu_id,  host_time, runqueue->last_sched_time, host_time-runqueue->last_sched_time); */
868
869   /* if( (runqueue->last_sched_time !=0) && ((host_time - runqueue->last_sched_time) > 10000)){
870          PrintDebug(info->vm_info,info,"%d Test PCORE. %d LOW SCHED FREQUENCY  host time: %llu Last sched_time: %llu, difference: %llu\n",
871                  ++runqueue->sched_low,info->pcpu_id,  host_time, runqueue->last_sched_time, host_time-runqueue->last_sched_time);
872                  }*/
873
874   runqueue->last_sched_time = host_time;
875   run_next_core(info, usec);
876     return;
877
878 }
879
/*
 * edf_sched_schedule: Main scheduling entry point, invoked on every
 * interrupt.  Delegates to edf_schedule with no sleep time requested.
 */

void
edf_sched_schedule(struct guest_info * info){
    edf_schedule(info, 0);
}
891
/*
 * edf_sched_yield: Invoked when the logical CPU should be given up for
 * usec microseconds; runs the scheduler with that sleep request.
 */

void
edf_sched_yield(struct guest_info * info, int usec){
    edf_schedule(info, usec);
}
901
902 /*
903  * edf_sched_core_stop. Stops virtual machine. All the virtual cores yield the CPU
904  */
905
906 int
907 edf_sched_core_stop(struct guest_info * info){
908
909     struct vm_edf_rq * runqueue =  get_runqueue(info);
910     struct rb_node *node = v3_rb_first(&runqueue->vCPUs_tree);
911     struct vm_core_edf_sched *curr_core;
912
913     while(node){
914
915         curr_core = container_of(node, struct vm_core_edf_sched, node);
916         wakeup_core(curr_core->info);
917         PrintDebug(VM_NONE,VCORE_NONE,"Waking up core %d, thread (%p)\n",
918                    curr_core->info->vcpu_id,
919                  (struct task_struct *)info->core_thread);
920         V3_Yield();
921         PrintDebug(VM_NONE,VCORE_NONE,"Yielding Thread %p\n",(struct task_struct *)info->core_thread);
922         node = v3_rb_next(node);
923
924     }
925    return 0;
926 }
927
928
929 /*
930  * edf_sched_deinit: Frees edf scheduler data structures
931  */
932
933
934 int
935 edf_sched_deinit(struct v3_vm_info *vm)
936 {
937     PrintDebug(VM_NONE,VCORE_NONE,"Freeing vm\n");
938     void *priv_data = vm->sched_priv_data;
939
940     if (priv_data)
941         V3_Free(priv_data);
942
943     return 0;
944
945 }
946
947 /*
948  * edf_sched_deinit: Frees virtual core data structures
949  */
950
951 int
952 edf_sched_core_deinit(struct guest_info *core)
953 {
954     PrintDebug(VM_NONE,VCORE_NONE,"Freeing core\n");
955     void *priv_data = core->sched_priv_data;
956
957     if (priv_data)
958         V3_Free(priv_data);
959
960     return 0;
961 }
962
/*
 * edf_sched_vm_init: Per-VM initialization hook, called when the VM
 * starts.  All EDF setup is deferred to admission time (see
 * edf_sched_admit), so there is nothing to do here.
 */

int edf_sched_vm_init(struct v3_vm_info *vm){
    return 0;
}
970
/*
 * edf_sched_admit: Admission-control hook for a VM.
 *
 * The per-VM private data is initialized here rather than in vm_init
 * because it depends on avail_core, which v3_start_vm fills in only
 * just before calling v3_scheduler_admit_vm.
 *
 * Real admission checks are still TODO; every VM is currently accepted.
 */

int edf_sched_admit(struct v3_vm_info *vm){

    priv_data_init(vm);

    return 0;
}
986
987
988 /*
989  * vm_scheduler_impl. Functions that implement vmm_scheduler interface
990  */
991
992 static struct vm_scheduler_impl edf_sched = {
993
994     .name = "edf",
995     .init = NULL,
996     .deinit = NULL,
997     .vm_init = edf_sched_vm_init,
998     .vm_deinit = edf_sched_deinit,
999     .core_init = edf_sched_core_init,
1000     .core_stop = edf_sched_core_stop,
1001     .core_deinit = edf_sched_core_deinit,
1002     .schedule = edf_sched_schedule,
1003     .yield = edf_sched_yield,
1004     .admit = edf_sched_admit,
1005     .remap = NULL,
1006     .dvfs=NULL
1007 };
1008
1009
1010 /*
1011  * ext_sched_edf_init. Creates an register the EDF scheduler.
1012  */
1013
1014 static int
1015 ext_sched_edf_init() {
1016     PrintDebug(VM_NONE, VCORE_NONE,"Sched. Creating (%s) scheduler\n",edf_sched.name);
1017     return v3_register_scheduler(&edf_sched);
1018 }
1019
/*
 * ext_sched_edf_vm_init: Per-VM extension hook, called when the VM
 * starts.  No per-VM extension state is needed.
 */

static int
ext_sched_edf_vm_init() {
    return 0;
}
1028
1029
1030 /*
1031  * v3_extension_impl. EDF extension functions
1032  */
1033
1034
1035 static struct v3_extension_impl sched_edf_impl = {
1036         .name = "EDF Scheduler",
1037         .init = ext_sched_edf_init,
1038         .vm_init = ext_sched_edf_vm_init,
1039         .vm_deinit = NULL,
1040         .core_init = NULL,
1041         .core_deinit = NULL,
1042         .on_entry = NULL,
1043         .on_exit = NULL
1044 };
1045
1046 register_extension(&sched_edf_impl);