Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute:

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way; substitute the name of the branch you want for "devel" (see the example below).
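
For instance, to see which branches the server offers and then track one of them, you could do the following. The release branch name here is only an illustration; use a name that actually appears in the listing:

  git branch -r
  git checkout --track -b Release-1.3 origin/Release-1.3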


palacios/src/palacios/vmm_scheduler.c (at commit 0739575c839741976e6e11b3e633e5522337b506):

/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2013, Oscar Mondragon <omondrag@cs.unm.edu>
 * Copyright (c) 2013, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Oscar Mondragon <omondrag@cs.unm.edu>
 *         Patrick G. Bridges <bridges@cs.unm.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm.h>
#include <palacios/vm_guest.h>
#include <palacios/vmm_scheduler.h>
#include <palacios/vmm_hashtable.h>

#ifndef V3_CONFIG_DEBUG_SCHEDULER
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif

static char default_strategy[] = "host";
static struct hashtable * master_scheduler_table = NULL;
static int create_host_scheduler();

static struct vm_scheduler_impl *scheduler = NULL;

static uint_t scheduler_hash_fn(addr_t key) {
    char * name = (char *)key;
    return v3_hash_buffer((uint8_t *)name, strlen(name));
}

static int scheduler_eq_fn(addr_t key1, addr_t key2) {
    char * name1 = (char *)key1;
    char * name2 = (char *)key2;

    return (strcmp(name1, name2) == 0);
}
int V3_init_scheduling() {

    PrintDebug(VM_NONE, VCORE_NONE, "Initializing scheduler\n");

    master_scheduler_table = v3_create_htable(0, scheduler_hash_fn, scheduler_eq_fn);
    return create_host_scheduler();
}


int v3_register_scheduler(struct vm_scheduler_impl *s) {

    PrintDebug(VM_NONE, VCORE_NONE, "Registering Scheduler (%s)\n", s->name);

    if (v3_htable_search(master_scheduler_table, (addr_t)(s->name))) {
        PrintError(VM_NONE, VCORE_NONE, "Multiple instances of scheduler (%s)\n", s->name);
        return -1;
    }

    if (v3_htable_insert(master_scheduler_table,
                         (addr_t)(s->name),
                         (addr_t)(s)) == 0) {
        PrintError(VM_NONE, VCORE_NONE, "Could not register scheduler (%s)\n", s->name);
        return -1;
    }

    PrintDebug(VM_NONE, VCORE_NONE, "Scheduler registered\n");
    return 0;
}

struct vm_scheduler_impl *v3_scheduler_lookup(char *name)
{
    return (struct vm_scheduler_impl *)v3_htable_search(master_scheduler_table, (addr_t)(name));
}

int V3_enable_scheduler() {
    /* XXX Lookup the specified scheduler to use for palacios and use it */
    scheduler = v3_scheduler_lookup(default_strategy);
    if (!scheduler) {
        PrintError(VM_NONE, VCORE_NONE, "Specified Palacios scheduler \"%s\" not found.\n", default_strategy);
        return -1;
    }
    if (scheduler->init) {
        return scheduler->init();
    } else {
        return 0;
    }
}

int v3_scheduler_register_vm(struct v3_vm_info *vm) {
    if (scheduler->vm_init) {
        return scheduler->vm_init(vm);
    } else {
        return 0;
    }
}

int v3_scheduler_register_core(struct guest_info *core) {
    if (scheduler->core_init) {
        return scheduler->core_init(core);
    } else {
        return 0;
    }
}

int v3_scheduler_admit_vm(struct v3_vm_info *vm) {
    if (scheduler->admit) {
        return scheduler->admit(vm);
    } else {
        return 0;
    }
}

int v3_scheduler_notify_remap(struct v3_vm_info *vm) {
    if (scheduler->remap) {
        return scheduler->remap(vm);
    } else {
        return 0;
    }
}

int v3_scheduler_notify_dvfs(struct v3_vm_info *vm) {
    if (scheduler->dvfs) {
        return scheduler->dvfs(vm);
    } else {
        return 0;
    }
}

void v3_schedule(struct guest_info *core) {
    if (scheduler->schedule) {
        scheduler->schedule(core);
    }
    return;
}

void v3_yield(struct guest_info *core, int usec) {
    if (scheduler->yield) {
        scheduler->yield(core, usec);
    }
    return;
}

/* Host scheduler: compute the VM's quantum in CPU cycles from the configured
 * "schedule_hz" rate (default 100 Hz) and stash it in vm->sched_priv_data. */
int host_sched_vm_init(struct v3_vm_info *vm)
{
    PrintDebug(vm, VCORE_NONE, "Sched. host_sched_init\n");

    char * schedule_hz_str = v3_cfg_val(vm->cfg_data->cfg, "schedule_hz");
    uint32_t sched_hz = 100;

    if (schedule_hz_str) {
        sched_hz = atoi(schedule_hz_str);
    }

    PrintDebug(vm, VCORE_NONE, "CPU_KHZ = %d, schedule_freq=%p\n", V3_CPU_KHZ(),
               (void *)(addr_t)sched_hz);

    uint64_t yield_cycle_period = (V3_CPU_KHZ() * 1000) / sched_hz;
    vm->sched_priv_data = (void *)yield_cycle_period;

    return 0;
}

/* Record the host time at which this core's current quantum began. */
int host_sched_core_init(struct guest_info *core)
{
    PrintDebug(core->vm_info, core, "Sched. host_sched_core_init\n");

    uint64_t t = v3_get_host_time(&core->time_state);
    core->sched_priv_data = (void *)t;

    return 0;
}

/* Yield the physical CPU once the core has run for a full quantum, then
 * advance the start-of-quantum timestamp by one period. */
void host_sched_schedule(struct guest_info *core)
{
    uint64_t cur_cycle;
    cur_cycle = v3_get_host_time(&core->time_state);

    if (cur_cycle > ((uint64_t)core->sched_priv_data + (uint64_t)core->vm_info->sched_priv_data)) {

        V3_Yield();

        uint64_t yield_start_cycle = (uint64_t)core->sched_priv_data;
        yield_start_cycle += (uint64_t)core->vm_info->sched_priv_data;
        core->sched_priv_data = (void *)yield_start_cycle;

    }
}

/*
 * Unconditional cpu yield.
 * If the yielding thread is a guest context, the guest quantum is reset on resumption.
 * Non-guest context threads should call this function with a NULL argument.
 *
 * usec < 0  => the non-timed yield is used
 * usec >= 0 => the timed yield is used, which also usually implies interruptible
 */
void host_sched_yield(struct guest_info * core, int usec) {
    uint64_t yield_start_cycle;

    if (usec < 0) {
        V3_Yield();
    } else {
        V3_Sleep(usec);
    }

    /* Only guest contexts carry a quantum to reset; core may be NULL here. */
    if (core) {
        yield_start_cycle = (uint64_t)core->sched_priv_data
                            + (uint64_t)core->vm_info->sched_priv_data;
        core->sched_priv_data = (void *)yield_start_cycle;
    }
}

int host_sched_admit(struct v3_vm_info *vm) {
    return 0;
}

static struct vm_scheduler_impl host_sched_impl = {
    .name = "host",
    .init = NULL,
    .deinit = NULL,
    .vm_init = host_sched_vm_init,
    .vm_deinit = NULL,
    .core_init = host_sched_core_init,
    .core_deinit = NULL,
    .schedule = host_sched_schedule,
    .yield = host_sched_yield,
    .admit = host_sched_admit,
    .remap = NULL,
    .dvfs = NULL
};

static int create_host_scheduler()
{
    v3_register_scheduler(&host_sched_impl);
    return 0;
}
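
As a usage sketch, here is how an additional scheduler could plug into the interface above. This is illustrative only: the vm_scheduler_impl fields and v3_register_scheduler() come from this file, but the "example" names, the zero-argument init prototype, and the registration entry point are assumptions rather than Palacios code; it also assumes the same headers provide PrintDebug, VM_NONE, and VCORE_NONE as in the listing above.

#include <palacios/vmm.h>
#include <palacios/vmm_scheduler.h>

/* Hypothetical init hook. V3_enable_scheduler() above invokes scheduler->init()
 * with no arguments and returns its int result, so this prototype is assumed
 * to match. */
static int example_sched_init() {
    PrintDebug(VM_NONE, VCORE_NONE, "Initializing example scheduler\n");
    return 0;
}

/* Mirrors host_sched_impl above; hooks left NULL simply fall back to the
 * do-nothing branches in the v3_scheduler_* wrappers. */
static struct vm_scheduler_impl example_sched_impl = {
    .name = "example",
    .init = example_sched_init,
    .deinit = NULL,
    .vm_init = NULL,
    .vm_deinit = NULL,
    .core_init = NULL,
    .core_deinit = NULL,
    .schedule = NULL,
    .yield = NULL,
    .admit = NULL,
    .remap = NULL,
    .dvfs = NULL
};

/* Hypothetical entry point; something analogous to create_host_scheduler()
 * would have to call this during initialization. */
int register_example_scheduler() {
    return v3_register_scheduler(&example_sched_impl);
}

Note that V3_enable_scheduler() selects a scheduler purely by the default_strategy string, which is hard-coded to "host" above (the XXX comment marks this), so a registered alternative only takes effect if that lookup is changed to name it.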