Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way; see the example below.
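
For instance, you can list the remote branches and then track one of the release branches. The branch name "Release-1.3" below is only a placeholder; use whatever name git branch -r actually reports.

  git branch -r
  git checkout --track -b Release-1.3 origin/Release-1.3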


Below is palacios/src/palacios/vmm_scheduler.c as of the commit "remove extraneous dos linefeeds from scheduling files".
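
If you want to inspect this file's history yourself, standard git commands against the clone will do it; for example (assuming the devel branch checked out above contains the commit, and with <commit-id> standing in for the id reported by git log):

  git log --oneline -- palacios/src/palacios/vmm_scheduler.c
  git show <commit-id> -- palacios/src/palacios/vmm_scheduler.c
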
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2013, Oscar Mondragon <omondrag@cs.unm.edu>
 * Copyright (c) 2013, Patrick G. Bridges <bridges@cs.unm.edu>
 * Copyright (c) 2013, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Oscar Mondragon <omondrag@cs.unm.edu>
 *         Patrick G. Bridges <bridges@cs.unm.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm.h>
#include <palacios/vm_guest.h>
#include <palacios/vmm_scheduler.h>
#include <palacios/vmm_hashtable.h>

#ifndef V3_CONFIG_DEBUG_SCHEDULER
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif

static char default_strategy[] = "host";
static struct hashtable * master_scheduler_table = NULL;
static int create_host_scheduler();

static struct vm_scheduler_impl *scheduler = NULL;

static uint_t scheduler_hash_fn(addr_t key) {
    char * name = (char *)key;
    return v3_hash_buffer((uint8_t *)name, strlen(name));
}

static int scheduler_eq_fn(addr_t key1, addr_t key2) {
    char * name1 = (char *)key1;
    char * name2 = (char *)key2;

    return (strcmp(name1, name2) == 0);
}

int V3_init_scheduling() {

    PrintDebug(VM_NONE, VCORE_NONE, "Initializing scheduler");

    master_scheduler_table = v3_create_htable(0, scheduler_hash_fn, scheduler_eq_fn);
    return create_host_scheduler();
}

int v3_register_scheduler(struct vm_scheduler_impl *s) {

    PrintDebug(VM_NONE, VCORE_NONE, "Registering Scheduler (%s)\n", s->name);

    if (v3_htable_search(master_scheduler_table, (addr_t)(s->name))) {
        PrintError(VM_NONE, VCORE_NONE, "Multiple instances of scheduler (%s)\n", s->name);
        return -1;
    }

    if (v3_htable_insert(master_scheduler_table,
                         (addr_t)(s->name),
                         (addr_t)(s)) == 0) {
        PrintError(VM_NONE, VCORE_NONE, "Could not register scheduler (%s)\n", s->name);
        return -1;
    }

    return 0;
}

struct vm_scheduler_impl *v3_scheduler_lookup(char *name)
{
    return (struct vm_scheduler_impl *)v3_htable_search(master_scheduler_table, (addr_t)(name));
}
int V3_enable_scheduler() {
    /* XXX Lookup the specified scheduler to use for palacios and use it */

    scheduler = v3_scheduler_lookup(default_strategy);

    if (!scheduler) {
        PrintError(VM_NONE, VCORE_NONE, "Specified Palacios scheduler \"%s\" not found.\n", default_strategy);
        return -1;
    }

    /* Only report the scheduler after the NULL check, so a missing policy is never dereferenced */
    PrintDebug(VM_NONE, VCORE_NONE, "Sched. Scheduler %s found\n", scheduler->name);

    if (scheduler->init) {
        return scheduler->init();
    } else {
        return 0;
    }
}

int v3_scheduler_register_vm(struct v3_vm_info *vm) {
    if (scheduler->vm_init) {
        return scheduler->vm_init(vm);
    } else {
        return 0;
    }
}

int v3_scheduler_register_core(struct guest_info *core) {
    if (scheduler->core_init) {
        return scheduler->core_init(core);
    } else {
        return 0;
    }
}

int v3_scheduler_admit_vm(struct v3_vm_info *vm) {
    if (scheduler->admit) {
        return scheduler->admit(vm);
    } else {
        return 0;
    }
}

int v3_scheduler_notify_remap(struct v3_vm_info *vm) {
    if (scheduler->remap) {
        return scheduler->remap(vm);
    } else {
        return 0;
    }
}

int v3_scheduler_notify_dvfs(struct v3_vm_info *vm) {
    if (scheduler->dvfs) {
        return scheduler->dvfs(vm);
    } else {
        return 0;
    }
}

void v3_schedule(struct guest_info *core) {
    if (scheduler->schedule) {
        scheduler->schedule(core);
    }
    return;
}

void v3_yield(struct guest_info *core, int usec) {
    if (scheduler->yield) {
        scheduler->yield(core, usec);
    }
    return;
}

int host_sched_vm_init(struct v3_vm_info *vm)
{
    PrintDebug(vm, VCORE_NONE, "Sched. host_sched_init\n");

    char * schedule_hz_str = v3_cfg_val(vm->cfg_data->cfg, "schedule_hz");
    uint32_t sched_hz = 100;

    if (schedule_hz_str) {
        sched_hz = atoi(schedule_hz_str);
    }

    PrintDebug(vm, VCORE_NONE, "CPU_KHZ = %d, schedule_freq=%p\n", V3_CPU_KHZ(),
               (void *)(addr_t)sched_hz);

    uint64_t yield_cycle_period = (V3_CPU_KHZ() * 1000) / sched_hz;
    vm->sched_priv_data = (void *)yield_cycle_period;

    return 0;
}

int host_sched_core_init(struct guest_info *core)
{
    PrintDebug(core->vm_info, core, "Sched. host_sched_core_init\n");

    uint64_t t = v3_get_host_time(&core->time_state);
    core->sched_priv_data = (void *)t;

    return 0;
}

void host_sched_schedule(struct guest_info *core)
{
    uint64_t cur_cycle;
    cur_cycle = v3_get_host_time(&core->time_state);

    if (cur_cycle > ((uint64_t)core->sched_priv_data + (uint64_t)core->vm_info->sched_priv_data)) {

        V3_Yield();

        uint64_t yield_start_cycle = (uint64_t)core->sched_priv_data;
        yield_start_cycle += (uint64_t)core->vm_info->sched_priv_data;
        core->sched_priv_data = (void *)yield_start_cycle;

    }
}

/*
 * Unconditional cpu yield.
 * If the yielding thread is a guest context, the guest quantum is reset on resumption.
 * Non-guest context threads should call this function with a NULL core argument.
 *
 * usec < 0  => the non-timed yield is used
 * usec >= 0 => the timed yield is used, which also usually implies interruptible
 */
void host_sched_yield(struct guest_info * core, int usec) {
    uint64_t yield_start_cycle;

    if (usec < 0) {
        V3_Yield();
    } else {
        V3_Sleep(usec);
    }

    /* Only guest contexts have a quantum to reset; a NULL core must not be dereferenced */
    if (core) {
        yield_start_cycle = (uint64_t)core->sched_priv_data
                            + (uint64_t)core->vm_info->sched_priv_data;
        core->sched_priv_data = (void *)yield_start_cycle;
    }
}

int host_sched_admit(struct v3_vm_info *vm){
    return 0;
}

static struct vm_scheduler_impl host_sched_impl = {
    .name = "host",
    .init = NULL,
    .deinit = NULL,
    .vm_init = host_sched_vm_init,
    .vm_deinit = NULL,
    .core_init = host_sched_core_init,
    .core_deinit = NULL,
    .schedule = host_sched_schedule,
    .yield = host_sched_yield,
    .admit = host_sched_admit,
    .remap = NULL,
    .dvfs = NULL
};

static int create_host_scheduler()
{
    /* Propagate any registration failure back to V3_init_scheduling() */
    return v3_register_scheduler(&host_sched_impl);
}
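
The host_sched_impl initializer above also serves as a template for adding a new policy: fill in only the hooks you need (the wrapper functions treat NULL hooks as no-ops) and hand the struct to v3_register_scheduler(). The sketch below is only an illustration under that reading of the code; the "noop" name and the noop_* functions are hypothetical, not part of the tree, and V3_enable_scheduler() currently looks up only the default "host" strategy.

#include <palacios/vmm.h>
#include <palacios/vm_guest.h>
#include <palacios/vmm_scheduler.h>

static int noop_sched_admit(struct v3_vm_info *vm) {
    /* Admit every VM unconditionally */
    return 0;
}

static void noop_sched_schedule(struct guest_info *core) {
    /* Never preempt; rely entirely on explicit yields */
}

static struct vm_scheduler_impl noop_sched_impl = {
    .name        = "noop",
    .init        = NULL,
    .deinit      = NULL,
    .vm_init     = NULL,
    .vm_deinit   = NULL,
    .core_init   = NULL,
    .core_deinit = NULL,
    .schedule    = noop_sched_schedule,
    .yield       = NULL,
    .admit       = noop_sched_admit,
    .remap       = NULL,
    .dvfs        = NULL
};

int noop_sched_register(void) {
    /* Makes the policy discoverable via v3_scheduler_lookup("noop") */
    return v3_register_scheduler(&noop_sched_impl);
}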