/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *         Patrick G. Bridges <bridges@cs.unm.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
#include <palacios/vmm.h>
#include <palacios/vmm_time.h>
#include <palacios/vm_guest.h>

#ifndef V3_CONFIG_DEBUG_TIME
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
/* Overview
 *
 * Time handling in VMMs is challenging, and Palacios uses the highest
 * resolution, lowest overhead timer on modern CPUs that it can - the
 * processor timestamp counter (TSC). Note that on somewhat old processors
 * this can be problematic; in particular, older AMD processors did not
 * have a constant rate timestamp counter in the face of power management
 * events. However, the latest Intel and AMD CPUs all do (should...) have a
 * constant rate TSC, and Palacios relies on this fact.
 *
 * Basically, Palacios keeps track of three quantities as it runs to manage
 * the passage of time:
 * (1) The host timestamp counter - read directly from HW and never written
 * (2) A monotonic guest timestamp counter used to measure the progression of
 *     time in the guest. This is computed using an offset from (1) above.
 * (3) The actual guest timestamp counter (which can be written by
 *     writing to the guest TSC MSR - MSR 0x10). This is computed as an
 *     offset from (2) above, and that offset is updated whenever the guest
 *     writes the TSC MSR.
 *
 * The value used to offset the guest TSC from the host TSC is the *sum* of
 * the offsets (2) and (3) above; see the illustrative sketch below.
 *
 * Because all other devices are slaved off of the passage of time in the guest,
 * it is (2) above that drives the firing of other timers in the guest,
 * including timer devices such as the Programmable Interrupt Timer (PIT).
 *
 * Future additions:
 * (1) Add support for temporarily skewing guest time off of where it should
 *     be to support slack simulation of guests. The idea is that simulators
 *     set this skew to be the difference between how much time passed for a
 *     simulated feature and a real implementation of that feature, making time
 *     pass at a different rate from real time on this core. The VMM will then
 *     attempt to move this skew back towards 0 subject to resolution/accuracy
 *     constraints from various system timers.
 *
 *     The main effort in doing this will be to get accuracy/resolution
 *     information from each local timer and to use this to bound how much skew
 *     is removed on each exit.
 *
 * (2) Look more into synchronizing the offsets *across* virtual and physical
 *     cores so that multicore guests stay mostly in sync.
 *
 * (3) Look into using the AMD TSC multiplier feature and adding explicit time
 *     dilation support to time handling.
 */
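/* Illustrative sketch of how the three quantities above combine (compiled
 * out; hypothetical helper name). It uses only the offset fields of
 * struct vm_time that the rest of this file manipulates. */
#if 0
static uint64_t example_guest_tsc_calc(struct vm_time * time_state) {
    uint64_t host_tsc = v3_get_host_time(time_state); /* (1) raw host TSC */
    /* (2) monotonic guest time = host TSC + guest/host offset */
    uint64_t guest_time = host_tsc + time_state->guest_host_offset;
    /* (3) guest-visible TSC = (2) + offset from guest TSC MSR writes */
    return guest_time + time_state->tsc_guest_offset;
}
#endif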
static int handle_cpufreq_hcall(struct guest_info * info, uint_t hcall_id, void * priv_data) {
    struct vm_time * time_state = &(info->time_state);

    info->vm_regs.rbx = time_state->guest_cpu_freq;

    PrintDebug("Guest request cpu frequency: return %ld\n", (long)info->vm_regs.rbx);

    return 0;
}
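/* Guest-side sketch of invoking this hypercall. This assumes the hypercall
 * id travels in RAX (as in Palacios' hypercall dispatch) and uses the AMD
 * VMMCALL encoding; the frequency comes back in RBX per the handler above.
 * Illustration only - compiled out. */
#if 0
static unsigned long guest_query_cpu_freq_khz(void) {
    unsigned long freq;
    asm volatile ("vmmcall"
                  : "=b" (freq)
                  : "a" ((unsigned long)TIME_CPUFREQ_HCALL)
                  : "memory");
    return freq;
}
#endif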
int v3_start_time(struct guest_info * info) {
    /* We start running with guest_time == host_time */
    uint64_t t = v3_get_host_time(&info->time_state);

    PrintDebug("Starting initial guest time as %llu\n", t);

    info->time_state.enter_time = 0;
    info->time_state.exit_time = t;
    info->time_state.last_update = t;
    info->time_state.initial_time = t;
    info->yield_start_cycle = t;

    return 0;
}
int v3_offset_time( struct guest_info * info, sint64_t offset )
{
    struct vm_time * time_state = &(info->time_state);

    PrintDebug("Adding additional offset of %lld to guest time.\n", offset);
    time_state->guest_host_offset += offset;

    return 0;
}
#ifdef V3_CONFIG_TIME_DILATION
static uint64_t compute_target_host_time(struct guest_info * info, uint64_t guest_time)
{
    struct vm_time * time_state = &(info->time_state);
    uint64_t guest_elapsed, desired_elapsed;

    guest_elapsed = (guest_time - time_state->initial_time);
    desired_elapsed = (guest_elapsed * time_state->host_cpu_freq) / time_state->guest_cpu_freq;
    return time_state->initial_time + desired_elapsed;
}
static uint64_t compute_target_guest_time(struct guest_info * info)
{
    struct vm_time * time_state = &(info->time_state);
    uint64_t host_elapsed, desired_elapsed;

    host_elapsed = v3_get_host_time(time_state) - time_state->initial_time;
    desired_elapsed = (host_elapsed * time_state->guest_cpu_freq) / time_state->host_cpu_freq;

    return time_state->initial_time + desired_elapsed;
}
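/* Worked example of the scaling in the two functions above, with assumed
 * frequencies: if host_cpu_freq = 3000000 KHz and guest_cpu_freq = 1500000
 * KHz, then 2000 elapsed host cycles map to (2000 * 1500000) / 3000000 =
 * 1000 guest cycles, i.e. guest time should advance at half the host rate;
 * compute_target_host_time() applies the inverse ratio. */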
/* Yield time in the host to deal with a guest that wants to run slower than
 * the native host cycle frequency */
static int yield_host_time(struct guest_info * info) {
    struct vm_time * time_state = &(info->time_state);
    uint64_t host_time, target_host_time;
    uint64_t guest_time, old_guest_time;

    /* Now, let the host run while the guest is stopped to make the two
     * sync up. Note that this doesn't assume that guest time is stopped;
     * the offsetting in the next step will add an offset to guest
     * time to account for the time paused even if the guest isn't
     * usually paused in the VMM. */
    host_time = v3_get_host_time(time_state);
    old_guest_time = v3_compute_guest_time(time_state, host_time);
    target_host_time = compute_target_host_time(info, old_guest_time);

    while (target_host_time > host_time) {
        v3_yield(info);  /* let the host schedule other work while we wait */
        host_time = v3_get_host_time(time_state);
    }

    guest_time = v3_compute_guest_time(time_state, host_time);

    /* We do *not* assume the guest timer was paused in the VM. If it was,
     * this offsetting is 0. If it wasn't, we need this. */
    v3_offset_time(info, (sint64_t)(old_guest_time - guest_time));

    return 0;
}
static int skew_guest_time(struct guest_info * info) {
    struct vm_time * time_state = &(info->time_state);
    uint64_t target_guest_time, guest_time;

    /* Now the host may have gotten ahead of the guest because
     * yielding is a coarse grained thing. Figure out what guest time
     * we want to be at, and use the offsetting mechanism in
     * the VMM to make the guest run forward. We limit *how* much we skew
     * it forward to prevent the guest time making large jumps,
     * however. */
    target_guest_time = compute_target_guest_time(info);
    guest_time = v3_get_guest_time(time_state);

    if (guest_time < target_guest_time) {
        sint64_t max_skew, desired_skew, skew;

        if (time_state->enter_time) {
            /* Limit forward skew to 10% of the amount the guest has
             * run since we last could skew time */
            max_skew = (sint64_t)(guest_time - time_state->enter_time) / 10;
        } else {
            max_skew = 0;
        }

        desired_skew = (sint64_t)(target_guest_time - guest_time);
        skew = desired_skew > max_skew ? max_skew : desired_skew;
        PrintDebug("Guest %lld cycles behind where it should be.\n",
                   desired_skew);
        PrintDebug("Limit on forward skew is %lld. Skewing forward %lld.\n",
                   max_skew, skew);

        v3_offset_time(info, skew);
    }

    return 0;
}
#endif /* V3_CONFIG_TIME_DILATION */

// Control guest time in relation to host time so that the two stay
// appropriately synchronized to the extent possible.
int v3_adjust_time(struct guest_info * info) {

#ifdef V3_CONFIG_TIME_DILATION
    /* First deal with yielding if we want to slow down the guest */
    yield_host_time(info);

    /* Now, if the guest is too slow (either from excess yielding above,
     * or because the VMM is doing something that takes a long time to emulate),
     * allow guest time to jump forward a bit */
    skew_guest_time(info);
#endif

    return 0;
}
/* Called immediately upon entry to the VMM (i.e., on VM exit) */
int
v3_time_exit_vm( struct guest_info * info )
{
    struct vm_time * time_state = &(info->time_state);

    time_state->exit_time = v3_get_host_time(time_state);

    return 0;
}
/* Called immediately prior to entry to the VM */
int
v3_time_enter_vm( struct guest_info * info )
{
    struct vm_time * time_state = &(info->time_state);
    uint64_t host_time;

    host_time = v3_get_host_time(time_state);
    time_state->enter_time = host_time;
#ifdef V3_CONFIG_TIME_DILATION
    {
        uint64_t guest_time;
        sint64_t offset;

        guest_time = v3_compute_guest_time(time_state, host_time);
        // XXX we probably want to use an inline function to do these
        // time differences to deal with sign and overflow carefully
        offset = (sint64_t)guest_time - (sint64_t)host_time;
        PrintDebug("v3_time_enter_vm: guest time offset %lld from host time.\n", offset);
        time_state->guest_host_offset = offset;
    }
#else
    time_state->guest_host_offset = 0;
#endif

    return 0;
}
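/* A minimal sketch of the inline helper the XXX comment above asks for
 * (hypothetical name, not called anywhere): subtracting the unsigned
 * values first is well defined modulo 2^64 even across wrap-around,
 * whereas casting each operand to sint64_t separately can overflow. */
#if 0
static inline sint64_t cycle_diff(uint64_t a, uint64_t b) {
    return (sint64_t)(a - b);  /* signed distance from b to a */
}
#endif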
struct v3_timer * v3_add_timer(struct guest_info * info,
                               struct v3_timer_ops * ops,
                               void * private_data) {
    struct v3_timer * timer = NULL;
    timer = (struct v3_timer *)V3_Malloc(sizeof(struct v3_timer));
    V3_ASSERT(timer != NULL);

    timer->ops = ops;
    timer->private_data = private_data;

    list_add(&(timer->timer_link), &(info->time_state.timers));
    info->time_state.num_timers++;

    return timer;
}
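/* Sketch of how a timer device might consume this interface. The
 * update_timer() parameter types below are inferred from the call in
 * v3_update_timers() further down; check vmm_time.h for the authoritative
 * prototype. Device names are made up, and the block is compiled out. */
#if 0
static void my_dev_update_timer(struct guest_info * info, ullong_t cpu_cycles,
                                ullong_t cpu_freq, void * priv_data) {
    /* Advance emulated device state by cpu_cycles guest cycles at
     * cpu_freq KHz. */
}

static struct v3_timer_ops my_dev_timer_ops = {
    .update_timer = my_dev_update_timer,
};

/* In device initialization:
 *     struct v3_timer * t = v3_add_timer(info, &my_dev_timer_ops, dev_state);
 * and on teardown:
 *     v3_remove_timer(info, t);
 */
#endif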
int v3_remove_timer(struct guest_info * info, struct v3_timer * timer) {
    list_del(&(timer->timer_link));
    info->time_state.num_timers--;

    V3_Free(timer);
    return 0;
}
void v3_update_timers(struct guest_info * info) {
    struct vm_time * time_state = &info->time_state;
    struct v3_timer * tmp_timer;
    sint64_t cycles;
    uint64_t old_time = info->time_state.last_update;

    time_state->last_update = v3_get_guest_time(time_state);
    cycles = (sint64_t)(time_state->last_update - old_time);
    V3_ASSERT(cycles >= 0);

    // V3_Print("Updating timers with %lld elapsed cycles.\n", cycles);
    list_for_each_entry(tmp_timer, &(time_state->timers), timer_link) {
        tmp_timer->ops->update_timer(info, cycles, time_state->guest_cpu_freq, tmp_timer->private_data);
    }
}
/* Handle TSC timeout hooks */
struct v3_timeout_hook *
v3_add_timeout_hook(struct guest_info * info, v3_timeout_callback_t callback,
                    void * priv_data) {
    struct v3_timeout_hook * timeout = NULL;
    timeout = (struct v3_timeout_hook *)V3_Malloc(sizeof(struct v3_timeout_hook));
    V3_ASSERT(timeout != NULL);

    timeout->callback = callback;
    timeout->private_data = priv_data;

    list_add(&(timeout->hook_link), &(info->time_state.timeout_hooks));
    return timeout;
}
int
v3_remove_timeout_hook(struct guest_info * info, struct v3_timeout_hook * hook) {
    list_del(&(hook->hook_link));
    V3_Free(hook);
    return 0;
}
int v3_schedule_timeout(struct guest_info * info, ullong_t guest_timeout) {
    struct vm_time * time_state = &info->time_state;

    /* Note that virtualization architectures that support it (like newer
     * VMX systems) will turn on an active preemption timeout if
     * available to honor this timeout as closely as possible. Other systems
     * only catch it in the periodic interrupt and so are less precise. */
    if (guest_timeout < time_state->next_timeout) {
        time_state->next_timeout = guest_timeout;
    }

    return 0;
}
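/* Sketch of the timeout-hook flow. The callback shape is inferred from
 * the invocation in v3_check_timeout() below (the int return type is an
 * assumption); names are hypothetical and the block is compiled out. */
#if 0
static int my_timeout_cb(struct guest_info * info, void * priv_data) {
    /* The guest-time deadline passed - e.g., raise a virtual interrupt. */
    return 0;
}

static void my_arm_timeout(struct guest_info * info, void * dev_state) {
    v3_add_timeout_hook(info, my_timeout_cb, dev_state);
    /* Request a timeout roughly one million guest cycles from now. */
    v3_schedule_timeout(info, v3_get_guest_time(&info->time_state) + 1000000);
}
#endif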
int v3_check_timeout( struct guest_info * info ) {
    struct vm_time * time_state = &info->time_state;

    if (time_state->next_timeout <= v3_get_guest_time(time_state)) {
        struct v3_timeout_hook * tmp_timeout;

        time_state->next_timeout = (ullong_t)-1;
        list_for_each_entry(tmp_timeout, &(time_state->timeout_hooks), hook_link) {
            tmp_timeout->callback(info, tmp_timeout->private_data);
        }
    }

    return 0;
}
/*
 * Handle full virtualization of the time stamp counter. As noted
 * above, we don't store the actual value of the TSC, only the guest's
 * offset from the monotonic guest time. If the guest writes to the TSC, we
 * handle this by changing that offset.
 *
 * Possible TODO: Proper hooking of TSC read/writes?
 */
int v3_rdtsc(struct guest_info * info) {
    uint64_t tscval = v3_get_guest_tsc(&info->time_state);

    info->vm_regs.rdx = tscval >> 32;
    info->vm_regs.rax = tscval & 0xffffffffLL;

    return 0;
}
int v3_handle_rdtsc(struct guest_info * info) {
    v3_rdtsc(info);

    info->vm_regs.rax &= 0x00000000ffffffffLL;
    info->vm_regs.rdx &= 0x00000000ffffffffLL;

    /* RDTSC is a two-byte instruction (0x0F 0x31) */
    info->rip += 2;

    return 0;
}
int v3_rdtscp(struct guest_info * info) {
    int ret;

    /* First get the MSR value that we need. It's safe to futz with
     * ra/c/dx here since they're modified by this instruction anyway. */
    info->vm_regs.rcx = TSC_AUX_MSR;
    ret = v3_handle_msr_read(info);

    if (ret != 0) {
        return ret;
    }

    info->vm_regs.rcx = info->vm_regs.rax;

    /* Now do the TSC half of the instruction */
    ret = v3_rdtsc(info);

    return ret;
}
int v3_handle_rdtscp(struct guest_info * info) {
    PrintDebug("Handling virtual RDTSCP call.\n");

    v3_rdtscp(info);

    info->vm_regs.rax &= 0x00000000ffffffffLL;
    info->vm_regs.rcx &= 0x00000000ffffffffLL;
    info->vm_regs.rdx &= 0x00000000ffffffffLL;

    /* RDTSCP is a three-byte instruction (0x0F 0x01 0xF9) */
    info->rip += 3;

    return 0;
}
static int tsc_aux_msr_read_hook(struct guest_info * info, uint_t msr_num,
                                 struct v3_msr * msr_val, void * priv) {
    struct vm_time * time_state = &(info->time_state);

    V3_ASSERT(msr_num == TSC_AUX_MSR);

    msr_val->lo = time_state->tsc_aux.lo;
    msr_val->hi = time_state->tsc_aux.hi;

    return 0;
}
static int tsc_aux_msr_write_hook(struct guest_info * info, uint_t msr_num,
                                  struct v3_msr msr_val, void * priv) {
    struct vm_time * time_state = &(info->time_state);

    V3_ASSERT(msr_num == TSC_AUX_MSR);

    time_state->tsc_aux.lo = msr_val.lo;
    time_state->tsc_aux.hi = msr_val.hi;

    return 0;
}
static int tsc_msr_read_hook(struct guest_info * info, uint_t msr_num,
                             struct v3_msr * msr_val, void * priv) {
    uint64_t time = v3_get_guest_tsc(&info->time_state);

    V3_ASSERT(msr_num == TSC_MSR);

    msr_val->hi = time >> 32;
    msr_val->lo = time & 0xffffffffLL;

    return 0;
}
static int tsc_msr_write_hook(struct guest_info * info, uint_t msr_num,
                              struct v3_msr msr_val, void * priv) {
    struct vm_time * time_state = &(info->time_state);
    uint64_t guest_time, new_tsc;

    V3_ASSERT(msr_num == TSC_MSR);

    new_tsc = (((uint64_t)msr_val.hi) << 32) | (uint64_t)msr_val.lo;
    guest_time = v3_get_guest_time(time_state);
    time_state->tsc_guest_offset = (sint64_t)new_tsc - (sint64_t)guest_time;

    return 0;
}
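/* Worked example for the write hook above: if monotonic guest time reads
 * 5000 cycles when the guest executes WRMSR on the TSC MSR with a value of
 * 2000, tsc_guest_offset becomes 2000 - 5000 = -3000. Subsequent guest TSC
 * reads then return monotonic guest time minus 3000, while guest time
 * itself keeps advancing monotonically. */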
int v3_init_time_vm(struct v3_vm_info * vm) {
    int ret;

    PrintDebug("Installing TSC MSR hook.\n");
    ret = v3_hook_msr(vm, TSC_MSR,
                      tsc_msr_read_hook, tsc_msr_write_hook, NULL);

    if (ret != 0) {
        return ret;
    }

    PrintDebug("Installing TSC_AUX MSR hook.\n");
    ret = v3_hook_msr(vm, TSC_AUX_MSR, tsc_aux_msr_read_hook,
                      tsc_aux_msr_write_hook, NULL);

    if (ret != 0) {
        return ret;
    }

    PrintDebug("Registering TIME_CPUFREQ hypercall.\n");
    ret = v3_register_hypercall(vm, TIME_CPUFREQ_HCALL,
                                handle_cpufreq_hcall, NULL);

    return ret;
}
void v3_deinit_time_vm(struct v3_vm_info * vm) {
    v3_unhook_msr(vm, TSC_MSR);
    v3_unhook_msr(vm, TSC_AUX_MSR);

    v3_remove_hypercall(vm, TIME_CPUFREQ_HCALL);
}
void v3_init_time_core(struct guest_info * info) {
    struct vm_time * time_state = &(info->time_state);
    v3_cfg_tree_t * cfg_tree = info->core_cfg_data;
    char * khz = NULL;

    time_state->host_cpu_freq = V3_CPU_KHZ();
    khz = v3_cfg_val(cfg_tree, "khz");

    if (khz) {
        time_state->guest_cpu_freq = atoi(khz);
        PrintDebug("Logical Core %d (vcpu=%d) CPU frequency requested at %d KHz.\n",
                   info->pcpu_id, info->vcpu_id, time_state->guest_cpu_freq);
    }

    if ( (khz == NULL) ||
         (time_state->guest_cpu_freq <= 0) ||
         (time_state->guest_cpu_freq > time_state->host_cpu_freq) ) {
        time_state->guest_cpu_freq = time_state->host_cpu_freq;
    }

    PrintDebug("Logical Core %d (vcpu=%d) CPU frequency set to %d KHz (host CPU frequency = %d KHz).\n",
               info->pcpu_id, info->vcpu_id,
               time_state->guest_cpu_freq,
               time_state->host_cpu_freq);

    time_state->initial_time = 0;
    time_state->last_update = 0;
    time_state->guest_host_offset = 0;
    time_state->tsc_guest_offset = 0;

    INIT_LIST_HEAD(&(time_state->timeout_hooks));
    time_state->next_timeout = (ullong_t)-1;

    INIT_LIST_HEAD(&(time_state->timers));
    time_state->num_timers = 0;

    time_state->tsc_aux.lo = 0;
    time_state->tsc_aux.hi = 0;
}
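/* Example core configuration (the "khz" key is taken from the lookup
 * above; the surrounding XML layout is an assumption about the machine
 * description format, so check the Palacios config documentation):
 *
 *     <core>
 *         <khz>1000000</khz>
 *     </core>
 *
 * This would request a 1 GHz guest clock; a missing, non-positive, or
 * faster-than-host value falls back to the host frequency per the checks
 * above. */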
void v3_deinit_time_core(struct guest_info * core) {
    struct vm_time * time_state = &(core->time_state);
    struct v3_timer * tmr = NULL;
    struct v3_timer * tmp = NULL;

    list_for_each_entry_safe(tmr, tmp, &(time_state->timers), timer_link) {
        v3_remove_timer(core, tmr);
    }
}