2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
15 * Patrick G. Bridges <bridges@cs.unm.edu>
17 * This is free software. You are permitted to use,
18 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
21 #include <palacios/vmm.h>
22 #include <palacios/vmm_time.h>
23 #include <palacios/vm_guest.h>
25 #ifndef V3_CONFIG_DEBUG_TIME
27 #define PrintDebug(fmt, args...)
32 * Time handling in VMMs is challenging, and Palacios uses the highest
33 * resolution, lowest overhead timer on modern CPUs that it can - the
34 * processor timestamp counter (TSC). Note that on somewhat old processors
35 * this can be problematic; in particular, older AMD processors did not
36 * have a constant rate timestamp counter in the face of power management
37 * events. However, the latest Intel and AMD CPUs all do (should...) have a
38 * constant rate TSC, and Palacios relies on this fact.
40 * Basically, Palacios keeps track of three quantities as it runs to manage
41 * the passage of time:
42 * (1) The host timestamp counter - read directly from HW and never written
43 * (2) A monotonic guest timestamp counter used to measure the progression of
44 * time in the guest. This is stored as an absolute number of cycles elapsed
45 * and is updated on guest entry and exit; it can also be updated explicitly
46 * in the monitor at times
47 * (3) The actual guest timestamp counter (which can be written by
48 * writing to the guest TSC MSR - MSR 0x10) from the monotonic guest TSC.
49 * This is computed as an offset from (2) above, and this offset is
50 * updated when the TSC MSR is written.
52 * Because all other devices are slaved off of the passage of time in the guest,
53 * it is (2) above that drives the firing of other timers in the guest,
54 * including timer devices such as the Programmable Interrupt Timer (PIT).
57 * (1) Add support for temporarily skewing guest time off of where it should
58 * be to support slack simulation of guests. The idea is that simulators
59 * set this skew to be the difference between how much time passed for a
60 * simulated feature and a real implementation of that feature, making time
61 * pass at a different rate from real time on this core. The VMM will then
62 * attempt to move this skew back towards 0 subject to resolution/accuracy
63 * constraints from various system timers.
65 * The main effort in doing this will be to get accuracy/resolution
66 * information from each local timer and to use this to bound how much skew
67 * is removed on each exit.
69 * (2) Look more into synchronizing the offsets *across* virtual and physical
70 * cores so that multicore guests stay mostly in sync.
72 * (3) Look into using the AMD TSC multiplier feature and adding explicit time
73 * dilation support to time handling.
/* TIME_CPUFREQ hypercall handler: report this core's virtual CPU frequency
 * to the guest in RBX.  Units match guest_cpu_freq, which is compared
 * against V3_CPU_KHZ() in v3_init_time_core, so presumably kHz.
 * NOTE(review): the return statement and closing brace of this function
 * are elided in this extract (original lines 83-88 missing). */
77 static int handle_cpufreq_hcall(struct guest_info * info, uint_t hcall_id, void * priv_data) {
78 struct vm_core_time * time_state = &(info->time_state);
/* Hand the frequency back to the guest via RBX. */
80 info->vm_regs.rbx = time_state->guest_cpu_freq;
82 PrintDebug("Guest request cpu frequency: return %ld\n", (long)info->vm_regs.rbx);
/* Initialize this core's time tracking at VM start so that guest time
 * begins equal to host time: pause_time and initial_time are both set to
 * the current host TSC reading, and the monotonic guest cycle counter
 * starts at zero.
 * NOTE(review): the trailing return/closing brace are elided here. */
89 int v3_start_time(struct guest_info * info) {
90 /* We start running with guest_time == host_time */
91 uint64_t t = v3_get_host_time(&info->time_state);
93 info->time_state.enter_time = 0;
94 info->time_state.pause_time = t;
95 info->time_state.initial_time = t;
96 info->yield_start_cycle = t;
98 info->time_state.last_update = 0;
99 info->time_state.guest_cycles = 0;
100 PrintDebug("Starting time for core %d at host time %llu/guest time %llu.\n",
101 info->vcpu_id, t, info->time_state.guest_cycles);
/* Skew the monotonic guest cycle counter by a signed number of cycles.
 * Used by v3_advance_time (below) to credit elapsed host time to the
 * guest.  NOTE(review): the opening/closing braces and return statement
 * of this function are elided in this extract. */
106 int v3_offset_time( struct guest_info * info, sint64_t offset )
108 struct vm_core_time * time_state = &(info->time_state);
109 time_state->guest_cycles += offset;
/* Discard host time that passed while the core was paused: pull
 * pause_time forward to "now" so that elapsed host time is NOT credited
 * to the guest.  Refused when the VM is slaved to the host clock
 * (follow_host_time), since skipping would break that invariant.
 * NOTE(review): the error-return after PrintError and the function's
 * closing brace are elided in this extract. */
113 int v3_skip_time(struct guest_info * info) {
114 if (info->vm_info->time_state.follow_host_time) {
115 PrintError("Cannot skip host time passage while slaved to host clock.\n");
118 info->time_state.pause_time = v3_get_host_time(&info->time_state);
/* Advance the monotonic guest cycle counter to account for host time
 * that has passed.  Two modes:
 *  - follow_host_time: compute how many guest cycles SHOULD have elapsed
 *    since initial_time (host cycles scaled by guest/host frequency
 *    ratio) and add the lag needed to catch up;
 *  - otherwise: credit the guest with the raw host cycles elapsed since
 *    pause_time.
 * In both cases pause_time is reset to "now".
 * NOTE(review): the `} else {` between the two branches, the return, and
 * the closing brace are elided in this extract. */
123 int v3_advance_time(struct guest_info * info) {
124 uint64_t t = v3_get_host_time(&info->time_state);
125 if (info->vm_info->time_state.follow_host_time) {
126 /* How many guest cycles should have elapsed? */
127 sint64_t host_elapsed = t - info->time_state.initial_time;
128 sint64_t guest_target = (host_elapsed * info->time_state.guest_cpu_freq) / info->time_state.host_cpu_freq;
/* cycle_lag may be negative if the guest ran ahead; v3_offset_time takes a signed offset. */
129 sint64_t cycle_lag = guest_target - info->time_state.guest_cycles;
130 v3_offset_time(info, cycle_lag);
132 v3_offset_time(info, (sint64_t)(t - info->time_state.pause_time));
134 info->time_state.pause_time = t;
139 /* Called immediately upon entry into the VMM (i.e., on exit from the VM):
 * records the host time of the exit in pause_time and credits the guest
 * cycle counter with the cycles executed during the just-finished run.
 * NOTE(review): the visible lines add BOTH the caller-supplied
 * *guest_cycles and the host-time-derived cycles_exec; the elided lines
 * (142, 144, 146, 148, 152-155) presumably contain the return type, a
 * NULL-pointer branch selecting one of the two, and the return —
 * confirm against the full source. */
141 v3_time_exit_vm( struct guest_info * info, uint64_t * guest_cycles )
143 struct vm_core_time * time_state = &(info->time_state);
145 time_state->pause_time = v3_get_host_time(time_state);
/* Caller-measured cycle count (e.g. from HW TSC offsetting). */
147 time_state->guest_cycles += *guest_cycles;
/* Fallback: derive executed cycles from host wall time across the run. */
149 sint64_t cycles_exec;
150 cycles_exec = (sint64_t)(time_state->pause_time - time_state->enter_time);
151 time_state->guest_cycles += cycles_exec;
156 /* Called immediately prior to entry to the VM: catches guest time up to
 * host time (v3_advance_time) and records the host time of entry so the
 * next exit can measure the cycles spent in the guest.
 * NOTE(review): return type line, return statement, and closing brace
 * are elided in this extract. */
158 v3_time_enter_vm( struct guest_info * info )
160 struct vm_core_time * time_state = &(info->time_state);
161 uint64_t host_time = v3_get_host_time(&info->time_state);
163 v3_advance_time(info);
164 time_state->enter_time = host_time;
/* Allocate a per-core virtual timer, attach the caller's callbacks and
 * private data, and link it onto this core's timer list (consumed by
 * v3_update_timers).  Returns the new timer; allocation failure is
 * treated as fatal via V3_ASSERT.
 * NOTE(review): the elided line 177 presumably does `timer->ops = ops;`
 * — confirm against the full source; the return statement and closing
 * brace are also elided. */
170 struct v3_timer * v3_add_timer(struct guest_info * info,
171 struct v3_timer_ops * ops,
172 void * private_data) {
173 struct v3_timer * timer = NULL;
174 timer = (struct v3_timer *)V3_Malloc(sizeof(struct v3_timer));
175 V3_ASSERT(timer != NULL);
178 timer->private_data = private_data;
180 list_add(&(timer->timer_link), &(info->time_state.timers));
181 info->time_state.num_timers++;
/* Unlink a timer from this core's timer list and decrement the count.
 * NOTE(review): whether the timer struct is freed here is not visible —
 * the elided lines (189-193) presumably V3_Free() it and return;
 * confirm ownership against the full source. */
186 int v3_remove_timer(struct guest_info * info, struct v3_timer * timer) {
187 list_del(&(timer->timer_link));
188 info->time_state.num_timers--;
/* Drive all registered per-core timers: compute the guest cycles that
 * elapsed since the last update (guest time is monotonic, hence the
 * cycles >= 0 assertion) and invoke each timer's update_timer callback
 * with that delta and the guest CPU frequency.
 * NOTE(review): the declaration of `cycles` (presumably sint64_t, elided
 * original line ~197) and the loop/function closing braces are elided in
 * this extract. */
194 void v3_update_timers(struct guest_info * info) {
195 struct vm_core_time *time_state = &info->time_state;
196 struct v3_timer * tmp_timer;
198 uint64_t old_time = info->time_state.last_update;
200 time_state->last_update = v3_get_guest_time(time_state);
201 cycles = (sint64_t)(time_state->last_update - old_time);
/* Guest time never moves backwards; a negative delta indicates corruption. */
202 V3_ASSERT(cycles >= 0);
204 // V3_Print("Updating timers with %lld elapsed cycles.\n", cycles);
205 list_for_each_entry(tmp_timer, &(time_state->timers), timer_link) {
206 tmp_timer->ops->update_timer(info, cycles, time_state->guest_cpu_freq, tmp_timer->private_data);
212 * Handle full virtualization of the time stamp counter. As noted
213 * above, we don't store the actual value of the TSC, only the guest's
214 * offset from monotonic guest's time. If the guest writes to the TSC, we
215 * handle this by changing that offset.
217 * Possible TODO: Proper hooking of TSC read/writes?
/* Emulate RDTSC: place the virtualized guest TSC in EDX:EAX, exactly as
 * the hardware instruction would (high 32 bits in RDX, low 32 in RAX).
 * NOTE(review): return statement and closing brace elided. */
220 int v3_rdtsc(struct guest_info * info) {
221 uint64_t tscval = v3_get_guest_tsc(&info->time_state);
223 info->vm_regs.rdx = tscval >> 32;
224 info->vm_regs.rax = tscval & 0xffffffffLL;
/* Exit handler for a guest RDTSC: masks RAX/RDX to their low 32 bits,
 * matching real RDTSC behavior (upper halves are zeroed in 64-bit mode).
 * NOTE(review): the call into v3_rdtsc and the RIP advance/return are on
 * elided lines (230-231, 234-239) — confirm against the full source. */
229 int v3_handle_rdtsc(struct guest_info * info) {
232 info->vm_regs.rax &= 0x00000000ffffffffLL;
233 info->vm_regs.rdx &= 0x00000000ffffffffLL;
/* Emulate RDTSCP: load the virtualized TSC_AUX MSR value into RCX via
 * the MSR-read path, then fill EDX:EAX with the guest TSC via v3_rdtsc
 * — mirroring the hardware instruction's outputs.
 * NOTE(review): the declaration of `ret` and the error checks after each
 * call are on elided lines (241, 246-250, 255+) — confirm against the
 * full source. */
240 int v3_rdtscp(struct guest_info * info) {
242 /* First get the MSR value that we need. It's safe to futz with
243 * ra/c/dx here since they're modified by this instruction anyway. */
244 info->vm_regs.rcx = TSC_AUX_MSR;
245 ret = v3_handle_msr_read(info);
/* v3_handle_msr_read left the MSR value in RAX; RDTSCP reports it in RCX. */
251 info->vm_regs.rcx = info->vm_regs.rax;
253 /* Now do the TSC half of the instruction */
254 ret = v3_rdtsc(info);
/* Exit handler for a guest RDTSCP: masks RAX/RCX/RDX to their low
 * 32 bits, as the hardware instruction zero-extends all three outputs.
 * NOTE(review): the call into v3_rdtscp and the RIP advance/return are
 * on elided lines (266-268, 272+) — confirm against the full source. */
264 int v3_handle_rdtscp(struct guest_info * info) {
265 PrintDebug("Handling virtual RDTSCP call.\n");
269 info->vm_regs.rax &= 0x00000000ffffffffLL;
270 info->vm_regs.rcx &= 0x00000000ffffffffLL;
271 info->vm_regs.rdx &= 0x00000000ffffffffLL;
/* MSR read hook for TSC_AUX: return the per-core shadowed TSC_AUX value
 * (both halves) to the guest.  The guest never sees the host's TSC_AUX.
 * NOTE(review): return statement and closing brace elided. */
278 static int tsc_aux_msr_read_hook(struct guest_info *info, uint_t msr_num,
279 struct v3_msr *msr_val, void *priv) {
280 struct vm_core_time * time_state = &(info->time_state);
282 V3_ASSERT(msr_num == TSC_AUX_MSR);
284 msr_val->lo = time_state->tsc_aux.lo;
285 msr_val->hi = time_state->tsc_aux.hi;
/* MSR write hook for TSC_AUX: store the guest's value into the per-core
 * shadow copy; it is served back by tsc_aux_msr_read_hook and by the
 * RDTSCP emulation path.
 * NOTE(review): return statement and closing brace elided. */
290 static int tsc_aux_msr_write_hook(struct guest_info *info, uint_t msr_num,
291 struct v3_msr msr_val, void *priv) {
292 struct vm_core_time * time_state = &(info->time_state);
294 V3_ASSERT(msr_num == TSC_AUX_MSR);
296 time_state->tsc_aux.lo = msr_val.lo;
297 time_state->tsc_aux.hi = msr_val.hi;
/* MSR read hook for the TSC MSR (0x10): return the virtualized guest
 * TSC (monotonic guest time plus the guest's write-offset), split into
 * hi/lo halves.
 * NOTE(review): return statement and closing brace elided. */
302 static int tsc_msr_read_hook(struct guest_info *info, uint_t msr_num,
303 struct v3_msr *msr_val, void *priv) {
304 uint64_t time = v3_get_guest_tsc(&info->time_state);
306 V3_ASSERT(msr_num == TSC_MSR);
308 msr_val->hi = time >> 32;
309 msr_val->lo = time & 0xffffffffLL;
/* MSR write hook for the TSC MSR (0x10): rather than storing the raw
 * value, record the written TSC as a signed offset from the monotonic
 * guest time (quantity (2) in the file header).  Subsequent TSC reads
 * apply this offset, so guest time itself is never perturbed.
 * NOTE(review): return statement and closing brace elided. */
314 static int tsc_msr_write_hook(struct guest_info *info, uint_t msr_num,
315 struct v3_msr msr_val, void *priv) {
316 struct vm_core_time * time_state = &(info->time_state);
317 uint64_t guest_time, new_tsc;
319 V3_ASSERT(msr_num == TSC_MSR);
321 new_tsc = (((uint64_t)msr_val.hi) << 32) | (uint64_t)msr_val.lo;
322 guest_time = v3_get_guest_time(time_state);
/* Offset is computed in unsigned arithmetic and reinterpreted as signed;
 * wraparound is well-defined for uint64_t. */
323 time_state->tsc_guest_offset = (sint64_t)(new_tsc - guest_time);
/* VM-wide time initialization: install the TSC and TSC_AUX MSR hooks,
 * register the TIME_CPUFREQ hypercall, and set the default time policy
 * (no dilation, guest time slaved to host time).
 * NOTE(review): the declaration of `ret`, the error checks after each
 * hook/registration, and the final return are on elided lines (330-331,
 * 335-339, 343-347, 351, 357+) — confirm against the full source. */
329 int v3_init_time_vm(struct v3_vm_info * vm) {
332 PrintDebug("Installing TSC MSR hook.\n");
333 ret = v3_hook_msr(vm, TSC_MSR,
334 tsc_msr_read_hook, tsc_msr_write_hook, NULL);
340 PrintDebug("Installing TSC_AUX MSR hook.\n");
341 ret = v3_hook_msr(vm, TSC_AUX_MSR, tsc_aux_msr_read_hook,
342 tsc_aux_msr_write_hook, NULL);
348 PrintDebug("Registering TIME_CPUFREQ hypercall.\n");
349 ret = v3_register_hypercall(vm, TIME_CPUFREQ_HCALL,
350 handle_cpufreq_hcall, NULL);
/* Default policy: time dilation factor 1 (no dilation)... */
352 vm->time_state.td_mult = 1;
353 PrintDebug("Setting base time dilation factor to %d.\n", vm->time_state.td_mult);
/* ...and guest time locked to the host clock (see v3_advance_time). */
355 vm->time_state.follow_host_time = 1;
356 PrintDebug("Locking guest time to host time.\n");
/* VM-wide time teardown: unhook both MSRs and remove the hypercall
 * registered in v3_init_time_vm.  NOTE(review): closing brace elided. */
360 void v3_deinit_time_vm(struct v3_vm_info * vm) {
361 v3_unhook_msr(vm, TSC_MSR);
362 v3_unhook_msr(vm, TSC_AUX_MSR);
364 v3_remove_hypercall(vm, TIME_CPUFREQ_HCALL);
/* Per-core time initialization: read the optional "khz" frequency from
 * the core's config tree, validate it against the host frequency
 * (falling back to the host frequency when absent/invalid), and zero all
 * per-core time-tracking state and the timer list.
 * NOTE(review): the declaration of `khz` (presumably char *) and the
 * `if (khz)` guard around the atoi call are on elided lines (370-371,
 * 374-375, 379-380, 384, 386-387) — confirm against the full source.
 * NOTE(review): atoi() gives no error reporting; the subsequent
 * guest_cpu_freq <= 0 range check is what catches bad input here. */
367 void v3_init_time_core(struct guest_info * info) {
368 struct vm_core_time * time_state = &(info->time_state);
369 v3_cfg_tree_t * cfg_tree = info->core_cfg_data;
372 time_state->host_cpu_freq = V3_CPU_KHZ();
373 khz = v3_cfg_val(cfg_tree, "khz");
376 time_state->guest_cpu_freq = atoi(khz);
377 PrintDebug("Logical Core %d (vcpu=%d) CPU frequency requested at %d khz.\n",
378 info->pcpu_id, info->vcpu_id, time_state->guest_cpu_freq);
/* Reject missing/invalid/too-fast requests; guest freq may not exceed host. */
381 if ( (khz == NULL) ||
382 (time_state->guest_cpu_freq <= 0) ||
383 (time_state->guest_cpu_freq > time_state->host_cpu_freq) ) {
385 time_state->guest_cpu_freq = time_state->host_cpu_freq;
388 PrintDebug("Logical Core %d (vcpu=%d) CPU frequency set to %d KHz (host CPU frequency = %d KHz).\n",
389 info->pcpu_id, info->vcpu_id,
390 time_state->guest_cpu_freq,
391 time_state->host_cpu_freq);
/* Zero all time-tracking state; v3_start_time sets the real start values. */
393 time_state->initial_time = 0;
394 time_state->last_update = 0;
395 time_state->tsc_guest_offset = 0;
396 time_state->enter_time = 0;
397 time_state->pause_time = 0;
398 time_state->guest_cycles = 0;
400 INIT_LIST_HEAD(&(time_state->timers));
401 time_state->num_timers = 0;
403 time_state->tsc_aux.lo = 0;
404 time_state->tsc_aux.hi = 0;
/* Per-core time teardown: walk the timer list with the _safe iterator
 * (v3_remove_timer unlinks the current entry, so the plain iterator
 * would be unsafe) and remove every registered timer.
 * NOTE(review): the loop body's and function's closing braces fall
 * beyond this extract. */
408 void v3_deinit_time_core(struct guest_info * core) {
409 struct vm_core_time * time_state = &(core->time_state);
410 struct v3_timer * tmr = NULL;
411 struct v3_timer * tmp = NULL;
413 list_for_each_entry_safe(tmr, tmp, &(time_state->timers), timer_link) {
414 v3_remove_timer(core, tmr);