* the passage of time:
* (1) The host timestamp counter - read directly from HW and never written
* (2) A monotonic guest timestamp counter used to measure the progression of
- * time in the guest. This is computed using an offsets from (1) above.
+ * time in the guest. This is stored as an absolute number of cycles elapsed
+ * and is updated on guest entry and exit; it can also be updated explicitly
+ * in the monitor, e.g. when the VMM offsets or advances guest time itself.
* (3) The actual guest timestamp counter (which can be written by
* writing to the guest TSC MSR - MSR 0x10), derived from the monotonic
* guest TSC. This is computed as an offset from (2) above, and that
* offset is updated whenever the guest writes the TSC MSR (see the
* sketch below).
*
- * The value used to offset the guest TSC from the host TSC is the *sum* of all
- * of these offsets (2 and 3) above
- *
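+ *
+ * Informally, in terms of the struct vm_core_time fields used below:
+ *   guest_time = guest_cycles                      (2)
+ *   guest_tsc  = guest_time + tsc_guest_offset     (3)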
* Because all other devices are slaved off of the passage of time in the guest,
* it is (2) above that drives the firing of other timers in the guest,
* including timer devices such as the Programmable Interval Timer (PIT).
* (1) Add support for temporarily skewing guest time off of where it should
* be to support slack simulation of guests. The idea is that simulators
* set this skew to be the difference between how much time passed for a
- * simulated feature and a real implementation of that feature, making
+ * simulated feature and a real implementation of that feature, making time
* pass at a different rate from real time on this core. The VMM will then
* attempt to move this skew back towards 0 subject to resolution/accuracy
* constraints from various system timers.
* The main effort in doing this will be to get accuracy/resolution
* information from each local timer and to use this to bound how much skew
* is removed on each exit.
+ *
+ * (2) Look more into synchronizing the offsets *across* virtual and physical
+ * cores so that multicore guests stay mostly in sync.
+ *
+ * (3) Look into using the AMD TSC multiplier feature and adding explicit time
+ * dilation support to time handling.
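+ *
+ *     One illustrative reading of the dilation in (3), assuming the
+ *     td_num/td_denom fields initialized below scale elapsed host time:
+ *       dilated_host_cycles = (host_cycles * td_num) / td_denom
+ *     e.g. a factor of 1/2 would make guest time appear to pass at half
+ *     the host rate (this patch only sets the factor to 1/1).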
*/
static int handle_cpufreq_hcall(struct guest_info * info, uint_t hcall_id, void * priv_data) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
info->vm_regs.rbx = time_state->guest_cpu_freq;
/* We start running with guest_time == host_time */
uint64_t t = v3_get_host_time(&info->time_state);
- PrintDebug("Starting initial guest time as %llu\n", t);
-
- info->time_state.enter_time = 0;
- info->time_state.exit_time = t;
- info->time_state.last_update = t;
- info->time_state.initial_time = t;
+ info->time_state.vm_enter_host_time = 0;
+ info->time_state.vm_pause_host_time = t;
+ info->time_state.initial_host_time = t;
info->yield_start_cycle = t;
+ info->time_state.last_update = 0;
+ info->time_state.guest_cycles = 0;
+ PrintDebug("Starting time for core %d at host time %llu/guest time %llu.\n",
+ info->vcpu_id, t, info->time_state.guest_cycles);
+ v3_yield(info);
return 0;
}
int v3_offset_time( struct guest_info * info, sint64_t offset )
{
- struct vm_time * time_state = &(info->time_state);
-// PrintDebug("Adding additional offset of %lld to guest time.\n", offset);
- time_state->guest_host_offset += offset;
+ struct vm_core_time * time_state = &(info->time_state);
+ if (info->vm_info->time_state.follow_host_time) {
+ PrintError("Cannot offset guest time passage while slaved to host clock.\n");
+ return 1;
+ } else {
+ time_state->guest_cycles += offset;
+ }
return 0;
}
-static uint64_t compute_target_host_time(struct guest_info * info)
-{
- struct vm_time * time_state = &(info->time_state);
- uint64_t guest_elapsed, desired_elapsed;
-
- guest_elapsed = (v3_get_guest_time(time_state) - time_state->initial_time);
- desired_elapsed = (guest_elapsed * time_state->host_cpu_freq) / time_state->guest_cpu_freq;
- return time_state->initial_time + desired_elapsed;
-}
-
-static uint64_t compute_target_guest_time(struct guest_info *info)
-{
- struct vm_time * time_state = &(info->time_state);
- uint64_t host_elapsed, desired_elapsed;
-
- host_elapsed = v3_get_host_time(time_state) - time_state->initial_time;
- desired_elapsed = (host_elapsed * time_state->guest_cpu_freq) / time_state->host_cpu_freq;
-
- return time_state->initial_time + desired_elapsed;
-
-}
-
-/* Yield time in the host to deal with a guest that wants to run slower than
- * the native host cycle frequency */
-static int yield_host_time(struct guest_info * info) {
- struct vm_time * time_state = &(info->time_state);
- uint64_t host_time, target_host_time;
- uint64_t guest_time, old_guest_time;
-
- /* Compute the target host time given how much time has *already*
- * passed in the guest */
- target_host_time = compute_target_host_time(info);
-
- /* Now, let the host run while the guest is stopped to make the two
- * sync up. Note that this doesn't assume that guest time is stopped;
- * the offsetting in the next step will change add an offset to guest
- * time to account for the time paused even if the geust isn't
- * usually paused in the VMM. */
- host_time = v3_get_host_time(time_state);
- old_guest_time = v3_get_guest_time(time_state);
-
- while (target_host_time > host_time) {
- v3_yield(info);
- host_time = v3_get_host_time(time_state);
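+/* Skip over host time that passed while this core was not running (e.g.
+ * while it was paused in the VMM) by resetting the point from which the
+ * next time advance is measured, instead of crediting that time to the
+ * guest. */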
+int v3_skip_time(struct guest_info * info) {
+ if (info->vm_info->time_state.follow_host_time) {
+ PrintError("Cannot skip guest time passage while slaved to host clock.\n");
+ return 1;
+ } else {
+ info->time_state.vm_pause_host_time = v3_get_host_time(&info->time_state);
}
-
- guest_time = v3_get_guest_time(time_state);
-
- /* We do *not* assume the guest timer was paused in the VM. If it was
- * this offseting is 0. If it wasn't, we need this. */
- v3_offset_time(info, (sint64_t)old_guest_time - (sint64_t)guest_time);
-
return 0;
}
-static int skew_guest_time(struct guest_info * info) {
- struct vm_time * time_state = &(info->time_state);
- uint64_t target_guest_time, guest_time;
- /* Now the host may have gotten ahead of the guest because
- * yielding is a coarse grained thing. Figure out what guest time
- * we want to be at, and use the use the offsetting mechanism in
- * the VMM to make the guest run forward. We limit *how* much we skew
- * it forward to prevent the guest time making large jumps,
- * however. */
- target_guest_time = compute_target_guest_time(info);
- guest_time = v3_get_guest_time(time_state);
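+/* Convert a count of host cycles into guest cycles using the per-core
+ * clock ratio (clock_ratio_num/clock_ratio_denom, nominally the guest CPU
+ * frequency over the host CPU frequency). */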
+static sint64_t host_to_guest_cycles(struct guest_info * info, sint64_t host_cycles) {
+ return (host_cycles * info->time_state.clock_ratio_num) / info->time_state.clock_ratio_denom;
+}
- if (guest_time < target_guest_time) {
- sint64_t max_skew, desired_skew, skew;
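+/* Advance this core's guest cycle count. When the guest is slaved to the
+ * host clock, guest time is recomputed from total elapsed host time;
+ * otherwise it advances by *host_cycles if supplied, or by the host time
+ * elapsed since guest time was last advanced. */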
+int v3_time_advance_cycles(struct guest_info * info, uint64_t *host_cycles)
+{
+ uint64_t t = v3_get_host_time(&info->time_state);
- if (time_state->enter_time) {
- /* Limit forward skew to 10% of the amount the guest has
- * run since we last could skew time */
- max_skew = ((sint64_t)guest_time - (sint64_t)time_state->enter_time) / 10;
+
+ if (info->vm_info->time_state.follow_host_time) {
+ /* How many guest cycles should have elapsed? */
+ sint64_t host_elapsed = t - info->time_state.initial_host_time;
+ sint64_t guest_elapsed = host_to_guest_cycles(info, host_elapsed);
+
+ info->time_state.guest_cycles = guest_elapsed;
+ } else {
+ uint64_t guest_cycles;
+ /* host_cycles may be NULL (e.g. when called from v3_advance_time()). */
+ if (host_cycles && *host_cycles) {
+ guest_cycles = host_to_guest_cycles(info, *host_cycles);
} else {
- max_skew = 0;
+ guest_cycles = host_to_guest_cycles(info, (sint64_t)(t - info->time_state.vm_pause_host_time));
}
-
- desired_skew = (sint64_t)target_guest_time - (sint64_t)guest_time;
- skew = desired_skew > max_skew ? max_skew : desired_skew;
- PrintDebug("Guest %lld cycles behind where it should be.\n",
- desired_skew);
- PrintDebug("Limit on forward skew is %lld. Skewing forward %lld.\n",
- max_skew, skew);
-
- v3_offset_time(info, skew);
- }
+ info->time_state.guest_cycles += guest_cycles;
+ }
+
+ /* Remember the host time at which guest time was last advanced; the
+ * next advance (and v3_skip_time()) measures elapsed time from here. */
+ info->time_state.vm_pause_host_time = t;
return 0;
}
-// Control guest time in relation to host time so that the two stay
-// appropriately synchronized to the extent possible.
-int v3_adjust_time(struct guest_info * info) {
-
- /* First deal with yielding if we want to slow down the guest */
- yield_host_time(info);
-
- /* Now, if the guest is too slow, (either from excess yielding above,
- * or because the VMM is doing something that takes a long time to emulate)
- * allow guest time to jump forward a bit */
- skew_guest_time(info);
-
- return 0;
+int v3_advance_time(struct guest_info * info) {
+ return v3_time_advance_cycles(info, NULL);
}
/* Called immediately upon entry into the VMM */
int
-v3_time_exit_vm( struct guest_info * info )
+v3_time_exit_vm( struct guest_info * info, uint64_t * host_cycles )
{
- struct vm_time * time_state = &(info->time_state);
-
- time_state->exit_time = v3_get_host_time(time_state);
-
- return 0;
+ return v3_time_advance_cycles(info, host_cycles);
}
/* Called immediately prior to entry to the VM */
int
v3_time_enter_vm( struct guest_info * info )
{
- struct vm_time * time_state = &(info->time_state);
- uint64_t guest_time, host_time;
-
- host_time = v3_get_host_time(time_state);
- guest_time = v3_get_guest_time(time_state);
- time_state->enter_time = host_time;
- time_state->guest_host_offset = (sint64_t)guest_time - (sint64_t)host_time;
+ struct vm_core_time * time_state = &(info->time_state);
+ uint64_t host_time = v3_get_host_time(&info->time_state);
+ time_state->vm_enter_host_time = host_time;
return 0;
}
}
void v3_update_timers(struct guest_info * info) {
- struct vm_time *time_state = &info->time_state;
+ struct vm_core_time *time_state = &info->time_state;
struct v3_timer * tmp_timer;
- uint64_t old_time = info->time_state.last_update;
sint64_t cycles;
+ uint64_t old_time = time_state->last_update;
time_state->last_update = v3_get_guest_time(time_state);
- cycles = time_state->last_update - old_time;
- V3_ASSERT(cycles >= 0);
+ cycles = (sint64_t)(time_state->last_update - old_time);
+ if (cycles < 0) {
+ PrintError("Cycles appears to have rolled over - old time %lld, current time %lld.\n",
+ old_time, time_state->last_update);
+ return;
+ }
- // V3_Print("Updating timers with %lld elapsed cycles.\n", cycles);
+ PrintDebug("Updating timers with %lld elapsed cycles.\n", cycles);
list_for_each_entry(tmp_timer, &(time_state->timers), timer_link) {
tmp_timer->ops->update_timer(info, cycles, time_state->guest_cpu_freq, tmp_timer->private_data);
}
}
+
/*
* Handle full virtualization of the time stamp counter. As noted
* above, we don't store the actual value of the TSC, only the guest's
static int tsc_aux_msr_read_hook(struct guest_info *info, uint_t msr_num,
struct v3_msr *msr_val, void *priv) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
V3_ASSERT(msr_num == TSC_AUX_MSR);
static int tsc_aux_msr_write_hook(struct guest_info *info, uint_t msr_num,
struct v3_msr msr_val, void *priv) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
V3_ASSERT(msr_num == TSC_AUX_MSR);
static int tsc_msr_write_hook(struct guest_info *info, uint_t msr_num,
struct v3_msr msr_val, void *priv) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
uint64_t guest_time, new_tsc;
V3_ASSERT(msr_num == TSC_MSR);
new_tsc = (((uint64_t)msr_val.hi) << 32) | (uint64_t)msr_val.lo;
guest_time = v3_get_guest_time(time_state);
- time_state->tsc_guest_offset = (sint64_t)new_tsc - (sint64_t)guest_time;
+ time_state->tsc_guest_offset = (sint64_t)(new_tsc - guest_time);
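+ /* Subsequent guest TSC values are then derived as guest_time +
+ * tsc_guest_offset (item (3) in the header comment), so the guest sees
+ * the value it just wrote and that value advances with guest time. */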
return 0;
}
ret = v3_register_hypercall(vm, TIME_CPUFREQ_HCALL,
handle_cpufreq_hcall, NULL);
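+
+ /* Default time policy: no time dilation (factor 1/1) and guest time
+ * slaved to the host clock. */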
+ vm->time_state.td_num = 1;
+ vm->time_state.td_denom = 1;
+ PrintDebug("Setting base time dilation factor to %d/%d.\n",
+ vm->time_state.td_num, vm->time_state.td_denom);
+
+ vm->time_state.follow_host_time = 1;
+ PrintDebug("Locking guest time to host time.\n");
return ret;
}
}
void v3_init_time_core(struct guest_info * info) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
v3_cfg_tree_t * cfg_tree = info->core_cfg_data;
char * khz = NULL;
time_state->guest_cpu_freq = time_state->host_cpu_freq;
}
+ /* These should eventually be computed from the guest and host CPU
+ * frequencies (reduced by their GCD, made "big enough" if the GCD is
+ * too small); for now, fall back to a 1:1 clock ratio. */
+ time_state->clock_ratio_num = 1;
+ time_state->clock_ratio_denom = 1;
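+ /* For illustration, assuming a gcd() helper were available:
+ * g = gcd(host_cpu_freq, guest_cpu_freq);
+ * clock_ratio_num = guest_cpu_freq / g;
+ * clock_ratio_denom = host_cpu_freq / g;
+ */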
+
PrintDebug("Logical Core %d (vcpu=%d) CPU frequency set to %d KHz (host CPU frequency = %d KHz).\n",
info->pcpu_id, info->vcpu_id,
time_state->guest_cpu_freq,
time_state->host_cpu_freq);
- time_state->initial_time = 0;
- time_state->last_update = 0;
- time_state->guest_host_offset = 0;
+ time_state->guest_cycles = 0;
time_state->tsc_guest_offset = 0;
+ time_state->last_update = 0;
+
+ time_state->initial_host_time = 0;
+ time_state->vm_enter_host_time = 0;
+ time_state->vm_pause_host_time = 0;
INIT_LIST_HEAD(&(time_state->timers));
time_state->num_timers = 0;
void v3_deinit_time_core(struct guest_info * core) {
- struct vm_time * time_state = &(core->time_state);
+ struct vm_core_time * time_state = &(core->time_state);
struct v3_timer * tmr = NULL;
struct v3_timer * tmp = NULL;