* the passage of time:
* (1) The host timestamp counter - read directly from HW and never written
* (2) A monotonic guest timestamp counter used to measure the progression of
- * time in the guest. This is computed using an offsets from (1) above.
+ * time in the guest. This is stored as an absolute number of cycles elapsed
+ * and is updated on guest entry and exit; it can also be updated explicitly
+ * in the monitor.
* (3) The actual guest timestamp counter (which can be written by
* writing to the guest TSC MSR - MSR 0x10), computed from the monotonic
* guest TSC. It is stored as an offset from (2) above, and this offset
* is updated when the TSC MSR is written. (An illustrative sketch
* follows this comment block.)
*
- * The value used to offset the guest TSC from the host TSC is the *sum* of all
- * of these offsets (2 and 3) above
- *
* Because all other devices are slaved off of the passage of time in the guest,
* it is (2) above that drives the firing of other timers in the guest,
* including timer devices such as the Programmable Interrupt Timer (PIT).
*
* Future additions:
* (1) Add support for temporarily skewing guest time off of where it should
* be to support slack simulation of guests. The idea is that simulators
* set this skew to be the difference between how much time passed for a
- * simulated feature and a real implementation of that feature, making
+ * simulated feature and a real implementation of that feature, making time
* pass at a different rate from real time on this core. The VMM will then
* attempt to move this skew back towards 0 subject to resolution/accuracy
* constraints from various system timers.
* The main effort in doing this will be to get accuracy/resolution
* information from each local timer and to use this to bound how much skew
* is removed on each exit.
+ *
+ * (2) Look more into synchronizing the offsets *across* virtual and physical
+ * cores so that multicore guests stay mostly in sync.
+ *
+ * (3) Look into using the AMD TSC multiplier feature and adding explicit time
+ * dilation support to time handling.
*/
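/* Illustrative sketch (editorial, not part of this patch): with guest time
 * kept as an absolute cycle count, the guest-visible TSC is a simple sum.
 * The helper name is hypothetical; the fields match those used below. */
static inline uint64_t example_guest_tsc(struct vm_core_time * time_state) {
    /* (2) above: monotonic guest time, stored as cycles elapsed */
    uint64_t guest_time = time_state->guest_cycles;

    /* (3) above: guest TSC = guest time plus the offset written via MSR 0x10 */
    return guest_time + time_state->tsc_guest_offset;
}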
static int handle_cpufreq_hcall(struct guest_info * info, uint_t hcall_id, void * priv_data) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
info->vm_regs.rbx = time_state->guest_cpu_freq;
/* We start running with guest_time == host_time */
uint64_t t = v3_get_host_time(&info->time_state);
- PrintDebug("Starting initial guest time as %llu\n", t);
-
info->time_state.enter_time = 0;
info->time_state.exit_time = t;
- info->time_state.last_update = t;
info->time_state.initial_time = t;
info->yield_start_cycle = t;
+ info->time_state.last_update = 0;
+ info->time_state.guest_cycles = 0;
+ PrintDebug("Starting time for core %d at host time %llu/guest time %llu.\n",
+ info->vcpu_id, t, info->time_state.guest_cycles);
+ v3_yield(info);
return 0;
}
int v3_offset_time( struct guest_info * info, sint64_t offset )
{
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
PrintDebug("Adding additional offset of %lld to guest time.\n", offset);
- time_state->guest_host_offset += offset;
+ time_state->guest_cycles += offset;
return 0;
}
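/* Illustrative sketch (editorial, not part of this patch): a slack
 * simulator, as described in future-additions item (1) above, might apply
 * its skew through v3_offset_time(). The function below and its arguments
 * are hypothetical. */
static void example_apply_sim_skew(struct guest_info * info,
                                   uint64_t sim_cycles, uint64_t real_cycles) {
    /* Skew guest time by how far the simulated feature ran ahead of (or
     * behind) a real implementation; the VMM later works this back to 0. */
    v3_offset_time(info, (sint64_t)sim_cycles - (sint64_t)real_cycles);
}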
#ifdef V3_CONFIG_TIME_DILATION
static uint64_t compute_target_host_time(struct guest_info * info, uint64_t guest_time)
{
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
uint64_t guest_elapsed, desired_elapsed;
guest_elapsed = (guest_time - time_state->initial_time);
static uint64_t compute_target_guest_time(struct guest_info *info)
{
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
uint64_t host_elapsed, desired_elapsed;
host_elapsed = v3_get_host_time(time_state) - time_state->initial_time;
/* Yield time in the host to deal with a guest that wants to run slower than
* the native host cycle frequency */
static int yield_host_time(struct guest_info * info) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
uint64_t host_time, target_host_time;
uint64_t guest_time, old_guest_time;
* time to account for the time paused even if the guest isn't
* usually paused in the VMM. */
host_time = v3_get_host_time(time_state);
- old_guest_time = v3_compute_guest_time(time_state, host_time);
+ old_guest_time = v3_get_guest_time(time_state);
target_host_time = compute_target_host_time(info, old_guest_time);
while (target_host_time > host_time) {
host_time = v3_get_host_time(time_state);
}
- guest_time = v3_compute_guest_time(time_state, host_time);
+ guest_time = v3_get_guest_time(time_state);
/* We do *not* assume the guest timer was paused in the VM. If it was,
* this offsetting is 0. If it wasn't, we need this. */
}
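/* Worked example (editorial, assuming compute_target_host_time() scales
 * elapsed guest time by the host/guest frequency ratio): a guest clocked
 * at 1 GHz on a 2 GHz host should see 1,000 guest cycles per 2,000 host
 * cycles, so the loop above waits until the host TSC has advanced that
 * far before the guest continues. */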
static int skew_guest_time(struct guest_info * info) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
uint64_t target_guest_time, guest_time;
/* Now the host may have gotten ahead of the guest because
* yielding is a coarse-grained thing. Figure out what guest time
if (time_state->enter_time) {
/* Limit forward skew to 10% of the amount the guest has
* run since we last could skew time */
- max_skew = (sint64_t)(guest_time - time_state->enter_time) / 10.0;
+ max_skew = (sint64_t)(guest_time - time_state->enter_time) / 10;
} else {
max_skew = 0;
}
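/* Worked example (editorial): if the guest has executed 10,000,000 cycles
 * since enter_time, at most 1,000,000 cycles of forward skew are applied
 * on this pass; any remainder waits for later exits. */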
/* Called immediately upon entry into the VMM */
int
-v3_time_exit_vm( struct guest_info * info )
+v3_time_exit_vm( struct guest_info * info, uint64_t * guest_cycles )
{
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
time_state->exit_time = v3_get_host_time(time_state);
-
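+ /* Prefer an explicit guest cycle count when the caller supplies one;
+ * otherwise estimate it from the host TSC delta across this exit. */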
+ if (guest_cycles) {
+ time_state->guest_cycles += *guest_cycles;
+ } else {
+ uint64_t cycles_exec;
+ cycles_exec = time_state->exit_time - time_state->enter_time;
+ time_state->guest_cycles += cycles_exec;
+ }
return 0;
}
int
v3_time_enter_vm( struct guest_info * info )
{
- struct vm_time * time_state = &(info->time_state);
- uint64_t host_time;
+ struct vm_core_time * time_state = &(info->time_state);
+ uint64_t host_time, vmm_cycles;
host_time = v3_get_host_time(time_state);
time_state->enter_time = host_time;
-#ifdef V3_CONFIG_TIME_DILATION
- {
- uint64_t guest_time;
- sint64_t offset;
- guest_time = v3_compute_guest_time(time_state, host_time);
- // XXX we probably want to use an inline function to do these
- // time differences to deal with sign and overflow carefully
- offset = (sint64_t)guest_time - (sint64_t)host_time;
- PrintDebug("v3_time_enter_vm: guest time offset %lld from host time.\n", offset);
- time_state->guest_host_offset = offset;
- }
-#else
- time_state->guest_host_offset = 0;
-#endif
-
+ vmm_cycles = host_time - time_state->exit_time;
+ /* XXX How do we want to take into account host/guest CPU speed differences
+ * and time dilation here? Probably time just won't advance in the VMM in
+ * that case, so it's irrelevant. XXX */
+ time_state->guest_cycles += vmm_cycles;
return 0;
}
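/* Worked example (editorial, hypothetical numbers): if the guest exited at
 * host cycle 5,000 and is re-entered at host cycle 5,500, the 500 cycles
 * spent in the VMM are credited to guest_cycles here, keeping guest time
 * in step with host time when no dilation is in effect. */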
}
void v3_update_timers(struct guest_info * info) {
- struct vm_time *time_state = &info->time_state;
+ struct vm_core_time *time_state = &info->time_state;
struct v3_timer * tmp_timer;
sint64_t cycles;
uint64_t old_time = info->time_state.last_update;
}
}
+
/*
* Handle full virtualization of the time stamp counter. As noted
* above, we don't store the actual value of the TSC, only the guest's
static int tsc_aux_msr_read_hook(struct guest_info *info, uint_t msr_num,
struct v3_msr *msr_val, void *priv) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
V3_ASSERT(msr_num == TSC_AUX_MSR);
static int tsc_aux_msr_write_hook(struct guest_info *info, uint_t msr_num,
struct v3_msr msr_val, void *priv) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
V3_ASSERT(msr_num == TSC_AUX_MSR);
static int tsc_msr_write_hook(struct guest_info *info, uint_t msr_num,
struct v3_msr msr_val, void *priv) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
uint64_t guest_time, new_tsc;
V3_ASSERT(msr_num == TSC_MSR);
ret = v3_register_hypercall(vm, TIME_CPUFREQ_HCALL,
handle_cpufreq_hcall, NULL);
+ vm->time_state.td_mult = 1;
+ PrintDebug("Setting base time dilation factor to %d.\n", vm->time_state.td_mult);
+
return ret;
}
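/* Editorial note: td_mult is a base time-dilation multiplier, fixed at 1
 * for now; future-additions item (3) above suggests explicit dilation,
 * possibly via the AMD TSC multiplier feature, which would scale guest
 * cycle accounting by a factor like this. */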
}
void v3_init_time_core(struct guest_info * info) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
v3_cfg_tree_t * cfg_tree = info->core_cfg_data;
char * khz = NULL;
time_state->initial_time = 0;
time_state->last_update = 0;
- time_state->guest_host_offset = 0;
time_state->tsc_guest_offset = 0;
+ time_state->enter_time = 0;
+ time_state->exit_time = 0;
+ time_state->guest_cycles = 0;
INIT_LIST_HEAD(&(time_state->timers));
time_state->num_timers = 0;
void v3_deinit_time_core(struct guest_info * core) {
- struct vm_time * time_state = &(core->time_state);
+ struct vm_core_time * time_state = &(core->time_state);
struct v3_timer * tmr = NULL;
struct v3_timer * tmp = NULL;