* (1) Add support for temporarily skewing guest time off of where it should
* be to support slack simulation of guests. The idea is that simulators
* set this skew to be the difference between how much time passed for a
- * simulated feature and a real implementation of that feature, making
+ * simulated feature and a real implementation of that feature, making time
* pass at a different rate from real time on this core. The VMM will then
* attempt to move this skew back towards 0 subject to resolution/accuracy
* constraints from various system timers.
* The main effort in doing this will be to get accuracy/resolution
* information from each local timer and to use this to bound how much skew
* is removed on each exit.
+ *
+ * (2) Look more into synchronizing the offsets *across* virtual and physical
+ * cores so that multicore guests stay mostly in sync.
+ *
+ * (3) Look into using the AMD TSC multiplier feature and adding explicit time
+ * dilation support to time handling.
*/
+/* Add a (signed) skew to the guest's notion of time by accumulating it
+ * into guest_host_offset. Used by slack-simulation support to let guest
+ * time drift relative to host time. Always returns 0. */
int v3_offset_time( struct guest_info * info, sint64_t offset )
{
struct vm_time * time_state = &(info->time_state);
-// PrintDebug("Adding additional offset of %lld to guest time.\n", offset);
+ PrintDebug("Adding additional offset of %lld to guest time.\n", offset);
time_state->guest_host_offset += offset;
return 0;
}
-static uint64_t compute_target_host_time(struct guest_info * info)
+#ifdef V3_CONFIG_TIME_DILATION
+/* Map elapsed guest time onto the host timeline: given 'guest_time',
+ * return the host time at which that much guest time should have
+ * passed, scaled by the host/guest CPU frequency ratio.
+ * NOTE(review): guest_elapsed * host_cpu_freq may overflow 64 bits for
+ * very long uptimes — confirm this is acceptable for expected ranges. */
+static uint64_t compute_target_host_time(struct guest_info * info, uint64_t guest_time)
{
struct vm_time * time_state = &(info->time_state);
uint64_t guest_elapsed, desired_elapsed;
- guest_elapsed = (v3_get_guest_time(time_state) - time_state->initial_time);
+ guest_elapsed = (guest_time - time_state->initial_time);
desired_elapsed = (guest_elapsed * time_state->host_cpu_freq) / time_state->guest_cpu_freq;
return time_state->initial_time + desired_elapsed;
}
uint64_t host_time, target_host_time;
uint64_t guest_time, old_guest_time;
- /* Compute the target host time given how much time has *already*
- * passed in the guest */
- target_host_time = compute_target_host_time(info);
-
/* Now, let the host run while the guest is stopped to make the two
* sync up. Note that this doesn't assume that guest time is stopped;
* the offsetting in the next step will add an offset to guest
* time to account for the time paused even if the guest isn't
* usually paused in the VMM. */
host_time = v3_get_host_time(time_state);
- old_guest_time = v3_get_guest_time(time_state);
+ old_guest_time = v3_compute_guest_time(time_state, host_time);
+ target_host_time = compute_target_host_time(info, old_guest_time);
while (target_host_time > host_time) {
v3_yield(info);
host_time = v3_get_host_time(time_state);
}
- guest_time = v3_get_guest_time(time_state);
+ guest_time = v3_compute_guest_time(time_state, host_time);
/* We do *not* assume the guest timer was paused in the VM. If it was
* this offsetting is 0. If it wasn't, we need this. */
- v3_offset_time(info, (sint64_t)old_guest_time - (sint64_t)guest_time);
+ v3_offset_time(info, (sint64_t)(old_guest_time - guest_time));
return 0;
}
if (time_state->enter_time) {
/* Limit forward skew to 10% of the amount the guest has
* run since we last could skew time */
+/* Integer divide: a 10.0 divisor would promote the cycle count to
+ * double (losing precision above 2^53 cycles) and then truncate back
+ * to sint64_t on assignment, pulling floating point into VMM context
+ * code for no benefit. */
- max_skew = ((sint64_t)guest_time - (sint64_t)time_state->enter_time) / 10;
+ max_skew = (sint64_t)(guest_time - time_state->enter_time) / 10;
} else {
max_skew = 0;
}
- desired_skew = (sint64_t)target_guest_time - (sint64_t)guest_time;
+ desired_skew = (sint64_t)(target_guest_time - guest_time);
skew = desired_skew > max_skew ? max_skew : desired_skew;
PrintDebug("Guest %lld cycles behind where it should be.\n",
desired_skew);
return 0;
}
+#endif /* V3_CONFIG_TIME_DILATION */
// Control guest time in relation to host time so that the two stay
// appropriately synchronized to the extent possible.
+// Compiles to a no-op (returns 0 immediately) when
+// V3_CONFIG_TIME_DILATION is not configured.
int v3_adjust_time(struct guest_info * info) {
+#ifdef V3_CONFIG_TIME_DILATION
/* First deal with yielding if we want to slow down the guest */
yield_host_time(info);
* or because the VMM is doing something that takes a long time to emulate)
* allow guest time to jump forward a bit */
skew_guest_time(info);
-
+#endif
return 0;
}
+/* Called as the core enters guest context: records the host entry time
+ * and, when time dilation is configured, recomputes the guest/host
+ * offset from the dilated guest time; otherwise guest time tracks host
+ * time directly (offset 0). */
v3_time_enter_vm( struct guest_info * info )
{
struct vm_time * time_state = &(info->time_state);
- uint64_t guest_time, host_time;
+ uint64_t host_time;
host_time = v3_get_host_time(time_state);
- guest_time = v3_get_guest_time(time_state);
time_state->enter_time = host_time;
- time_state->guest_host_offset = (sint64_t)guest_time - (sint64_t)host_time;
+#ifdef V3_CONFIG_TIME_DILATION
+ {
+ uint64_t guest_time;
+ sint64_t offset;
+ guest_time = v3_compute_guest_time(time_state, host_time);
+ // XXX we probably want to use an inline function to do these
+ // time differences to deal with sign and overflow carefully
+ offset = (sint64_t)guest_time - (sint64_t)host_time;
+ PrintDebug("v3_time_enter_vm: guest time offset %lld from host time.\n", offset);
+ time_state->guest_host_offset = offset;
+ }
+#else
+ time_state->guest_host_offset = 0;
+#endif
return 0;
}
+/* Advance last_update to the current guest time and tick registered
+ * timers by the elapsed cycles (the iteration over time_state->timers
+ * is presumably in lines elided from this hunk — tmp_timer is declared
+ * for it; confirm against the full file). */
void v3_update_timers(struct guest_info * info) {
struct vm_time *time_state = &info->time_state;
struct v3_timer * tmp_timer;
- uint64_t old_time = info->time_state.last_update;
sint64_t cycles;
+ uint64_t old_time = info->time_state.last_update;
time_state->last_update = v3_get_guest_time(time_state);
- cycles = time_state->last_update - old_time;
+ cycles = (sint64_t)(time_state->last_update - old_time);
+ /* Guest time must be monotonic; a negative delta indicates a bug. */
V3_ASSERT(cycles >= 0);
// V3_Print("Updating timers with %lld elapsed cycles.\n", cycles);
}
}
+/* Handle TSC timeout hooks */
+/* Register a callback to run when a scheduled guest-time timeout
+ * expires (see v3_schedule_timeout / v3_check_timeout). Returns the
+ * hook handle, which the caller releases with v3_remove_timeout_hook.
+ * Aborts via V3_ASSERT on allocation failure rather than returning
+ * NULL. */
+struct v3_timeout_hook *
+v3_add_timeout_hook(struct guest_info * info, v3_timeout_callback_t callback,
+ void * priv_data) {
+ struct v3_timeout_hook * timeout = NULL;
+ timeout = (struct v3_timeout_hook *)V3_Malloc(sizeof(struct v3_timeout_hook));
+ V3_ASSERT(timeout != NULL);
+
+ timeout->callback = callback;
+ timeout->private_data = priv_data;
+
+ list_add(&(timeout->hook_link), &(info->time_state.timeout_hooks));
+ return timeout;
+}
+
+/* Unregister and free a timeout hook previously returned by
+ * v3_add_timeout_hook. The hook pointer is invalid after this call.
+ * Always returns 0. */
+int
+v3_remove_timeout_hook(struct guest_info * info, struct v3_timeout_hook * hook) {
+ list_del(&(hook->hook_link));
+ V3_Free(hook);
+ return 0;
+}
+
+/* Request that timeout hooks fire at guest time 'guest_timeout'.
+ * Only a single pending deadline is kept (next_timeout), so this can
+ * only move the deadline earlier; all hooks fire together when it
+ * expires (see v3_check_timeout). Always returns 0. */
+int v3_schedule_timeout(struct guest_info * info, ullong_t guest_timeout) {
+ struct vm_time *time_state = &info->time_state;
+ /* Note that virtualization architectures that support it (like newer
+ * VMX systems) will turn on an active preemption timeout if
+ * available to get this timeout as closely as possible. Other systems
+ * only catch it in the periodic interrupt and so are less precise */
+ if (guest_timeout < time_state->next_timeout) {
+ time_state->next_timeout = guest_timeout;
+ }
+ return 0;
+}
+
+/* Fire all timeout hooks if the pending deadline has been reached.
+ * next_timeout is reset to "none" ((ullong_t)-1) *before* the
+ * callbacks run, so a callback may safely reschedule itself via
+ * v3_schedule_timeout. Always returns 0. */
+int v3_check_timeout( struct guest_info * info ) {
+ struct vm_time *time_state = &info->time_state;
+ if (time_state->next_timeout <= v3_get_guest_time(time_state)) {
+ struct v3_timeout_hook * tmp_timeout;
+ time_state->next_timeout = (ullong_t)-1;
+ list_for_each_entry(tmp_timeout, &(time_state->timeout_hooks), hook_link) {
+ tmp_timeout->callback(info, tmp_timeout->private_data);
+ }
+ }
+ return 0;
+}
+
/*
* Handle full virtualization of the time stamp counter. As noted
* above, we don't store the actual value of the TSC, only the guest's
time_state->guest_host_offset = 0;
time_state->tsc_guest_offset = 0;
+ INIT_LIST_HEAD(&(time_state->timeout_hooks));
+ time_state->next_timeout = (ullong_t)-1;
+
INIT_LIST_HEAD(&(time_state->timers));
time_state->num_timers = 0;