static int handle_cpufreq_hcall(struct guest_info * info, uint_t hcall_id, void * priv_data) {
- struct vm_time * time_state = &(info->time_state);
+ /* Per-core time state: struct renamed vm_time -> vm_core_time in this patch. */
+ struct vm_core_time * time_state = &(info->time_state);
+ /* Report the virtualized CPU frequency to the guest in RBX. */
info->vm_regs.rbx = time_state->guest_cpu_freq;
return 0;
}
+/* v3_pause_time
+ * Mark guest time as paused by recording the host time at which the
+ * pause began. pause_time doubles as the "currently paused" flag and is
+ * cleared again by v3_resume_time().
+ * Returns 0 on success, -1 if time was already paused.
+ * NOTE(review): pause_time == 0 is the "not paused" sentinel — this
+ * assumes v3_get_host_time() never legitimately returns 0 here; confirm. */
+int v3_pause_time( struct guest_info * info )
+{
+ struct vm_core_time * time_state = &(info->time_state);
+ if (time_state->pause_time != 0) {
+ PrintError("Attempted to pause time when time already paused.\n");
+ return -1;
+ }
+ time_state->pause_time = v3_get_host_time( time_state );
+
+ return 0;
+}
+
+/* v3_resume_time
+ * Recompute the guest/host time offset after a pause and clear the
+ * paused flag set by v3_pause_time().
+ * Returns 0 on success, -1 if time was not paused.
+ * NOTE(review): only the *flag* value of pause_time is used here — the
+ * offset is rederived from the current host time via
+ * v3_compute_guest_time(); confirm that this correctly excludes the
+ * paused interval from guest-visible time. */
+int v3_resume_time( struct guest_info * info )
+{
+ struct vm_core_time * time_state = &(info->time_state);
+ uint64_t host_time, guest_time;
+ sint64_t offset;
+
+ if (time_state->pause_time == 0) {
+ PrintError("Attempted to resume time when time not paused.\n");
+ return -1;
+ }
+
+ host_time = v3_get_host_time(time_state);
+ guest_time = v3_compute_guest_time(time_state, host_time);
+ /* Signed difference: guest may run behind (negative) or ahead of the host. */
+ offset = (sint64_t)guest_time - (sint64_t)host_time;
+ time_state->guest_host_offset = offset;
+ time_state->pause_time = 0;
+
+ return 0;
+}
+
int v3_offset_time( struct guest_info * info, sint64_t offset )
{
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
PrintDebug("Adding additional offset of %lld to guest time.\n", offset);
time_state->guest_host_offset += offset;
return 0;
#ifdef V3_CONFIG_TIME_DILATION
static uint64_t compute_target_host_time(struct guest_info * info, uint64_t guest_time)
{
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
uint64_t guest_elapsed, desired_elapsed;
guest_elapsed = (guest_time - time_state->initial_time);
static uint64_t compute_target_guest_time(struct guest_info *info)
{
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
uint64_t host_elapsed, desired_elapsed;
host_elapsed = v3_get_host_time(time_state) - time_state->initial_time;
/* Yield time in the host to deal with a guest that wants to run slower than
* the native host cycle frequency */
static int yield_host_time(struct guest_info * info) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
uint64_t host_time, target_host_time;
uint64_t guest_time, old_guest_time;
}
static int skew_guest_time(struct guest_info * info) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
uint64_t target_guest_time, guest_time;
/* Now the host may have gotten ahead of the guest because
* yielding is a coarse grained thing. Figure out what guest time
if (time_state->enter_time) {
/* Limit forward skew to 10% of the amount the guest has
* run since we last could skew time */
- max_skew = (sint64_t)(guest_time - time_state->enter_time) / 10.0;
+ max_skew = (sint64_t)(guest_time - time_state->enter_time) / 10;
} else {
max_skew = 0;
}
/* Called immediately upon entry in the the VMM */
int
-v3_time_exit_vm( struct guest_info * info )
+/* NOTE(review): the new guest_cycles out-parameter is never written in
+ * this hunk — presumably filled in elsewhere in the patch; confirm all
+ * callers pass a valid pointer. */
+v3_time_exit_vm( struct guest_info * info, uint64_t * guest_cycles )
{
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
+ /* Snapshot the host time at VM exit for later time accounting. */
time_state->exit_time = v3_get_host_time(time_state);
#ifdef V3_CONFIG_TIME_DILATION
+ /* Under time dilation, freeze guest time while we are in the VMM. */
+ v3_pause_time( info );
+#endif
+
return 0;
}
int
v3_time_enter_vm( struct guest_info * info )
{
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
uint64_t host_time;
host_time = v3_get_host_time(time_state);
time_state->enter_time = host_time;
#ifdef V3_CONFIG_TIME_DILATION
- {
- uint64_t guest_time;
- sint64_t offset;
- guest_time = v3_compute_guest_time(time_state, host_time);
- // XXX we probably want to use an inline function to do these
- // time differences to deal with sign and overflow carefully
- offset = (sint64_t)guest_time - (sint64_t)host_time;
- PrintDebug("v3_time_enter_vm: guest time offset %lld from host time.\n", offset);
- time_state->guest_host_offset = offset;
- }
+ /* The open-coded offset computation above is replaced by the shared
+ * pause/resume path; v3_resume_time() restores guest_host_offset.
+ * NOTE(review): its -1 return (not paused) is silently dropped here;
+ * consider propagating or logging it. */
+ v3_resume_time( info );
#else
+ /* Without dilation, guest time simply tracks host time directly. */
time_state->guest_host_offset = 0;
#endif
}
void v3_update_timers(struct guest_info * info) {
- struct vm_time *time_state = &info->time_state;
+ struct vm_core_time *time_state = &info->time_state;
struct v3_timer * tmp_timer;
sint64_t cycles;
uint64_t old_time = info->time_state.last_update;
}
}
-/* Handle TSC timeout hooks */
-struct v3_timeout_hook *
-v3_add_timeout_hook(struct guest_info * info, v3_timeout_callback_t callback,
- void * priv_data) {
- struct v3_timeout_hook * timeout = NULL;
- timeout = (struct v3_timeout_hook *)V3_Malloc(sizeof(struct v3_timeout_hook));
- V3_ASSERT(timeout != NULL);
-
- timeout->callback = callback;
- timeout->private_data = priv_data;
-
- list_add(&(timeout->hook_link), &(info->time_state.timeout_hooks));
- return timeout;
-}
-
-int
-v3_remove_timeout_hook(struct guest_info * info, struct v3_timeout_hook * hook) {
- list_del(&(hook->hook_link));
- V3_Free(hook);
- return 0;
-}
-
-int v3_schedule_timeout(struct guest_info * info, ullong_t guest_timeout) {
- struct vm_time *time_state = &info->time_state;
- /* Note that virtualization architectures that support it (like newer
- * VMX systems) will turn on an active preemption timeout if
- * available to get this timeout as closely as possible. Other systems
- * only catch it in the periodic interrupt and so are less precise */
- if (guest_timeout < time_state->next_timeout) {
- time_state->next_timeout = guest_timeout;
- }
- return 0;
-}
-
-int v3_check_timeout( struct guest_info * info ) {
- struct vm_time *time_state = &info->time_state;
- if (time_state->next_timeout <= v3_get_guest_time(time_state)) {
- struct v3_timeout_hook * tmp_timeout;
- time_state->next_timeout = (ullong_t)-1;
- list_for_each_entry(tmp_timeout, &(time_state->timeout_hooks), hook_link) {
- tmp_timeout->callback(info, tmp_timeout->private_data);
- }
- }
- return 0;
-}
/*
* Handle full virtualization of the time stamp counter. As noted
static int tsc_aux_msr_read_hook(struct guest_info *info, uint_t msr_num,
struct v3_msr *msr_val, void *priv) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
V3_ASSERT(msr_num == TSC_AUX_MSR);
static int tsc_aux_msr_write_hook(struct guest_info *info, uint_t msr_num,
struct v3_msr msr_val, void *priv) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
V3_ASSERT(msr_num == TSC_AUX_MSR);
static int tsc_msr_write_hook(struct guest_info *info, uint_t msr_num,
struct v3_msr msr_val, void *priv) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
uint64_t guest_time, new_tsc;
V3_ASSERT(msr_num == TSC_MSR);
ret = v3_register_hypercall(vm, TIME_CPUFREQ_HCALL,
handle_cpufreq_hcall, NULL);
+ PrintDebug("Setting base time dilation factor.\n");
+ vm->time_state.td_mult = 1;
+
return ret;
}
}
void v3_init_time_core(struct guest_info * info) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
v3_cfg_tree_t * cfg_tree = info->core_cfg_data;
char * khz = NULL;
time_state->last_update = 0;
time_state->guest_host_offset = 0;
time_state->tsc_guest_offset = 0;
+ time_state->enter_time = 0;
+ time_state->exit_time = 0;
+ time_state->pause_time = 0;
- INIT_LIST_HEAD(&(time_state->timeout_hooks));
- time_state->next_timeout = (ullong_t)-1;
INIT_LIST_HEAD(&(time_state->timers));
time_state->num_timers = 0;
void v3_deinit_time_core(struct guest_info * core) {
- struct vm_time * time_state = &(core->time_state);
+ struct vm_core_time * time_state = &(core->time_state);
struct v3_timer * tmr = NULL;
struct v3_timer * tmp = NULL;