X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm_time.c;h=51ac5a76e3a538c91506a3af39ad0cf6c76c3ab5;hb=5162b3f7c10ddbd4fd54e0c2dd13f147af09c314;hp=203981d9a7f5d9423ef7b63acaa2c75b4e0d47e9;hpb=ff3728843f8471d39cf2ea3511267feb83f14678;p=palacios.git

diff --git a/palacios/src/palacios/vmm_time.c b/palacios/src/palacios/vmm_time.c
index 203981d..51ac5a7 100644
--- a/palacios/src/palacios/vmm_time.c
+++ b/palacios/src/palacios/vmm_time.c
@@ -41,15 +41,14 @@
 * the passage of time:
 * (1) The host timestamp counter - read directly from HW and never written
 * (2) A monotonic guest timestamp counter used to measure the progression of
- *     time in the guest. This is computed using an offset from (1) above.
+ *     time in the guest. This is stored as an absolute number of cycles elapsed
+ *     and is updated on guest entry and exit; it can also be updated explicitly
+ *     in the monitor (e.g., by v3_offset_time() or v3_skip_time() below).
 * (3) The actual guest timestamp counter (which can be written by
 *     writing to the guest TSC MSR - MSR 0x10), derived from the monotonic
 *     guest TSC. This is computed as an offset from (2) above, and the
 *     offset is updated when the TSC MSR is written.
 *
- * The value used to offset the guest TSC from the host TSC is the *sum* of all
- * of these offsets (2 and 3) above
- *
 * Because all other devices are slaved to the passage of time in the guest,
 * it is (2) above that drives the firing of other timers in the guest,
 * including timer devices such as the Programmable Interrupt Timer (PIT).
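For orientation, here is a minimal standalone sketch (hypothetical names, not the
Palacios API) of the bookkeeping the comment above describes: (2) is a monotonic
cycle count, and (3) is derived from it via an offset that only moves when the
guest writes the TSC MSR. This mirrors the tsc_msr_write_hook() change further
down in this diff.

#include <stdint.h>

/* (2): monotonic guest cycle count; (3): offset applied on top of it. */
struct core_time_sketch {
    uint64_t guest_cycles;
    int64_t  tsc_guest_offset;
};

/* Value the guest observes when it executes RDTSC. */
static uint64_t sketch_get_guest_tsc(const struct core_time_sketch *t) {
    return t->guest_cycles + (uint64_t)t->tsc_guest_offset;
}

/* A guest WRMSR to the TSC MSR (0x10) only moves the offset; the
 * monotonic clock that drives timer devices is never rewound. */
static void sketch_write_guest_tsc(struct core_time_sketch *t, uint64_t new_tsc) {
    t->tsc_guest_offset = (int64_t)(new_tsc - t->guest_cycles);
}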
@@ -91,176 +90,79 @@ int v3_start_time(struct guest_info * info) {
     /* We start running with guest_time == host_time */
     uint64_t t = v3_get_host_time(&info->time_state);
 
-    PrintDebug("Starting initial guest time as %llu\n", t);
-
-    info->time_state.enter_time = 0;
-    info->time_state.exit_time = t;
-    info->time_state.last_update = t;
-    info->time_state.initial_time = t;
+    info->time_state.vm_enter_host_time = 0;
+    info->time_state.vm_pause_host_time = t;
+    info->time_state.initial_host_time = t;
     info->yield_start_cycle = t;
 
+    info->time_state.last_update = 0;
+    info->time_state.guest_cycles = 0;
+    PrintDebug("Starting time for core %d at host time %llu/guest time %llu.\n",
+               info->vcpu_id, t, info->time_state.guest_cycles);
+    v3_yield(info);
+
     return 0;
 }
 
-int v3_pause_time( struct guest_info * info )
+int v3_offset_time( struct guest_info * info, sint64_t offset )
 {
     struct vm_core_time * time_state = &(info->time_state);
 
-    if (time_state->pause_time != 0) {
-        PrintError("Attempted to pause time when time already paused.\n");
-        return -1;
+    if (info->vm_info->time_state.follow_host_time) {
+        PrintError("Cannot offset guest time passage while slaved to host clock.\n");
+        return 1;
+    } else {
+        time_state->guest_cycles += offset;
     }
-    time_state->pause_time = v3_get_host_time( time_state );
-
     return 0;
 }
 
-int v3_resume_time( struct guest_info * info )
-{
-    struct vm_core_time * time_state = &(info->time_state);
-    uint64_t host_time, guest_time;
-    sint64_t offset;
-
-    if (time_state->pause_time == 0) {
-        PrintError("Attempted to resume time when time not paused.\n");
-        return -1;
+int v3_skip_time(struct guest_info * info) {
+    if (info->vm_info->time_state.follow_host_time) {
+        PrintError("Cannot skip guest time passage while slaved to host clock.\n");
+        return 1;
+    } else {
+        info->time_state.vm_pause_host_time = v3_get_host_time(&info->time_state);
     }
-
-    host_time = v3_get_host_time(time_state);
-    guest_time = v3_compute_guest_time(time_state, host_time);
-    offset = (sint64_t)guest_time - (sint64_t)host_time;
-    time_state->guest_host_offset = offset;
-    time_state->pause_time = 0;
-
     return 0;
 }
 
-int v3_offset_time( struct guest_info * info, sint64_t offset )
-{
-    struct vm_core_time * time_state = &(info->time_state);
-
-    PrintDebug("Adding additional offset of %lld to guest time.\n", offset);
-    time_state->guest_host_offset += offset;
-
-    return 0;
+static sint64_t host_to_guest_cycles(struct guest_info * info, sint64_t host_cycles) {
+    return (host_cycles * info->time_state.clock_ratio_num) / info->time_state.clock_ratio_denom;
 }
 
-#ifdef V3_CONFIG_TIME_DILATION
-static uint64_t compute_target_host_time(struct guest_info * info, uint64_t guest_time)
+int v3_time_advance_cycles(struct guest_info * info, uint64_t *host_cycles)
 {
-    struct vm_core_time * time_state = &(info->time_state);
-    uint64_t guest_elapsed, desired_elapsed;
-
-    guest_elapsed = (guest_time - time_state->initial_time);
-    desired_elapsed = (guest_elapsed * time_state->host_cpu_freq) / time_state->guest_cpu_freq;
-    return time_state->initial_time + desired_elapsed;
-}
+    uint64_t t = v3_get_host_time(&info->time_state);
+    uint64_t last_pause = info->time_state.vm_pause_host_time; /* host time as of last accounting */
 
-static uint64_t compute_target_guest_time(struct guest_info *info)
-{
-    struct vm_core_time * time_state = &(info->time_state);
-    uint64_t host_elapsed, desired_elapsed;
+    info->time_state.vm_pause_host_time = t;
 
-    host_elapsed = v3_get_host_time(time_state) - time_state->initial_time;
-    desired_elapsed = (host_elapsed * time_state->guest_cpu_freq) / time_state->host_cpu_freq;
+    if (info->vm_info->time_state.follow_host_time) {
+        /* How many guest cycles should have elapsed? */
+        sint64_t host_elapsed = t - info->time_state.initial_host_time;
+        sint64_t guest_elapsed = host_to_guest_cycles(info, host_elapsed);
 
-    return time_state->initial_time + desired_elapsed;
-
-}
-
-/* Yield time in the host to deal with a guest that wants to run slower than
- * the native host cycle frequency */
-static int yield_host_time(struct guest_info * info) {
-    struct vm_core_time * time_state = &(info->time_state);
-    uint64_t host_time, target_host_time;
-    uint64_t guest_time, old_guest_time;
-
-    /* Now, let the host run while the guest is stopped to make the two
-     * sync up. Note that this doesn't assume that guest time is stopped;
-     * the offsetting in the next step will add an offset to guest
-     * time to account for the time paused even if the guest isn't
-     * actually paused in the VMM. */
-    host_time = v3_get_host_time(time_state);
-    old_guest_time = v3_compute_guest_time(time_state, host_time);
-    target_host_time = compute_target_host_time(info, old_guest_time);
-
-    while (target_host_time > host_time) {
-        v3_yield(info);
-        host_time = v3_get_host_time(time_state);
-    }
-
-    guest_time = v3_compute_guest_time(time_state, host_time);
-
-    /* We do *not* assume the guest timer was paused in the VM. If it was,
-     * this offsetting is 0. If it wasn't, we need this. */
-    v3_offset_time(info, (sint64_t)(old_guest_time - guest_time));
-
-    return 0;
-}
-
-static int skew_guest_time(struct guest_info * info) {
-    struct vm_core_time * time_state = &(info->time_state);
-    uint64_t target_guest_time, guest_time;
-    /* Now the host may have gotten ahead of the guest because
-     * yielding is a coarse-grained thing. Figure out what guest time
-     * we want to be at, and use the offsetting mechanism in
-     * the VMM to make the guest run forward. We limit *how* much we skew
-     * it forward to prevent the guest time making large jumps,
-     * however. */
-    target_guest_time = compute_target_guest_time(info);
-    guest_time = v3_get_guest_time(time_state);
-
-    if (guest_time < target_guest_time) {
-        sint64_t max_skew, desired_skew, skew;
-
-        if (time_state->enter_time) {
-            /* Limit forward skew to 10% of the amount the guest has
-             * run since we last could skew time */
-            max_skew = (sint64_t)(guest_time - time_state->enter_time) / 10.0;
+        info->time_state.guest_cycles = guest_elapsed;
+    } else {
+        uint64_t guest_cycles;
+        if (host_cycles) {
+            guest_cycles = host_to_guest_cycles(info, *host_cycles);
         } else {
-            max_skew = 0;
+            guest_cycles = host_to_guest_cycles(info, (sint64_t)(t - last_pause));
         }
-
-        desired_skew = (sint64_t)(target_guest_time - guest_time);
-        skew = desired_skew > max_skew ? max_skew : desired_skew;
-        PrintDebug("Guest %lld cycles behind where it should be.\n",
-                   desired_skew);
-        PrintDebug("Limit on forward skew is %lld. Skewing forward %lld.\n",
-                   max_skew, skew);
-
-        v3_offset_time(info, skew);
-    }
+        info->time_state.guest_cycles += guest_cycles;
+    }
 
     return 0;
 }
-#endif /* V3_CONFIG_TIME_DILATION */
-
-// Control guest time in relation to host time so that the two stay
-// appropriately synchronized to the extent possible.
-int v3_adjust_time(struct guest_info * info) {
-
-#ifdef V3_CONFIG_TIME_DILATION
-    /* First deal with yielding if we want to slow down the guest */
-    yield_host_time(info);
-
-    /* Now, if the guest is too slow (either from excess yielding above,
-     * or because the VMM is doing something that takes a long time to emulate),
-     * allow guest time to jump forward a bit */
-    skew_guest_time(info);
-#endif
-    return 0;
+int v3_advance_time(struct guest_info * info) {
+    return v3_time_advance_cycles(info, NULL);
 }
 
 /* Called immediately upon entry into the VMM */
 int
-v3_time_exit_vm( struct guest_info * info )
+v3_time_exit_vm( struct guest_info * info, uint64_t * host_cycles )
 {
-    struct vm_core_time * time_state = &(info->time_state);
-
-    time_state->exit_time = v3_get_host_time(time_state);
-
-#ifdef V3_CONFIG_TIME_DILATION
-    v3_pause_time( info );
-#endif
-
-    return 0;
+    return v3_time_advance_cycles(info, host_cycles);
 }
 
 /* Called immediately prior to entry to the VM */
@@ -268,16 +170,9 @@ int
 v3_time_enter_vm( struct guest_info * info )
 {
     struct vm_core_time * time_state = &(info->time_state);
-    uint64_t host_time;
-
-    host_time = v3_get_host_time(time_state);
-    time_state->enter_time = host_time;
-#ifdef V3_CONFIG_TIME_DILATION
-    v3_resume_time( info );
-#else
-    time_state->guest_host_offset = 0;
-#endif
+    uint64_t host_time = v3_get_host_time(&info->time_state);
 
+    time_state->vm_enter_host_time = host_time;
    return 0;
 }
 
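Note that the multiply in host_to_guest_cycles() above can overflow 64 bits when
the elapsed cycle count and clock_ratio_num are both large. A standalone sketch
of an overflow-safer variant, assuming a compiler that provides __int128
(GCC/Clang):

#include <stdint.h>

/* Widen the intermediate product to 128 bits before dividing; num and
 * denom play the roles of clock_ratio_num and clock_ratio_denom. */
static int64_t scale_cycles_128(int64_t host_cycles, uint32_t num, uint32_t denom) {
    __int128 product = (__int128)host_cycles * num;
    return (int64_t)(product / denom);
}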
@@ -311,63 +206,22 @@ void v3_update_timers(struct guest_info * info) {
     struct vm_core_time *time_state = &info->time_state;
     struct v3_timer * tmp_timer;
     sint64_t cycles;
-    uint64_t old_time = info->time_state.last_update;
+    uint64_t old_time = time_state->last_update;
 
     time_state->last_update = v3_get_guest_time(time_state);
     cycles = (sint64_t)(time_state->last_update - old_time);
-    V3_ASSERT(cycles >= 0);
+    if (cycles < 0) {
+        PrintError("Cycle count appears to have rolled over - old time %llu, current time %llu.\n",
+                   old_time, time_state->last_update);
+        return;
+    }
 
-    // V3_Print("Updating timers with %lld elapsed cycles.\n", cycles);
+    PrintDebug("Updating timers with %lld elapsed cycles.\n", cycles);
     list_for_each_entry(tmp_timer, &(time_state->timers), timer_link) {
        tmp_timer->ops->update_timer(info, cycles, time_state->guest_cpu_freq,
                                     tmp_timer->private_data);
     }
 }
 
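For context, a standalone sketch of what a timer device typically does in the
update hook that v3_update_timers() invokes (hypothetical types; the real PIT
implementation lives in the device code): it accumulates elapsed guest cycles
and fires once per period.

#include <stdint.h>

/* A periodic timer driven by elapsed guest cycles. With guest_cpu_freq in
 * KHz (as printed by v3_init_time_core() below), a timer running at hz
 * fires every (guest_cpu_freq * 1000) / hz guest cycles. */
struct periodic_timer_sketch {
    uint64_t period_cycles;  /* guest cycles between firings     */
    uint64_t accumulated;    /* cycles carried since last firing */
    uint64_t fired;          /* interrupts delivered so far      */
};

static void sketch_update_timer(struct periodic_timer_sketch *t, int64_t cycles) {
    if (cycles < 0) {
        return;              /* mirror v3_update_timers(): ignore rollover */
    }
    t->accumulated += (uint64_t)cycles;
    while (t->accumulated >= t->period_cycles) {
        t->accumulated -= t->period_cycles;
        t->fired++;          /* a real device would raise its IRQ here */
    }
}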
-/* Handle TSC timeout hooks */
-struct v3_timeout_hook *
-v3_add_timeout_hook(struct guest_info * info, v3_timeout_callback_t callback,
-                    void * priv_data) {
-    struct v3_timeout_hook * timeout = NULL;
-    timeout = (struct v3_timeout_hook *)V3_Malloc(sizeof(struct v3_timeout_hook));
-    V3_ASSERT(timeout != NULL);
-
-    timeout->callback = callback;
-    timeout->private_data = priv_data;
-
-    list_add(&(timeout->hook_link), &(info->time_state.timeout_hooks));
-    return timeout;
-}
-
-int
-v3_remove_timeout_hook(struct guest_info * info, struct v3_timeout_hook * hook) {
-    list_del(&(hook->hook_link));
-    V3_Free(hook);
-    return 0;
-}
-
-int v3_schedule_timeout(struct guest_info * info, ullong_t guest_timeout) {
-    struct vm_core_time *time_state = &info->time_state;
-    /* Note that virtualization architectures that support it (like newer
-     * VMX systems) will turn on an active preemption timer, if available,
-     * to hit this timeout as closely as possible. Other systems only
-     * catch it at the periodic interrupt and so are less precise. */
-    if (guest_timeout < time_state->next_timeout) {
-        time_state->next_timeout = guest_timeout;
-    }
-    return 0;
-}
-
-int v3_check_timeout( struct guest_info * info ) {
-    struct vm_core_time *time_state = &info->time_state;
-    if (time_state->next_timeout <= v3_get_guest_time(time_state)) {
-        struct v3_timeout_hook * tmp_timeout;
-        time_state->next_timeout = (ullong_t)-1;
-        list_for_each_entry(tmp_timeout, &(time_state->timeout_hooks), hook_link) {
-            tmp_timeout->callback(info, tmp_timeout->private_data);
-        }
-    }
-    return 0;
-}
 
 /*
  * Handle full virtualization of the time stamp counter.  As noted
@@ -481,7 +335,7 @@ static int tsc_msr_write_hook(struct guest_info *info, uint_t msr_num,
     new_tsc = (((uint64_t)msr_val.hi) << 32) | (uint64_t)msr_val.lo;
 
     guest_time = v3_get_guest_time(time_state);
-    time_state->tsc_guest_offset = (sint64_t)new_tsc - (sint64_t)guest_time;
+    time_state->tsc_guest_offset = (sint64_t)(new_tsc - guest_time);
 
     return 0;
 }
 
@@ -510,9 +364,13 @@ int v3_init_time_vm(struct v3_vm_info * vm) {
     ret = v3_register_hypercall(vm, TIME_CPUFREQ_HCALL,
                                 handle_cpufreq_hcall, NULL);
 
-    PrintDebug("Setting base time dilation factor.\n");
-    vm->time_state.td_mult = 1;
+    vm->time_state.td_num = 1;
+    vm->time_state.td_denom = 1;
+    PrintDebug("Setting base time dilation factor to %d/%d.\n",
+               vm->time_state.td_num, vm->time_state.td_denom);
 
+    vm->time_state.follow_host_time = 1;
+    PrintDebug("Locking guest time to host time.\n");
     return ret;
 }
 
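The td_num/td_denom pair initialized above suggests dilation is meant to be a
rational factor applied to elapsed host time. A standalone sketch of how such a
factor would compose with the per-core clock ratio (an assumption about the
intended design, not the committed code; with both factors at 1/1, as
initialized here, guest time simply tracks host time):

#include <stdint.h>

/* Map elapsed host cycles to guest cycles through two rational factors:
 * dilation (td_num/td_denom) and the guest/host clock ratio. 128-bit
 * intermediates (GCC/Clang __int128) avoid overflow. */
static uint64_t dilated_guest_elapsed(uint64_t host_elapsed,
                                      uint32_t td_num, uint32_t td_denom,
                                      uint32_t ratio_num, uint32_t ratio_denom) {
    __uint128_t t = ((__uint128_t)host_elapsed * td_num) / td_denom;
    return (uint64_t)((t * ratio_num) / ratio_denom);
}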
@@ -544,21 +402,25 @@ void v3_init_time_core(struct guest_info * info) {
        time_state->guest_cpu_freq = time_state->host_cpu_freq;
     }
 
+    /* These should be computed using the GCD() of the guest and host CPU
+     * frequencies, scaling up if the GCD is too small; for now the ratio
+     * defaults to 1/1. */
+    time_state->clock_ratio_num = 1;
+    time_state->clock_ratio_denom = 1;
+
     PrintDebug("Logical Core %d (vcpu=%d) CPU frequency set to %d KHz (host CPU frequency = %d KHz).\n",
                info->pcpu_id, info->vcpu_id,
                time_state->guest_cpu_freq, time_state->host_cpu_freq);
 
-    time_state->initial_time = 0;
-    time_state->last_update = 0;
-    time_state->guest_host_offset = 0;
+    time_state->guest_cycles = 0;
     time_state->tsc_guest_offset = 0;
-    time_state->enter_time = 0;
-    time_state->exit_time = 0;
-    time_state->pause_time = 0;
+    time_state->last_update = 0;
+
+    time_state->initial_host_time = 0;
+    time_state->vm_enter_host_time = 0;
+    time_state->vm_pause_host_time = 0;
 
-    INIT_LIST_HEAD(&(time_state->timeout_hooks));
-    time_state->next_timeout = (ullong_t)-1;
+    time_state->time_flags = 0; // XXX need to decide here whether to trap the TSC
 
     INIT_LIST_HEAD(&(time_state->timers));
     time_state->num_timers = 0;
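The comment in v3_init_time_core() leaves the clock ratio at 1/1. A standalone
sketch of the GCD reduction it alludes to (hypothetical helper, not the
committed code):

#include <stdint.h>

/* Euclid's algorithm. */
static uint32_t gcd32(uint32_t a, uint32_t b) {
    while (b != 0) {
        uint32_t r = a % b;
        a = b;
        b = r;
    }
    return a;
}

/* Reduce guest_khz/host_khz to lowest terms so that
 * guest_cycles = (host_cycles * num) / denom stays in small integers. */
static void compute_clock_ratio(uint32_t guest_khz, uint32_t host_khz,
                                uint32_t *num, uint32_t *denom) {
    uint32_t g = gcd32(guest_khz, host_khz);
    *num = guest_khz / g;
    *denom = host_khz / g;
}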