struct guest_info;
-struct vm_time {
+/* Per-VM time information */
+struct v3_time {
+ uint32_t td_mult;
+};
+
+/* Per-core time information */
+struct vm_core_time {
uint32_t host_cpu_freq; // in kHZ
uint32_t guest_cpu_freq; // can be lower than host CPU freq!
uint64_t initial_time; // Time when VMM started.
uint64_t enter_time; // Host time the guest was last entered
uint64_t exit_time; // Host time the the VM was exited to
+ uint64_t pause_time; // Time at which the VM core was paused
struct v3_msr tsc_aux; // Auxilliary MSR for RDTSCP
// Installed Timers slaved off of the guest monotonic TSC
int v3_time_enter_vm(struct guest_info * core);
int v3_time_exit_vm(struct guest_info * core);
-int v3_adjust_time(struct guest_info * core);
+// Freeze / unfreeze the advance of guest time for a core. v3_pause_time()
+// records the current host time; v3_resume_time() re-derives the
+// guest/host offset so guest time continues monotonically from the frozen
+// value. Both return 0 on success, -1 on a pause/resume state mismatch.
+int v3_pause_time(struct guest_info * core);
+int v3_resume_time(struct guest_info * core);
int v3_offset_time(struct guest_info * core, sint64_t offset);
+int v3_adjust_time(struct guest_info * core);
+
// Basic functions for attaching timers to the passage of time - these timers
// should eventually specify their accuracy and resolution.
struct v3_timer * v3_add_timer(struct guest_info * info, struct v3_timer_ops * ops, void * private_data);
int v3_check_timeout(struct guest_info * info);
// Functions to return the different notions of time in Palacios.
+// Returns the current host time as the raw host TSC (via rdtscll).
+// The vm_core_time argument is not read here.
-static inline uint64_t v3_get_host_time(struct vm_time *t) {
+static inline uint64_t v3_get_host_time(struct vm_core_time *t) {
uint64_t tmp;
rdtscll(tmp);
return tmp;
}
// Returns *monotonic* guest time.
+// While the core is paused (pause_time != 0) guest time is frozen at the
+// host time captured by v3_pause_time(); otherwise it tracks the supplied
+// host time ht plus guest_host_offset.
+// NOTE(review): pause_time == 0 doubles as the "not paused" sentinel, so a
+// host TSC reading of exactly 0 would be indistinguishable from unpaused —
+// presumably never observed in practice, but confirm.
-static inline uint64_t v3_compute_guest_time(struct vm_time *t, uint64_t ht) {
-#ifdef V3_CONFIG_TIME_HIDE_VM_COST
- V3_ASSERT(t->exit_time);
- return t->exit_time + t->guest_host_offset;
-#else
- return ht + t->guest_host_offset;
-#endif
+static inline uint64_t v3_compute_guest_time(struct vm_core_time *t, uint64_t ht) {
+ if (t->pause_time)
+ return t->pause_time + t->guest_host_offset;
+ else
+ return ht + t->guest_host_offset;
}
+// Convenience wrapper: the guest time corresponding to the current host time.
-static inline uint64_t v3_get_guest_time(struct vm_time *t) {
+static inline uint64_t v3_get_guest_time(struct vm_core_time *t) {
return v3_compute_guest_time(t, v3_get_host_time(t));
}
// Returns the TSC value seen by the guest
+// Guest TSC = guest time at host time ht, plus the additional
+// tsc_guest_offset (e.g. from guest writes to the TSC MSR).
-static inline uint64_t v3_compute_guest_tsc(struct vm_time *t, uint64_t ht) {
+static inline uint64_t v3_compute_guest_tsc(struct vm_core_time *t, uint64_t ht) {
return v3_compute_guest_time(t, ht) + t->tsc_guest_offset;
}
+// Convenience wrapper: the guest-visible TSC at the current host time.
-static inline uint64_t v3_get_guest_tsc(struct vm_time *t) {
+static inline uint64_t v3_get_guest_tsc(struct vm_core_time *t) {
return v3_compute_guest_tsc(t, v3_get_host_time(t));
}
// Returns offset of guest TSC from host TSC
+// Combined signed offset (time offset + TSC offset) that, added to the host
+// TSC, yields the guest-visible TSC.
-static inline sint64_t v3_tsc_host_offset(struct vm_time *time_state) {
+static inline sint64_t v3_tsc_host_offset(struct vm_core_time *time_state) {
return time_state->guest_host_offset + time_state->tsc_guest_offset;
}
+// Hypercall handler: reports the (virtual) guest CPU frequency to the guest
+// in RBX. The frequency is stored in kHz per the struct field comment above.
static int handle_cpufreq_hcall(struct guest_info * info, uint_t hcall_id, void * priv_data) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
info->vm_regs.rbx = time_state->guest_cpu_freq;
return 0;
}
+/* Freeze the advance of guest time for this core by recording the current
+ * host time in pause_time (read by v3_compute_guest_time()).
+ * Returns 0 on success, -1 if the core is already paused. */
+int v3_pause_time( struct guest_info * info )
+{
+ struct vm_core_time * time_state = &(info->time_state);
+ if (time_state->pause_time != 0) {
+ PrintError("Attempted to pause time when time already paused.\n");
+ return -1;
+ }
+ time_state->pause_time = v3_get_host_time( time_state );
+
+ return 0;
+}
+
+/* Resume guest time after v3_pause_time(): recompute guest_host_offset so
+ * that guest time continues monotonically from the frozen pause-point value
+ * at the current host time, then clear pause_time.
+ * Returns 0 on success, -1 if the core was not paused. */
+int v3_resume_time( struct guest_info * info )
+{
+ struct vm_core_time * time_state = &(info->time_state);
+ uint64_t host_time, guest_time;
+ sint64_t offset;
+
+ if (time_state->pause_time == 0) {
+ PrintError("Attempted to resume time when time not paused.\n");
+ return -1;
+ }
+
+ host_time = v3_get_host_time(time_state);
+ // pause_time is still set here, so guest_time is the frozen pause-point
+ // value (pause_time + old guest_host_offset).
+ guest_time = v3_compute_guest_time(time_state, host_time);
+ offset = (sint64_t)guest_time - (sint64_t)host_time;
+ time_state->guest_host_offset = offset;
+ time_state->pause_time = 0;
+
+ return 0;
+}
+
int v3_offset_time( struct guest_info * info, sint64_t offset )
{
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
PrintDebug("Adding additional offset of %lld to guest time.\n", offset);
time_state->guest_host_offset += offset;
return 0;
#ifdef V3_CONFIG_TIME_DILATION
static uint64_t compute_target_host_time(struct guest_info * info, uint64_t guest_time)
{
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
uint64_t guest_elapsed, desired_elapsed;
guest_elapsed = (guest_time - time_state->initial_time);
static uint64_t compute_target_guest_time(struct guest_info *info)
{
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
uint64_t host_elapsed, desired_elapsed;
host_elapsed = v3_get_host_time(time_state) - time_state->initial_time;
/* Yield time in the host to deal with a guest that wants to run slower than
* the native host cycle frequency */
static int yield_host_time(struct guest_info * info) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
uint64_t host_time, target_host_time;
uint64_t guest_time, old_guest_time;
}
static int skew_guest_time(struct guest_info * info) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
uint64_t target_guest_time, guest_time;
/* Now the host may have gotten ahead of the guest because
* yielding is a coarse grained thing. Figure out what guest time
+/* Called on every VM exit: records the host exit time and, under time
+ * dilation, pauses guest time so VMM overhead is hidden from the guest. */
int
v3_time_exit_vm( struct guest_info * info )
{
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
time_state->exit_time = v3_get_host_time(time_state);
+#ifdef V3_CONFIG_TIME_DILATION
+ // NOTE(review): v3_pause_time()'s error return (-1 when already paused)
+ // is ignored here — confirm that double-exit cannot occur.
+ v3_pause_time( info );
+#endif
+
return 0;
}
+/* Called on every VM entry: records the host entry time and, under time
+ * dilation, resumes guest time from its paused value; otherwise guest time
+ * is pinned directly to host time (offset 0). */
int
v3_time_enter_vm( struct guest_info * info )
{
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
uint64_t host_time;
host_time = v3_get_host_time(time_state);
time_state->enter_time = host_time;
#ifdef V3_CONFIG_TIME_DILATION
- {
- uint64_t guest_time;
- sint64_t offset;
- guest_time = v3_compute_guest_time(time_state, host_time);
- // XXX we probably want to use an inline function to do these
- // time differences to deal with sign and overflow carefully
- offset = (sint64_t)guest_time - (sint64_t)host_time;
- PrintDebug("v3_time_enter_vm: guest time offset %lld from host time.\n", offset);
- time_state->guest_host_offset = offset;
- }
+ // NOTE(review): v3_resume_time()'s error return (-1 when not paused) is
+ // ignored here — confirm entry cannot happen without a prior exit/pause.
+ v3_resume_time( info );
#else
time_state->guest_host_offset = 0;
#endif
}
void v3_update_timers(struct guest_info * info) {
- struct vm_time *time_state = &info->time_state;
+ struct vm_core_time *time_state = &info->time_state;
struct v3_timer * tmp_timer;
sint64_t cycles;
uint64_t old_time = info->time_state.last_update;
}
int v3_schedule_timeout(struct guest_info * info, ullong_t guest_timeout) {
- struct vm_time *time_state = &info->time_state;
+ struct vm_core_time *time_state = &info->time_state;
/* Note that virtualization architectures that support it (like newer
* VMX systems) will turn on an active preemption timeout if
* available to get this timeout as closely as possible. Other systems
}
int v3_check_timeout( struct guest_info * info ) {
- struct vm_time *time_state = &info->time_state;
+ struct vm_core_time *time_state = &info->time_state;
if (time_state->next_timeout <= v3_get_guest_time(time_state)) {
struct v3_timeout_hook * tmp_timeout;
time_state->next_timeout = (ullong_t)-1;
static int tsc_aux_msr_read_hook(struct guest_info *info, uint_t msr_num,
struct v3_msr *msr_val, void *priv) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
V3_ASSERT(msr_num == TSC_AUX_MSR);
static int tsc_aux_msr_write_hook(struct guest_info *info, uint_t msr_num,
struct v3_msr msr_val, void *priv) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
V3_ASSERT(msr_num == TSC_AUX_MSR);
static int tsc_msr_write_hook(struct guest_info *info, uint_t msr_num,
struct v3_msr msr_val, void *priv) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
uint64_t guest_time, new_tsc;
V3_ASSERT(msr_num == TSC_MSR);
ret = v3_register_hypercall(vm, TIME_CPUFREQ_HCALL,
handle_cpufreq_hcall, NULL);
+ PrintDebug("Setting base time dilation factor.\n");
+ vm->time_state.td_mult = 1;
+
return ret;
}
}
void v3_init_time_core(struct guest_info * info) {
- struct vm_time * time_state = &(info->time_state);
+ struct vm_core_time * time_state = &(info->time_state);
v3_cfg_tree_t * cfg_tree = info->core_cfg_data;
char * khz = NULL;
time_state->last_update = 0;
time_state->guest_host_offset = 0;
time_state->tsc_guest_offset = 0;
+ time_state->enter_time = 0;
+ time_state->exit_time = 0;
+ time_state->pause_time = 0;
INIT_LIST_HEAD(&(time_state->timeout_hooks));
time_state->next_timeout = (ullong_t)-1;
void v3_deinit_time_core(struct guest_info * core) {
- struct vm_time * time_state = &(core->time_state);
+ struct vm_core_time * time_state = &(core->time_state);
struct v3_timer * tmr = NULL;
struct v3_timer * tmp = NULL;