struct guest_info;
+
/* Per-VM time information */
struct v3_time {
- uint32_t td_num, td_denom; /* Currently unused! */
- char follow_host_time;
+ int flags;
+ uint32_t td_num, td_denom;
};
-
-#define V3_TIME_TRAP_RDTSC 0x1
+#define V3_TIME_SLAVE_HOST (1 << 1)
/* Per-core time information */
struct vm_core_time {
uint32_t host_cpu_freq; // in kHZ
uint32_t guest_cpu_freq; // can be lower than host CPU freq!
- uint32_t clock_ratio_num; // Multipliers for converting from host
- uint32_t clock_ratio_denom;// cycles to guest cycles.
+ // Multipliers for TSC speed and performance speed
+ uint32_t clock_ratio_num, clock_ratio_denom;
+ uint32_t ipc_ratio_num, ipc_ratio_denom;
uint64_t guest_cycles;
sint64_t tsc_guest_offset; // Offset of guest TSC from guest cycles
// timers were updated
uint64_t initial_host_time;// Host time when VMM started.
- uint64_t vm_enter_host_time; // Host time the guest was last entered
- uint64_t vm_pause_host_time; // Host time when we went into the VMM
struct v3_msr tsc_aux; // Auxilliary MSR for RDTSCP
- int time_flags;
+ int flags;
// Installed Timers slaved off of the guest monotonic TSC
uint_t num_timers;
};
+#define VM_TIME_TRAP_RDTSC (1 << 0)
+#define VM_TIME_SLAVE_HOST (1 << 1)
+
struct v3_timer_ops {
void (*update_timer)(struct guest_info * info, ullong_t cpu_cycles, ullong_t cpu_freq, void * priv_data);
void (*advance_timer)(struct guest_info * info, void * private_data);
int v3_start_time(struct guest_info * core);
-int v3_time_enter_vm(struct guest_info * core);
-int v3_time_exit_vm(struct guest_info * core, uint64_t * guest_cycles);
-
-int v3_offset_time(struct guest_info * core, sint64_t offset);
-int v3_skip_time(struct guest_info * core);
-int v3_advance_time(struct guest_info * core);
+int v3_advance_time(struct guest_info * core, uint64_t * guest_cycles);
// Basic functions for attaching timers to the passage of time - these timers
// should eventually specify their accuracy and resolution.
v3_svm_config_tsc_virtualization(struct guest_info * info) {
vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
- if (info->time_state.time_flags & V3_TIME_TRAP_RDTSC) {
+ if (info->time_state.flags & VM_TIME_TRAP_RDTSC) {
ctrl_area->instrs.RDTSC = 1;
ctrl_area->svm_instrs.RDTSCP = 1;
} else {
// Conditionally yield the CPU if the timeslice has expired
v3_yield_cond(info);
+ // Update timer devices after being in the VM before doing
+ // IRQ updates, so that any interrupts they raise get seen
+ // immediately.
+ v3_advance_time(info, NULL);
+ v3_update_timers(info);
+
// disable global interrupts for vm state transition
v3_clgi();
- // Update timer devices after being in the VM, with interupts
- // disabled, but before doing IRQ updates, so that any interrupts they
- //raise get seen immediately.
- v3_advance_time(info);
- v3_update_timers(info);
-
// Synchronize the guest state to the VMCB
guest_state->cr0 = info->ctrl_regs.cr0;
guest_state->cr2 = info->ctrl_regs.cr2;
}
#endif
- v3_time_enter_vm(info);
v3_svm_config_tsc_virtualization(info);
//V3_Print("Calling v3_svm_launch\n");
v3_last_exit = (uint32_t)(guest_ctrl->exit_code);
- // Immediate exit from VM time bookkeeping
- v3_time_exit_vm(info, &guest_cycles);
+ v3_advance_time(info, &guest_cycles);
info->num_exits++;
// Conditionally yield the CPU if the timeslice has expired
v3_yield_cond(info);
+ // This update timers is for time-dependent handlers
+ // if we're slaved to host time
+ v3_advance_time(info, NULL);
+ v3_update_timers(info);
+
{
int ret = v3_handle_svm_exit(info, exit_code, exit_info1, exit_info2);
/* We start running with guest_time == host_time */
uint64_t t = v3_get_host_time(&info->time_state);
- info->time_state.vm_enter_host_time = 0;
- info->time_state.vm_pause_host_time = t;
info->time_state.initial_host_time = t;
info->yield_start_cycle = t;
return 0;
}
-int v3_offset_time( struct guest_info * info, sint64_t offset )
-{
- struct vm_core_time * time_state = &(info->time_state);
- if (info->vm_info->time_state.follow_host_time) {
- PrintError("Cannot offset guest time passage while slaved to host clock.\n");
- return 1;
- } else {
- time_state->guest_cycles += offset;
- }
- return 0;
-}
+static sint64_t
+host_to_guest_cycles(struct guest_info * info, sint64_t host_cycles) {
+ struct vm_core_time * core_time_state = &(info->time_state);
+ uint32_t cl_num, cl_denom;
-int v3_skip_time(struct guest_info * info) {
- if (info->vm_info->time_state.follow_host_time) {
- PrintError("Cannot skip guest time passage while slaved to host clock.\n");
- return 1;
- } else {
- info->time_state.vm_pause_host_time = v3_get_host_time(&info->time_state);
- }
- return 0;
-}
+ cl_num = core_time_state->clock_ratio_num;
+ cl_denom = core_time_state->clock_ratio_denom;
-static sint64_t host_to_guest_cycles(struct guest_info * info, sint64_t host_cycles) {
- return (host_cycles * info->time_state.clock_ratio_num) / info->time_state.clock_ratio_denom;
+ return (host_cycles * cl_num) / cl_denom;
}
-int v3_time_advance_cycles(struct guest_info * info, uint64_t *host_cycles)
-{
- uint64_t t = v3_get_host_time(&info->time_state);
-
- info->time_state.vm_pause_host_time = t;
+/*
+static sint64_t
+guest_to_host_cycles(struct guest_info * info, sint64_t guest_cycles) {
+ struct vm_core_time * core_time_state = &(info->time_state);
+ uint32_t cl_num, cl_denom;
- if (info->vm_info->time_state.follow_host_time) {
- /* How many guest cycles should have elapsed? */
- sint64_t host_elapsed = t - info->time_state.initial_host_time;
- sint64_t guest_elapsed = host_to_guest_cycles(info, host_elapsed);
+ cl_num = core_time_state->clock_ratio_num;
+ cl_denom = core_time_state->clock_ratio_denom;
- info->time_state.guest_cycles = guest_elapsed;
- } else {
- uint64_t guest_cycles;
- if (*host_cycles) {
- guest_cycles = host_to_guest_cycles(info, *host_cycles);
- } else {
- guest_cycles = host_to_guest_cycles(info, (sint64_t)(t - info->time_state.vm_pause_host_time));
- }
- info->time_state.guest_cycles += guest_cycles;
- }
-
- return 0;
-}
-
-int v3_advance_time(struct guest_info * info) {
- return v3_time_advance_cycles(info, NULL);
-}
-
-/* Called immediately upon entry in the the VMM */
-int
-v3_time_exit_vm( struct guest_info * info, uint64_t * host_cycles )
-{
- return v3_time_advance_cycles(info, host_cycles);
+ return (guest_cycles * cl_denom) / cl_num;
}
+*/
-/* Called immediately prior to entry to the VM */
-int
-v3_time_enter_vm( struct guest_info * info )
+int v3_advance_time(struct guest_info * info, uint64_t *host_cycles)
{
- struct vm_core_time * time_state = &(info->time_state);
- uint64_t host_time = v3_get_host_time(&info->time_state);
+ uint64_t guest_cycles;
+
+ if (info->flags & VM_TIME_SLAVE_HOST) {
+ struct v3_time *vm_ts = &(info->vm_info->time_state);
+ uint64_t ht = v3_get_host_time(&info->time_state);
+ uint64_t host_elapsed = ht - info->time_state.initial_host_time;
+ uint64_t dilated_elapsed = (host_elapsed * vm_ts->td_num) / vm_ts->td_denom;
+ uint64_t guest_elapsed = host_to_guest_cycles(info, dilated_elapsed);
+ guest_cycles = guest_elapsed - v3_get_guest_time(&info->time_state);
+ } else if (*host_cycles) {
+ guest_cycles = host_to_guest_cycles(info, *host_cycles);
+ } else {
+ guest_cycles = 0;
+ }
+
+ info->time_state.guest_cycles += guest_cycles;
- time_state->vm_enter_host_time = host_time;
return 0;
-}
-
+}
-
struct v3_timer * v3_add_timer(struct guest_info * info,
struct v3_timer_ops * ops,
void * private_data) {
return;
}
- PrintDebug("Updating timers with %lld elapsed cycles.\n", cycles);
+ //PrintDebug("Updating timers with %lld elapsed cycles.\n", cycles);
list_for_each_entry(tmp_timer, &(time_state->timers), timer_link) {
tmp_timer->ops->update_timer(info, cycles, time_state->guest_cpu_freq, tmp_timer->private_data);
}
return 0;
}
+static int
+handle_time_configuration(struct v3_vm_info * vm, v3_cfg_tree_t *cfg) {
+ v3_cfg_tree_t * slave;
+
+ vm->time_state.flags = 0;
+ vm->time_state.td_num = vm->time_state.td_denom = 1;
+
+ if (!cfg) return 0;
+
+ slave = v3_cfg_subtree(cfg, "slave");
+
+ if (slave) {
+ char *source = v3_cfg_val(slave, "source");
+ v3_cfg_tree_t *dilation = v3_cfg_subtree(slave, "dilation");
+ if (source) {
+ if (strcasecmp(source, "host") == 0) {
+ PrintDebug("Slaving VM guest time to host time.\n");
+ vm->time_state.flags |= V3_TIME_SLAVE_HOST;
+ } else {
+ PrintError("Unknown time source for slaving.\n");
+ }
+ }
+ if (dilation && (vm->time_state.flags & V3_TIME_SLAVE_HOST)) {
+ char *str1, *str2;
+ uint32_t num = 1, denom = 1;
+ if ((str1 = v3_cfg_val(dilation, "value"))) {
+ denom = atoi(str1);
+ } else if ((str1 = v3_cfg_val(dilation, "num"))
+ && (str2 = v3_cfg_val(dilation, "denom"))) {
+ num = atoi(str1);
+ denom = atoi(str2);
+ }
+ if ((num > 0) && (denom > 0)) {
+ vm->time_state.td_num = num;
+ vm->time_state.td_denom = denom;
+ }
+ if ((vm->time_state.td_num != 1)
+ || (vm->time_state.td_denom != 1)) {
+ V3_Print("Time dilated from host time by a factor of %d/%d"
+ " in guest.\n", denom, num);
+ } else {
+ PrintError("Time dilation specifier in configuration did not"
+ " result in actual time dilation in VM.\n");
+ }
+ }
+ }
+ return 0;
+}
int v3_init_time_vm(struct v3_vm_info * vm) {
+ v3_cfg_tree_t * cfg_tree = vm->cfg_data->cfg;
int ret;
-
+
PrintDebug("Installing TSC MSR hook.\n");
ret = v3_hook_msr(vm, TSC_MSR,
tsc_msr_read_hook, tsc_msr_write_hook, NULL);
ret = v3_register_hypercall(vm, TIME_CPUFREQ_HCALL,
handle_cpufreq_hcall, NULL);
- vm->time_state.td_num = 1;
- vm->time_state.td_denom = 1;
- PrintDebug("Setting base time dilation factor to %d/%d.\n",
- vm->time_state.td_num, vm->time_state.td_denom);
+ handle_time_configuration(vm, v3_cfg_subtree(cfg_tree, "time"));
- vm->time_state.follow_host_time = 1;
- PrintDebug("Locking guest time to host time.\n");
return ret;
}
v3_remove_hypercall(vm, TIME_CPUFREQ_HCALL);
}
/* Greatest common divisor (iterative Euclid). gcd(0, b) == b. */
static uint32_t
gcd ( uint32_t a, uint32_t b )
{
    while (a != 0) {
	uint32_t r = b % a;
	b = a;
	a = r;
    }
    return b;
}
+
+static int compute_core_ratios(struct guest_info * info,
+ uint32_t hostKhz, uint32_t guestKhz)
+{
+ struct vm_core_time * time_state = &(info->time_state);
+ uint32_t khzGCD;
+
+ /* Compute these using the GCD() of the guest and host CPU freq.
+ * If the GCD is too small, make it "big enough" */
+ khzGCD = gcd(hostKhz, guestKhz);
+ if (khzGCD < 1024)
+ khzGCD = 1000;
+
+ time_state->clock_ratio_num = guestKhz / khzGCD;
+ time_state->clock_ratio_denom = hostKhz / khzGCD;
+
+ time_state->ipc_ratio_num = 1;
+ time_state->ipc_ratio_denom = 1;
+
+ return 0;
+}
+
void v3_init_time_core(struct guest_info * info) {
struct vm_core_time * time_state = &(info->time_state);
v3_cfg_tree_t * cfg_tree = info->core_cfg_data;
time_state->guest_cpu_freq = time_state->host_cpu_freq;
}
-
- /* Compute these using the GCD() of the guest and host CPU freq.
- * If the GCD is too small, make it "big enough" */
- time_state->clock_ratio_num = 1;
- time_state->clock_ratio_denom = 1;
-
PrintDebug("Logical Core %d (vcpu=%d) CPU frequency set to %d KHz (host CPU frequency = %d KHz).\n",
info->pcpu_id, info->vcpu_id,
time_state->guest_cpu_freq,
time_state->host_cpu_freq);
+ compute_core_ratios(info, time_state->host_cpu_freq,
+ time_state->guest_cpu_freq);
+
+ PrintDebug(" td_mult = %d/%d, cl_mult = %u/%u, ipc_mult = %u/%u.\n",
+ info->vm_info->time_state.td_num,
+ info->vm_info->time_state.td_denom,
+ time_state->clock_ratio_num, time_state->clock_ratio_denom,
+ time_state->ipc_ratio_num, time_state->ipc_ratio_denom);
time_state->guest_cycles = 0;
time_state->tsc_guest_offset = 0;
time_state->last_update = 0;
time_state->initial_host_time = 0;
- time_state->vm_enter_host_time = 0;
- time_state->vm_pause_host_time = 0;
- time_state->time_flags = 0; // XXX need to set trap TSC flag or not wisely
+ time_state->flags = 0;
+ if (info->vm_info->time_state.flags & V3_TIME_SLAVE_HOST) {
+ time_state->flags |= VM_TIME_SLAVE_HOST;
+ }
+ if ((time_state->clock_ratio_denom != 1) ||
+ (time_state->clock_ratio_num != 1)) {
+ time_state->flags |= VM_TIME_TRAP_RDTSC;
+ }
INIT_LIST_HEAD(&(time_state->timers));
time_state->num_timers = 0;
v3_vmx_config_tsc_virtualization(struct guest_info * info) {
struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);
- if (info->time_state.time_flags & V3_TIME_TRAP_RDTSC) {
+ if (info->time_state.flags & VM_TIME_TRAP_RDTSC) {
if (!vmx_info->pri_proc_ctrls.rdtsc_exit) {
vmx_info->pri_proc_ctrls.rdtsc_exit = 1;
check_vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
// Conditionally yield the CPU if the timeslice has expired
v3_yield_cond(info);
- // disable global interrupts for vm state transition
- v3_disable_ints();
-
// Update timer devices late after being in the VM so that as much
// of the time in the VM is accounted for as possible. Also do it before
// updating IRQ entry state so that any interrupts the timers raise get
- // handled on the next VM entry. Must be done with interrupts disabled.
- v3_advance_time(info);
+ // handled on the next VM entry.
+ v3_advance_time(info, NULL);
v3_update_timers(info);
+ // disable global interrupts for vm state transition
+ v3_disable_ints();
+
if (vmcs_store() != vmx_info->vmcs_ptr_phys) {
vmcs_clear(vmx_info->vmcs_ptr_phys);
vmcs_load(vmx_info->vmcs_ptr_phys);
}
- // Perform last-minute time bookkeeping prior to entering the VM
- v3_time_enter_vm(info);
+ // Perform last-minute time setup prior to entering the VM
v3_vmx_config_tsc_virtualization(info);
-
-
if (v3_update_vmcs_host_state(info)) {
v3_enable_ints();
PrintError("Could not write host state\n");
}
// Immediate exit from VM time bookkeeping
- v3_time_exit_vm(info, &guest_cycles);
-
+ v3_advance_time(info, &guest_cycles);
/* Update guest state */
v3_vmx_save_vmcs(info);
// Conditionally yield the CPU if the timeslice has expired
v3_yield_cond(info);
+ v3_advance_time(info, NULL);
+ v3_update_timers(info);
if (v3_handle_vmx_exit(info, &exit_info) == -1) {
PrintError("Error in VMX exit handler (Exit reason=%x)\n", exit_info.exit_reason);