control of whether RDTSC traps to both SVM and VMX.
char follow_host_time;
};
+#define V3_TIME_TRAP_RDTSC 0x1
+
/* Per-core time information */
struct vm_core_time {
uint32_t host_cpu_freq; // in kHZ
uint64_t vm_pause_host_time; // Host time when we went into the VMM
struct v3_msr tsc_aux; // Auxilliary MSR for RDTSCP
+ int time_flags;
+
// Installed Timers slaved off of the guest monotonic TSC
uint_t num_timers;
struct list_head timers;
ctrl_area->instrs.HLT = 1;
-#ifdef V3_CONFIG_TIME_VIRTUALIZE_TSC
- ctrl_area->instrs.RDTSC = 1;
- ctrl_area->svm_instrs.RDTSCP = 1;
-#endif
+ /* Set at VMM launch as needed */
+ ctrl_area->instrs.RDTSC = 0;
+ ctrl_area->svm_instrs.RDTSCP = 0;
// guest_state->cr0 = 0x00000001; // PE
return 0;
}
+/* Configure SVM TSC virtualization for this core, called before each
+ * VM entry.
+ *
+ * If V3_TIME_TRAP_RDTSC is set in the core's time_flags, enable the
+ * RDTSC/RDTSCP intercepts so those instructions exit to the VMM for
+ * emulation.  Otherwise clear the intercepts so the guest executes
+ * them natively, and program TSC_OFFSET so hardware applies the
+ * current guest/host TSC delta to native reads.
+ *
+ * Always returns 0 (no failure paths).
+ */
+int
+v3_svm_config_tsc_virtualization(struct guest_info * info) {
+ vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
+
+ if (info->time_state.time_flags & V3_TIME_TRAP_RDTSC) {
+ ctrl_area->instrs.RDTSC = 1;
+ ctrl_area->svm_instrs.RDTSCP = 1;
+ } else {
+ ctrl_area->instrs.RDTSC = 0;
+ ctrl_area->svm_instrs.RDTSCP = 0;
+ /* Offset only matters when RDTSC runs natively; when trapping,
+ * the VMM supplies the value itself, so it is left untouched
+ * in the branch above. */
+ ctrl_area->TSC_OFFSET = v3_tsc_host_offset(&info->time_state);
+ }
+ return 0;
+}
/*
* CAUTION and DANGER!!!
vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
addr_t exit_code = 0, exit_info1 = 0, exit_info2 = 0;
- sint64_t tsc_offset;
uint64_t guest_cycles = 0;
// Conditionally yield the CPU if the timeslice has expired
#endif
v3_time_enter_vm(info);
- tsc_offset = v3_tsc_host_offset(&info->time_state);
- guest_ctrl->TSC_OFFSET = tsc_offset;
-
+ v3_svm_config_tsc_virtualization(info);
//V3_Print("Calling v3_svm_launch\n");
{
// (void *)cur_cycle, (void *)info->yield_start_cycle,
// (void *)info->yield_cycle_period);
+ info->yield_start_cycle += info->vm_info->yield_cycle_period;
V3_Yield();
- info->yield_start_cycle = v3_get_host_time(&info->time_state);
}
}
time_state->vm_enter_host_time = 0;
time_state->vm_pause_host_time = 0;
+ time_state->time_flags = 0; // XXX need to set virtualize TSC flag or not...
+
INIT_LIST_HEAD(&(time_state->timers));
time_state->num_timers = 0;
}
+/* Configure VMX TSC virtualization for this core, called before each
+ * VM entry.  VMX counterpart of v3_svm_config_tsc_virtualization().
+ *
+ * If V3_TIME_TRAP_RDTSC is set in the core's time_flags, turn on the
+ * rdtsc_exit primary processor control so RDTSC exits to the VMM.
+ * Otherwise clear that control and write the current guest/host TSC
+ * delta into the VMCS TSC-offset field (split into 32-bit halves).
+ *
+ * The cached pri_proc_ctrls copy is compared first so the (relatively
+ * expensive) VMCS write is only issued when the control actually
+ * changes.  Always returns 0; check_vmcs_write() results are not
+ * propagated here.
+ */
+int
+v3_vmx_config_tsc_virtualization(struct guest_info * info) {
+ struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);
+
+ if (info->time_state.time_flags & V3_TIME_TRAP_RDTSC) {
+ if (!vmx_info->pri_proc_ctrls.rdtsc_exit) {
+ vmx_info->pri_proc_ctrls.rdtsc_exit = 1;
+ check_vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
+ }
+ } else {
+ sint64_t tsc_offset;
+ uint32_t tsc_offset_low, tsc_offset_high;
+
+ if (vmx_info->pri_proc_ctrls.rdtsc_exit) {
+ vmx_info->pri_proc_ctrls.rdtsc_exit = 0;
+ check_vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
+ }
+ /* VMCS TSC offset is written as two 32-bit fields. */
+ tsc_offset = v3_tsc_host_offset(&info->time_state);
+ tsc_offset_high = (uint32_t)(( tsc_offset >> 32) & 0xffffffff);
+ tsc_offset_low = (uint32_t)(tsc_offset & 0xffffffff);
+
+ check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
+ check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);
+ }
+ return 0;
+}
/*
* CAUTION and DANGER!!!
*/
int v3_vmx_enter(struct guest_info * info) {
int ret = 0;
- sint64_t tsc_offset;
- uint32_t tsc_offset_low, tsc_offset_high;
struct vmx_exit_info exit_info;
struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);
uint64_t guest_cycles = 0;
// Perform last-minute time bookkeeping prior to entering the VM
v3_time_enter_vm(info);
-
- tsc_offset = v3_tsc_host_offset(&info->time_state);
- tsc_offset_high = (uint32_t)(( tsc_offset >> 32) & 0xffffffff);
- tsc_offset_low = (uint32_t)(tsc_offset & 0xffffffff);
-
- check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
- check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);
+ v3_vmx_config_tsc_virtualization(info);