}
// Returns *monotonic* guest time.
-static inline uint64_t v3_get_guest_time(struct vm_time *t) {
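+// Takes the host time (ht) as a parameter so callers can reuse a single host-time sample.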
+static inline uint64_t v3_compute_guest_time(struct vm_time *t, uint64_t ht) {
#ifdef V3_CONFIG_TIME_HIDE_VM_COST
V3_ASSERT(t->exit_time);
return t->exit_time + t->guest_host_offset;
+#else
+ // Assumed: when exit-cost hiding is off, guest time is the supplied host time plus the guest/host offset.
+ return ht + t->guest_host_offset;
#endif
}
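+// Samples the current host time and converts it to guest time.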
+static inline uint64_t v3_get_guest_time(struct vm_time *t) {
+ return v3_compute_guest_time(t, v3_get_host_time(t));
+}
+
// Returns the TSC value seen by the guest
+static inline uint64_t v3_compute_guest_tsc(struct vm_time *t, uint64_t ht) {
+ return v3_compute_guest_time(t, ht) + t->tsc_guest_offset;
+}
+
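+// Samples the current host time and converts it to the guest-visible TSC.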
static inline uint64_t v3_get_guest_tsc(struct vm_time *t) {
- return v3_get_guest_time(t) + t->tsc_guest_offset;
+ return v3_compute_guest_tsc(t, v3_get_host_time(t));
}
// Returns offset of guest TSC from host TSC
struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
// raise irq
- PrintDebug("apic %u: core %u: Raising APIC Timer interrupt (periodic=%d) (icnt=%d) (div=%d)\n",
+ PrintDebug("apic %u: core %u: Raising APIC Timer interrupt (periodic=%d) (icnt=%d)\n",
apic->lapic_id.val, core->vcpu_id,
- apic->tmr_vec_tbl.tmr_mode, apic->tmr_init_cnt, shift_num);
+ apic->tmr_vec_tbl.tmr_mode, apic->tmr_init_cnt);
if (apic_intr_pending(core, priv_data)) {
PrintDebug("apic %u: core %u: Overriding pending IRQ %d\n",
if (tmr_ticks < apic->tmr_cur_cnt) {
apic->tmr_cur_cnt -= tmr_ticks;
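+ // Timer has not expired yet; use this update to deliver at most one previously deferred interrupt.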
if (apic->missed_ints) {
+ PrintDebug("apic %u: core %u: Injecting queued APIC timer interrupt.\n",
+ apic->lapic_id.val, core->vcpu_id);
apic_inject_timer_intr(core, priv_data);
apic->missed_ints--;
}
apic_inject_timer_intr(core, priv_data);
if (apic->tmr_vec_tbl.tmr_mode == APIC_TMR_PERIODIC) {
- apic->missed_ints += tmr_ticks / apic->tmr_init_cnt;
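+ // More than one timer period may have elapsed; queue the extra expirations and inject them on later updates.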
+ uint32_t queued_ints = tmr_ticks / apic->tmr_init_cnt;
+ if (queued_ints) {
+ PrintDebug("apic %u: core %u: Deferring %u APIC timer interrupts.\n",
+ apic->lapic_id.val, core->vcpu_id, queued_ints);
+ }
+ apic->missed_ints += queued_ints;
tmr_ticks = tmr_ticks % apic->tmr_init_cnt;
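+ // Restart the count, less any ticks already consumed past the last expiry.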
apic->tmr_cur_cnt = apic->tmr_init_cnt - tmr_ticks;
}
}
#ifdef V3_CONFIG_TIME_DILATION
-static uint64_t compute_target_host_time(struct guest_info * info)
+static uint64_t compute_target_host_time(struct guest_info * info, uint64_t guest_time)
{
struct vm_time * time_state = &(info->time_state);
uint64_t guest_elapsed, desired_elapsed;
- guest_elapsed = (v3_get_guest_time(time_state) - time_state->initial_time);
+ guest_elapsed = (guest_time - time_state->initial_time);
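+ /* Scale elapsed guest time by the host/guest frequency ratio to find how much host time should have passed. */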
desired_elapsed = (guest_elapsed * time_state->host_cpu_freq) / time_state->guest_cpu_freq;
return time_state->initial_time + desired_elapsed;
}
uint64_t host_time, target_host_time;
uint64_t guest_time, old_guest_time;
- /* Compute the target host time given how much time has *already*
- * passed in the guest */
- target_host_time = compute_target_host_time(info);
-
/* Now, let the host run while the guest is stopped to make the two
* sync up. Note that this doesn't assume that guest time is stopped;
* the offsetting in the next step will add an offset to guest
* time to account for the time spent paused, even if the guest isn't
* usually paused in the VMM. */
host_time = v3_get_host_time(time_state);
- old_guest_time = v3_get_guest_time(time_state);
+ old_guest_time = v3_compute_guest_time(time_state, host_time);
+ target_host_time = compute_target_host_time(info, old_guest_time);
while (target_host_time > host_time) {
v3_yield(info);
host_time = v3_get_host_time(time_state);
}
- guest_time = v3_get_guest_time(time_state);
+ guest_time = v3_compute_guest_time(time_state, host_time);
/* We do *not* assume the guest timer was paused in the VM. If it was,
* this offsetting is 0. If it wasn't, we need this. */