uint32_t tmr_cur_cnt;
uint32_t tmr_init_cnt;
-
+ uint32_t missed_ints; // timer interrupts that expired but have not yet been injected
struct local_vec_tbl_reg ext_intr_vec_tbl[4];
apic->rem_rd_data = 0x00000000;
apic->tmr_init_cnt = 0x00000000;
apic->tmr_cur_cnt = 0x00000000;
+ apic->missed_ints = 0;
apic->lapic_id.val = id;
}
+/* Timer Functions */
+
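+/* Raise the local APIC timer interrupt on the given core; logs the case
+ * where a still-pending interrupt gets overridden. */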
+static void apic_inject_timer_intr(struct guest_info *core,
+ void * priv_data) {
+ struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
+ struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
+ // raise irq
+ PrintDebug("apic %u: core %u: Raising APIC Timer interrupt (periodic=%d) (icnt=%d) (div=%d)\n",
+ apic->lapic_id.val, core->vcpu_id,
+ apic->tmr_vec_tbl.tmr_mode, apic->tmr_init_cnt, shift_num);
+
+ if (apic_intr_pending(core, priv_data)) {
+ PrintDebug("apic %u: core %u: Overriding pending IRQ %d\n",
+ apic->lapic_id.val, core->vcpu_id,
+ apic_get_intr_number(core, priv_data));
+ }
+ if (activate_internal_irq(apic, APIC_TMR_INT) == -1) {
+ PrintError("apic %u: core %u: Could not raise Timer interrupt\n",
+ apic->lapic_id.val, core->vcpu_id);
+ }
+
+ return;
+}
+
-/* Timer Functions */
static void apic_update_time(struct guest_info * core,
uint64_t cpu_cycles, uint64_t cpu_freq,
if (tmr_ticks < apic->tmr_cur_cnt) {
apic->tmr_cur_cnt -= tmr_ticks;
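+ // Catch up on a previously missed periodic tick: inject one interrupt and shrink the backlog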
+ if (apic->missed_ints) {
+ apic_inject_timer_intr(core, priv_data);
+ apic->missed_ints--;
+ }
} else {
tmr_ticks -= apic->tmr_cur_cnt;
apic->tmr_cur_cnt = 0;
- // raise irq
- PrintDebug("apic %u: core %u: Raising APIC Timer interrupt (periodic=%d) (icnt=%d) (div=%d)\n",
- apic->lapic_id.val, core->vcpu_id,
- apic->tmr_vec_tbl.tmr_mode, apic->tmr_init_cnt, shift_num);
-
- if (apic_intr_pending(core, priv_data)) {
- PrintDebug("apic %u: core %u: Overriding pending IRQ %d\n",
- apic->lapic_id.val, core->vcpu_id,
- apic_get_intr_number(core, priv_data));
- }
+ apic_inject_timer_intr(core, priv_data);
- if (activate_internal_irq(apic, APIC_TMR_INT) == -1) {
- PrintError("apic %u: core %u: Could not raise Timer interrupt\n",
- apic->lapic_id.val, core->vcpu_id);
- }
-
if (apic->tmr_vec_tbl.tmr_mode == APIC_TMR_PERIODIC) {
- static unsigned int nexits = 0;
- static unsigned int missed_ints = 0;
-
- nexits++;
- missed_ints += tmr_ticks / apic->tmr_init_cnt;
-
- if ((missed_ints > 0) && (nexits >= 5000)) {
- V3_Print("apic %u: core %u: missed %u timer interrupts total in last %u exits.\n",
- apic->lapic_id.val, core->vcpu_id, missed_ints, nexits);
- missed_ints = 0;
- nexits = 0;
- }
-
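+ // Track how many additional full periods elapsed in this interval; they are injected on later updates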
+ apic->missed_ints += tmr_ticks / apic->tmr_init_cnt;
tmr_ticks = tmr_ticks % apic->tmr_init_cnt;
apic->tmr_cur_cnt = apic->tmr_init_cnt - tmr_ticks;
}
return;
}
-
static struct intr_ctrl_ops intr_ops = {
.intr_pending = apic_intr_pending,
.get_intr_number = apic_get_intr_number,
guest_time = v3_get_guest_time(time_state);
if (guest_time < target_guest_time) {
- uint64_t max_skew, desired_skew, skew;
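+ /* Signed arithmetic so a negative difference cannot wrap into a huge skew */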
+ sint64_t max_skew, desired_skew, skew;
if (time_state->enter_time) {
/* Limit forward skew to 10% of the amount the guest has
* run since we last could skew time */
- max_skew = (guest_time - time_state->enter_time) / 10;
+ max_skew = ((sint64_t)guest_time - (sint64_t)time_state->enter_time) / 10;
} else {
max_skew = 0;
}
- desired_skew = target_guest_time - guest_time;
+ desired_skew = (sint64_t)target_guest_time - (sint64_t)guest_time;
skew = desired_skew > max_skew ? max_skew : desired_skew;
- PrintDebug("Guest %llu cycles behind where it should be.\n",
+ PrintDebug("Guest %lld cycles behind where it should be.\n",
desired_skew);
- PrintDebug("Limit on forward skew is %llu. Skewing forward %llu.\n",
+ PrintDebug("Limit on forward skew is %lld. Skewing forward %lld.\n",
max_skew, skew);
v3_offset_time(info, skew);
struct vm_time * time_state = &(info->time_state);
uint64_t guest_time, host_time;
- guest_time = v3_get_guest_time(time_state);
host_time = v3_get_host_time(time_state);
+ guest_time = v3_get_guest_time(time_state);
time_state->enter_time = host_time;
- time_state->guest_host_offset = guest_time - host_time;
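+ /* The offset is signed: guest time may run behind host time */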
+ time_state->guest_host_offset = (sint64_t)guest_time - (sint64_t)host_time;
return 0;
}
// Perform last-minute time bookkeeping prior to entering the VM
v3_time_enter_vm(info);
- tsc_offset_high = (uint32_t)((v3_tsc_host_offset(&info->time_state) >> 32) & 0xffffffff);
- tsc_offset_low = (uint32_t)(v3_tsc_host_offset(&info->time_state) & 0xffffffff);
- check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
- check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);
-
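+ // The TSC offset is no longer programmed into the VMCS on entry; the old writes are kept below for reference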
+ // tsc_offset_high = (uint32_t)((v3_tsc_host_offset(&info->time_state) >> 32) & 0xffffffff);
+ // tsc_offset_low = (uint32_t)(v3_tsc_host_offset(&info->time_state) & 0xffffffff);
+ // check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
+ // check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);
if (v3_update_vmcs_host_state(info)) {
v3_enable_ints();