WARN(!pgs, "Could not allocate pages\n");
- printk("%llu pages (order=%d) aquired from alloc_pages\n",
- num_pages, order);
+ /* printk("%llu pages (order=%d) acquired from alloc_pages\n",
+ num_pages, order); */
addr = page_to_pfn(pgs) << PAGE_SHIFT;
} else {
- printk("Allocating %llu pages from bitmap allocator\n", num_pages);
+ //printk("Allocating %llu pages from bitmap allocator\n", num_pages);
//addr = pool.base_addr;
addr = alloc_contig_pgs(num_pages, alignment);
}
- printk("Returning from alloc addr=%p, vaddr=%p\n", (void *)addr, __va(addr));
+ //printk("Returning from alloc addr=%p, vaddr=%p\n", (void *)addr, __va(addr));
return addr;
}
void free_palacios_pgs(uintptr_t pg_addr, int num_pages) {
- printk("Freeing Memory page %p\n", (void *)pg_addr);
+ //printk("Freeing Memory page %p\n", (void *)pg_addr);
if ((pg_addr >= pool.base_addr) &&
(pg_addr < pool.base_addr + (4096 * pool.num_pages))) {
help
Includes the Virtual APIC device
+config APIC_ENQUEUE_MISSED_TMR_IRQS
+ bool "Enqueue missed APIC timer interrpts"
+ default n
+ depends on APIC
+ help
+ Queue missed APIC periodic timer interrupts and inject them
+ on later entries into the virtual machine
config DEBUG_APIC
bool "APIC Debugging"
help
Enable debugging for the APIC
-
-
config IO_APIC
bool "IOAPIC"
depends on APIC
if (tmr_ticks < apic->tmr_cur_cnt) {
apic->tmr_cur_cnt -= tmr_ticks;
- if (apic->missed_ints) {
+#ifdef V3_CONFIG_APIC_ENQUEUE_MISSED_TMR_IRQS
+ if (apic->missed_ints && !apic_intr_pending(core, priv_data)) {
PrintDebug("apic %u: core %u: Injecting queued APIC timer interrupt.\n",
apic->lapic_id.val, core->vcpu_id);
apic_inject_timer_intr(core, priv_data);
apic->missed_ints--;
}
+#endif /* V3_CONFIG_APIC_ENQUEUE_MISSED_TMR_IRQS */
} else {
tmr_ticks -= apic->tmr_cur_cnt;
apic->tmr_cur_cnt = 0;
if (apic->tmr_vec_tbl.tmr_mode == APIC_TMR_PERIODIC) {
int queued_ints = tmr_ticks / apic->tmr_init_cnt;
- if (queued_ints)
- PrintDebug("apic %u: core %u: Deferring %u APIC timer interrupts.\n",
- apic->lapic_id.val, core->vcpu_id, queued_ints);
- apic->missed_ints += queued_ints;
tmr_ticks = tmr_ticks % apic->tmr_init_cnt;
apic->tmr_cur_cnt = apic->tmr_init_cnt - tmr_ticks;
+ apic->missed_ints += queued_ints;
}
}
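/* A worked example (a sketch, not part of the patch) of the periodic-mode
 * catch-up arithmetic above, assuming tmr_init_cnt = 1000 and
 * tmr_ticks = 3250 left over once the counter has hit zero:
 *
 *   queued_ints = 3250 / 1000 = 3    three full periods were missed
 *   tmr_ticks   = 3250 % 1000 = 250  progress already made into the next period
 *   tmr_cur_cnt = 1000 - 250  = 750  counter resumes mid-period
 *
 * The three queued interrupts are then injected one per pass through the
 * V3_CONFIG_APIC_ENQUEUE_MISSED_TMR_IRQS block above, and only when no
 * other APIC interrupt is already pending. */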
vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
addr_t exit_code = 0, exit_info1 = 0, exit_info2 = 0;
+ sint64_t tsc_offset;
// Conditionally yield the CPU if the timeslice has expired
v3_yield_cond(info);
#endif
v3_time_enter_vm(info);
- guest_ctrl->TSC_OFFSET = v3_tsc_host_offset(&info->time_state);
+ tsc_offset = v3_tsc_host_offset(&info->time_state);
+ guest_ctrl->TSC_OFFSET = tsc_offset;
//V3_Print("Calling v3_svm_launch\n");
cur_cycle = v3_get_host_time(&info->time_state);
if (cur_cycle > (info->yield_start_cycle + info->vm_info->yield_cycle_period)) {
-
- /*
- PrintDebug("Conditional Yield (cur_cyle=%p, start_cycle=%p, period=%p)\n",
- (void *)cur_cycle, (void *)info->yield_start_cycle, (void *)info->yield_cycle_period);
- */
+ //PrintDebug("Conditional Yield (cur_cycle=%p, start_cycle=%p, period=%p)\n",
+ // (void *)cur_cycle, (void *)info->yield_start_cycle,
+ // (void *)info->vm_info->yield_cycle_period);
+
V3_Yield();
info->yield_start_cycle = v3_get_host_time(&info->time_state);
}
int v3_offset_time( struct guest_info * info, sint64_t offset )
{
struct vm_time * time_state = &(info->time_state);
-// PrintDebug("Adding additional offset of %lld to guest time.\n", offset);
+ PrintDebug("Adding additional offset of %lld to guest time.\n", offset);
time_state->guest_host_offset += offset;
return 0;
}
/* We do *not* assume the guest timer was paused in the VM. If it was,
 * this offsetting is 0. If it wasn't, we need this. */
- v3_offset_time(info, (sint64_t)old_guest_time - (sint64_t)guest_time);
+ v3_offset_time(info, (sint64_t)(old_guest_time - guest_time));
return 0;
}
if (time_state->enter_time) {
/* Limit forward skew to 10% of the amount the guest has
* run since we last could skew time */
- max_skew = ((sint64_t)guest_time - (sint64_t)time_state->enter_time) / 10;
+ max_skew = (sint64_t)(guest_time - time_state->enter_time) / 10;
} else {
max_skew = 0;
}
- desired_skew = (sint64_t)target_guest_time - (sint64_t)guest_time;
+ desired_skew = (sint64_t)(target_guest_time - guest_time);
skew = desired_skew > max_skew ? max_skew : desired_skew;
PrintDebug("Guest %lld cycles behind where it should be.\n",
desired_skew);
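/* A worked example of the skew clamp (illustrative numbers, not from the
 * patch): if the guest has run 2,000,000 cycles since enter_time, then
 * max_skew = 200,000. A guest that is 500,000 cycles behind (desired_skew)
 * is moved forward by only 200,000 cycles now; the remainder is made up
 * on later passes, keeping the time adjustment gradual. */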
v3_time_enter_vm( struct guest_info * info )
{
struct vm_time * time_state = &(info->time_state);
- uint64_t guest_time, host_time;
+ uint64_t host_time;
host_time = v3_get_host_time(time_state);
- guest_time = v3_get_guest_time(time_state);
time_state->enter_time = host_time;
#ifdef V3_CONFIG_TIME_DILATION
- time_state->guest_host_offset = (sint64_t)guest_time - (sint64_t)host_time;
+ {
+ uint64_t guest_time;
+ sint64_t offset;
+ guest_time = v3_compute_guest_time(time_state, host_time);
+ // XXX we probably want to use an inline function to do these
+ // time differences to deal with sign and overflow carefully
+ offset = (sint64_t)guest_time - (sint64_t)host_time;
+ PrintDebug("v3_time_enter_vm: guest time offset %lld from host time.\n", offset);
+ time_state->guest_host_offset = offset;
+ }
#else
time_state->guest_host_offset = 0;
#endif
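/* One possible shape for the inline helper the XXX comment above asks for
 * (a sketch; the name v3_time_diff is hypothetical, not from this patch).
 * The subtraction is done in unsigned arithmetic, where wraparound is well
 * defined, and the result is cast to signed exactly once: */
static inline sint64_t v3_time_diff(uint64_t later, uint64_t earlier) {
    return (sint64_t)(later - earlier);
}
/* The (sint64_t)(a - b) rewrites elsewhere in this patch apply the same
 * pattern by hand. */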
void v3_update_timers(struct guest_info * info) {
struct vm_time *time_state = &info->time_state;
struct v3_timer * tmp_timer;
- uint64_t old_time = info->time_state.last_update;
sint64_t cycles;
+ uint64_t old_time = info->time_state.last_update;
time_state->last_update = v3_get_guest_time(time_state);
- cycles = time_state->last_update - old_time;
+ cycles = (sint64_t)(time_state->last_update - old_time);
V3_ASSERT(cycles >= 0);
// V3_Print("Updating timers with %lld elapsed cycles.\n", cycles);