menu "Time Management"
-config VIRTUALIZE_TIME
- bool "Enable Time virtualization"
+config TIME_DILATION
+ bool "Control Guest/Host Time Offsetting"
default n
+ depends on EXPERIMENTAL
help
- Enables the timer virtualization extensions
-
+ Controls the relative speeds of the guest and host processor
+ to allow the VM to provide the illusion of the guest seeing time
+ pass at a different rate than the host system does.
+
config TIME_HIDE_VM_COST
bool "Hide VMM Run Cost"
default n
- depends on VIRTUALIZE_TIME
+ depends on EXPERIMENTAL
help
Offset guest time from host time sufficiently to hide the cost of
running in the virtual machine. This can aid the consistency of
config TIME_VIRTUALIZE_TSC
bool "Fully virtualize guest TSC"
default n
- depends on VIRTUALIZE_TIME
+ depends on EXPERIMENTAL
help
Virtualize the processor time stamp counter in the guest,
generally increasing consistency between various time sources
but also potentially making guest time run slower than real time.
-
+
endmenu
return 0;
}
-
static int keyed_stream_ioctl_user(struct inode *inode, struct file *filp, unsigned int ioctl, unsigned long arg)
{
void __user *argp = (void __user *)arg;
}
}
+static long keyed_stream_compat_ioctl_user(struct file * filp, unsigned int ioctl, unsigned long arg)
+{
+ return keyed_stream_ioctl_user(NULL, filp, ioctl, arg);
+}
+
static int keyed_stream_release_user(struct inode *inode, struct file *filp)
{
struct user_keyed_stream *s = filp->private_data;
static struct file_operations user_keyed_stream_fops = {
.poll = keyed_stream_poll_user,
+#ifdef HAVE_COMPAT_IOCTL
+ .compat_ioctl = keyed_stream_compat_ioctl_user,
+#else
.ioctl = keyed_stream_ioctl_user,
+#endif
.release = keyed_stream_release_user,
};
WARN(!pgs, "Could not allocate pages\n");
- printk("%llu pages (order=%d) aquired from alloc_pages\n",
- num_pages, order);
+ /* printk("%llu pages (order=%d) acquired from alloc_pages\n",
+ num_pages, order); */
addr = page_to_pfn(pgs) << PAGE_SHIFT;
} else {
- printk("Allocating %llu pages from bitmap allocator\n", num_pages);
+ //printk("Allocating %llu pages from bitmap allocator\n", num_pages);
//addr = pool.base_addr;
addr = alloc_contig_pgs(num_pages, alignment);
}
- printk("Returning from alloc addr=%p, vaddr=%p\n", (void *)addr, __va(addr));
+ //printk("Returning from alloc addr=%p, vaddr=%p\n", (void *)addr, __va(addr));
return addr;
}
void free_palacios_pgs(uintptr_t pg_addr, int num_pages) {
- printk("Freeing Memory page %p\n", (void *)pg_addr);
+ //printk("Freeing Memory page %p\n", (void *)pg_addr);
if ((pg_addr >= pool.base_addr) &&
(pg_addr < pool.base_addr + (4096 * pool.num_pages))) {
case XOR_MEM2_8:
case XOR_IMM2_8:
case INC_8:
+ case INT:
case DEC_8:
case NEG_8:
case NOT_8:
return -1;
}
- case INT:
+ //case INT:
case MOV_DR2:
case MOV_2DR:
case MOV_CR2:
}
// Returns *monotonic* guest time.
-static inline uint64_t v3_get_guest_time(struct vm_time *t) {
+static inline uint64_t v3_compute_guest_time(struct vm_time *t, uint64_t ht) {
#ifdef V3_CONFIG_TIME_HIDE_VM_COST
V3_ASSERT(t->exit_time);
return t->exit_time + t->guest_host_offset;
#endif
}
+static inline uint64_t v3_get_guest_time(struct vm_time *t) {
+ return v3_compute_guest_time(t, v3_get_host_time(t));
+}
+
// Returns the TSC value seen by the guest
+static inline uint64_t v3_compute_guest_tsc(struct vm_time *t, uint64_t ht) {
+ return v3_compute_guest_time(t, ht) + t->tsc_guest_offset;
+}
+
static inline uint64_t v3_get_guest_tsc(struct vm_time *t) {
- return v3_get_guest_time(t) + t->tsc_guest_offset;
+ return v3_compute_guest_tsc(t, v3_get_host_time(t));
}
// Returns offset of guest TSC from host TSC
help
Includes the Virtual APIC device
+config APIC_ENQUEUE_MISSED_TMR_IRQS
+ bool "Enqueue missed APIC timer interrupts"
+ default n
+ depends on APIC
+ help
+ Make up missed APIC periodic timer interrupts on later
+ exits into the virtual machine
config DEBUG_APIC
bool "APIC Debugging"
help
Enable debugging for the APIC
-
-
config IO_APIC
bool "IOAPIC"
depends on APIC
struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
// raise irq
- PrintDebug("apic %u: core %u: Raising APIC Timer interrupt (periodic=%d) (icnt=%d) (div=%d)\n",
+ PrintDebug("apic %u: core %u: Raising APIC Timer interrupt (periodic=%d) (icnt=%d)\n",
apic->lapic_id.val, core->vcpu_id,
- apic->tmr_vec_tbl.tmr_mode, apic->tmr_init_cnt, shift_num);
+ apic->tmr_vec_tbl.tmr_mode, apic->tmr_init_cnt);
if (apic_intr_pending(core, priv_data)) {
PrintDebug("apic %u: core %u: Overriding pending IRQ %d\n",
if (tmr_ticks < apic->tmr_cur_cnt) {
apic->tmr_cur_cnt -= tmr_ticks;
- if (apic->missed_ints) {
+#ifdef V3_CONFIG_APIC_ENQUEUE_MISSED_TMR_IRQS
+ if (apic->missed_ints && !apic_intr_pending(core, priv_data)) {
+ PrintDebug("apic %u: core %u: Injecting queued APIC timer interrupt.\n",
+ apic->lapic_id.val, core->vcpu_id);
apic_inject_timer_intr(core, priv_data);
apic->missed_ints--;
}
+#endif /* CONFIG_APIC_ENQUEUE_MISSED_TMR_IRQS */
} else {
tmr_ticks -= apic->tmr_cur_cnt;
apic->tmr_cur_cnt = 0;
apic_inject_timer_intr(core, priv_data);
if (apic->tmr_vec_tbl.tmr_mode == APIC_TMR_PERIODIC) {
- apic->missed_ints += tmr_ticks / apic->tmr_init_cnt;
+ int queued_ints = tmr_ticks / apic->tmr_init_cnt;
tmr_ticks = tmr_ticks % apic->tmr_init_cnt;
apic->tmr_cur_cnt = apic->tmr_init_cnt - tmr_ticks;
+ apic->missed_ints += queued_ints;
}
}
vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
addr_t exit_code = 0, exit_info1 = 0, exit_info2 = 0;
+ sint64_t tsc_offset;
// Conditionally yield the CPU if the timeslice has expired
v3_yield_cond(info);
#endif
v3_time_enter_vm(info);
- // guest_ctrl->TSC_OFFSET = v3_tsc_host_offset(&info->time_state);
+ tsc_offset = v3_tsc_host_offset(&info->time_state);
+ guest_ctrl->TSC_OFFSET = tsc_offset;
//V3_Print("Calling v3_svm_launch\n");
cur_cycle = v3_get_host_time(&info->time_state);
if (cur_cycle > (info->yield_start_cycle + info->vm_info->yield_cycle_period)) {
-
- /*
- PrintDebug("Conditional Yield (cur_cyle=%p, start_cycle=%p, period=%p)\n",
- (void *)cur_cycle, (void *)info->yield_start_cycle, (void *)info->yield_cycle_period);
- */
+ //PrintDebug("Conditional Yield (cur_cycle=%p, start_cycle=%p, period=%p)\n",
+ // (void *)cur_cycle, (void *)info->yield_start_cycle,
+ // (void *)info->yield_cycle_period);
+
V3_Yield();
info->yield_start_cycle = v3_get_host_time(&info->time_state);
}
int v3_offset_time( struct guest_info * info, sint64_t offset )
{
struct vm_time * time_state = &(info->time_state);
-// PrintDebug("Adding additional offset of %lld to guest time.\n", offset);
+ PrintDebug("Adding additional offset of %lld to guest time.\n", offset);
time_state->guest_host_offset += offset;
return 0;
}
-static uint64_t compute_target_host_time(struct guest_info * info)
+#ifdef V3_CONFIG_TIME_DILATION
+static uint64_t compute_target_host_time(struct guest_info * info, uint64_t guest_time)
{
struct vm_time * time_state = &(info->time_state);
uint64_t guest_elapsed, desired_elapsed;
- guest_elapsed = (v3_get_guest_time(time_state) - time_state->initial_time);
+ guest_elapsed = (guest_time - time_state->initial_time);
desired_elapsed = (guest_elapsed * time_state->host_cpu_freq) / time_state->guest_cpu_freq;
return time_state->initial_time + desired_elapsed;
}
uint64_t host_time, target_host_time;
uint64_t guest_time, old_guest_time;
- /* Compute the target host time given how much time has *already*
- * passed in the guest */
- target_host_time = compute_target_host_time(info);
-
/* Now, let the host run while the guest is stopped to make the two
* sync up. Note that this doesn't assume that guest time is stopped;
* the offsetting in the next step will change add an offset to guest
* time to account for the time paused even if the geust isn't
* usually paused in the VMM. */
host_time = v3_get_host_time(time_state);
- old_guest_time = v3_get_guest_time(time_state);
+ old_guest_time = v3_compute_guest_time(time_state, host_time);
+ target_host_time = compute_target_host_time(info, old_guest_time);
while (target_host_time > host_time) {
v3_yield(info);
host_time = v3_get_host_time(time_state);
}
- guest_time = v3_get_guest_time(time_state);
+ guest_time = v3_compute_guest_time(time_state, host_time);
/* We do *not* assume the guest timer was paused in the VM. If it was
* this offseting is 0. If it wasn't, we need this. */
- v3_offset_time(info, (sint64_t)old_guest_time - (sint64_t)guest_time);
+ v3_offset_time(info, (sint64_t)(old_guest_time - guest_time));
return 0;
}
if (time_state->enter_time) {
/* Limit forward skew to 10% of the amount the guest has
* run since we last could skew time */
- max_skew = ((sint64_t)guest_time - (sint64_t)time_state->enter_time) / 10;
+ max_skew = (sint64_t)(guest_time - time_state->enter_time) / 10;
} else {
max_skew = 0;
}
- desired_skew = (sint64_t)target_guest_time - (sint64_t)guest_time;
+ desired_skew = (sint64_t)(target_guest_time - guest_time);
skew = desired_skew > max_skew ? max_skew : desired_skew;
PrintDebug("Guest %lld cycles behind where it should be.\n",
desired_skew);
return 0;
}
+#endif /* V3_CONFIG_TIME_DILATION */
// Control guest time in relation to host time so that the two stay
// appropriately synchronized to the extent possible.
int v3_adjust_time(struct guest_info * info) {
+#ifdef V3_CONFIG_TIME_DILATION
/* First deal with yielding if we want to slow down the guest */
yield_host_time(info);
* or because the VMM is doing something that takes a long time to emulate)
* allow guest time to jump forward a bit */
skew_guest_time(info);
-
+#endif
return 0;
}
v3_time_enter_vm( struct guest_info * info )
{
struct vm_time * time_state = &(info->time_state);
- uint64_t guest_time, host_time;
+ uint64_t host_time;
host_time = v3_get_host_time(time_state);
- guest_time = v3_get_guest_time(time_state);
time_state->enter_time = host_time;
- time_state->guest_host_offset = (sint64_t)guest_time - (sint64_t)host_time;
+#ifdef V3_CONFIG_TIME_DILATION
+ {
+ uint64_t guest_time;
+ sint64_t offset;
+ guest_time = v3_compute_guest_time(time_state, host_time);
+ // XXX we probably want to use an inline function to do these
+ // time differences to deal with sign and overflow carefully
+ offset = (sint64_t)guest_time - (sint64_t)host_time;
+ PrintDebug("v3_time_enter_vm: guest time offset %lld from host time.\n", offset);
+ time_state->guest_host_offset = offset;
+ }
+#else
+ time_state->guest_host_offset = 0;
+#endif
return 0;
}
void v3_update_timers(struct guest_info * info) {
struct vm_time *time_state = &info->time_state;
struct v3_timer * tmp_timer;
- uint64_t old_time = info->time_state.last_update;
sint64_t cycles;
+ uint64_t old_time = info->time_state.last_update;
time_state->last_update = v3_get_guest_time(time_state);
- cycles = time_state->last_update - old_time;
+ cycles = (sint64_t)(time_state->last_update - old_time);
V3_ASSERT(cycles >= 0);
// V3_Print("Updating timers with %lld elapsed cycles.\n", cycles);
break;
}
+ case INT: {
+ instr->dst_operand.type = IMM_OPERAND;
+ instr->dst_operand.size = operand_width;
+ instr->dst_operand.operand = *(uint8_t *)instr_ptr;
+ instr_ptr += operand_width;
+ instr->num_operands = 1;
+
+ break;
+ }
case INVLPG: {
uint8_t reg_code = 0;
case INVLPG:
return V3_OP_INVLPG;
- case INT:
+ case INT:
return V3_OP_INT;
case MOV_CR2:
*/
int v3_vmx_enter(struct guest_info * info) {
int ret = 0;
- //uint32_t tsc_offset_low, tsc_offset_high;
+ uint32_t tsc_offset_low, tsc_offset_high;
struct vmx_exit_info exit_info;
struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);
// Perform last-minute time bookkeeping prior to entering the VM
v3_time_enter_vm(info);
- // tsc_offset_high = (uint32_t)((v3_tsc_host_offset(&info->time_state) >> 32) & 0xffffffff);
- // tsc_offset_low = (uint32_t)(v3_tsc_host_offset(&info->time_state) & 0xffffffff);
- // check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
- // check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);
+ tsc_offset_high = (uint32_t)((v3_tsc_host_offset(&info->time_state) >> 32) & 0xffffffff);
+ tsc_offset_low = (uint32_t)(v3_tsc_host_offset(&info->time_state) & 0xffffffff);
+ check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
+ check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);
if (v3_update_vmcs_host_state(info)) {
v3_enable_ints();
<device class="8254_PIT" id="PIT" />
<device class="BOCHS_DEBUG" id="bochs debug"/>
<device class="OS_DEBUG" id="os debug" />
-
-
-<!--
<device class="LAPIC" id="apic"/>
<device class="IOAPIC" id="ioapic">
<apic>apic</apic>
</device>
--->
<!--
<device class="CGA_VIDEO" id="cga" passthrough="enable" />
<device class="TELNET_CONSOLE" id="telnet console">