vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);
+ // Setup Guest's initial PAT field
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_PAT, 0x0007040600070406LL);
+
/* Setup paging */
if (core->shdw_pg_mode == SHADOW_PAGING) {
PrintDebug("Creating initial shadow page table\n");
core->rip = 0xfff0;
core->vm_regs.rdx = 0x00000f00;
core->ctrl_regs.rflags = 0x00000002; // The reserved bit is always 1
- core->ctrl_regs.cr0 = 0x60010010; // Set the WP flag so the memory hooks work in real-mode
-
+ core->ctrl_regs.cr0 = 0x00000030;
+ core->ctrl_regs.cr4 = 0x00002010; // Enable VMX and PSE flag
+
core->segments.cs.selector = 0xf000;
core->segments.cs.limit = 0xffff;
core->segments.ldtr.selector = 0x0000;
core->segments.ldtr.limit = 0x0000ffff;
core->segments.ldtr.base = 0x0000000000000000LL;
- core->segments.ldtr.type = 2;
+ core->segments.ldtr.type = 0x2;
core->segments.ldtr.present = 1;
core->segments.tr.selector = 0x0000;
}
+/*
+ * v3_vmx_schedule_timeout() - program the VMX preemption timer so that the
+ * guest exits at (or before) its next scheduled timeout.
+ *
+ * If the hardware does not let us toggle the active-preemption-timer pin
+ * control, this is a no-op. Otherwise the timer and pin controls in the
+ * VMCS are updated to reflect the next timeout (if any).
+ *
+ * @param info  the guest core whose timeout state is consulted and whose
+ *              VMCS is programmed.
+ * @return always 0 (including the unsupported-hardware case).
+ */
+int
+v3_vmx_schedule_timeout(struct guest_info * info)
+{
+    struct vmx_data * vmx_state = (struct vmx_data *)(info->vmm_data);
+    sint64_t cycles;
+    uint32_t timeout;
+
+    /* Check if the hardware supports an active timeout */
+#define VMX_ACTIVE_PREEMPT_TIMER_PIN 0x40
+    if (hw_info.pin_ctrls.req_mask & VMX_ACTIVE_PREEMPT_TIMER_PIN) {
+	/* The hardware doesn't support us modifying this pin control */
+	return 0;
+    }
+
+    /* Check if we have one to schedule and schedule it if we do */
+    /* cycles remaining until the next timeout; negative if it already passed */
+    cycles = (sint64_t)info->time_state.next_timeout - (sint64_t)v3_get_guest_time(&info->time_state);
+    if (info->time_state.next_timeout == (ullong_t) -1) {
+	/* No timeout pending: disable the preemption timer entirely */
+	timeout = 0;
+	vmx_state->pin_ctrls.active_preempt_timer = 0;
+    } else if (cycles < 0) {
+	/* set the timeout to 0 to force an immediate re-exit since it expired between
+	 * when we checked a timeout and now. IF SOMEONE CONTINUALLY SETS A SHORT TIMEOUT,
+	 * THIS CAN LOCK US OUT OF THE GUEST! */
+	timeout = 0;
+	vmx_state->pin_ctrls.active_preempt_timer = 1;
+    } else {
+	/* The hardware supports scheduling a timeout, and we have one to
+	 * schedule */
+	/* NOTE(review): the cast binds tighter than the shift, so cycles is
+	 * truncated to 32 bits BEFORE being scaled down by 2^tsc_multiple.
+	 * Large cycle counts therefore wrap; confirm whether
+	 * (uint32_t)(cycles >> hw_info.misc_info.tsc_multiple) was intended. */
+	timeout = (uint32_t)cycles >> hw_info.misc_info.tsc_multiple;
+	vmx_state->pin_ctrls.active_preempt_timer = 1;
+    }
+
+    /* Actually program the timer based on the settings above. */
+    check_vmcs_write(VMCS_PREEMPT_TIMER, timeout);
+    check_vmcs_write(VMCS_PIN_CTRLS, vmx_state->pin_ctrls.value);
+    return 0;
+}
+
/*
* CAUTION and DANGER!!!
*
// Perform any additional yielding needed for time adjustment
v3_adjust_time(info);
+ // Check for timeout - since this calls generic hooks in devices
+ // that may do things like pause the VM, it cannot be called with
+ // interrupts disabled.
+ v3_check_timeout(info);
+
// disable global interrupts for vm state transition
v3_disable_ints();
// Update timer devices late after being in the VM so that as much
- // of hte time in the VM is accounted for as possible. Also do it before
+ // of the time in the VM is accounted for as possible. Also do it before
// updating IRQ entry state so that any interrupts the timers raise get
// handled on the next VM entry. Must be done with interrupts disabled.
v3_update_timers(info);
vmcs_write(VMCS_GUEST_CR3, guest_cr3);
}
+ // Update vmx active preemption timer to exit at the next timeout if
+ // the hardware supports it.
+ v3_vmx_schedule_timeout(info);
+
// Perform last-minute time bookkeeping prior to entering the VM
v3_time_enter_vm(info);