static inline int check_vmcs_write(vmcs_field_t field, addr_t val) {
int ret = 0;
- ret = vmcs_write(field,val);
+ ret = vmcs_write(field, val);
if (ret != VMX_SUCCESS) {
PrintError("VMWRITE error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
return ret;
}
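/* The read path can be wrapped the same way. A minimal sketch (not part of
 * this patch), assuming vmcs_read() follows vmcs_write()'s return convention:
 */
static inline int check_vmcs_read(vmcs_field_t field, void * val) {
    int ret = vmcs_read(field, val);

    if (ret != VMX_SUCCESS) {
        PrintError("VMREAD error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
    }

    return ret;
}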
-#if 0
-// For the 32 bit reserved bit fields
-// MB1s are in the low 32 bits, MBZs are in the high 32 bits of the MSR
-static uint32_t sanitize_bits1(uint32_t msr_num, uint32_t val) {
- v3_msr_t mask_msr;
- PrintDebug("sanitize_bits1 (MSR:%x)\n", msr_num);
-
- v3_get_msr(msr_num, &mask_msr.hi, &mask_msr.lo);
-
- PrintDebug("MSR %x = %x : %x \n", msr_num, mask_msr.hi, mask_msr.lo);
-
- val |= mask_msr.lo;
- val |= mask_msr.hi;
-
- return val;
-}
-
-
-
-static addr_t sanitize_bits2(uint32_t msr_num0, uint32_t msr_num1, addr_t val) {
- v3_msr_t msr0, msr1;
- addr_t msr0_val, msr1_val;
-
- PrintDebug("sanitize_bits2 (MSR0=%x, MSR1=%x)\n", msr_num0, msr_num1);
-
- v3_get_msr(msr_num0, &msr0.hi, &msr0.lo);
- v3_get_msr(msr_num1, &msr1.hi, &msr1.lo);
-
- // This generates a mask that is the natural bit width of the CPU
- msr0_val = msr0.value;
- msr1_val = msr1.value;
-
- PrintDebug("MSR %x = %p, %x = %p \n", msr_num0, (void*)msr0_val, msr_num1, (void*)msr1_val);
-
- val |= msr0_val;
- val |= msr1_val;
-
- return val;
-}
-
-
-
-#endif
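/* Aside (not part of this patch): as written, the dead helpers above OR in
 * both halves of the mask, which would also turn on every allowed-1 bit.
 * Under the stated MB1-low/MBZ-high layout, a correct sanitizer would be:
 *
 *     val |= mask_msr.lo;   // force the must-be-1 bits on
 *     val &= mask_msr.hi;   // clear the must-be-zero bits
 */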
static addr_t allocate_vmcs() {
// Re-enable global interrupts now that VM state initialization is complete.
// If another VM kicks us off this core, it will update our VMX state so
// that we know to reload ourselves
- v3_disable_ints();
+ v3_enable_ints();
return 0;
}
// disable global interrupts for vm state transition
v3_disable_ints();
+
+ if (active_vmcs_ptrs[V3_Get_CPU()] != vmx_info->vmcs_ptr_phys) {
+ vmcs_load(vmx_info->vmcs_ptr_phys);
+ active_vmcs_ptrs[V3_Get_CPU()] = vmx_info->vmcs_ptr_phys;
+ }
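/* Sketch of the bookkeeping this relies on (not part of this patch): a
 * per-core record of which VMCS is currently loaded, e.g.
 *
 *     static addr_t active_vmcs_ptrs[CONFIG_MAX_CPUS] = { 0 };
 *
 * vmcs_load() executes VMPTRLD, a serializing operation, so checking the
 * cached pointer first avoids reloading a VMCS that is already active on
 * this core. CONFIG_MAX_CPUS is an assumed build-time constant.
 */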
+
v3_vmx_restore_vmcs(info);
check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);
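/* The two halves presumably come from splitting a 64-bit offset; a sketch
 * (not part of this patch), with the offset source as an assumption:
 *
 *     uint64_t tsc_offset      = v3_tsc_host_offset(&info->time_state);
 *     uint32_t tsc_offset_low  = (uint32_t)(tsc_offset & 0xffffffffULL);
 *     uint32_t tsc_offset_high = (uint32_t)((tsc_offset >> 32) & 0xffffffffULL);
 */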
- if (active_vmcs_ptrs[V3_Get_CPU()] != vmx_info->vmcs_ptr_phys) {
- vmcs_load(vmx_info->vmcs_ptr_phys);
- active_vmcs_ptrs[V3_Get_CPU()] = vmx_info->vmcs_ptr_phys;
- }
if (vmx_info->state == VMX_UNLAUNCHED) {
vmx_info->state = VMX_LAUNCHED;
update_irq_exit_state(info);
#endif
- // Handle any exits needed still in the atomic section
- if (v3_handle_vmx_exit(info, &exit_info) == -1) {
- PrintError("Error in atomic VMX exit handler\n");
- return -1;
+ if (exit_info.exit_reason == VMEXIT_INTR_WINDOW) {
+ // This is a special case whose only job is to inject an interrupt
+ vmcs_read(VMCS_PROC_CTRLS, &(vmx_info->pri_proc_ctrls.value));
+ vmx_info->pri_proc_ctrls.int_wndw_exit = 0;
+ vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
+
+#ifdef CONFIG_DEBUG_INTERRUPTS
+ PrintDebug("Interrupts available again! (RIP=%llx)\n", info->rip);
+#endif
}
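/* Arming side, for context (a sketch, not shown in this patch): injection
 * code that finds the guest unable to take a pending interrupt would set
 * the same bit, so the CPU exits as soon as the window opens:
 *
 *     vmcs_read(VMCS_PROC_CTRLS, &(vmx_info->pri_proc_ctrls.value));
 *     vmx_info->pri_proc_ctrls.int_wndw_exit = 1;
 *     vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
 */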
// reenable global interrupts after vm exit
}
static int has_vmx_nested_paging() {
+ /* We assume that EPT and unrestricted guest mode (Intel's Virtual Real Mode)
+  * always come as a pair, i.e. we have either both or neither.
+  */
+
return 0;
}
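/* When nested paging support lands, EPT availability could be probed from
 * the VMX capability MSRs. A sketch (hypothetical helper, not part of this
 * patch; 0x48B is IA32_VMX_PROCBASED_CTLS2):
 */
static int detect_ept_support() {
    v3_msr_t ctls2;

    v3_get_msr(0x48B, &(ctls2.hi), &(ctls2.lo));

    /* The high half holds the allowed-1 settings; bit 1 there is "enable EPT" */
    return (ctls2.hi & (1 << 1)) ? 1 : 0;
}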