// NOTE(review): this file is unified-diff residue — '+'/'-' line prefixes are
// patch markers, not C tokens, and context lines between hunks were collapsed.
#include <palacios/vmx_io.h>
#include <palacios/vmx_msr.h>
+#include <palacios/vmx_hw_info.h>
// PrintDebug is undefined when VMX debugging is off; presumably redefined as a
// no-op macro in the collapsed context that follows — TODO confirm.
#ifndef CONFIG_DEBUG_VMX
#undef PrintDebug
#endif
-static addr_t host_vmcs_ptrs[CONFIG_MAX_CPUS] = { [0 ... CONFIG_MAX_CPUS - 1] = 0};
+/* These fields contain the hardware feature sets supported by the local CPU */
+static struct vmx_hw_info hw_info;
+
+
// Per-CPU addresses: the currently-loaded VMCS and the VMXON region page.
static addr_t active_vmcs_ptrs[CONFIG_MAX_CPUS] = { [0 ... CONFIG_MAX_CPUS - 1] = 0};
+static addr_t host_vmcs_ptrs[CONFIG_MAX_CPUS] = { [0 ... CONFIG_MAX_CPUS - 1] = 0};
// Low-level VM entry primitives implemented elsewhere (assembly).
extern int v3_vmx_launch(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
extern int v3_vmx_resume(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
// Allocate one zeroed 4KB page to serve as a VMCS region, stamp it with the
// VMX revision identifier, and return its *physical* address.
// This patch switches the revision source from a fresh VMX_BASIC_MSR read to
// the hw_info cache populated at CPU init time.
static addr_t allocate_vmcs() {
- reg_ex_t msr;
struct vmcs_data * vmcs_page = NULL;
PrintDebug("Allocating page\n");
// V3_AllocPages returns a physical page; map to a VA before touching it.
vmcs_page = (struct vmcs_data *)V3_VAddr(V3_AllocPages(1));
memset(vmcs_page, 0, 4096);
- v3_get_msr(VMX_BASIC_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
-
- vmcs_page->revision = ((struct vmx_basic_msr*)&msr)->revision;
- PrintDebug("VMX Revision: 0x%x\n",vmcs_page->revision);
+ vmcs_page->revision = hw_info.basic_info.revision;
+ PrintDebug("VMX Revision: 0x%x\n", vmcs_page->revision);
// Hardware (VMPTRLD/VMXON) consumes physical addresses, so convert back.
return (addr_t)V3_PAddr((void *)vmcs_page);
}
// Build the initial BIOS-visible VMCS state for a guest core.
// NOTE(review): diff fragment — collapsed context is missing between hunks,
// so braces do not balance within this view.
static int init_vmcs_bios(struct guest_info * info, struct vmx_data * vmx_state) {
int vmx_ret = 0;
- struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);
// disable global interrupts for vm state initialization
v3_disable_ints();
}
+ /*** Setup default state from HW ***/
+
+ // Seed each control-field set with the hardware-reported default values
+ // cached in hw_info; specific exit/feature bits are OR'd in later.
+ vmx_state->pin_ctrls.value = hw_info.pin_ctrls.def_val;
+ vmx_state->pri_proc_ctrls.value = hw_info.proc_ctrls.def_val;
+ vmx_state->exit_ctrls.value = hw_info.exit_ctrls.def_val;
+ vmx_state->entry_ctrls.value = hw_info.entry_ctrls.def_val;
+
+ /* Print Control MSRs */
+ PrintDebug("CR0 MSR: %p\n", (void *)(addr_t)hw_info.cr0.value);
+ PrintDebug("CR4 MSR: %p\n", (void *)(addr_t)hw_info.cr4.value);
+
+
/******* Setup Host State **********/
/********** Setup and VMX Control Fields from MSR ***********/
- /* Setup IO map */
- struct v3_msr tmp_msr;
-
- v3_get_msr(VMX_PINBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
-
/* Add external interrupts, NMI exiting, and virtual NMI */
- vmx_state->pin_ctrls.value = tmp_msr.lo;
vmx_state->pin_ctrls.nmi_exit = 1;
vmx_state->pin_ctrls.ext_int_exit = 1;
// The per-MSR tmp_msr reads are removed by this patch; the .value defaults
// now come from the hw_info cache (the "Setup default state from HW" hunk).
- v3_get_msr(VMX_PROCBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
-
- vmx_state->pri_proc_ctrls.value = tmp_msr.lo;
// Force exits for IO (via bitmap), HLT, INVLPG, and RDTSC.
vmx_state->pri_proc_ctrls.use_io_bitmap = 1;
vmx_state->pri_proc_ctrls.hlt_exit = 1;
vmx_state->pri_proc_ctrls.invlpg_exit = 1;
vmx_state->pri_proc_ctrls.rdtsc_exit = 1;
#endif
+ /* Setup IO map */
// IO bitmaps A and B are two consecutive 4KB pages (ports 0-0x7fff / 0x8000-0xffff).
vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_A_ADDR, (addr_t)V3_PAddr(info->vm_info->io_map.arch_data));
vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_B_ADDR,
(addr_t)V3_PAddr(info->vm_info->io_map.arch_data) + PAGE_SIZE_4KB);
vmx_ret |= check_vmcs_write(VMCS_MSR_BITMAP, (addr_t)V3_PAddr(info->vm_info->msr_map.arch_data));
- v3_get_msr(VMX_EXIT_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- vmx_state->exit_ctrls.value = tmp_msr.lo;
+
vmx_state->exit_ctrls.host_64_on = 1;
// If the hardware defaults allow saving/loading EFER on exit, IA32e (long
// mode) support is available to the guest.
if ((vmx_state->exit_ctrls.save_efer == 1) || (vmx_state->exit_ctrls.ld_efer == 1)) {
vmx_state->ia32e_avail = 1;
}
- v3_get_msr(VMX_ENTRY_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- vmx_state->entry_ctrls.value = tmp_msr.lo;
// The local exception-bitmap temporary is removed; the patch tracks the
// bitmap in vmx_state->excp_bmap instead (set in a later hunk).
- {
- struct vmx_exception_bitmap excp_bmap;
- excp_bmap.value = 0;
-
- excp_bmap.pf = 1;
-
- vmx_ret |= check_vmcs_write(VMCS_EXCP_BITMAP, excp_bmap.value);
- }
/******* Setup VMXAssist guest state ***********/
// Guest begins executing VMXAssist at 0xd0000 with its stack at 0x80000.
info->rip = 0xd0000;
info->vm_regs.rsp = 0x80000;
-
- struct rflags * flags = (struct rflags *)&(info->ctrl_regs.rflags);
- flags->rsvd1 = 1;
-
- /* Print Control MSRs */
- v3_get_msr(VMX_CR0_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- PrintDebug("CR0 MSR: %p\n", (void *)(addr_t)tmp_msr.value);
-
- v3_get_msr(VMX_CR4_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- PrintDebug("CR4 MSR: %p\n", (void *)(addr_t)tmp_msr.value);
-
+ // RFLAGS bit 1 is architecturally fixed to 1. ctrl_regs.rflags is a plain
+ // register value (see the removed lines above), so it must be accessed
+ // through a cast of its address — not dereferenced as a pointer.
+ ((struct rflags *)&(info->ctrl_regs.rflags))->rsvd1 = 1;
#define GUEST_CR0 0x80000031
#define GUEST_CR4 0x00002000
/* Add CR exits */
vmx_state->pri_proc_ctrls.cr3_ld_exit = 1;
vmx_state->pri_proc_ctrls.cr3_str_exit = 1;
+
+ /* Add page fault exits */
+ vmx_state->excp_bmap.pf = 1;
}
// Setup segment registers
}
// Copy the VMXAssist blob into guest memory at its load address.
memcpy((void *)vmxassist_dst, v3_vmxassist_start, v3_vmxassist_end - v3_vmxassist_start);
+
+
+ vmx_state->assist_state = VMXASSIST_DISABLED;
}
- /*** Write all the info to the VMCS ***/
+
+
+ /* Sanity check ctrl/reg fields against hw_defaults */
+
+
+
+ /*** Write all the info to the VMCS ***/
+
+ {
#define DEBUGCTL_MSR 0x1d9
- v3_get_msr(DEBUGCTL_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- vmx_ret |= check_vmcs_write(VMCS_GUEST_DBG_CTL, tmp_msr.value);
// Scoped block: mirror the host's IA32_DEBUGCTL into the guest VMCS and set
// DR7 to its architectural reset value (0x400).
+ struct v3_msr tmp_msr;
+ v3_get_msr(DEBUGCTL_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_DBG_CTL, tmp_msr.value);
+ info->dbg_regs.dr7 = 0x400;
+ }
- info->dbg_regs.dr7 = 0x400;
#ifdef __V3_64BIT__
// With no shadow VMCS in use, the link pointer must be all ones.
vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, (addr_t)0xffffffffffffffffULL);
vmx_ret |= check_vmcs_write(VMCS_LINK_PTR_HIGH, (addr_t)0xffffffffUL);
#endif
+
+
+
if (v3_update_vmcs_ctrl_fields(info)) {
PrintError("Could not write control fields!\n");
return -1;
}
- vmx_state->assist_state = VMXASSIST_DISABLED;
// reenable global interrupts for vm state initialization now
// that the vm state is initialized. If another VM kicks us off,
int vmx_ret = 0;
// NOTE(review): V3_Malloc result is memset without a NULL check — confirm
// the allocator aborts on OOM rather than returning NULL.
vmx_state = (struct vmx_data *)V3_Malloc(sizeof(struct vmx_data));
+ memset(vmx_state, 0, sizeof(struct vmx_data));
PrintDebug("vmx_data pointer: %p\n", (void *)vmx_state);
}
+
+
+#define VMX_FEATURE_CONTROL_MSR 0x0000003a
+#define CPUID_VMX_FEATURES 0x00000005 /* LOCK and VMXON */
+#define CPUID_1_ECX_VTXFLAG 0x00000020
+
// Returns 1 if VMX is usable on this CPU, 0 otherwise.
int v3_is_vmx_capable() {
v3_msr_t feature_msr;
uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
// NOTE(review): feature_msr is read from IA32_FEATURE_CONTROL and the CPUID
// VMX flag is checked in collapsed context lines not visible here — confirm.
PrintDebug("MSRREGlow: 0x%.8x\n", feature_msr.lo);
// Both the lock bit and the VMXON-enable bit must be set by firmware.
- if ((feature_msr.lo & FEATURE_CONTROL_VALID) != FEATURE_CONTROL_VALID) {
+ if ((feature_msr.lo & CPUID_VMX_FEATURES) != CPUID_VMX_FEATURES) {
PrintDebug("VMX is locked -- enable in the BIOS\n");
return 0;
}
return 1;
}
// The EPT/nested-paging probe stub is removed; all CPUs are now reported as
// plain V3_VMX_CPU (see v3_init_vmx_cpu).
-static int has_vmx_nested_paging() {
- /* We assume that both EPT and unrestricted guest mode (Intel's Virtual Real Mode)
- * are mutually assured. i.e. We have either both or neither.
- */
-
- return 0;
-}
// Per-CPU VMX bring-up: probe hardware features (once, on cpu 0), enable VMX
// operation, execute VMXON, and record the CPU type.
void v3_init_vmx_cpu(int cpu_id) {
extern v3_cpu_arch_t v3_cpu_types[];
// The hand-rolled CR0/CR4 inline assembly below is removed by this patch;
// presumably enable_vmx() (defined in collapsed context) now performs the
// CR4.VMXE / CR0 setup — TODO confirm.
- struct v3_msr tmp_msr;
- uint64_t ret = 0;
-
- v3_get_msr(VMX_CR4_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
-
-#ifdef __V3_64BIT__
- __asm__ __volatile__ (
- "movq %%cr4, %%rbx;"
- "orq $0x00002000, %%rbx;"
- "movq %%rbx, %0;"
- : "=m"(ret)
- :
- : "%rbx"
- );
-
- if ((~ret & tmp_msr.value) == 0) {
- __asm__ __volatile__ (
- "movq %0, %%cr4;"
- :
- : "q"(ret)
- );
- } else {
- PrintError("Invalid CR4 Settings!\n");
- return;
- }
- __asm__ __volatile__ (
- "movq %%cr0, %%rbx; "
- "orq $0x00000020,%%rbx; "
- "movq %%rbx, %%cr0;"
- :
- :
- : "%rbx"
- );
-#elif __V3_32BIT__
- __asm__ __volatile__ (
- "movl %%cr4, %%ecx;"
- "orl $0x00002000, %%ecx;"
- "movl %%ecx, %0;"
- : "=m"(ret)
- :
- : "%ecx"
- );
-
- if ((~ret & tmp_msr.value) == 0) {
- __asm__ __volatile__ (
- "movl %0, %%cr4;"
- :
- : "q"(ret)
- );
- } else {
- PrintError("Invalid CR4 Settings!\n");
- return;
// Hardware feature discovery runs once, on the first CPU only.
// NOTE(review): this assumes cpu 0 is initialized before the others — the
// other CPUs read hw_info without any synchronization; verify call order.
+ if (cpu_id == 0) {
+ if (v3_init_vmx_hw(&hw_info) == -1) {
+ PrintError("Could not initialize VMX hardware features on cpu %d\n", cpu_id);
+ return;
+ }
}
- __asm__ __volatile__ (
- "movl %%cr0, %%ecx; "
- "orl $0x00000020,%%ecx; "
- "movl %%ecx, %%cr0;"
- :
- :
- : "%ecx"
- );
-
-#endif
- //
- // Should check and return Error here....
+ enable_vmx();
// Setup VMXON Region
// NOTE(review): the allocation of host_vmcs_ptrs[cpu_id] (via allocate_vmcs)
// appears to be in collapsed context above this line — confirm.
PrintDebug("VMXON pointer: 0x%p\n", (void *)host_vmcs_ptrs[cpu_id]);
- if (v3_enable_vmx(host_vmcs_ptrs[cpu_id]) == VMX_SUCCESS) {
+ if (vmx_on(host_vmcs_ptrs[cpu_id]) == VMX_SUCCESS) {
PrintDebug("VMX Enabled\n");
} else {
PrintError("VMX initialization failure\n");
}
// EPT probing is dropped along with has_vmx_nested_paging(); every CPU is
// now registered as a plain (shadow-paging) VMX CPU.
- if (has_vmx_nested_paging() == 1) {
- v3_cpu_types[cpu_id] = V3_VMX_EPT_CPU;
- } else {
- v3_cpu_types[cpu_id] = V3_VMX_CPU;
- }
+ v3_cpu_types[cpu_id] = V3_VMX_CPU;
+
}