/******* Setup Host State **********/
/* Cache GDTR, IDTR, and TR in host struct */
- addr_t gdtr_base;
- struct {
- uint16_t selector;
- addr_t base;
- } __attribute__((packed)) tmp_seg;
-
-
- __asm__ __volatile__(
- "sgdt (%0);"
- :
- : "q"(&tmp_seg)
- : "memory"
- );
- gdtr_base = tmp_seg.base;
- vmx_state->host_state.gdtr.base = gdtr_base;
-
- __asm__ __volatile__(
- "sidt (%0);"
- :
- : "q"(&tmp_seg)
- : "memory"
- );
- vmx_state->host_state.idtr.base = tmp_seg.base;
-
- __asm__ __volatile__(
- "str (%0);"
- :
- : "q"(&tmp_seg)
- : "memory"
- );
- vmx_state->host_state.tr.selector = tmp_seg.selector;
-
-    /* The GDT *index* is bits 3-15 of the selector. */
- struct tss_descriptor * desc = NULL;
- desc = (struct tss_descriptor *)(gdtr_base + (8 * (tmp_seg.selector >> 3)));
-
- tmp_seg.base = ((desc->base1) |
- (desc->base2 << 16) |
- (desc->base3 << 24) |
-#ifdef __V3_64BIT__
- ((uint64_t)desc->base4 << 32)
-#else
- (0)
-#endif
- );
-
- vmx_state->host_state.tr.base = tmp_seg.base;
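
/* For reference: SGDT/SIDT store a pseudo-descriptor (a 16-bit limit
 * followed by the base), so tmp_seg's 16-bit field receives the limit
 * there, while STR stores only the 16-bit task-register selector. The
 * TR base computation above then assumes the standard x86 system (TSS)
 * descriptor layout, sketched here with names matching the base1..base4
 * usage; this is a sketch, not a definition confirmed by this diff:
 *
 *   struct tss_descriptor {
 *       uint16_t limit1;   // limit bits 15:0
 *       uint16_t base1;    // base bits 15:0
 *       uint8_t  base2;    // base bits 23:16
 *       uint8_t  attrib1;  // type, S=0, DPL, present
 *       uint8_t  attrib2;  // limit bits 19:16, AVL, granularity
 *       uint8_t  base3;    // base bits 31:24
 * #ifdef __V3_64BIT__
 *       uint32_t base4;    // base bits 63:32 (16-byte descriptor in long mode)
 *       uint32_t rsvd;
 * #endif
 *   } __attribute__((packed));
 */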
/********** Setup VMX Control Fields ***********/
-
-
-
-
#ifdef __V3_64BIT__
// Ensure host runs in 64-bit mode at each VM EXIT
vmx_state->exit_ctrls.host_64_on = 1;
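    // (host_64_on presumably maps to the "host address-space size" VM-exit
    //  control, bit 9; if it were clear, the CPU would return to 32-bit
    //  mode on each exit.)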
// save STAR, LSTAR, FMASK, KERNEL_GS_BASE MSRs in MSR load/store area
{
-#define IA32_STAR 0xc0000081
-#define IA32_LSTAR 0xc0000082
-#define IA32_FMASK 0xc0000084
-#define IA32_KERN_GS_BASE 0xc0000102
-#define IA32_CSTAR 0xc0000083 // Compatibility mode STAR (ignored for now... hopefully its not that important...)
-
- int msr_ret = 0;
-
- struct vmcs_msr_entry * exit_store_msrs = NULL;
- struct vmcs_msr_entry * exit_load_msrs = NULL;
- struct vmcs_msr_entry * entry_load_msrs = NULL;;
+ struct vmcs_msr_save_area * msr_entries = NULL;
int max_msrs = (hw_info.misc_info.max_msr_cache_size + 1) * 4;
+ int msr_ret = 0;
V3_Print("Setting up MSR load/store areas (max_msr_count=%d)\n", max_msrs);
    if (max_msrs < 4) {
        PrintError("Max MSR cache size is too small (%d)\n", max_msrs);
        return -1;
}
- vmx_state->msr_area = V3_VAddr(V3_AllocPages(1));
-
- if (vmx_state->msr_area == NULL) {
+ vmx_state->msr_area_paddr = (addr_t)V3_AllocPages(1);
+
+ if (vmx_state->msr_area_paddr == (addr_t)NULL) {
PrintError("could not allocate msr load/store area\n");
return -1;
}
+ msr_entries = (struct vmcs_msr_save_area *)V3_VAddr((void *)(vmx_state->msr_area_paddr));
+ vmx_state->msr_area = msr_entries; // cache in vmx_info
+
+ memset(msr_entries, 0, PAGE_SIZE);
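+    // (Assuming 4 KB pages, the zeroed page holds up to 256 of the
+    //  16-byte MSR entries, comfortably covering the 4 guest + 4 host
+    //  entries set up below.)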
+
+ msr_entries->guest_star.index = IA32_STAR_MSR;
+ msr_entries->guest_lstar.index = IA32_LSTAR_MSR;
+ msr_entries->guest_fmask.index = IA32_FMASK_MSR;
+ msr_entries->guest_kern_gs.index = IA32_KERN_GS_BASE_MSR;
+
+ msr_entries->host_star.index = IA32_STAR_MSR;
+ msr_entries->host_lstar.index = IA32_LSTAR_MSR;
+ msr_entries->host_fmask.index = IA32_FMASK_MSR;
+ msr_entries->host_kern_gs.index = IA32_KERN_GS_BASE_MSR;
+
msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_STORE_CNT, 4);
msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_LOAD_CNT, 4);
msr_ret |= check_vmcs_write(VMCS_ENTRY_MSR_LOAD_CNT, 4);
-
-
- exit_store_msrs = (struct vmcs_msr_entry *)(vmx_state->msr_area);
- exit_load_msrs = (struct vmcs_msr_entry *)(vmx_state->msr_area + (sizeof(struct vmcs_msr_entry) * 4));
- entry_load_msrs = (struct vmcs_msr_entry *)(vmx_state->msr_area + (sizeof(struct vmcs_msr_entry) * 8));
+ msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_STORE_ADDR, (addr_t)V3_PAddr(msr_entries->guest_msrs));
+ msr_ret |= check_vmcs_write(VMCS_ENTRY_MSR_LOAD_ADDR, (addr_t)V3_PAddr(msr_entries->guest_msrs));
+ msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_LOAD_ADDR, (addr_t)V3_PAddr(msr_entries->host_msrs));
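+
+    /* For reference, a sketch of the save-area layout these writes assume;
+     * the entry and area types follow the usage in this diff and are not
+     * definitions confirmed here. Each entry uses the 16-byte format the
+     * hardware expects for the MSR load/store areas, and unions let the
+     * named per-MSR fields alias the guest_msrs[4] and host_msrs[4]
+     * arrays that the address writes point at:
+     *
+     *   struct vmcs_msr_entry {
+     *       uint32_t index;   // MSR number
+     *       uint32_t rsvd;
+     *       uint32_t lo;      // MSR data bits 31:0
+     *       uint32_t hi;      // MSR data bits 63:32
+     *   } __attribute__((packed));
+     *
+     *   struct vmcs_msr_save_area {
+     *       union {
+     *           struct vmcs_msr_entry guest_msrs[4];
+     *           struct {
+     *               struct vmcs_msr_entry guest_star, guest_lstar,
+     *                                     guest_fmask, guest_kern_gs;
+     *           } __attribute__((packed));
+     *       } __attribute__((packed));
+     *       union {
+     *           struct vmcs_msr_entry host_msrs[4];
+     *           struct {
+     *               struct vmcs_msr_entry host_star, host_lstar,
+     *                                     host_fmask, host_kern_gs;
+     *           } __attribute__((packed));
+     *       } __attribute__((packed));
+     *   } __attribute__((packed));
+     */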
- exit_store_msrs[0].index = IA32_STAR;
- exit_store_msrs[1].index = IA32_LSTAR;
- exit_store_msrs[2].index = IA32_FMASK;
- exit_store_msrs[3].index = IA32_KERN_GS_BASE;
-
- memcpy(exit_store_msrs, exit_load_msrs, sizeof(struct vmcs_msr_entry) * 4);
- memcpy(exit_store_msrs, entry_load_msrs, sizeof(struct vmcs_msr_entry) * 4);
-
- v3_get_msr(IA32_STAR, &(exit_load_msrs[0].hi), &(exit_load_msrs[0].lo));
- v3_get_msr(IA32_LSTAR, &(exit_load_msrs[1].hi), &(exit_load_msrs[1].lo));
- v3_get_msr(IA32_FMASK, &(exit_load_msrs[2].hi), &(exit_load_msrs[2].lo));
- v3_get_msr(IA32_KERN_GS_BASE, &(exit_load_msrs[3].hi), &(exit_load_msrs[3].lo));
+ msr_ret |= v3_hook_msr(core->vm_info, IA32_STAR_MSR, NULL, NULL, NULL);
+ msr_ret |= v3_hook_msr(core->vm_info, IA32_LSTAR_MSR, NULL, NULL, NULL);
+ msr_ret |= v3_hook_msr(core->vm_info, IA32_FMASK_MSR, NULL, NULL, NULL);
+ msr_ret |= v3_hook_msr(core->vm_info, IA32_KERN_GS_BASE_MSR, NULL, NULL, NULL);
+
+
+ // IMPORTANT: These MSRs appear to be cached by the hardware....
+ msr_ret |= v3_hook_msr(core->vm_info, SYSENTER_CS_MSR, NULL, NULL, NULL);
+ msr_ret |= v3_hook_msr(core->vm_info, SYSENTER_ESP_MSR, NULL, NULL, NULL);
+ msr_ret |= v3_hook_msr(core->vm_info, SYSENTER_EIP_MSR, NULL, NULL, NULL);
+
+ msr_ret |= v3_hook_msr(core->vm_info, FS_BASE_MSR, NULL, NULL, NULL);
+ msr_ret |= v3_hook_msr(core->vm_info, GS_BASE_MSR, NULL, NULL, NULL);
+
+
+ // Not sure what to do about this... Does not appear to be an explicit hardware cache version...
+ msr_ret |= v3_hook_msr(core->vm_info, IA32_CSTAR_MSR, NULL, NULL, NULL);
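+
+    // (For reference: SYSENTER_CS/ESP/EIP and FS.base/GS.base each have
+    //  dedicated guest-state fields in the VMCS, which is presumably the
+    //  hardware caching noted above; CSTAR has no such field, hence the
+    //  uncertainty here.)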
+
+ if (msr_ret != 0) {
+ PrintError("Error configuring MSR save/restore area\n");
+ return -1;
+ }
- msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_STORE_ADDR, (addr_t)V3_PAddr(exit_store_msrs));
- msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_LOAD_ADDR, (addr_t)V3_PAddr(exit_load_msrs));
- msr_ret |= check_vmcs_write(VMCS_ENTRY_MSR_LOAD_ADDR, (addr_t)V3_PAddr(entry_load_msrs));
}
#endif

    if (v3_update_vmcs_ctrl_fields(core)) {
        PrintError("Could not write control fields!\n");
        return -1;
}
+ /*
if (v3_update_vmcs_host_state(core)) {
PrintError("Could not write host state\n");
return -1;
}
+ */
    // reenable global interrupts for vm state initialization now
    // that the vm state is initialized. If another VM kicks us off,
    // it'll update our vmx state so that we know to reload ourselves
    v3_enable_ints();

    return 0;
}

int v3_vmx_enter(struct guest_info * info) {
int ret = 0;
- //uint32_t tsc_offset_low, tsc_offset_high;
+ uint32_t tsc_offset_low, tsc_offset_high;
struct vmx_exit_info exit_info;
struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);
// Perform last-minute time bookkeeping prior to entering the VM
v3_time_enter_vm(info);
- // tsc_offset_high = (uint32_t)((v3_tsc_host_offset(&info->time_state) >> 32) & 0xffffffff);
- // tsc_offset_low = (uint32_t)(v3_tsc_host_offset(&info->time_state) & 0xffffffff);
- // check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
- // check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);
+ tsc_offset_high = (uint32_t)((v3_tsc_host_offset(&info->time_state) >> 32) & 0xffffffff);
+ tsc_offset_low = (uint32_t)(v3_tsc_host_offset(&info->time_state) & 0xffffffff);
+ check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
+ check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);
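+    /* With TSC offsetting enabled, a guest RDTSC returns the host TSC
+     * plus this offset; the 64-bit offset is written as two 32-bit
+     * halves, with VMCS_TSC_OFFSET_HIGH covering bits 63:32. */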
    if (v3_update_vmcs_host_state(info)) {
        v3_enable_ints();
        PrintError("Could not write host state\n");
        return -1;
    }
if (vmx_info->state == VMX_UNLAUNCHED) {
vmx_info->state = VMX_LAUNCHED;
-
- info->vm_info->run_state = VM_RUNNING;
ret = v3_vmx_launch(&(info->vm_regs), info, &(info->ctrl_regs));
} else {
        V3_ASSERT(vmx_info->state != VMX_UNLAUNCHED);
        ret = v3_vmx_resume(&(info->vm_regs), info, &(info->ctrl_regs));
    }
if (info->vcpu_id == 0) {
info->core_run_state = CORE_RUNNING;
- info->vm_info->run_state = VM_RUNNING;
} else {
PrintDebug("VMX core %u: Waiting for core initialization\n", info->vcpu_id);
while (info->core_run_state == CORE_STOPPED) {
+
+ if (info->vm_info->run_state == VM_STOPPED) {
+ // The VM was stopped before this core was initialized.
+ return 0;
+ }
+
v3_yield(info);
//PrintDebug("VMX core %u: still waiting for INIT\n",info->vcpu_id);
}