/******* Setup Host State **********/
/* Cache GDTR, IDTR, and TR in host struct */
- addr_t gdtr_base;
- struct {
- uint16_t selector;
- addr_t base;
- } __attribute__((packed)) tmp_seg;
-
-
- __asm__ __volatile__(
- "sgdt (%0);"
- :
- : "q"(&tmp_seg)
- : "memory"
- );
- gdtr_base = tmp_seg.base;
- vmx_state->host_state.gdtr.base = gdtr_base;
-
- __asm__ __volatile__(
- "sidt (%0);"
- :
- : "q"(&tmp_seg)
- : "memory"
- );
- vmx_state->host_state.idtr.base = tmp_seg.base;
-
- __asm__ __volatile__(
- "str (%0);"
- :
- : "q"(&tmp_seg)
- : "memory"
- );
- vmx_state->host_state.tr.selector = tmp_seg.selector;
-
- /* The GDTR *index* is bits 3-15 of the selector. */
- struct tss_descriptor * desc = NULL;
- desc = (struct tss_descriptor *)(gdtr_base + (8 * (tmp_seg.selector >> 3)));
-
- tmp_seg.base = ((desc->base1) |
- (desc->base2 << 16) |
- (desc->base3 << 24) |
-#ifdef __V3_64BIT__
- ((uint64_t)desc->base4 << 32)
-#else
- (0)
-#endif
- );
-
- vmx_state->host_state.tr.base = tmp_seg.base;
/********** Setup VMX Control Fields ***********/
-
-
-
-
#ifdef __V3_64BIT__
// Ensure host runs in 64-bit mode at each VM EXIT
vmx_state->exit_ctrls.host_64_on = 1;
// save STAR, LSTAR, FMASK, KERNEL_GS_BASE MSRs in MSR load/store area
{
-#define IA32_STAR 0xc0000081
-#define IA32_LSTAR 0xc0000082
-#define IA32_FMASK 0xc0000084
-#define IA32_KERN_GS_BASE 0xc0000102
-
-#define IA32_CSTAR 0xc0000083 // Compatibility mode STAR (ignored for now... hopefully its not that important...)
-
int msr_ret = 0;
struct vmcs_msr_entry * exit_store_msrs = NULL;
entry_load_msrs = (struct vmcs_msr_entry *)(vmx_state->msr_area + (sizeof(struct vmcs_msr_entry) * 8));
- exit_store_msrs[0].index = IA32_STAR;
- exit_store_msrs[1].index = IA32_LSTAR;
- exit_store_msrs[2].index = IA32_FMASK;
- exit_store_msrs[3].index = IA32_KERN_GS_BASE;
+ exit_store_msrs[0].index = IA32_STAR_MSR;
+ exit_store_msrs[1].index = IA32_LSTAR_MSR;
+ exit_store_msrs[2].index = IA32_FMASK_MSR;
+ exit_store_msrs[3].index = IA32_KERN_GS_BASE_MSR;
memcpy(exit_store_msrs, exit_load_msrs, sizeof(struct vmcs_msr_entry) * 4);
memcpy(exit_store_msrs, entry_load_msrs, sizeof(struct vmcs_msr_entry) * 4);
- v3_get_msr(IA32_STAR, &(exit_load_msrs[0].hi), &(exit_load_msrs[0].lo));
- v3_get_msr(IA32_LSTAR, &(exit_load_msrs[1].hi), &(exit_load_msrs[1].lo));
- v3_get_msr(IA32_FMASK, &(exit_load_msrs[2].hi), &(exit_load_msrs[2].lo));
- v3_get_msr(IA32_KERN_GS_BASE, &(exit_load_msrs[3].hi), &(exit_load_msrs[3].lo));
+ v3_get_msr(IA32_STAR_MSR, &(exit_load_msrs[0].hi), &(exit_load_msrs[0].lo));
+ v3_get_msr(IA32_LSTAR_MSR, &(exit_load_msrs[1].hi), &(exit_load_msrs[1].lo));
+ v3_get_msr(IA32_FMASK_MSR, &(exit_load_msrs[2].hi), &(exit_load_msrs[2].lo));
+ v3_get_msr(IA32_KERN_GS_BASE_MSR, &(exit_load_msrs[3].hi), &(exit_load_msrs[3].lo));
msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_STORE_ADDR, (addr_t)V3_PAddr(exit_store_msrs));
msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_LOAD_ADDR, (addr_t)V3_PAddr(exit_load_msrs));
msr_ret |= check_vmcs_write(VMCS_ENTRY_MSR_LOAD_ADDR, (addr_t)V3_PAddr(entry_load_msrs));
+
+ v3_hook_msr(core->vm_info, IA32_STAR_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, IA32_LSTAR_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, IA32_FMASK_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, IA32_KERN_GS_BASE_MSR, NULL, NULL, NULL);
+
+
+ // IMPORTANT: These SYSCALL MSRs are currently not handled by hardware or cached
+ // We should really emulate these ourselves, or ideally include them in the MSR store area if there is room
+ v3_hook_msr(core->vm_info, IA32_CSTAR_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, SYSENTER_CS_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, SYSENTER_ESP_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, SYSENTER_EIP_MSR, NULL, NULL, NULL);
+
+ v3_hook_msr(core->vm_info, FS_BASE_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, GS_BASE_MSR, NULL, NULL, NULL);
+
+
}
/* Sanity check ctrl/reg fields against hw_defaults */
/* Get the CPU mode to set the guest_ia32e entry ctrl */
if (core->shdw_pg_mode == SHADOW_PAGING) {
- if (shadow_cr0->pg){
+ if (v3_get_vm_mem_mode(core) == VIRTUAL_MEM) {
+ if (v3_activate_shadow_pt(core) == -1) {
+ PrintError("Failed to activate shadow page tables\n");
+ return -1;
+ }
+ } else {
if (v3_activate_passthrough_pt(core) == -1) {
PrintError("Failed to activate passthrough page tables\n");
return -1;
#endif
+/* Flush this core's VMCS back to memory.
+ *
+ * vmcs_clear() presumably executes VMCLEAR on the VMCS (TODO: confirm in
+ * its definition), which writes any processor-cached VMCS data back to the
+ * in-memory region and leaves the VMCS in the "clear" launch state.  Because
+ * a cleared VMCS must be entered with VMLAUNCH rather than VMRESUME, the
+ * software launch-tracking state is reset to VMX_UNLAUNCHED here; the entry
+ * path checks this flag to choose v3_vmx_launch() over v3_vmx_resume().
+ */
+void v3_flush_vmx_vm_core(struct guest_info * core) {
+    struct vmx_data * vmx_info = (struct vmx_data *)(core->vmm_data);
+    vmcs_clear(vmx_info->vmcs_ptr_phys);
+    vmx_info->state = VMX_UNLAUNCHED;
+}
+
+
+
static int update_irq_exit_state(struct guest_info * info) {
struct vmx_exit_idt_vec_info idt_vec_info;
check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);
-
if (v3_update_vmcs_host_state(info)) {
v3_enable_ints();
PrintError("Could not write host state\n");
if (vmx_info->state == VMX_UNLAUNCHED) {
vmx_info->state = VMX_LAUNCHED;
-
- info->vm_info->run_state = VM_RUNNING;
ret = v3_vmx_launch(&(info->vm_regs), info, &(info->ctrl_regs));
} else {
V3_ASSERT(vmx_info->state != VMX_UNLAUNCHED);
ret = v3_vmx_resume(&(info->vm_regs), info, &(info->ctrl_regs));
}
+
+
// PrintDebug("VMX Exit: ret=%d\n", ret);
if (ret != VMX_SUCCESS) {
uint32_t error = 0;
-
vmcs_read(VMCS_INSTR_ERR, &error);
v3_enable_ints();
- PrintError("VMENTRY Error: %d\n", error);
+ PrintError("VMENTRY Error: %d (launch_ret = %d)\n", error, ret);
return -1;
}
+
+
// Immediate exit from VM time bookkeeping
v3_time_exit_vm(info);
if (info->vcpu_id == 0) {
info->core_run_state = CORE_RUNNING;
- info->vm_info->run_state = VM_RUNNING;
} else {
PrintDebug("VMX core %u: Waiting for core initialization\n", info->vcpu_id);
while (info->core_run_state == CORE_STOPPED) {
+
+ if (info->vm_info->run_state == VM_STOPPED) {
+ // The VM was stopped before this core was initialized.
+ return 0;
+ }
+
v3_yield(info);
//PrintDebug("VMX core %u: still waiting for INIT\n",info->vcpu_id);
}
PrintDebug("VMX core %u initialized\n", info->vcpu_id);
+
+ // We'll be paranoid about race conditions here
+ v3_wait_at_barrier(info);
}