#include <palacios/vmm_direct_paging.h>
#include <palacios/vmx_io.h>
#include <palacios/vmx_msr.h>
+#include <palacios/vmm_decoder.h>
+#include <palacios/vmm_barrier.h>
+
+#ifdef V3_CONFIG_CHECKPOINT
+#include <palacios/vmm_checkpoint.h>
+#endif
#include <palacios/vmx_ept.h>
#include <palacios/vmx_assist.h>
#include <palacios/vmx_hw_info.h>
-#ifndef CONFIG_DEBUG_VMX
+#ifndef V3_CONFIG_DEBUG_VMX
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
extern v3_cpu_arch_t v3_cpu_types[];
-static addr_t active_vmcs_ptrs[CONFIG_MAX_CPUS] = { [0 ... CONFIG_MAX_CPUS - 1] = 0};
-static addr_t host_vmcs_ptrs[CONFIG_MAX_CPUS] = { [0 ... CONFIG_MAX_CPUS - 1] = 0};
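+// One VMXON region per physical CPU; an entry is nonzero only if Palacios
+// itself executed VMXON on that CPU (see v3_init_vmx_cpu/v3_deinit_vmx_cpu)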
+static addr_t host_vmcs_ptrs[V3_CONFIG_MAX_CPUS] = { [0 ... V3_CONFIG_MAX_CPUS - 1] = 0};
extern int v3_vmx_launch(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
extern int v3_vmx_resume(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
PrintDebug("Loading VMCS\n");
vmx_ret = vmcs_load(vmx_state->vmcs_ptr_phys);
- active_vmcs_ptrs[V3_Get_CPU()] = vmx_state->vmcs_ptr_phys;
vmx_state->state = VMX_UNLAUNCHED;
if (vmx_ret != VMX_SUCCESS) {
/******* Setup Host State **********/
/* Cache GDTR, IDTR, and TR in host struct */
- addr_t gdtr_base;
- struct {
- uint16_t selector;
- addr_t base;
- } __attribute__((packed)) tmp_seg;
-
-
- __asm__ __volatile__(
- "sgdt (%0);"
- :
- : "q"(&tmp_seg)
- : "memory"
- );
- gdtr_base = tmp_seg.base;
- vmx_state->host_state.gdtr.base = gdtr_base;
-
- __asm__ __volatile__(
- "sidt (%0);"
- :
- : "q"(&tmp_seg)
- : "memory"
- );
- vmx_state->host_state.idtr.base = tmp_seg.base;
-
- __asm__ __volatile__(
- "str (%0);"
- :
- : "q"(&tmp_seg)
- : "memory"
- );
- vmx_state->host_state.tr.selector = tmp_seg.selector;
-
- /* The GDTR *index* is bits 3-15 of the selector. */
- struct tss_descriptor * desc = NULL;
- desc = (struct tss_descriptor *)(gdtr_base + (8 * (tmp_seg.selector >> 3)));
-
- tmp_seg.base = ((desc->base1) |
- (desc->base2 << 16) |
- (desc->base3 << 24) |
-#ifdef __V3_64BIT__
- ((uint64_t)desc->base4 << 32)
-#else
- (0)
-#endif
- );
-
- vmx_state->host_state.tr.base = tmp_seg.base;
/********** Setup VMX Control Fields ***********/
vmx_state->pri_proc_ctrls.pause_exit = 0;
vmx_state->pri_proc_ctrls.tsc_offset = 1;
-#ifdef CONFIG_TIME_VIRTUALIZE_TSC
+#ifdef V3_CONFIG_TIME_VIRTUALIZE_TSC
vmx_state->pri_proc_ctrls.rdtsc_exit = 1;
#endif
-
-
-
-
#ifdef __V3_64BIT__
+ // Ensure host runs in 64-bit mode at each VM EXIT
vmx_state->exit_ctrls.host_64_on = 1;
#endif
-
- /* Not sure how exactly to handle this... */
+ // Hook all accesses to EFER register
v3_hook_msr(core->vm_info, EFER_MSR,
&v3_handle_efer_read,
&v3_handle_efer_write,
core);
- // Or is it this???
- vmx_state->entry_ctrls.ld_efer = 1;
+ // Restore host's EFER register on each VM EXIT
vmx_state->exit_ctrls.ld_efer = 1;
+
+ // Save/restore guest's EFER register to/from VMCS on VM EXIT/ENTRY
vmx_state->exit_ctrls.save_efer = 1;
- /* *** */
+ vmx_state->entry_ctrls.ld_efer = 1;
- vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE);
+ // Cause VM_EXIT whenever CR4.VMXE or CR4.PAE bits are written
+ vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);
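+ // (VMXE is kept host-owned so the guest cannot see or toggle VMX; PAE
+ // writes are trapped so paging-mode transitions can be tracked)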
/* Setup paging */
v3_vmxassist_init(core, vmx_state);
} else if ((core->shdw_pg_mode == NESTED_PAGING) &&
- (v3_cpu_types[core->cpu_id] == V3_VMX_EPT_CPU)) {
+ (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_CPU)) {
#define CR0_PE 0x00000001
#define CR0_PG 0x80000000
}
} else if ((core->shdw_pg_mode == NESTED_PAGING) &&
- (v3_cpu_types[core->cpu_id] == V3_VMX_EPT_UG_CPU)) {
+ (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_UG_CPU)) {
int i = 0;
// For now we will assume that unrestricted guest mode is assured w/ EPT
// save STAR, LSTAR, FMASK, KERNEL_GS_BASE MSRs in MSR load/store area
{
-#define IA32_STAR 0xc0000081
-#define IA32_LSTAR 0xc0000082
-#define IA32_FMASK 0xc0000084
-#define IA32_KERN_GS_BASE 0xc0000102
-
-#define IA32_CSTAR 0xc0000083 // Compatibility mode STAR (ignored for now... hopefully its not that important...)
-
int msr_ret = 0;
struct vmcs_msr_entry * exit_store_msrs = NULL;
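+ // The MSR area page is carved into three 4-entry tables: exit-store,
+ // exit-load, and entry-load (the latter at slot 8, as computed below)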
entry_load_msrs = (struct vmcs_msr_entry *)(vmx_state->msr_area + (sizeof(struct vmcs_msr_entry) * 8));
- exit_store_msrs[0].index = IA32_STAR;
- exit_store_msrs[1].index = IA32_LSTAR;
- exit_store_msrs[2].index = IA32_FMASK;
- exit_store_msrs[3].index = IA32_KERN_GS_BASE;
+ exit_store_msrs[0].index = IA32_STAR_MSR;
+ exit_store_msrs[1].index = IA32_LSTAR_MSR;
+ exit_store_msrs[2].index = IA32_FMASK_MSR;
+ exit_store_msrs[3].index = IA32_KERN_GS_BASE_MSR;
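+ // On each VM exit the CPU stores the guest's copies of these MSRs into
+ // the exit-store table and reloads the host's copies from exit-load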
memcpy(exit_load_msrs, exit_store_msrs, sizeof(struct vmcs_msr_entry) * 4);
memcpy(entry_load_msrs, exit_store_msrs, sizeof(struct vmcs_msr_entry) * 4);
- v3_get_msr(IA32_STAR, &(exit_load_msrs[0].hi), &(exit_load_msrs[0].lo));
- v3_get_msr(IA32_LSTAR, &(exit_load_msrs[1].hi), &(exit_load_msrs[1].lo));
- v3_get_msr(IA32_FMASK, &(exit_load_msrs[2].hi), &(exit_load_msrs[2].lo));
- v3_get_msr(IA32_KERN_GS_BASE, &(exit_load_msrs[3].hi), &(exit_load_msrs[3].lo));
+ v3_get_msr(IA32_STAR_MSR, &(exit_load_msrs[0].hi), &(exit_load_msrs[0].lo));
+ v3_get_msr(IA32_LSTAR_MSR, &(exit_load_msrs[1].hi), &(exit_load_msrs[1].lo));
+ v3_get_msr(IA32_FMASK_MSR, &(exit_load_msrs[2].hi), &(exit_load_msrs[2].lo));
+ v3_get_msr(IA32_KERN_GS_BASE_MSR, &(exit_load_msrs[3].hi), &(exit_load_msrs[3].lo));
msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_STORE_ADDR, (addr_t)V3_PAddr(exit_store_msrs));
msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_LOAD_ADDR, (addr_t)V3_PAddr(exit_load_msrs));
msr_ret |= check_vmcs_write(VMCS_ENTRY_MSR_LOAD_ADDR, (addr_t)V3_PAddr(entry_load_msrs));
+
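+ // NULL read/write hooks clear these MSRs' exit bits in the MSR bitmap:
+ // the guest accesses them directly, which is safe because the save/store
+ // area above context-switches them on every entry/exit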
+ v3_hook_msr(core->vm_info, IA32_STAR_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, IA32_LSTAR_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, IA32_FMASK_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, IA32_KERN_GS_BASE_MSR, NULL, NULL, NULL);
+
+
+ // IMPORTANT: These SYSCALL MSRs are currently not handled by hardware or cached
+ // We should really emulate these ourselves, or ideally include them in the MSR store area if there is room
+ v3_hook_msr(core->vm_info, IA32_CSTAR_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, SYSENTER_CS_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, SYSENTER_ESP_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, SYSENTER_EIP_MSR, NULL, NULL, NULL);
+
+ v3_hook_msr(core->vm_info, FS_BASE_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, GS_BASE_MSR, NULL, NULL, NULL);
+
+
}
/* Sanity check ctrl/reg fields against hw_defaults */
return -1;
}
+ PrintDebug("Serializing VMCS: %p\n", (void *)vmx_state->vmcs_ptr_phys);
+ vmx_ret = vmcs_clear(vmx_state->vmcs_ptr_phys);
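+ // (VMCLEAR flushes the CPU's cached copy of this VMCS back to the
+ // in-memory region and marks it inactive, so any CPU can load it later)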
+
return 0;
}
struct vmx_data * vmx_state = core->vmm_data;
V3_FreePages((void *)(vmx_state->vmcs_ptr_phys), 1);
- V3_FreePages(vmx_state->msr_area, 1);
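+ // msr_area holds a host virtual address; V3_FreePages needs the physical one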
+ V3_FreePages(V3_PAddr(vmx_state->msr_area), 1);
V3_Free(vmx_state);
}
+
+#ifdef V3_CONFIG_CHECKPOINT
+/*
+ * JRL: This is broken
+ */
+int v3_vmx_save_core(struct guest_info * core, void * ctx){
+ uint64_t vmcs_ptr = vmcs_store();
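+ // vmcs_store() (VMPTRST) returns the *physical* address of the current
+ // VMCS, and the in-memory VMCS layout is implementation-defined, so a
+ // raw page copy is not a portable snapshot (hence "broken" above)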
+
+ v3_chkpt_save(ctx, "vmcs_data", PAGE_SIZE, (void *)vmcs_ptr);
+
+ return 0;
+}
+
+int v3_vmx_load_core(struct guest_info * core, void * ctx){
+ struct vmx_data * vmx_info = (struct vmx_data *)(core->vmm_data);
+ struct cr0_32 * shadow_cr0;
+ char vmcs[PAGE_SIZE_4KB];
+
+ v3_chkpt_load(ctx, "vmcs_data", PAGE_SIZE_4KB, vmcs);
+
+ vmcs_clear(vmx_info->vmcs_ptr_phys);
+ vmcs_load((addr_t)vmcs);
+
+ v3_vmx_save_vmcs(core);
+
+ shadow_cr0 = (struct cr0_32 *)&(core->ctrl_regs.cr0);
+
+
+ /* Get the CPU mode to set the guest_ia32e entry ctrl */
+
+ if (core->shdw_pg_mode == SHADOW_PAGING) {
+ if (v3_get_vm_mem_mode(core) == VIRTUAL_MEM) {
+ if (v3_activate_shadow_pt(core) == -1) {
+ PrintError("Failed to activate shadow page tables\n");
+ return -1;
+ }
+ } else {
+ if (v3_activate_passthrough_pt(core) == -1) {
+ PrintError("Failed to activate passthrough page tables\n");
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+#endif
+
+
+void v3_flush_vmx_vm_core(struct guest_info * core) {
+ struct vmx_data * vmx_info = (struct vmx_data *)(core->vmm_data);
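+ // VMCLEAR detaches the VMCS from whatever CPU holds it; marking the
+ // core UNLAUNCHED forces the next entry to use VMLAUNCH, not VMRESUME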
+ vmcs_clear(vmx_info->vmcs_ptr_phys);
+ vmx_info->state = VMX_UNLAUNCHED;
+}
+
+
+
static int update_irq_exit_state(struct guest_info * info) {
struct vmx_exit_idt_vec_info idt_vec_info;
check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));
if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 0)) {
-#ifdef CONFIG_DEBUG_INTERRUPTS
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
V3_Print("Calling v3_injecting_intr\n");
#endif
info->intr_core_state.irq_started = 0;
check_vmcs_write(VMCS_ENTRY_EXCP_ERR, info->excp_state.excp_error_code);
int_info.error_code = 1;
-#ifdef CONFIG_DEBUG_INTERRUPTS
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
V3_Print("Injecting exception %d with error code %x\n",
int_info.vector, info->excp_state.excp_error_code);
#endif
}
int_info.valid = 1;
-#ifdef CONFIG_DEBUG_INTERRUPTS
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
V3_Print("Injecting exception %d (EIP=%p)\n", int_info.vector, (void *)(addr_t)info->rip);
#endif
check_vmcs_write(VMCS_ENTRY_INT_INFO, int_info.value);
if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 1)) {
-#ifdef CONFIG_DEBUG_INTERRUPTS
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
V3_Print("IRQ pending from previous injection\n");
#endif
ent_int.error_code = 0;
ent_int.valid = 1;
-#ifdef CONFIG_DEBUG_INTERRUPTS
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
V3_Print("Injecting Interrupt %d at exit %u(EIP=%p)\n",
info->intr_core_state.irq_vector,
(uint32_t)info->num_exits,
check_vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
-#ifdef CONFIG_DEBUG_INTERRUPTS
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
V3_Print("Enabling Interrupt-Window exiting: %d\n", instr_len);
#endif
// Perform any additional yielding needed for time adjustment
v3_adjust_time(info);
- // Update timer devices prior to entering VM.
- v3_update_timers(info);
-
// disable global interrupts for vm state transition
v3_disable_ints();
+ // Update timer devices late, after being in the VM, so that as much
+ // of the time in the VM is accounted for as possible. Also do it before
+ // updating IRQ entry state so that any interrupts the timers raise get
+ // handled on the next VM entry. Must be done with interrupts disabled.
+ v3_update_timers(info);
- if (active_vmcs_ptrs[V3_Get_CPU()] != vmx_info->vmcs_ptr_phys) {
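+ // If this physical CPU's current VMCS is not ours (another core ran
+ // here since our last entry), VMCLEAR + VMPTRLD our VMCS and drop back
+ // to the unlaunched state so the next entry uses VMLAUNCH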
+ if (vmcs_store() != vmx_info->vmcs_ptr_phys) {
+ vmcs_clear(vmx_info->vmcs_ptr_phys);
vmcs_load(vmx_info->vmcs_ptr_phys);
- active_vmcs_ptrs[V3_Get_CPU()] = vmx_info->vmcs_ptr_phys;
+ vmx_info->state = VMX_UNLAUNCHED;
}
-
v3_vmx_restore_vmcs(info);
-#ifdef CONFIG_SYMCALL
+#ifdef V3_CONFIG_SYMCALL
if (info->sym_core_state.symcall_state.sym_call_active == 0) {
update_irq_entry_state(info);
}
if (vmx_info->state == VMX_UNLAUNCHED) {
vmx_info->state = VMX_LAUNCHED;
- info->vm_info->run_state = VM_RUNNING;
ret = v3_vmx_launch(&(info->vm_regs), info, &(info->ctrl_regs));
} else {
V3_ASSERT(vmx_info->state != VMX_UNLAUNCHED);
ret = v3_vmx_resume(&(info->vm_regs), info, &(info->ctrl_regs));
}
+
+
// PrintDebug("VMX Exit: ret=%d\n", ret);
if (ret != VMX_SUCCESS) {
uint32_t error = 0;
-
vmcs_read(VMCS_INSTR_ERR, &error);
v3_enable_ints();
- PrintError("VMENTRY Error: %d\n", error);
+ PrintError("VMENTRY Error: %d (launch_ret = %d)\n", error, ret);
return -1;
}
+
+
// Immediate exit from VM time bookkeeping
v3_time_exit_vm(info);
exit_log[info->num_exits % 10] = exit_info;
-
-#ifdef CONFIG_SYMCALL
+#ifdef V3_CONFIG_SYMCALL
if (info->sym_core_state.symcall_state.sym_call_active == 0) {
update_irq_exit_state(info);
}
vmx_info->pri_proc_ctrls.int_wndw_exit = 0;
vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
-#ifdef CONFIG_DEBUG_INTERRUPTS
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
V3_Print("Interrupts available again! (RIP=%llx)\n", info->rip);
#endif
}
v3_yield_cond(info);
if (v3_handle_vmx_exit(info, &exit_info) == -1) {
- PrintError("Error in VMX exit handler\n");
+ PrintError("Error in VMX exit handler (Exit reason=%x)\n", exit_info.exit_reason);
return -1;
}
int v3_start_vmx_guest(struct guest_info * info) {
- PrintDebug("Starting VMX core %u\n", info->cpu_id);
+ PrintDebug("Starting VMX core %u\n", info->vcpu_id);
- if (info->cpu_id == 0) {
+ if (info->vcpu_id == 0) {
info->core_run_state = CORE_RUNNING;
- info->vm_info->run_state = VM_RUNNING;
} else {
- PrintDebug("VMX core %u: Waiting for core initialization\n", info->cpu_id);
+ PrintDebug("VMX core %u: Waiting for core initialization\n", info->vcpu_id);
while (info->core_run_state == CORE_STOPPED) {
+
+ if (info->vm_info->run_state == VM_STOPPED) {
+ // The VM was stopped before this core was initialized.
+ return 0;
+ }
+
v3_yield(info);
- //PrintDebug("VMX core %u: still waiting for INIT\n",info->cpu_id);
+ //PrintDebug("VMX core %u: still waiting for INIT\n",info->vcpu_id);
}
- PrintDebug("VMX core %u initialized\n", info->cpu_id);
+ PrintDebug("VMX core %u initialized\n", info->vcpu_id);
+
+ // We'll be paranoid about race conditions here
+ v3_wait_at_barrier(info);
}
PrintDebug("VMX core %u: I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n",
- info->cpu_id, info->segments.cs.selector, (void *)(info->segments.cs.base),
+ info->vcpu_id, info->segments.cs.selector, (void *)(info->segments.cs.base),
info->segments.cs.limit, (void *)(info->rip));
- PrintDebug("VMX core %u: Launching VMX VM\n", info->cpu_id);
+ PrintDebug("VMX core %u: Launching VMX VM on logical core %u\n", info->vcpu_id, info->pcpu_id);
v3_start_time(info);
}
if (v3_vmx_enter(info) == -1) {
+
+ addr_t host_addr = 0;
+ addr_t linear_addr = 0;
+
+ info->vm_info->run_state = VM_ERROR;
+
+ V3_Print("VMX core %u: VMX ERROR!!\n", info->vcpu_id);
+
+ v3_print_guest_state(info);
+
+ V3_Print("VMX core %u\n", info->vcpu_id);
+
+ linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
+
+ if (info->mem_mode == PHYSICAL_MEM) {
+ v3_gpa_to_hva(info, linear_addr, &host_addr);
+ } else if (info->mem_mode == VIRTUAL_MEM) {
+ v3_gva_to_hva(info, linear_addr, &host_addr);
+ }
+
+ V3_Print("VMX core %u: Host Address of rip = 0x%p\n", info->vcpu_id, (void *)host_addr);
+
+ V3_Print("VMX core %u: Instr (15 bytes) at %p:\n", info->vcpu_id, (void *)host_addr);
+ v3_dump_mem((uint8_t *)host_addr, 15);
+
+ v3_print_stack(info);
+
+
v3_print_vmcs();
print_exit_log(info);
return -1;
}
+ v3_wait_at_barrier(info);
if (info->vm_info->run_state == VM_STOPPED) {
}
+int v3_reset_vmx_vm_core(struct guest_info * core, addr_t rip) {
+ // init vmcs bios
+
+ if ((core->shdw_pg_mode == NESTED_PAGING) &&
+ (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_UG_CPU)) {
+ // Unrestricted guest lets us enter real mode directly, so start at the
+ // SIPI-style vector passed in "rip" (CS.base = vector << 12)
+ core->rip = 0;
+ core->segments.cs.selector = rip << 8;
+ core->segments.cs.limit = 0xffff;
+ core->segments.cs.base = rip << 12;
+ } else {
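+ // Without unrestricted guest we cannot start in real mode directly;
+ // stash the start vector in registers for the vmxassist-based startup
+ // path to pick up (register convention assumed here)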
+ core->vm_regs.rdx = core->vcpu_id;
+ core->vm_regs.rbx = rip;
+ }
+ return 0;
+}
void v3_init_vmx_cpu(int cpu_id) {
+ addr_t vmx_on_region = 0;
if (cpu_id == 0) {
if (v3_init_vmx_hw(&hw_info) == -1) {
// Setup VMXON Region
- host_vmcs_ptrs[cpu_id] = allocate_vmcs();
+ vmx_on_region = allocate_vmcs();
- PrintDebug("VMXON pointer: 0x%p\n", (void *)host_vmcs_ptrs[cpu_id]);
- if (vmx_on(host_vmcs_ptrs[cpu_id]) == VMX_SUCCESS) {
- PrintDebug("VMX Enabled\n");
+ if (vmx_on(vmx_on_region) == VMX_SUCCESS) {
+ V3_Print("VMX Enabled\n");
+ host_vmcs_ptrs[cpu_id] = vmx_on_region;
} else {
- PrintError("VMX initialization failure\n");
- return;
+ V3_Print("VMX already enabled\n");
+ V3_FreePages((void *)vmx_on_region, 1);
}
-
+
+ PrintDebug("VMXON pointer: 0x%p\n", (void *)host_vmcs_ptrs[cpu_id]);
{
struct vmx_sec_proc_ctrls sec_proc_ctrls;
void v3_deinit_vmx_cpu(int cpu_id) {
extern v3_cpu_arch_t v3_cpu_types[];
v3_cpu_types[cpu_id] = V3_INVALID_CPU;
- V3_FreePages((void *)host_vmcs_ptrs[cpu_id], 1);
+
+ if (host_vmcs_ptrs[cpu_id] != 0) {
+ V3_Print("Disabling VMX\n");
+
+ if (vmx_off() != VMX_SUCCESS) {
+ PrintError("Error executing VMXOFF\n");
+ }
+
+ V3_FreePages((void *)host_vmcs_ptrs[cpu_id], 1);
+
+ host_vmcs_ptrs[cpu_id] = 0;
+ }
}