diff --git a/palacios/src/palacios/svm.c b/palacios/src/palacios/svm.c
index 980ce99..a4e9443 100644
--- a/palacios/src/palacios/svm.c
+++ b/palacios/src/palacios/svm.c
@@ -18,6 +19,7 @@
  */
 
+#include 
 
 #include 
 
@@ -35,6 +36,14 @@
 #include 
 #include 
 
+#include 
+#include 
+
+
+
+#ifdef V3_CONFIG_CHECKPOINT
+#include <palacios/vmm_checkpoint.h>
+#endif
 
 #include 
 
@@ -80,6 +89,25 @@ static vmcb_t * Allocate_VMCB() {
 }
 
 
+static int v3_svm_handle_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data)
+{
+    int status;
+
+    // Call the arch-independent handler first
+    if ((status = v3_handle_efer_write(core, msr, src, priv_data)) != 0) {
+        return status;
+    }
+
+    // SVM-specific code
+    {
+        // Ensure that the hardware-visible EFER.SVME bit is set (SVM Enable)
+        struct efer_64 * hw_efer = (struct efer_64 *)&(core->ctrl_regs.efer);
+        hw_efer->svme = 1;
+    }
+
+    return 0;
+}
+
 static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
     vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
 
@@ -105,10 +133,9 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
 
     ctrl_area->instrs.HLT = 1;
 
-#ifdef V3_CONFIG_TIME_VIRTUALIZE_TSC
-    ctrl_area->instrs.RDTSC = 1;
-    ctrl_area->svm_instrs.RDTSCP = 1;
-#endif
+    /* Set at VMM launch as needed */
+    ctrl_area->instrs.RDTSC = 0;
+    ctrl_area->svm_instrs.RDTSCP = 0;
 
     // guest_state->cr0 = 0x00000001;    // PE
 
@@ -132,7 +159,7 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
     ctrl_area->instrs.NMI = 1;
     ctrl_area->instrs.SMI = 0; // allow SMIs to run in guest
     ctrl_area->instrs.INIT = 1;
-    ctrl_area->instrs.PAUSE = 1;
+    //    ctrl_area->instrs.PAUSE = 1;
 
     ctrl_area->instrs.shutdown_evts = 1;
 
@@ -216,11 +243,23 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
     PrintDebug("Exiting on interrupts\n");
     ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
     ctrl_area->instrs.INTR = 1;
+    // The above also ensures that TPR changes (CR8) are only virtual.
+    // However, we need to see TPR writes since they will
+    // affect the virtual APIC;
+    // we reflect CR8 out to ctrl_regs->apic_tpr
+    ctrl_area->cr_reads.cr8 = 1;
+    ctrl_area->cr_writes.cr8 = 1;
+    // We will do all TPR comparisons in the virtual APIC.
+    // We also do not want the V_TPR to be able to mask the PIC.
+    ctrl_area->guest_ctrl.V_IGN_TPR = 1;
+
+
 
     v3_hook_msr(core->vm_info, EFER_MSR,
                 &v3_handle_efer_read,
-                &v3_handle_efer_write,
+                &v3_svm_handle_efer_write,
                 core);
 
     if (core->shdw_pg_mode == SHADOW_PAGING) {
 
@@ -252,7 +291,6 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
 
         ctrl_area->cr_writes.cr3 = 1;
 
-        ctrl_area->instrs.INVLPG = 1;
 
         ctrl_area->exceptions.pf = 1;
 
@@ -260,7 +298,6 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
 
         guest_state->g_pat = 0x7040600070406ULL;
 
-
     } else if (core->shdw_pg_mode == NESTED_PAGING) {
         // Flush the TLB on entries/exits
         ctrl_area->TLB_CONTROL = 1;
 
@@ -287,6 +324,28 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
                 &v3_handle_vm_cr_read,
                 &v3_handle_vm_cr_write,
                 core);
+
+
+    {
+#define INT_PENDING_AMD_MSR 0xc0010055
+
+        v3_hook_msr(core->vm_info, IA32_STAR_MSR, NULL, NULL, NULL);
+        v3_hook_msr(core->vm_info, IA32_LSTAR_MSR, NULL, NULL, NULL);
+        v3_hook_msr(core->vm_info, IA32_FMASK_MSR, NULL, NULL, NULL);
+        v3_hook_msr(core->vm_info, IA32_KERN_GS_BASE_MSR, NULL, NULL, NULL);
+        v3_hook_msr(core->vm_info, IA32_CSTAR_MSR, NULL, NULL, NULL);
+
+        v3_hook_msr(core->vm_info, SYSENTER_CS_MSR, NULL, NULL, NULL);
+        v3_hook_msr(core->vm_info, SYSENTER_ESP_MSR, NULL, NULL, NULL);
+        v3_hook_msr(core->vm_info, SYSENTER_EIP_MSR, NULL, NULL, NULL);
+
+
+        v3_hook_msr(core->vm_info, FS_BASE_MSR, NULL, NULL, NULL);
+        v3_hook_msr(core->vm_info, GS_BASE_MSR, NULL, NULL, NULL);
+
+        // Passthrough read operations are ok.
+        v3_hook_msr(core->vm_info, INT_PENDING_AMD_MSR, NULL, v3_msr_unhandled_write, NULL);
+    }
 }
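A note on the hook API used above: passing NULL for both the read and write handlers to v3_hook_msr() makes the MSR fully passthrough, while a NULL read handler with a non-NULL write handler (as for INT_PENDING_AMD_MSR) passes reads through but intercepts writes. A minimal sketch of a custom write hook follows; the handler name and debug message are hypothetical, and the signature is assumed to match v3_svm_handle_efer_write() above.

// Illustrative only: log a hooked MSR write, then defer to the generic
// unhandled-write handler, as the INT_PENDING_AMD_MSR hook above does.
static int example_msr_write_hook(struct guest_info * core, uint_t msr,
                                  struct v3_msr src, void * priv_data) {
    PrintDebug("MSR 0x%x write: hi=0x%x, lo=0x%x\n", msr, src.hi, src.lo);
    return v3_msr_unhandled_write(core, msr, src, priv_data);
}

// Registration (illustrative): a NULL read hook passes reads straight through.
// v3_hook_msr(core->vm_info, INT_PENDING_AMD_MSR, NULL, example_msr_write_hook, NULL);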
@@ -308,6 +367,8 @@ int v3_init_svm_vmcb(struct guest_info * core, v3_vm_class_t vm_class) {
        return -1;
     }
 
+    core->core_run_state = CORE_STOPPED;
+
     return 0;
 }
 
@@ -318,6 +379,37 @@ int v3_deinit_svm_vmcb(struct guest_info * core) {
     return 0;
 }
 
+
+#ifdef V3_CONFIG_CHECKPOINT
+int v3_svm_save_core(struct guest_info * core, void * ctx) {
+
+    if (v3_chkpt_save_8(ctx, "cpl", &(core->cpl)) == -1) {
+        PrintError("Could not save SVM cpl\n");
+        return -1;
+    }
+
+    if (v3_chkpt_save(ctx, "vmcb_data", PAGE_SIZE, core->vmm_data) == -1) {
+        PrintError("Could not save SVM vmcb\n");
+        return -1;
+    }
+
+    return 0;
+}
+
+int v3_svm_load_core(struct guest_info * core, void * ctx) {
+
+    if (v3_chkpt_load_8(ctx, "cpl", &(core->cpl)) == -1) {
+        PrintError("Could not load SVM cpl\n");
+        return -1;
+    }
+
+    if (v3_chkpt_load(ctx, "vmcb_data", PAGE_SIZE, core->vmm_data) == -1) {
+        PrintError("Could not load SVM vmcb\n");
+        return -1;
+    }
+
+    return 0;
+}
+#endif
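The save/load pair above must stay symmetric: every tagged v3_chkpt_save*() call needs a matching v3_chkpt_load*() with the same tag and size. A minimal sketch of checkpointing additional per-core state in the same style; it assumes a v3_chkpt_save_64() helper analogous to the v3_chkpt_save_8() used above, and the choice of fields is illustrative only.

#ifdef V3_CONFIG_CHECKPOINT
// Hypothetical extension: save more per-core state alongside the VMCB.
static int example_save_more_core_state(struct guest_info * core, void * ctx) {
    // 8-byte scalar, tagged by name (mirrors the "cpl" save above)
    if (v3_chkpt_save_64(ctx, "rip", &(core->rip)) == -1) {
        PrintError("Could not save core RIP\n");
        return -1;
    }

    // Fixed-size buffer (mirrors the "vmcb_data" save above)
    if (v3_chkpt_save(ctx, "vm_regs", sizeof(core->vm_regs), &(core->vm_regs)) == -1) {
        PrintError("Could not save core GPRs\n");
        return -1;
    }

    return 0;
}
#endif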
 
 static int update_irq_exit_state(struct guest_info * info) {
     vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
 
@@ -395,8 +487,11 @@ static int update_irq_entry_state(struct guest_info * info) {
 #endif
            guest_ctrl->guest_ctrl.V_IRQ = 1;
            guest_ctrl->guest_ctrl.V_INTR_VECTOR = info->intr_core_state.irq_vector;
+
+           // We ignore the virtual TPR on this injection;
+           // TPR/PPR tests have already been done in the APIC.
            guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
-           guest_ctrl->guest_ctrl.V_INTR_PRIO = 0xf;
+           guest_ctrl->guest_ctrl.V_INTR_PRIO = info->intr_core_state.irq_vector >> 4;  // 0xf;
 
     } else {
        switch (v3_intr_pending(info)) {
@@ -405,8 +500,11 @@ static int update_irq_entry_state(struct guest_info * info) {
 
                guest_ctrl->guest_ctrl.V_IRQ = 1;
                guest_ctrl->guest_ctrl.V_INTR_VECTOR = irq;
+
+               // We ignore the virtual TPR on this injection;
+               // TPR/PPR tests have already been done in the APIC.
                guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
-               guest_ctrl->guest_ctrl.V_INTR_PRIO = 0xf;
+               guest_ctrl->guest_ctrl.V_INTR_PRIO = info->intr_core_state.irq_vector >> 4;  // 0xf;
 
 #ifdef V3_CONFIG_DEBUG_INTERRUPTS
                PrintDebug("Injecting Interrupt %d (EIP=%p)\n",
@@ -424,6 +522,18 @@ static int update_irq_entry_state(struct guest_info * info) {
                break;
            case V3_SOFTWARE_INTR:
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_SOFT_INTR;
+
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
+               PrintDebug("Injecting software interrupt -- type: %d, vector: %d\n",
+                          SVM_INJECTION_SOFT_INTR, info->intr_core_state.swintr_vector);
+#endif
+               guest_ctrl->EVENTINJ.vector = info->intr_core_state.swintr_vector;
+               guest_ctrl->EVENTINJ.valid = 1;
+
+               /* reset swintr state */
+               info->intr_core_state.swintr_posted = 0;
+               info->intr_core_state.swintr_vector = 0;
+
                break;
            case V3_VIRTUAL_IRQ:
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_IRQ;
@@ -439,6 +549,26 @@ static int update_irq_entry_state(struct guest_info * info) {
 
     return 0;
 }
 
+int
+v3_svm_config_tsc_virtualization(struct guest_info * info) {
+    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
+
+
+    if (info->time_state.flags & VM_TIME_TRAP_RDTSC) {
+        ctrl_area->instrs.RDTSC = 1;
+        ctrl_area->svm_instrs.RDTSCP = 1;
+    } else {
+        ctrl_area->instrs.RDTSC = 0;
+        ctrl_area->svm_instrs.RDTSCP = 0;
+
+        if (info->time_state.flags & VM_TIME_TSC_PASSTHROUGH) {
+            ctrl_area->TSC_OFFSET = 0;
+        } else {
+            ctrl_area->TSC_OFFSET = v3_tsc_host_offset(&info->time_state);
+        }
+    }
+    return 0;
+}
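For reference: when RDTSC exits are disabled, the hardware returns host TSC + TSC_OFFSET (modulo 2^64) to a guest RDTSC/RDTSCP, so VM_TIME_TSC_PASSTHROUGH simply forces a zero offset, while VM_TIME_TRAP_RDTSC makes the offset irrelevant because every read exits to the VMM. A minimal sketch of the guest-visible relation, assuming v3_tsc_host_offset() returns the signed per-core offset:

#include <stdint.h>

// Guest-visible TSC under hardware offsetting (AMD APM vol. 2):
// the addition wraps modulo 2^64, so a negative offset is carried
// in two's complement.
static inline uint64_t guest_visible_tsc(uint64_t host_tsc, int64_t tsc_offset) {
    return host_tsc + (uint64_t)tsc_offset;
}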
 
 /*
  * CAUTION and DANGER!!!
 
@@ -452,19 +582,20 @@ int v3_svm_enter(struct guest_info * info) {
     vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
     vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
     addr_t exit_code = 0, exit_info1 = 0, exit_info2 = 0;
+    uint64_t guest_cycles = 0;
 
     // Conditionally yield the CPU if the timeslice has expired
-    v3_yield_cond(info);
+    v3_yield_cond(info, -1);
 
-    // Perform any additional yielding needed for time adjustment
-    v3_adjust_time(info);
+    // Update timer devices after being in the VM and before doing
+    // IRQ updates, so that any interrupts they raise get seen
+    // immediately.
+    v3_advance_time(info, NULL);
+    v3_update_timers(info);
 
     // disable global interrupts for vm state transition
     v3_clgi();
 
-    // Update timer devices prior to entering VM.
-    v3_update_timers(info);
-
     // Synchronize the guest state to the VMCB
     guest_state->cr0 = info->ctrl_regs.cr0;
     guest_state->cr2 = info->ctrl_regs.cr2;
@@ -472,10 +603,26 @@ int v3_svm_enter(struct guest_info * info) {
     guest_state->cr4 = info->ctrl_regs.cr4;
     guest_state->dr6 = info->dbg_regs.dr6;
     guest_state->dr7 = info->dbg_regs.dr7;
-    guest_ctrl->guest_ctrl.V_TPR = info->ctrl_regs.cr8 & 0xff;
+
+    // CR8 is now updated by reads/writes, and it contains the APIC TPR;
+    // the V_TPR should be just the class part of that.
+    // This update is here just for completeness.  We currently
+    // are ignoring V_TPR on all injections and doing the priority logic
+    // in the APIC.
+    // guest_ctrl->guest_ctrl.V_TPR = ((info->ctrl_regs.apic_tpr) >> 4) & 0xf;
+
+    //guest_ctrl->guest_ctrl.V_TPR = info->ctrl_regs.cr8 & 0xff;
+    //
+
     guest_state->rflags = info->ctrl_regs.rflags;
     guest_state->efer = info->ctrl_regs.efer;
 
+    /* Synchronize MSRs */
+    guest_state->star = info->msrs.star;
+    guest_state->lstar = info->msrs.lstar;
+    guest_state->sfmask = info->msrs.sfmask;
+    guest_state->KernelGsBase = info->msrs.kern_gs_base;
+
     guest_state->cpl = info->cpl;
 
     v3_set_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments));
@@ -509,19 +656,28 @@ int v3_svm_enter(struct guest_info * info) {
     }
 #endif
 
-    v3_time_enter_vm(info);
-    guest_ctrl->TSC_OFFSET = v3_tsc_host_offset(&info->time_state);
+    v3_svm_config_tsc_virtualization(info);
 
     //V3_Print("Calling v3_svm_launch\n");
+    {
+        uint64_t entry_tsc = 0;
+        uint64_t exit_tsc = 0;
+
+        rdtscll(entry_tsc);
 
+        v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[V3_Get_CPU()]);
+
+        rdtscll(exit_tsc);
+
+        guest_cycles = exit_tsc - entry_tsc;
+    }
 
-    v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[V3_Get_CPU()]);
 
     //V3_Print("SVM Returned: Exit Code: %x, guest_rip=%lx\n", (uint32_t)(guest_ctrl->exit_code), (unsigned long)guest_state->rip);
 
     v3_last_exit = (uint32_t)(guest_ctrl->exit_code);
 
-    // Immediate exit from VM time bookkeeping
-    v3_time_exit_vm(info);
+    v3_advance_time(info, &guest_cycles);
 
     info->num_exits++;
 
@@ -538,22 +694,30 @@ int v3_svm_enter(struct guest_info * info) {
     info->ctrl_regs.cr4 = guest_state->cr4;
     info->dbg_regs.dr6 = guest_state->dr6;
     info->dbg_regs.dr7 = guest_state->dr7;
-    info->ctrl_regs.cr8 = guest_ctrl->guest_ctrl.V_TPR;
+    //
+    // We do not track this anymore;
+    // V_TPR is ignored and we do the logic in the APIC
+    //info->ctrl_regs.cr8 = guest_ctrl->guest_ctrl.V_TPR;
+    //
     info->ctrl_regs.rflags = guest_state->rflags;
     info->ctrl_regs.efer = guest_state->efer;
 
+    /* Synchronize MSRs */
+    info->msrs.star = guest_state->star;
+    info->msrs.lstar = guest_state->lstar;
+    info->msrs.sfmask = guest_state->sfmask;
+    info->msrs.kern_gs_base = guest_state->KernelGsBase;
+
     v3_get_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments));
     info->cpu_mode = v3_get_vm_cpu_mode(info);
     info->mem_mode = v3_get_vm_mem_mode(info);
     /* ** */
 
-
     // save exit info here
     exit_code = guest_ctrl->exit_code;
     exit_info1 = guest_ctrl->exit_info1;
     exit_info2 = guest_ctrl->exit_info2;
 
-
 #ifdef V3_CONFIG_SYMCALL
     if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_exit_state(info);
@@ -562,20 +726,30 @@ int v3_svm_enter(struct guest_info * info) {
     update_irq_exit_state(info);
 #endif
 
-
     // reenable global interrupts after vm exit
     v3_stgi();
 
-    // Conditionally yield the CPU if the timeslice has expired
-    v3_yield_cond(info);
+    v3_yield_cond(info, -1);
 
+    // This timer update is for time-dependent handlers,
+    // used if we're slaved to host time
+    v3_advance_time(info, NULL);
+    v3_update_timers(info);
 
+    {
+        int ret = v3_handle_svm_exit(info, exit_code, exit_info1, exit_info2);
+
+        if (ret != 0) {
+            PrintError("Error in SVM exit handler (ret=%d)\n", ret);
+            PrintError("  last Exit was %d (exit code=0x%llx)\n", v3_last_exit, (uint64_t) exit_code);
+            return -1;
+        }
+    }
 
-    if (v3_handle_svm_exit(info, exit_code, exit_info1, exit_info2) != 0) {
-       PrintError("Error in SVM exit handler\n");
-       PrintError("  last exit was %d\n", v3_last_exit);
-       return -1;
+    if (info->timeouts.timeout_active) {
+        /* Check to see if any timeouts have expired */
+        v3_handle_timeouts(info, guest_cycles);
     }
 
 
@@ -591,16 +765,24 @@ int v3_start_svm_guest(struct guest_info * info) {
 
     if (info->vcpu_id == 0) {
        info->core_run_state = CORE_RUNNING;
-       info->vm_info->run_state = VM_RUNNING;
     } else {
        PrintDebug("SVM core %u (on %u): Waiting for core initialization\n", info->vcpu_id, info->pcpu_id);
 
        while (info->core_run_state == CORE_STOPPED) {
-           v3_yield(info);
+
+           if (info->vm_info->run_state == VM_STOPPED) {
+               // The VM was stopped before this core was initialized.
+               return 0;
+           }
+
+           v3_yield(info, -1);
            //PrintDebug("SVM core %u: still waiting for INIT\n", info->vcpu_id);
        }
 
        PrintDebug("SVM core %u(on %u) initialized\n", info->vcpu_id, info->pcpu_id);
+
+       // We'll be paranoid about race conditions here
+       v3_wait_at_barrier(info);
     }
 
     PrintDebug("SVM core %u(on %u): I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n",
@@ -660,6 +842,8 @@ int v3_start_svm_guest(struct guest_info * info) {
            break;
        }
 
+       v3_wait_at_barrier(info);
+
        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
 
@@ -667,9 +851,11 @@ int v3_start_svm_guest(struct guest_info * info) {
        }
 
+
        /*
-       if ((info->num_exits % 5000) == 0) {
+       if ((info->num_exits % 50000) == 0) {
            V3_Print("SVM Exit number %d\n", (uint32_t)info->num_exits);
+           v3_print_guest_state(info);
        }
        */
 
@@ -683,6 +869,31 @@ int v3_start_svm_guest(struct guest_info * info) {
 
 
 
+int v3_reset_svm_vm_core(struct guest_info * core, addr_t rip) {
+    // init vmcb_bios
+
+    // Write the RIP, CS, and descriptor;
+    // assume the rest is already good to go.
+    //
+    // vector VV -> rip at 0
+    //              CS = VV00
+    // This means we start executing at linear address VV000
+    //
+    // So the selector needs to be VV00
+    // and the base needs to be VV000
+    //
+    core->rip = 0;
+    core->segments.cs.selector = rip << 8;
+    core->segments.cs.limit = 0xffff;
+    core->segments.cs.base = rip << 12;
+
+    return 0;
+}
+
+
+
+
+
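A worked instance of the vector-to-CS mapping described in the comments above, using an illustrative vector of 0xE0; the helper is hypothetical and exists only to check the arithmetic.

#include <assert.h>

// vector 0xE0 -> CS.selector = 0xE000, CS.base = 0xE0000, RIP = 0,
// so the first fetch is at linear 0xE0000. In real mode,
// linear = base + RIP and base = selector << 4, which the shifts preserve.
static void check_reset_mapping(void) {
    unsigned long vv = 0xE0;             // hypothetical start vector
    unsigned long selector = vv << 8;    // 0xE000
    unsigned long base = vv << 12;       // 0xE0000

    assert(base == (selector << 4));     // real-mode consistency
    assert(base + 0 == 0xE0000UL);       // linear address of first fetch (RIP = 0)
}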
 /* Checks machine SVM capability */
 /* Implemented from: AMD Arch Manual 3, sect 15.4 */
 
@@ -732,11 +943,11 @@ int v3_is_svm_capable() {
 
 static int has_svm_nested_paging() {
     uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
-
+    
     v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
-
+    
     //PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);
-
+    
     if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        V3_Print("SVM Nested Paging not supported\n");
        return 0;
@@ -744,7 +955,8 @@ static int has_svm_nested_paging() {
        V3_Print("SVM Nested Paging supported\n");
        return 1;
     }
-}
+ }
+
 
 void v3_init_svm_cpu(int cpu_id) {
 
@@ -761,6 +973,11 @@ void v3_init_svm_cpu(int cpu_id) {
 
     // Setup the host state save area
     host_vmcbs[cpu_id] = (addr_t)V3_AllocPages(4);
+
+    if (!host_vmcbs[cpu_id]) {
+       PrintError("Failed to allocate VMCB\n");
+       return;
+    }
+
     /* 64-BIT-ISSUE */
     // msr.e_reg.high = 0;
     //msr.e_reg.low = (uint_t)host_vmcb;
 
@@ -1000,6 +1217,11 @@ void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
     ctrl_area->instrs.IOIO_PROT = 1;
 
     ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);
+
+    if (!ctrl_area->IOPM_BASE_PA) {
+       PrintError("Cannot allocate IO bitmap\n");
+       return;
+    }
 
     {
        reg_ex_t tmp_reg;
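For reference, a standalone user-space sketch of the nested-paging probe in has_svm_nested_paging() above. It assumes CPUID_SVM_REV_AND_FEATURE_IDS is leaf 0x8000000A, with the SVM revision in EAX[7:0] and the NP (nested paging) flag in EDX bit 0, per AMD APM vol. 3; a full probe would first check CPUID 0x80000001 ECX bit 2 (SVM) as v3_is_svm_capable() does. Builds with GCC on x86.

#include <stdio.h>
#include <stdint.h>

// Raw CPUID wrapper for the given leaf.
static void cpuid(uint32_t leaf, uint32_t * eax, uint32_t * ebx,
                  uint32_t * ecx, uint32_t * edx) {
    __asm__ __volatile__("cpuid"
                         : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx)
                         : "a"(leaf));
}

int main(void) {
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    cpuid(0x8000000A, &eax, &ebx, &ecx, &edx);

    printf("SVM revision %u, nested paging %s\n",
           eax & 0xff,
           (edx & 0x1) ? "supported" : "not supported");

    return 0;
}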