diff --git a/palacios/src/palacios/svm.c b/palacios/src/palacios/svm.c
index 980ce99..115e19c 100644
--- a/palacios/src/palacios/svm.c
+++ b/palacios/src/palacios/svm.c
@@ -35,6 +35,11 @@
 #include 
 #include 
+#include <palacios/vmm_barrier.h>
+
+#ifdef V3_CONFIG_CHECKPOINT
+#include <palacios/vmm_checkpoint.h>
+#endif
 
 #include 
 
@@ -80,6 +85,25 @@ static vmcb_t * Allocate_VMCB() {
 }
 
 
+static int v3_svm_handle_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data)
+{
+    int status;
+
+    // Call the arch-independent handler first
+    if ((status = v3_handle_efer_write(core, msr, src, priv_data)) != 0) {
+        return status;
+    }
+
+    // SVM-specific code
+    {
+        // Ensure that the hardware-visible EFER.SVME bit stays set (SVM Enable)
+        struct efer_64 * hw_efer = (struct efer_64 *)&(core->ctrl_regs.efer);
+        hw_efer->svme = 1;
+    }
+
+    return 0;
+}
+
 static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
     vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
 
@@ -220,7 +244,7 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
 
     v3_hook_msr(core->vm_info, EFER_MSR, 
 		&v3_handle_efer_read,
-		&v3_handle_efer_write, 
+		&v3_svm_handle_efer_write, 
 		core);
 
     if (core->shdw_pg_mode == SHADOW_PAGING) {
@@ -318,6 +342,68 @@ int v3_deinit_svm_vmcb(struct guest_info * core) {
 }
 
 
+#ifdef V3_CONFIG_CHECKPOINT
+int v3_svm_save_core(struct guest_info * core, void * ctx) {
+
+    v3_chkpt_save(ctx, "vmcb_data", PAGE_SIZE, core->vmm_data);
+
+    return 0;
+}
+
+int v3_svm_load_core(struct guest_info * core, void * chkpt_ctx) {
+    struct cr0_32 * shadow_cr0;
+    vmcb_saved_state_t * guest_state;
+    vmcb_ctrl_t * guest_ctrl;
+
+    if (v3_chkpt_load(chkpt_ctx, "vmcb_data", PAGE_SIZE, core->vmm_data) == -1) {
+        return -1;
+    }
+
+    guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t *)(core->vmm_data));
+    guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t *)(core->vmm_data));
+
+    core->rip = guest_state->rip;
+    core->vm_regs.rsp = guest_state->rsp;
+    core->vm_regs.rax = guest_state->rax;
+
+    core->cpl = guest_state->cpl;
+
+    core->ctrl_regs.cr0 = guest_state->cr0;
+    core->ctrl_regs.cr2 = guest_state->cr2;
+    core->ctrl_regs.cr4 = guest_state->cr4;
+    core->dbg_regs.dr6 = guest_state->dr6;
+    core->dbg_regs.dr7 = guest_state->dr7;
+    core->ctrl_regs.cr8 = guest_ctrl->guest_ctrl.V_TPR;
+    core->ctrl_regs.rflags = guest_state->rflags;
+    core->ctrl_regs.efer = guest_state->efer;
+
+    shadow_cr0 = (struct cr0_32 *)&(core->ctrl_regs.cr0);
+
+    if (core->shdw_pg_mode == SHADOW_PAGING) {
+        if (v3_get_vm_mem_mode(core) == VIRTUAL_MEM) {
+            if (v3_activate_shadow_pt(core) == -1) {
+                PrintError("Failed to activate shadow page tables\n");
+                return -1;
+            }
+        } else {
+            if (v3_activate_passthrough_pt(core) == -1) {
+                PrintError("Failed to activate passthrough page tables\n");
+                return -1;
+            }
+        }
+    }
+
+    v3_get_vmcb_segments((vmcb_t *)(core->vmm_data), &(core->segments));
+    return 0;
+}
+#endif
+
 static int update_irq_exit_state(struct guest_info * info) {
     vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
 
@@ -424,6 +510,18 @@ static int update_irq_entry_state(struct guest_info * info) {
 	    break;
 	case V3_SOFTWARE_INTR:
 	    guest_ctrl->EVENTINJ.type = SVM_INJECTION_SOFT_INTR;
+
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
+	    PrintDebug("Injecting software interrupt -- type: %d, vector: %d\n",
%d\n", + SVM_INJECTION_SOFT_INTR, info->intr_core_state.swintr_vector); +#endif + guest_ctrl->EVENTINJ.vector = info->intr_core_state.swintr_vector; + guest_ctrl->EVENTINJ.valid = 1; + + /* reset swintr state */ + info->intr_core_state.swintr_posted = 0; + info->intr_core_state.swintr_vector = 0; + break; case V3_VIRTUAL_IRQ: guest_ctrl->EVENTINJ.type = SVM_INJECTION_IRQ; @@ -462,7 +560,9 @@ int v3_svm_enter(struct guest_info * info) { // disable global interrupts for vm state transition v3_clgi(); - // Update timer devices prior to entering VM. + // Update timer devices after being in the VM, with interupts + // disabled, but before doing IRQ updates, so that any interrupts they + //raise get seen immediately. v3_update_timers(info); // Synchronize the guest state to the VMCB @@ -512,6 +612,7 @@ int v3_svm_enter(struct guest_info * info) { v3_time_enter_vm(info); guest_ctrl->TSC_OFFSET = v3_tsc_host_offset(&info->time_state); + //V3_Print("Calling v3_svm_launch\n"); v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[V3_Get_CPU()]); @@ -547,13 +648,11 @@ int v3_svm_enter(struct guest_info * info) { info->mem_mode = v3_get_vm_mem_mode(info); /* ** */ - // save exit info here exit_code = guest_ctrl->exit_code; exit_info1 = guest_ctrl->exit_info1; exit_info2 = guest_ctrl->exit_info2; - #ifdef V3_CONFIG_SYMCALL if (info->sym_core_state.symcall_state.sym_call_active == 0) { update_irq_exit_state(info); @@ -562,20 +661,20 @@ int v3_svm_enter(struct guest_info * info) { update_irq_exit_state(info); #endif - // reenable global interrupts after vm exit v3_stgi(); - // Conditionally yield the CPU if the timeslice has expired v3_yield_cond(info); - - - if (v3_handle_svm_exit(info, exit_code, exit_info1, exit_info2) != 0) { - PrintError("Error in SVM exit handler\n"); - PrintError(" last exit was %d\n", v3_last_exit); - return -1; + { + int ret = v3_handle_svm_exit(info, exit_code, exit_info1, exit_info2); + + if (ret != 0) { + PrintError("Error in SVM exit handler (ret=%d)\n", ret); + PrintError(" last Exit was %d (exit code=0x%llx)\n", v3_last_exit, (uint64_t) exit_code); + return -1; + } } @@ -601,6 +700,9 @@ int v3_start_svm_guest(struct guest_info * info) { } PrintDebug("SVM core %u(on %u) initialized\n", info->vcpu_id, info->pcpu_id); + + // We'll be paranoid about race conditions here + v3_wait_at_barrier(info); } PrintDebug("SVM core %u(on %u): I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n", @@ -660,6 +762,8 @@ int v3_start_svm_guest(struct guest_info * info) { break; } + v3_wait_at_barrier(info); + if (info->vm_info->run_state == VM_STOPPED) { info->core_run_state = CORE_STOPPED; @@ -667,9 +771,11 @@ int v3_start_svm_guest(struct guest_info * info) { } + /* - if ((info->num_exits % 5000) == 0) { + if ((info->num_exits % 50000) == 0) { V3_Print("SVM Exit number %d\n", (uint32_t)info->num_exits); + v3_print_guest_state(info); } */ @@ -683,6 +789,31 @@ int v3_start_svm_guest(struct guest_info * info) { +int v3_reset_svm_vm_core(struct guest_info * core, addr_t rip) { + // init vmcb_bios + + // Write the RIP, CS, and descriptor + // assume the rest is already good to go + // + // vector VV -> rip at 0 + // CS = VV00 + // This means we start executing at linear address VV000 + // + // So the selector needs to be VV00 + // and the base needs to be VV000 + // + core->rip = 0; + core->segments.cs.selector = rip << 8; + core->segments.cs.limit = 0xffff; + core->segments.cs.base = rip << 12; + + return 0; +} + + + + + /* Checks machine SVM capability 
 /* Implemented from: AMD Arch Manual 3, sect 15.4 */
 
@@ -732,11 +863,11 @@ int v3_is_svm_capable() {
 
 static int has_svm_nested_paging() {
     uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
-
+    
     v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
-
+    
     //PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);
-
+    
     if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
 	V3_Print("SVM Nested Paging not supported\n");
 	return 0;
@@ -744,7 +875,8 @@ static int has_svm_nested_paging() {
 	V3_Print("SVM Nested Paging supported\n");
 	return 1;
     }
-}
+ }
+
 
 void v3_init_svm_cpu(int cpu_id) {
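
The new v3_svm_handle_efer_write() hook exists because VMRUN requires EFER.SVME to stay set while any guest is running: even if the guest clears SVME in its own virtualized EFER, the hardware-visible copy must keep the bit on. Below is a minimal standalone sketch of that invariant, not part of the patch. The efer_bits struct is a hypothetical stand-in for Palacios' struct efer_64; it follows the architectural EFER layout (SVME is bit 12), and the bitfield ordering assumes a typical little-endian C compiler.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for Palacios' struct efer_64. */
struct efer_bits {
    uint64_t sce   : 1;   /* bit 0: syscall enable */
    uint64_t rsvd0 : 7;
    uint64_t lme   : 1;   /* bit 8: long mode enable */
    uint64_t rsvd1 : 1;
    uint64_t lma   : 1;   /* bit 10: long mode active */
    uint64_t nxe   : 1;   /* bit 11: no-execute enable */
    uint64_t svme  : 1;   /* bit 12: secure virtual machine enable */
    uint64_t rsvd2 : 51;
};

union efer {
    uint64_t value;
    struct efer_bits f;
};

int main(void) {
    /* Example guest write: LME | NXE set, SVME left clear (0x900). */
    union efer hw_efer = { .value = (1ull << 8) | (1ull << 11) };

    /* Mirror of the hook's SVM-specific step: after the generic EFER
       handler has applied the guest's value, force the hardware-visible
       SVME bit back on so VMRUN remains legal. */
    hw_efer.f.svme = 1;

    printf("hardware EFER = 0x%llx (svme=%u)\n",
           (unsigned long long)hw_efer.value, (unsigned)hw_efer.f.svme);
    return 0;
}

With the example value above, 0x900 becomes 0x1900 once SVME (bit 12) is forced on, which is exactly the effect of hw_efer->svme = 1 in the hook.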
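
v3_reset_svm_vm_core() encodes the standard real-mode startup convention spelled out in its comment: a startup vector VV becomes CS selector VV00 and CS base VV000, so with rip = 0 execution begins at linear address VV000 (the base is selector * 16). A small sketch of just that arithmetic follows; fake_cs is a hypothetical stand-in for core->segments.cs, and 0x9f is an arbitrary example vector.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the cs member of core->segments. */
struct fake_cs {
    uint16_t selector;
    uint32_t base;
    uint32_t limit;
};

int main(void) {
    uint64_t vector = 0x9f;   /* example startup vector VV */
    uint64_t rip = 0;         /* execution starts at offset 0 in the segment */
    struct fake_cs cs;

    cs.selector = vector << 8;   /* VV00 */
    cs.base     = vector << 12;  /* VV000 == selector * 16 */
    cs.limit    = 0xffff;

    /* Real-mode instruction fetch address: base + rip. */
    printf("selector=0x%04x base=0x%05x entry=0x%05llx\n",
           cs.selector, cs.base,
           (unsigned long long)(cs.base + rip));
    return 0;
}

For vector 0x9f this prints selector=0x9f00, base=0x9f000, entry=0x9f000, matching the VV00/VV000 relationship the function's comment describes.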