From: Jack Lange
Date: Sat, 13 Jun 2009 00:16:51 +0000 (-0500)
Subject: fixed crash issue due to improperly saving host state
X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?p=palacios.git;a=commitdiff_plain;h=6ce5a308d167ac392c0f0ca66811f51580358191

fixed crash issue due to improperly saving host state
---

diff --git a/palacios/src/palacios/svm.c b/palacios/src/palacios/svm.c
index 9434e49..76a57dd 100644
--- a/palacios/src/palacios/svm.c
+++ b/palacios/src/palacios/svm.c
@@ -45,10 +45,14 @@
 #include 
 
 
+
+// This is a global pointer to the host's VMCB
+static void * host_vmcb = NULL;
+
 extern void v3_stgi();
 extern void v3_clgi();
 //extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
-extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs);
+extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_vmcb);
 
 
 static vmcb_t * Allocate_VMCB() {
@@ -281,86 +285,32 @@ static int start_svm_guest(struct guest_info *info) {
     ullong_t tmp_tsc;
-
-#ifdef __V3_64BIT__
-
-#define MSR_LSTAR       0xc0000082
-#define MSR_CSTAR       0xc0000083
-#define MSR_SF_MASK     0xc0000084
-#define MSR_GS_BASE     0xc0000101
-#define MSR_KERNGS_BASE 0xc0000102
-    struct v3_msr host_cstar;
-    struct v3_msr host_lstar;
-    struct v3_msr host_syscall_mask;
-    struct v3_msr host_gs_base;
-    struct v3_msr host_kerngs_base;
-
-#else
-
-#define MSR_SYSENTER_CS  0x00000174
-#define MSR_SYSENTER_ESP 0x00000175
-#define MSR_SYSENTER_EIP 0x00000176
-
-    struct v3_msr host_sysenter_cs;
-    struct v3_msr host_sysenter_esp;
-    struct v3_msr host_sysenter_eip;
-
-#endif
-
-#define MSR_STAR         0xc0000081
-    struct v3_msr host_star;
-
-
     /*
       PrintDebug("SVM Entry to CS=%p rip=%p...\n",
                  (void *)(addr_t)info->segments.cs.base,
                  (void *)(addr_t)info->rip);
     */
 
+    // disable global interrupts for vm state transition
+    v3_clgi();
+
-#ifdef __V3_64BIT__
-    v3_get_msr(MSR_SF_MASK, &(host_syscall_mask.hi), &(host_syscall_mask.lo));
-    v3_get_msr(MSR_LSTAR, &(host_lstar.hi), &(host_lstar.lo));
-    v3_get_msr(MSR_CSTAR, &(host_cstar.hi), &(host_cstar.lo));
-    v3_get_msr(MSR_GS_BASE, &(host_gs_base.hi), &(host_gs_base.lo));
-    v3_get_msr(MSR_KERNGS_BASE, &(host_kerngs_base.hi), &(host_kerngs_base.lo));
-#else
-    v3_get_msr(MSR_SYSENTER_CS, &(host_sysenter_cs.hi), &(host_sysenter_cs.lo));
-    v3_get_msr(MSR_SYSENTER_ESP, &(host_sysenter_esp.hi), &(host_sysenter_esp.lo));
-    v3_get_msr(MSR_SYSENTER_EIP, &(host_sysenter_eip.hi), &(host_sysenter_eip.lo));
-#endif
-    v3_get_msr(MSR_STAR, &(host_star.hi), &(host_star.lo));
 
     rdtscll(info->time_state.cached_host_tsc);
 
     //    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;
 
-    v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs));
+    v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcb);
 
     rdtscll(tmp_tsc);
 
-
-#ifdef __V3_64BIT__
-    v3_set_msr(MSR_SF_MASK, host_syscall_mask.hi, host_syscall_mask.lo);
-    v3_set_msr(MSR_LSTAR, host_lstar.hi, host_lstar.lo);
-    v3_set_msr(MSR_CSTAR, host_cstar.hi, host_cstar.lo);
-    v3_set_msr(MSR_GS_BASE, host_gs_base.hi, host_gs_base.lo);
-    v3_set_msr(MSR_KERNGS_BASE, host_kerngs_base.hi, host_kerngs_base.lo);
-#else
-    v3_set_msr(MSR_SYSENTER_CS, host_sysenter_cs.hi, host_sysenter_cs.lo);
-    v3_set_msr(MSR_SYSENTER_ESP, host_sysenter_esp.hi, host_sysenter_esp.lo);
-    v3_set_msr(MSR_SYSENTER_EIP, host_sysenter_eip.hi, host_sysenter_eip.lo);
-#endif
-    v3_set_msr(MSR_STAR, host_star.hi, host_star.lo);
 
     //PrintDebug("SVM Returned\n");
-
+    // reenable global interrupts after vm exit
+    v3_stgi();
 
     v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
     num_exits++;
-
-    //PrintDebug("Turning on global interrupts\n");
-    v3_stgi();
-    v3_clgi();
 
     if ((num_exits % 5000) == 0) {
       PrintDebug("SVM Exit number %d\n", num_exits);
@@ -370,7 +320,6 @@ static int start_svm_guest(struct guest_info *info) {
 
     }
 
-
    if (v3_handle_svm_exit(info) != 0) {
      vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
@@ -487,7 +436,6 @@ static int has_svm_nested_paging() {
 
 void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
   reg_ex_t msr;
-  void * host_state;
   extern v3_cpu_arch_t v3_cpu_type;
 
   // Enable SVM on the CPU
@@ -499,15 +447,15 @@ void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
 
 
   // Setup the host state save area
-  host_state = V3_AllocPages(4);
+  host_vmcb = V3_AllocPages(4);
 
   /* 64-BIT-ISSUE */
   //  msr.e_reg.high = 0;
-  //msr.e_reg.low = (uint_t)host_state;
-  msr.r_reg = (addr_t)host_state;
+  //msr.e_reg.low = (uint_t)host_vmcb;
+  msr.r_reg = (addr_t)host_vmcb;
 
-  PrintDebug("Host State being saved at %p\n", (void *)(addr_t)host_state);
+  PrintDebug("Host State being saved at %p\n", (void *)(addr_t)host_vmcb);
   v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);
 
   if (has_svm_nested_paging() == 1) {
diff --git a/palacios/src/palacios/svm_lowlevel.S b/palacios/src/palacios/svm_lowlevel.S
index 975fe98..ac11b36 100644
--- a/palacios/src/palacios/svm_lowlevel.S
+++ b/palacios/src/palacios/svm_lowlevel.S
@@ -18,6 +18,9 @@
 #define clgi   .byte 0x0F,0x01,0xDD
 
 
+#define SVM_VM_HSAVE_PA_MSR .dword 0xc0010117
+
+
 #ifdef __V3_32BIT__
 
 #define Save_SVM_Registers(location) \
@@ -49,14 +52,17 @@
     popl    %eax;
 
 
+// 32 bit GCC passes arguments via stack
+
 v3_svm_launch:
     push    %ebp;
     movl    %esp, %ebp;
     pushf;
-    push    %fs;
-    push    %gs;
     pusha;
 
+    movl    16(%ebp), %eax;
+    vmsave;
+
     pushl   12(%ebp);
     pushl   8(%ebp);
 
@@ -71,9 +77,11 @@ v3_svm_launch:
     addl    $4, %esp;
 
+
+    movl    16(%ebp), %eax;
+    vmload;
+
     popa;
-    pop     %gs;
-    pop     %fs;
     popf;
     pop     %ebp;
     ret
@@ -156,29 +164,19 @@ v3_svm_launch:
     popq    %rbx;       \
     popq    %rbp;
 
+
+// Note that this is only for 64 bit GCC, 32 bit GCC passes via stack
 // VMCB => RDI
 // vm_regs => RSI
-// ptr to fs => RDX
-// ptr to gs => RCX
+// HOST VMCB => RDX
 v3_svm_launch:
     pushf;
-    push    %fs;
-    push    %gs;
     PUSHA
-
-
-
-//  pushq   %rdx   // fs
-//  pushq   %rcx   // gs
-
-
-//  pushq   (%rdx)
-//  pop     %fs
-//  pushq   (%rcx)
-//  pop     %gs
-
-
+
+    pushq   %rdx;
+    movq    %rdx, %rax;
+    vmsave;
 
     pushq   %rsi
 
@@ -195,21 +193,11 @@ v3_svm_launch:
 
     addq    $8, %rsp
 
-//  popq    %rcx
-//  popq    %rdx
-
-//  push    %fs
-//  popq    %rax
-//  movq    %rax, (%rdx)
-
-//  push    %gs
-//  popq    %rax
-//  movq    %rax, (%rcx)
+    popq    %rax;
+    vmload;
 
     POPA
-    pop     %gs;
-    pop     %fs;
     popf;
     ret
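
In short, the patch stops hand-saving individual host MSRs around VMRUN. It
allocates one host save area, publishes its address through the
VM_HSAVE_PA MSR (0xc0010117), and passes the same pointer into the assembly
stub, which now issues VMSAVE before VMRUN and VMLOAD after #VMEXIT, all
inside a CLGI/STGI window. The C sketch below only illustrates that flow; it
is not Palacios code, and alloc_page(), virt_to_phys(), write_msr(), clgi(),
stgi(), and svm_launch_asm() are hypothetical stand-ins for the corresponding
V3_*/v3_* routines.

/*
 * Minimal sketch (illustration only) of the host-state scheme this patch
 * switches to: one page-aligned save area shared by the MSR setup and the
 * low-level launch stub.
 */
#include <stdint.h>

#define SVM_VM_HSAVE_PA_MSR 0xc0010117   /* holds PA of the host save area */

extern void *   alloc_page(void);                      /* assumed: page-aligned allocator */
extern uint64_t virt_to_phys(void * va);               /* assumed: VA -> PA translation   */
extern void     write_msr(uint32_t msr, uint64_t val); /* assumed: WRMSR wrapper          */
extern void     clgi(void);                            /* assumed: wraps CLGI             */
extern void     stgi(void);                            /* assumed: wraps STGI             */
/* assumed: asm stub doing VMSAVE(host), VMRUN(guest), then VMLOAD(host) */
extern int      svm_launch_asm(uint64_t guest_vmcb_pa, void * gprs, uint64_t host_save_pa);

static uint64_t host_save_pa;   /* shared by the init and launch paths */

void svm_host_state_init(void)
{
    host_save_pa = virt_to_phys(alloc_page());
    /* Tell the CPU where VMRUN / #VMEXIT should spill and reload host state. */
    write_msr(SVM_VM_HSAVE_PA_MSR, host_save_pa);
}

int svm_run_guest(uint64_t guest_vmcb_pa, void * gprs)
{
    int ret;

    clgi();                 /* keep interrupts off across the world switch */
    ret = svm_launch_asm(guest_vmcb_pa, gprs, host_save_pa);
    stgi();                 /* host MSRs and segment state were already
                               restored by VMLOAD inside the stub */
    return ret;
}

The upside of this arrangement is that VMSAVE/VMLOAD cover the full
architected host set (FS/GS/TR/LDTR plus the syscall and SYSENTER MSRs)
rather than only the MSRs the old code listed explicitly, which is presumably
the "improperly saved host state" the commit message refers to.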