Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute

  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.
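
For example, a release branch can be tracked the same way. The branch name below is only illustrative; list the branches that actually exist first with "git branch -r"

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2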


ported 32 bit VMX launch code for nested entries

palacios/src/palacios/vmx_lowlevel.S
index 4d0e82c..757d835 100644
        mov     104(%rax), %r13;        \
        mov     112(%rax), %r14;        \
        mov     120(%rax), %r15;        \
-    pushq %rbx;              \
-    movq 56(%rax), %rbx;     \
-    movq %rbx, %rax;         \
-    popq %rbx;;
+       pushq %rbx;                     \
+       movq 56(%rax), %rbx;            \
+       movq %rbx, %rax;                \
+       popq %rbx;
 
-#define save_ctrl_regs(location)    \
+#define save_ctrl_regs(location)  \
     pushq %rax;              \
     pushq %rbx;              \
     movq location, %rax;     \
     pop %rbx;    \
     pop %rax;    
 
-.align 8
-.globl v3_vmx_exit_handler
-v3_vmx_exit_handler:
-    // the save_* argument is a macro expansion; it has to jump past any pushes in the macro
-    // stack: vm_regs ptr, ctrl_regs_ptr
-    // save registers macro stack: vm_regs ptr, ctrl_regs ptr, pushed rax
-    // save_ctrl_regs macro stack: vm_regs ptr, ctrl_regs_ptr, pushed rax, pushed rbx
-    // Both macros jump past 2 saved values to reach their pointers, so both are 16(rsp)
-    save_registers(16(%rsp));
-    save_ctrl_regs(16(%rsp));
-    addq $16, %rsp
-    POPA
-    popf
-    pushq %rdi
-    pushq %rsi
-    pushq %rdx
-    call v3_handle_vmx_exit
-
-    andq %rax, %rax
-    jnz .Lvmm_failure
-
-v3_vmx_vmresume:
-    pop %rdx
-    pop %rsi
-    pop %rdi
-    pushf
-    PUSHA
-    pushq %rdi
-    pushq %rdx
-    restore_ctrl_regs(%rdx);
+
+#define PRE_LAUNCH(return_target)      \
+    pushf;                             \
+    PUSHA;                             \
+    pushq %rdi;                                \
+    pushq %rdx;                                \
+                                       \
+    movq %rsp, %rax;                   \
+    movq $VMCS_HOST_RSP, %rbx;         \
+    vmwrite %rax, %rbx;                        \
+    jz .Lfail_valid;                   \
+    jc .Lfail_invalid;                 \
+                                       \
+    movq return_target, %rax;          \
+    movq $VMCS_HOST_RIP, %rbx;         \
+    vmwrite %rax, %rbx;                        \
+    jz .Lfail_valid;                   \
+    jc .Lfail_invalid;                 \
+                                       \
+    restore_ctrl_regs(%rdx);           \
     restore_registers(%rdi);
 
-    vmresume
 
-    jz .Lfail_valid
-    jc .Lfail_invalid
-    addq $16, %rsp
-    jmp .Lreturn
+
 
 .align 8
-.globl v3_vmx_vmlaunch
+.globl v3_vmx_resume
 // vm_regs = %rdi, guest_info * = %rsi, ctrl_regs = %rdx
-v3_vmx_vmlaunch:
-    pushf
-    PUSHA
-    pushq %rdi
-    pushq %rdx
-    
-    movq %rsp, %rax
-    movq $VMCS_HOST_RSP, %rbx
-    vmwrite %rax, %rbx
-    jz .Lfail_valid
-    jc .Lfail_invalid
+v3_vmx_resume:
 
-    movq $v3_vmx_exit_handler, %rax
-    movq $VMCS_HOST_RIP, %rbx
-    vmwrite %rax, %rbx
+    PRE_LAUNCH($vmx_resume_ret);
+
+    vmresume
+
+vmx_resume_ret:
     jz .Lfail_valid
     jc .Lfail_invalid
+    jmp .Lnormal_exit
 
-    restore_ctrl_regs(%rdx);
-    restore_registers(%rdi);
+
+.align 8
+.globl v3_vmx_launch
+// vm_regs = %rdi, guest_info * = %rsi, ctrl_regs = %rdx
+v3_vmx_launch:
+
+    PRE_LAUNCH($vmx_launch_ret);
 
     vmlaunch
+
+vmx_launch_ret:
     jz .Lfail_valid
     jc .Lfail_invalid
-    jmp .Lreturn
+    jmp .Lnormal_exit
+
+
+
 
 .Lfail_valid:
     addq $16, %rsp
@@ -196,13 +185,23 @@ v3_vmx_vmlaunch:
     movq $VMM_FAILURE, %rax
     jmp .Lreturn
 
+
+.Lnormal_exit:
+    save_registers(16(%rsp));
+    save_ctrl_regs(16(%rsp));
+    addq $16, %rsp
+    POPA
+    popf
+    xorq %rax, %rax
+    jmp .Lreturn
+
+
 .Lreturn:
-    sti
     ret
     
 #else
 
-#define save_resgisters(location)      \
+#define save_registers(location)       \
        pushl   %eax;                   \
        movl    location, %eax;         \
        movl    %edi, (%eax);           \
@@ -230,6 +229,25 @@ v3_vmx_vmlaunch:
        movl    48(%eax), %ecx;         \
        popl    %eax;
  
+
+#define save_ctrl_regs(location)  \
+    push %eax;              \
+    push %ebx;              \
+    movl location, %eax;     \
+    movl %cr2, %ebx;         \
+    movl %ebx, 8(%eax);      \
+    popl %ebx;               \
+    popl %eax
+
+#define restore_ctrl_regs(location)  \
+    push %eax;              \
+    push %ebx;              \
+    movl location, %eax;     \
+    movl 8(%eax), %ebx;      \
+    movl %ebx, %cr2;         \
+    popl %ebx;               \
+    popl %eax
+
 #define PUSHA    \
     push %eax;   \
     push %ebx;   \
@@ -248,82 +266,86 @@ v3_vmx_vmlaunch:
     pop %ebx;    \
     pop %eax;
 
-.align 8
-.globl v3_vmx_exit_handler
-v3_vmx_exit_handler:
-    save_registers(4(%esp))
-    addl $8, %esp
-    POPA
-    popf
-    pushl %edi
-    call v3_handle_vmx_exit
 
-    andl %eax, %eax
-    jnz .Lvmm_failure
+#define PRE_LAUNCH(return_target)      \
+    pushf;                             \
+    PUSHA;                             \
+    pushl %edi;                                \
+    pushl %edx;                                \
+                                       \
+    movl %esp, %eax;                   \
+    movl $VMCS_HOST_RSP, %ebx;         \
+    vmwrite %eax, %ebx;                        \
+    jz .Lfail_valid;                   \
+    jc .Lfail_invalid;                 \
+                                       \
+    movl return_target, %eax;          \
+    movl $VMCS_HOST_RIP, %ebx;         \
+    vmwrite %eax, %ebx;                        \
+    jz .Lfail_valid;                   \
+    jc .Lfail_invalid;                 \
+                                       \
+    restore_ctrl_regs(%edx);           \
+    restore_registers(%edi);
+
 
-v3_vmx_vmresume:
-    popl %edi
-    pushf
-    PUSHA
-    pushl %edi
-    restore_registers(%rdi)
+.align 4
+.globl v3_vmx_resume
+v3_vmx_resume:
+
+    PRE_LAUNCH($vmx_resume_ret);
 
     vmresume
 
-    addl $8, %esp
+vmx_resume_ret:
     jz .Lfail_valid
     jc .Lfail_invalid
-    jmp .Lreturn
+    jmp .Lnormal_exit
 
-.align 8
-.globl v3_vmx_vmlaunch
+.align 4
+.globl v3_vmx_launch
 // vm_regs = %edi
-v3_vmx_vmlaunch:
-    cli
-    pushf
-    PUSHA
-    pushl %edi
-
-    movl %esp, %eax
-    movl $VMCS_HOST_RSP, %ebx
-    vmwrite %eax, %ebx
-    jz .Lfail_valid
-    jc .Lfail_invalid
-
-    movl $v3_vmx_exit_handler, %eax
-    movl $VMCS_HOST_RIP, %ebx
-    vmwrite %eax, %ebx
-    jz .Lfail_valid
-    jc .Lfail_invalid
+v3_vmx_launch:
 
-    restore_registers(%edi)
+    PRE_LAUNCH($vmx_launch_ret);
 
     vmlaunch
+
+vmx_launch_ret:
     jz .Lfail_valid
     jc .Lfail_invalid
-    jmp .Lreturn
+    jmp .Lnormal_exit
 
 .Lfail_valid:
     addl $8, %esp
     POPA
+    popf
     movl $VMX_FAIL_VALID, %eax
     jmp .Lreturn
 
 .Lfail_invalid:
-    addq $8, %esp
+    addl $8, %esp
     POPA
-    movl $MVX_FAIL_INVALID, %eax
+    popf
+    movl $VMX_FAIL_INVALID, %eax
     jmp .Lreturn
 
 .Lvmm_failure:
-    addq $8, %esp
-    POPA
+    addl $12, %esp
     movl $VMM_FAILURE, %eax
     jmp .Lreturn
 
-.Lreturn:
-    sti
+
+.Lnormal_exit:
+    save_registers(8(%esp));
+    save_ctrl_regs(8(%esp));
+    addl $8, %esp
+    POPA
     popf
+    xorl %eax, %eax
+    jmp .Lreturn
+
+.Lreturn:
     ret
 
 #endif
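
For reference, the 64-bit entry points above take their arguments in %rdi, %rsi, and %rdx, as the comment in the diff notes (vm_regs, guest_info *, ctrl_regs). A minimal sketch of the C declarations this implies is shown below; the struct names are assumptions drawn from the wider Palacios codebase, not from this file, and the return values follow the .Lnormal_exit and .Lfail_* paths above.

    /* Sketch only: prototypes implied by the register comments in the diff.
     * The struct names are assumed, not defined in vmx_lowlevel.S itself. */
    struct v3_gprs;
    struct guest_info;
    struct v3_ctrl_regs;

    /* Return 0 on a normal VM exit (%rax/%eax is zeroed in .Lnormal_exit),
     * or an error code (VMX_FAIL_VALID / VMX_FAIL_INVALID from the fail
     * paths above). */
    int v3_vmx_launch(struct v3_gprs * vm_regs, struct guest_info * info,
                      struct v3_ctrl_regs * ctrl_regs);
    int v3_vmx_resume(struct v3_gprs * vm_regs, struct guest_info * info,
                      struct v3_ctrl_regs * ctrl_regs);

Splitting the old v3_vmx_vmlaunch entry into separate v3_vmx_launch and v3_vmx_resume paths lets each one write its own return label into VMCS_HOST_RIP through the shared PRE_LAUNCH macro, which appears to be what the "nested entries" in the commit message refers to.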