movq %rdx, 40(%rax); \
movq %rcx, 48(%rax); \
pushq %rbx; \
- movq 16(%rsp), %rbx; \
+ movq 8(%rsp), %rbx; \
movq %rbx, 56(%rax); \
popq %rbx; \
\
movq %r15, 120(%rax); \
popq %rax;
-
#define restore_registers(location) \
- push %rax; \
mov location, %rax; \
mov (%rax), %rdi; \
mov 8(%rax), %rsi; \
mov 16(%rax), %rbp; \
mov 32(%rax), %rbx; \
mov 40(%rax), %rdx; \
- mov 48(%rax), %rcx; \
+ mov 48(%rax), %rcx; \
\
mov 64(%rax), %r8; \
mov 72(%rax), %r9; \
mov 104(%rax), %r13; \
mov 112(%rax), %r14; \
mov 120(%rax), %r15; \
- pop %rax;
-
-
+ pushq %rbx; \
+ movq 56(%rax), %rbx; \
+ movq %rbx, %rax; \
+ popq %rbx;
+
+#define save_ctrl_regs(location) \
+ pushq %rax; \
+ pushq %rbx; \
+ movq location, %rax; \
+ movq %cr2, %rbx; \
+ movq %rbx, 8(%rax); \
+ popq %rbx; \
+ popq %rax
+
+#define restore_ctrl_regs(location) \
+ pushq %rax; \
+ pushq %rbx; \
+ movq location, %rax; \
+ movq 8(%rax), %rbx; \
+ movq %rbx, %cr2; \
+ popq %rbx; \
+ popq %rax
#define PUSHA \
push %rax; \
pop %rbx; \
pop %rax;
-.align 8
-.globl v3_vmx_exit_handler
-v3_vmx_exit_handler:
- save_registers(8(%rsp));
- addq $8, %rsp
- POPA
- popf
- pushq %rdi
- pushq %rsi
- call v3_handle_vmx_exit
-
- andq %rax, %rax
- jnz .Lvmm_failure
-
-v3_vmx_vmresume:
- pop %rsi
- pop %rdi
- pushf
- PUSHA
- pushq %rdi
+
+#define PRE_LAUNCH(return_target) \
+ pushf; \
+ PUSHA; \
+ pushq %rdi; \
+ pushq %rdx; \
+ \
+ movq %rsp, %rax; \
+ movq $VMCS_HOST_RSP, %rbx; \
+ vmwrite %rax, %rbx; \
+ jz .Lfail_valid; \
+ jc .Lfail_invalid; \
+ \
+ movq return_target, %rax; \
+ movq $VMCS_HOST_RIP, %rbx; \
+ vmwrite %rax, %rbx; \
+ jz .Lfail_valid; \
+ jc .Lfail_invalid; \
+ \
+ restore_ctrl_regs(%rdx); \
restore_registers(%rdi);
- vmresume
- jz .Lfail_valid
- jc .Lfail_invalid
- addq $8, %rsp
- jmp .Lreturn
+
.align 8
-.globl v3_vmx_vmlaunch
-// vm_regs = %rdi
-v3_vmx_vmlaunch:
- cli
- pushf
- PUSHA
- pushq %rdi
-
- movq %rsp, %rax
- movq $VMCS_HOST_RSP, %rbx
- vmwrite %rax, %rbx
- jz .Lfail_valid
- jc .Lfail_invalid
+.globl v3_vmx_resume
+// vm_regs = %rdi, guest_info * = %rsi, ctrl_regs = %rdx
+v3_vmx_resume:
+
+ PRE_LAUNCH($vmx_resume_ret);
- movq $v3_vmx_exit_handler, %rax
- movq $VMCS_HOST_RIP, %rbx
- vmwrite %rax, %rbx
+ vmresume
+
+vmx_resume_ret:
jz .Lfail_valid
jc .Lfail_invalid
+ jmp .Lnormal_exit
- restore_registers(%rdi);
+
+.align 8
+.globl v3_vmx_launch
+// vm_regs = %rdi, guest_info * = %rsi, ctrl_regs = %rdx
+v3_vmx_launch:
+
+ PRE_LAUNCH($vmx_launch_ret);
vmlaunch
+
+vmx_launch_ret:
jz .Lfail_valid
jc .Lfail_invalid
- jmp .Lreturn
+ jmp .Lnormal_exit
+
+
+
.Lfail_valid:
- addq $8, %rsp
+ addq $16, %rsp
POPA
popf
movq $VMX_FAIL_VALID, %rax
jmp .Lreturn
.Lfail_invalid:
- addq $8, %rsp
+ addq $16, %rsp
POPA
popf
movq $VMX_FAIL_INVALID, %rax
jmp .Lreturn
.Lvmm_failure:
- addq $16, %rsp
+ addq $24, %rsp
movq $VMM_FAILURE, %rax
jmp .Lreturn
+
+.Lnormal_exit:
+ save_registers(16(%rsp));
+ save_ctrl_regs(16(%rsp));
+ addq $16, %rsp
+ POPA
+ popf
+ xorq %rax, %rax
+ jmp .Lreturn
+
+
.Lreturn:
- sti
ret
#else
-#define save_resgisters(location) \
+#define save_registers(location) \
pushl %eax; \
movl location, %eax; \
movl %edi, (%eax); \
movl 48(%eax), %ecx; \
popl %eax;
+
+#define save_ctrl_regs(location) \
+ push %eax; \
+ push %ebx; \
+ movl location, %eax; \
+ movl %cr2, %ebx; \
+ movl %ebx, 8(%eax); \
+ popl %ebx; \
+ popl %eax
+
+#define restore_ctrl_regs(location) \
+ push %eax; \
+ push %ebx; \
+ movl location, %eax; \
+ movl 8(%eax), %ebx; \
+ movl %ebx, %cr2; \
+ popl %ebx; \
+ popl %eax
+
#define PUSHA \
push %eax; \
push %ebx; \
pop %ebx; \
pop %eax;
-.align 8
-.globl v3_vmx_exit_handler
-v3_vmx_exit_handler:
- save_registers(4(%esp))
- addl $8, %esp
- POPA
- popf
- pushl %edi
- call v3_handle_vmx_exit
- andl %eax, %eax
- jnz .Lvmm_failure
+#define PRE_LAUNCH(return_target) \
+ push %ebp; \
+ movl %esp, %ebp; \
+ pushf; \
+ PUSHA; \
+ \
+ movl 8(%ebp), %edi; \
+ movl 12(%ebp), %esi; \
+    movl 16(%ebp), %edx;	\
+    pushl %edi;		\
+    pushl %edx;		\
+    \
+ movl %esp, %eax; \
+ movl $VMCS_HOST_RSP, %ebx; \
+ vmwrite %eax, %ebx; \
+ jz .Lfail_valid; \
+ jc .Lfail_invalid; \
+ \
+ movl return_target, %eax; \
+ movl $VMCS_HOST_RIP, %ebx; \
+ vmwrite %eax, %ebx; \
+ jz .Lfail_valid; \
+ jc .Lfail_invalid; \
+ \
+ restore_ctrl_regs(%edx); \
+ restore_registers(%edi);
+
+
+// 32 bit GCC passes arguments via stack
-v3_vmx_vmresume:
- popl %edi
- pushf
- PUSHA
- pushl %edi
- restore_registers(%rdi)
+.align 4
+.globl v3_vmx_resume
+v3_vmx_resume:
+
+ PRE_LAUNCH($vmx_resume_ret);
vmresume
- addl $8, %esp
+vmx_resume_ret:
jz .Lfail_valid
jc .Lfail_invalid
- jmp .Lreturn
+ jmp .Lnormal_exit
-.align 8
-.globl v3_vmx_vmlaunch
+.align 4
+.globl v3_vmx_launch
-// vm_regs = %edi
+// vm_regs, guest_info *, and ctrl_regs are passed on the stack (32-bit cdecl)
-v3_vmx_vmlaunch:
- cli
- pushf
- PUSHA
- pushl %edi
-
- movl %esp, %eax
- movl $VMCS_HOST_RSP, %ebx
- vmwrite %eax, %ebx
- jz .Lfail_valid
- jc .Lfail_invalid
-
- movl $v3_vmx_exit_handler, %eax
- movl $VMCS_HOST_RIP, %ebx
- vmwrite %eax, %ebx
- jz .Lfail_valid
- jc .Lfail_invalid
+v3_vmx_launch:
- restore_registers(%edi)
+ PRE_LAUNCH($vmx_launch_ret);
vmlaunch
+
+vmx_launch_ret:
jz .Lfail_valid
jc .Lfail_invalid
- jmp .Lreturn
+ jmp .Lnormal_exit
.Lfail_valid:
addl $8, %esp
POPA
+ popf
movl $VMX_FAIL_VALID, %eax
jmp .Lreturn
.Lfail_invalid:
- addq $8, %esp
+ addl $8, %esp
POPA
- movl $MVX_FAIL_INVALID, %eax
+ popf
+ movl $VMX_FAIL_INVALID, %eax
jmp .Lreturn
.Lvmm_failure:
- addq $8, %esp
- POPA
+ addl $12, %esp
movl $VMM_FAILURE, %eax
jmp .Lreturn
-.Lreturn:
- sti
+
+.Lnormal_exit:
+ save_registers(8(%esp));
+ save_ctrl_regs(8(%esp));
+ addl $8, %esp
+ POPA
popf
+ xorl %eax, %eax
+ jmp .Lreturn
+
+.Lreturn:
+ pop %ebp;
ret
#endif