+#; -*- fundamental -*-
#define VMX_SUCCESS 0
#define VMX_FAIL_INVALID 1
#define VMX_FAIL_VALID 2
+#define VMM_FAILURE 3
#define VMCS_HOST_RSP 0x00006C14
+#define VMCS_HOST_RIP 0x00006C16
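+
+// VMX status convention (Intel SDM): a failed VMX instruction sets ZF when a
+// current VMCS exists (VMfailValid, error code in the VMCS) and CF when none
+// does (VMfailInvalid); hence the jz/jc pairs below. 0x6C14 and 0x6C16 are
+// the VMCS field encodings for the host RSP/RIP loaded on every VM exit.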
#if defined(__V3_64BIT__)
-#define r(reg) %r##reg
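+// save_registers dumps the guest GPRs into the register block at `location`
+// (presumably struct v3_gprs), one 8-byte slot per register: rdi, rsi, rbp,
+// a zeroed rsp slot (guest rsp lives in the VMCS), rbx, rdx, rcx, rax, r8-r15.
+// rax is used as the pointer, so its guest value is recovered from the stack
+// slot the macro pushed it into.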
+#define save_registers(location) \
+ pushq %rax; \
+ movq location, %rax; \
+ movq %rdi, (%rax); \
+ movq %rsi, 8(%rax); \
+ movq %rbp, 16(%rax); \
+ movq $0, 24(%rax); \
+ movq %rbx, 32(%rax); \
+ movq %rdx, 40(%rax); \
+ movq %rcx, 48(%rax); \
+ pushq %rbx; \
+ movq 8(%rsp), %rbx; \
+ movq %rbx, 56(%rax); \
+ popq %rbx; \
+ \
+ movq %r8, 64(%rax); \
+ movq %r9, 72(%rax); \
+ movq %r10, 80(%rax); \
+ movq %r11, 88(%rax); \
+ movq %r12, 96(%rax); \
+ movq %r13, 104(%rax); \
+ movq %r14, 112(%rax); \
+ movq %r15, 120(%rax); \
+ popq %rax;
+
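+// restore_registers is the inverse: it reloads every GPR except rsp/rip,
+// which the VMCS supplies. rax is loaded last, via rbx, because rax holds
+// the struct pointer until the end.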
+#define restore_registers(location) \
+ mov location, %rax; \
+ mov (%rax), %rdi; \
+ mov 8(%rax), %rsi; \
+ mov 16(%rax), %rbp; \
+ mov 32(%rax), %rbx; \
+ mov 40(%rax), %rdx; \
+ mov 48(%rax), %rcx; \
+ \
+ mov 64(%rax), %r8; \
+ mov 72(%rax), %r9; \
+ mov 80(%rax), %r10; \
+ mov 88(%rax), %r11; \
+ mov 96(%rax), %r12; \
+ mov 104(%rax), %r13; \
+ mov 112(%rax), %r14; \
+ mov 120(%rax), %r15; \
+ pushq %rbx; \
+ movq 56(%rax), %rbx; \
+ movq %rbx, %rax; \
+	popq %rbx;
+
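+// Only CR2 is saved/restored by hand: the guest can change it at any time,
+// while CR0/CR3/CR4 are switched through VMCS guest/host state fields.
+// 8(%rax) is assumed to be cr2's offset in the ctrl-regs struct.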
+#define save_ctrl_regs(location) \
+ pushq %rax; \
+ pushq %rbx; \
+ movq location, %rax; \
+ movq %cr2, %rbx; \
+ movq %rbx, 8(%rax); \
+ popq %rbx; \
+ popq %rax
+
+#define restore_ctrl_regs(location) \
+ pushq %rax; \
+ pushq %rbx; \
+ movq location, %rax; \
+ movq 8(%rax), %rbx; \
+ movq %rbx, %cr2; \
+ popq %rbx; \
+ popq %rax
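+
+// PUSHA/POPA save and restore the host GPRs (minus rsp, which the VMCS
+// HOST_RSP field covers) around guest execution. They must include
+// rdi/rsi/rdx so the original function arguments survive into the exit path.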
#define PUSHA \
    push %rax; push %rbx; push %rcx; push %rdx; push %rbp; \
    push %rdi; push %rsi; push %r8;  push %r9;  push %r10; \
    push %r11; push %r12; push %r13; push %r14; push %r15;

#define POPA \
    pop %r15; pop %r14; pop %r13; pop %r12; pop %r11; \
    pop %r10; pop %r9;  pop %r8;  pop %rsi; pop %rdi; \
    pop %rbp; pop %rdx; pop %rcx; pop %rbx; pop %rax;
+
+.align 8
+.globl v3_vmx_exit_handler
+v3_vmx_exit_handler:
+	// The location argument is expanded inside each macro, after its own
+	// pushes, so the offset has to skip those saves as well.
+	// Stack on VM exit (top first): ctrl_regs ptr, vm_regs ptr
+	// In save_registers:  saved rax, ctrl_regs ptr, vm_regs ptr  -> vm_regs   at 16(%rsp)
+	// In save_ctrl_regs:  saved rbx, saved rax, ctrl_regs ptr    -> ctrl_regs at 16(%rsp)
+ save_registers(16(%rsp));
+ save_ctrl_regs(16(%rsp));
+ addq $16, %rsp
+ POPA
+ popf
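+	// System V ABI: after POPA the three arguments (vm_regs, guest_info,
+	// ctrl_regs) are back in rdi/rsi/rdx; the pushes below only preserve
+	// them across the call so v3_vmx_vmresume can pop them again.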
+ pushq %rdi
+ pushq %rsi
+ pushq %rdx
+ call v3_handle_vmx_exit
+
+ andq %rax, %rax
+ jnz .Lvmm_failure
+
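+// Re-enter the guest: pop the three saved pointers, rebuild the same host
+// frame vmlaunch set up (flags, GPRs, the two struct pointers), reload the
+// guest state, and vmresume.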
+v3_vmx_vmresume:
+ pop %rdx
+ pop %rsi
+ pop %rdi
+ pushf
+ PUSHA
+ pushq %rdi
+ pushq %rdx
+ restore_ctrl_regs(%rdx);
+ restore_registers(%rdi);
+
+ vmresume
+
+ jz .Lfail_valid
+ jc .Lfail_invalid
+ addq $16, %rsp
+ jmp .Lreturn
+
+.align 8
+.globl v3_vmx_vmlaunch
+// vm_regs = %rdi, guest_info * = %rsi, ctrl_regs = %rdx
+v3_vmx_vmlaunch:
+ pushf
+ PUSHA
+ pushq %rdi
+ pushq %rdx
+
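+	// Record the current stack pointer and the exit handler's address in the
+	// VMCS, so every VM exit resumes at v3_vmx_exit_handler with this exact
+	// frame on the stack.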
+ movq %rsp, %rax
+ movq $VMCS_HOST_RSP, %rbx
+ vmwrite %rax, %rbx
+ jz .Lfail_valid
+ jc .Lfail_invalid
+
+ movq $v3_vmx_exit_handler, %rax
+ movq $VMCS_HOST_RIP, %rbx
+ vmwrite %rax, %rbx
+ jz .Lfail_valid
+ jc .Lfail_invalid
+
+ restore_ctrl_regs(%rdx);
+ restore_registers(%rdi);
+
+ vmlaunch
+ jz .Lfail_valid
+ jc .Lfail_invalid
+ jmp .Lreturn
+
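+// Failure paths: discard the two pushed pointers, restore the host GPRs and
+// flags, and return the matching VMX_FAIL_* code in rax.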
+.Lfail_valid:
+ addq $16, %rsp
+ POPA
+ popf
+ movq $VMX_FAIL_VALID, %rax
+ jmp .Lreturn
+
+.Lfail_invalid:
+ addq $16, %rsp
+ POPA
+ popf
+ movq $VMX_FAIL_INVALID, %rax
+ jmp .Lreturn
+
+.Lvmm_failure:
+ addq $24, %rsp
+ movq $VMM_FAILURE, %rax
+ jmp .Lreturn
+
+.Lreturn:
+ sti
+ ret
+
#else
-#define r(reg) %e##reg
-
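+// 32-bit variants: same register-block layout (8-byte stride) as above, but
+// only the low GPRs exist and this path does no ctrl-regs handling.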
+#define save_registers(location) \
+ pushl %eax; \
+ movl location, %eax; \
+ movl %edi, (%eax); \
+ movl %esi, 8(%eax); \
+ movl %ebp, 16(%eax); \
+ movl $0, 24(%eax); \
+ movl %ebx, 32(%eax); \
+ movl %edx, 40(%eax); \
+ movl %ecx, 48(%eax); \
+ pushl %ebx; \
+ movl 8(%esp), %ebx; \
+ movl %ebx, 56(%eax); \
+ popl %ebx; \
+ popl %eax;
+
+
+#define restore_registers(location) \
+ pushl %eax; \
+ movl location, %eax; \
+ movl (%eax), %edi; \
+ movl 8(%eax), %esi; \
+ movl 16(%eax), %ebp; \
+ movl 32(%eax), %ebx; \
+ movl 40(%eax), %edx; \
+ movl 48(%eax), %ecx; \
+ popl %eax;
+
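+// As in the 64-bit case, PUSHA/POPA must cover edi so the vm_regs pointer
+// survives from vmlaunch to the exit handler.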
#define PUSHA \
    push %eax; push %ebx; push %ecx; push %edx; \
    push %ebp; push %edi; push %esi;

#define POPA \
    pop %esi; pop %edi; pop %ebp; pop %edx; \
    pop %ecx; pop %ebx; pop %eax;
-#endif
-
.align 8
.globl v3_vmx_exit_handler
v3_vmx_exit_handler:
- PUSHA
- call v3_vmx_handle_exit
+ save_registers(4(%esp))
+	addl $4, %esp
POPA
+ popf
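+	// cdecl: pushl %edi both preserves edi and passes vm_regs as the single
+	// stack argument to v3_handle_vmx_exit; v3_vmx_vmresume pops it back off.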
+ pushl %edi
+ call v3_handle_vmx_exit
+
+ andl %eax, %eax
+ jnz .Lvmm_failure
v3_vmx_vmresume:
+ popl %edi
+ pushf
+ PUSHA
+ pushl %edi
+	restore_registers(%edi)
+
vmresume
- sti
+
	jz .Lfail_valid
	jc .Lfail_invalid
+	addl $4, %esp
	jmp .Lreturn
+.align 8
.globl v3_vmx_vmlaunch
+// vm_regs = %edi
v3_vmx_vmlaunch:
- cli
+ cli
pushf
PUSHA
+ pushl %edi
+
+ movl %esp, %eax
+ movl $VMCS_HOST_RSP, %ebx
+ vmwrite %eax, %ebx
+ jz .Lfail_valid
+ jc .Lfail_invalid
- mov r(sp), r(ax)
- mov $VMCS_HOST_RSP, r(bx)
- vmwrite r(bx), r(ax)
+ movl $v3_vmx_exit_handler, %eax
+ movl $VMCS_HOST_RIP, %ebx
+ vmwrite %eax, %ebx
jz .Lfail_valid
jc .Lfail_invalid
+ restore_registers(%edi)
+
vmlaunch
- sti
jz .Lfail_valid
jc .Lfail_invalid
jmp .Lreturn
.Lfail_valid:
- mov $VMX_FAIL_VALID, r(ax)
+	addl $4, %esp
+	POPA
+	popf
+	movl $VMX_FAIL_VALID, %eax
jmp .Lreturn
.Lfail_invalid:
- mov $VMX_FAIL_INVALID, r(ax)
+	addl $4, %esp
+	POPA
+	popf
+	movl $VMX_FAIL_INVALID, %eax
jmp .Lreturn
-.Lreturn:
+.Lvmm_failure:
+	addl $4, %esp
-	POPA
+	movl $VMM_FAILURE, %eax
+	jmp .Lreturn
+
+.Lreturn:
+	sti
-	popf
ret
-
-
+#endif