Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute:

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are similar.
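For example, to work on a release branch instead (the branch name below is only illustrative; "git branch -r" lists what is actually available):

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2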


bug fix for 32 bit ebp clobbering
palacios/src/palacios/svm_lowlevel.S
index 3a17df4..45c5575 100644
@@ -1,6 +1,5 @@
 #;  -*- fundamental -*-
 
-
 .text
 .align 4
 
 #define clgi   .byte 0x0F,0x01,0xDD
 
 
+#define SVM_VM_HSAVE_PA_MSR .dword 0xc0010117
+
+
 #ifdef __V3_32BIT__
 
+// Note that RAX is saved in the VMCB, so we don't touch it here
+
 #define Save_SVM_Registers(location)   \
        pushl   %eax;                   \
        movl    location, %eax;         \
        movl    %ebx, 32(%eax);         \
        movl    %edx, 40(%eax);         \
        movl    %ecx, 48(%eax);         \
-       pushl   %ebx;                   \
-       movl    4(%esp), %ebx;          \
-       movl    %ebx, 56(%eax);         \
-       popl    %ebx;                   \
        popl    %eax;                   
        
 
        popl    %eax;
 
 
+// 32 bit GCC passes arguments via stack
+
 v3_svm_launch:
        push    %ebp;
        movl    %esp, %ebp;
        pushf;
-       push    %fs;
-       push    %gs;
        pusha;
 
+       movl    16(%ebp), %eax;
+       vmsave;
+
        pushl   12(%ebp);
        pushl   8(%ebp);
 
@@ -72,9 +75,14 @@ v3_svm_launch:
        addl    $4, %esp;
 
        popa;
-       pop     %gs;
-       pop     %fs;
        popf;
+
+       movl    16(%ebp), %eax;
+       vmload;
+
+       // We don't detect failures here, so just return 0
+       xorl    %eax, %eax 
+
        pop     %ebp;
        ret
 
@@ -82,38 +90,113 @@ v3_svm_launch:
 
 #elif __V3_64BIT__
 
+// Note that RAX is saved in the VMCB, so we don't touch it here
+
 #define Save_SVM_Registers(location)   \
-       pushl   %eax;                   \
-       movl    location, %eax;         \
-       movl    %edi, (%eax);           \
-       movl    %esi, 8(%eax);          \
-       movl    %ebp, 16(%eax);         \
-       movl    $0, 24(%eax);           \
-       movl    %ebx, 32(%eax);         \
-       movl    %edx, 40(%eax);         \
-       movl    %ecx, 48(%eax);         \
-       pushl   %ebx;                   \
-       movl    4(%esp), %ebx;          \
-       movl    %ebx, 56(%eax);         \
-       popl    %ebx;                   \
-       popl    %eax;                   
+       pushq   %rax;                   \
+       movq    location, %rax;         \
+       movq    %rdi, (%rax);           \
+       movq    %rsi, 8(%rax);          \
+       movq    %rbp, 16(%rax);         \
+       movq    $0, 24(%rax);           \
+       movq    %rbx, 32(%rax);         \
+       movq    %rdx, 40(%rax);         \
+       movq    %rcx, 48(%rax);         \
+                                       \
+       movq    %r8, 64(%rax);          \
+       movq    %r9, 72(%rax);          \
+       movq    %r10, 80(%rax);         \
+       movq    %r11, 88(%rax);         \
+       movq    %r12, 96(%rax);         \
+       movq    %r13, 104(%rax);        \
+       movq    %r14, 112(%rax);        \
+       movq    %r15, 120(%rax);        \
+       popq    %rax;                   
        
 
 #define Restore_SVM_Registers(location) \
-       pushl   %eax;                   \
-       movl    location, %eax;         \
-       movl    (%eax), %edi;           \
-       movl    8(%eax), %esi;          \
-       movl    16(%eax), %ebp;         \
-       movl    32(%eax), %ebx;         \
-       movl    40(%eax), %edx;         \
-       movl    48(%eax), %ecx;         \
-       popl    %eax;
+       push    %rax;                   \
+       mov     location, %rax;         \
+       mov     (%rax), %rdi;           \
+       mov     8(%rax), %rsi;          \
+       mov     16(%rax), %rbp;         \
+       mov     32(%rax), %rbx;         \
+       mov     40(%rax), %rdx;         \
+       mov     48(%rax), %rcx;         \
+                                       \
+       mov     64(%rax), %r8;          \
+       mov     72(%rax), %r9;          \
+       mov     80(%rax), %r10;         \
+       mov     88(%rax), %r11;         \
+       mov     96(%rax), %r12;         \
+       mov     104(%rax), %r13;        \
+       mov     112(%rax), %r14;        \
+       mov     120(%rax), %r15;        \
+       pop     %rax;
+
+
+
+
+#define PUSHA                          \
+       pushq %rbp;                     \
+       pushq %rbx;                     \
+       pushq %r8;                      \
+       pushq %r9;                      \
+       pushq %r10;                     \
+       pushq %r11;                     \
+       pushq %r12;                     \
+       pushq %r13;                     \
+       pushq %r14;                     \
+       pushq %r15;                     
+
+
+#define POPA                           \
+       popq %r15;                      \
+       popq %r14;                      \
+       popq %r13;                      \
+       popq %r12;                      \
+       popq %r11;                      \
+       popq %r10;                      \
+       popq %r9;                       \
+       popq %r8;                       \
+       popq %rbx;                      \
+       popq %rbp;                      
+
+
+// Note that this is only for 64 bit GCC, 32 bit GCC passes via stack
+// VMCB => RDI
+// vm_regs => RSI
+// HOST VMCB => RDX
 
+v3_svm_launch:
+       pushf;
+       PUSHA
+       
+       pushq   %rdx;
+       movq    %rdx, %rax;
+       vmsave;
 
+       pushq   %rsi
 
-v3_svm_launch:
+       movq    %rdi, %rax
+       Restore_SVM_Registers(%rsi);
 
+
+       vmload;
+       vmrun;
+       vmsave;
+
+
+       Save_SVM_Registers(8(%rsp));
+
+       addq $8, %rsp
+
+
+       popq %rax;
+       vmload;
+
+       POPA
+       popf;
        ret
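

For reference, below is a minimal C-side sketch of the calling convention and register-save layout that this assembly assumes. The struct layout is inferred from the offsets used by Save_SVM_Registers/Restore_SVM_Registers above; the type and field names are hypothetical, not the actual Palacios declarations.

  /* Hypothetical sketch: names are illustrative, not Palacios' own headers.
   * Field offsets match the assembly above; in the 32-bit build the fields
   * are still 8 bytes apart, with only the low 32 bits written (movl). */
  #include <stdint.h>

  struct guest_gprs {
      uint64_t rdi;   /* offset   0 */
      uint64_t rsi;   /* offset   8 */
      uint64_t rbp;   /* offset  16 */
      uint64_t rsp;   /* offset  24 -- written as 0 by Save_SVM_Registers */
      uint64_t rbx;   /* offset  32 */
      uint64_t rdx;   /* offset  40 */
      uint64_t rcx;   /* offset  48 */
      uint64_t rax;   /* offset  56 -- kept in the VMCB, not touched here */
      uint64_t r8;    /* offset  64 */
      uint64_t r9;    /* offset  72 */
      uint64_t r10;   /* offset  80 */
      uint64_t r11;   /* offset  88 */
      uint64_t r12;   /* offset  96 */
      uint64_t r13;   /* offset 104 */
      uint64_t r14;   /* offset 112 */
      uint64_t r15;   /* offset 120 */
  };

  /* 64-bit GCC passes these three arguments in RDI, RSI, and RDX; 32-bit GCC
   * passes them on the stack, where the assembly reads them at 8(%ebp),
   * 12(%ebp), and 16(%ebp). */
  int v3_svm_launch(uintptr_t vmcb_pa, struct guest_gprs * vm_regs,
                    uintptr_t host_vmcb_pa);

The host VMCB argument is what the new vmsave on entry and vmload on exit use to preserve host state, replacing the manual %fs/%gs pushes in the 32-bit path.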