Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


Rewrote /include/vmx_lowlevel.h to include all the possible asm VMX instructions...
Andy Gocke [Tue, 30 Jun 2009 17:14:12 +0000 (12:14 -0500)]
palacios/include/palacios/vmx_lowlevel.h

index e4a878f..c4ee91f 100644 (file)
 #define VMX_FAIL_INVALID    1
 #define VMX_FAIL_VALID      2
 
+// vmfail macro
+#define CHECK_VMXFAIL(ret_valid, ret_invalid)  \
+    if (ret_valid) {                           \
+        return VMX_FAIL_VALID;                 \
+    } else if (ret_invalid) {                  \
+        return VMX_FAIL_INVALID;               \
+    }
+
+/* Opcode definitions for all the VM instructions */
+
+#define VMCLEAR_OPCODE  ".byte 0x66,0xf,0x67;" /* reg=/6 */
+#define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3;"
+#define VMPTRLD_OPCODE  ".byte 0x0f,0xc7;" /* reg=/6 */
+#define VMPTRST_OPCODE  ".byte 0x0f,0xc7;" /* reg=/7 */
+#define VMREAD_OPCODE   ".byte 0x0f,0x78;"
+#define VMWRITE_OPCODE  ".byte 0x0f,0x79;"
+#define VMXOFF_OPCODE   ".byte 0x0f,0x01,0xc4;"
+#define VMXON_OPCODE    ".byte 0xf3,0x0f,0xc7;" /* reg=/6 */
+
+
+/* Mod/rm definitions for intel registers/memory */
+#define EAX_ECX_MODRM   ".byte 0xc1;"
+// %eax with /6 reg
+#define EAX_06_MODRM    ".byte 0x30;"
+// %eax with /7 reg
+#define EAX_07_MODRM    ".byte 0x38;"
 
-#define VMWRITE_OP  ".byte 0x0f,0x79,0xc1;"           /* [eax],[ecx] */
-#define VMREAD_OP   ".byte 0x0f,0x78,0xc1;"           /* [eax],[ecx] */
-#define VMXON_OP    ".byte 0xf3,0x0f,0xc7,0x30;"         /*  [eax] */
 
 
-static int inline v3_enable_vmx(addr_t host_state) {
-    int ret;
+static inline int v3_enable_vmx(uint64_t host_state) {
+    uint8_t ret_invalid = 0;
+
     __asm__ __volatile__ (
-                         VMXON_OP
-                         "setnaeb %0;"
-                         : "=q"(ret)
-                         : "a"(host_state), "0"(ret)
-                         : "memory"
-                         );
-
-    if (ret) {
-       return -1;
-    } 
-
-    return 0;
+                VMXON_OPCODE
+                EAX_06_MODRM
+                "setnaeb %0;" // fail invalid (CF=1)
+                : "=q"(ret_invalid)
+                : "a"(&host_state),"0"(ret_invalid)
+                : "memory");
+
+    if (ret_invalid) {
+        return VMX_FAIL_INVALID;
+    } else {
+        return VMX_SUCCESS;
+    }
 }
 
+// No vmcall necessary - it is only executed by the guest
 
+static inline int vmcs_clear(uint64_t addr) {
+    uint8_t ret_valid = 0;
+    uint8_t ret_invalid = 0;
 
+    __asm__ __volatile__ (
+            VMCLEAR_OPCODE
+            EAX_06_MODRM
+            "seteb %0;" // fail valid (ZF=1)
+            "setnaeb %1;" // fail invalid (CF=1)
+            : "=q"(ret_valid), "=q"(ret_invalid)
+            : "a"(&addr), "0"(ret_valid), "1"(ret_invalid)
+            : "memory");
+
+    CHECK_VMXFAIL(ret_valid, ret_invalid);
+  
+    return VMX_SUCCESS;
+}
 
 
-static int inline vmcs_write(addr_t vmcs_index, addr_t value) {
-    int ret_valid = 0;
-    int ret_invalid = 0;
+static inline int vmcs_resume() {
+    uint8_t ret_valid = 0;
+    uint8_t ret_invalid = 0;
 
     __asm__ __volatile__ (
-                         VMWRITE_OP
-                         "seteb %0;"  // fail valid    (ZF=1)
-                         "setnaeb %1;" // fail invalid (CF=1)
-                         : "=q"(ret_valid), "=q"(ret_invalid)
-                         : "a"(vmcs_index), "c"(&value), "0"(ret_valid), "1"(ret_invalid)
-                         : "memory"
-                         );
-
-    if (ret_valid) {
-       return VMX_FAIL_VALID;
-    } else if (ret_invalid) {
-       return VMX_FAIL_INVALID;
-    }
+                VMRESUME_OPCODE
+                "seteb %0;"
+                "setnaeb %1;"
+                : "=q"(ret_valid), "=q"(ret_invalid)
+                : "0"(ret_valid), "1"(ret_invalid)
+                : "memory");
+
+    CHECK_VMXFAIL(ret_valid, ret_invalid);
 
     return VMX_SUCCESS;
 }
 
 
+static inline int vmcs_load(vmcs_t * vmcs_ptr) {
+    uint64_t addr = (uint64_t)vmcs_ptr;
+    uint8_t ret_valid = 0;
+    uint8_t ret_invalid = 0;
+    
+    __asm__ __volatile__ (
+                VMPTRLD_OPCODE
+                EAX_06_MODRM
+                "seteb %0;" // fail valid (ZF=1)
+                "setnaeb %1;"  // fail invalid (CF=1)
+                : "=q"(ret_valid), "=q"(ret_invalid)
+                : "a"(&addr), "0"(ret_valid), "1"(ret_invalid)
+                : "memory");
+    
+    CHECK_VMXFAIL(ret_valid, ret_invalid);
 
-static int inline vmcs_read(addr_t vmcs_index,  void * dst, int len) {
+    return VMX_SUCCESS;
+}
+
+static inline int vmcs_store(vmcs_t * vmcs_ptr) {
+    uint64_t addr = (uint64_t)vmcs_ptr;
+
+    __asm__ __volatile__ (
+               VMPTRST_OPCODE
+               EAX_07_MODRM
+               :
+               : "a"(&addr)
+               : "memory");
+
+    return VMX_SUCCESS;
+}
+
+/* According to Intel, vmread will return an architecture sized type - be sure that
+ * dst is at least 64 bits in IA-32e mode and 32 bits otherwise */
+static inline int vmcs_read(addr_t vmcs_index, void * dst) {
     addr_t val = 0;
-    int ret_valid = 0;
-    int ret_invalid = 0;
+    uint8_t ret_valid = 0;
+    uint8_t ret_invalid = 0;
+
+    __asm__ __volatile__ (  
+                VMREAD_OPCODE
+                EAX_ECX_MODRM
+                "seteb %0;" // fail valid
+                "setnaeb %1;" // fail invalid
+                : "=q"(ret_valid), "=q"(ret_invalid), "=c"(val) // Use ECX
+                : "a" (vmcs_index), "0"(ret_valid), "1"(ret_invalid)
+                : "memory"
+                );
+
+    CHECK_VMXFAIL(ret_valid, ret_invalid);
+
+    // TODO: Fix this, will have to do a cast because dst will be variable length
+    *dst = val;
+
+    return VMX_SUCCESS;
+}
+
+static inline int vmcs_write(addr_t vmcs_index, addr_t value) {
+    uint8_t ret_valid = 0;
+    uint8_t ret_invalid = 0;
 
     __asm__ __volatile__ (
-                         VMREAD_OP
-                         "seteb %0;"  // fail valid    (ZF=1)
-                         "setnaeb %1;" // fail invalid (CF=1)
-                         : "=q"(ret_valid), "=q"(ret_invalid), "=c"(val)
-                         : "a"(vmcs_index), "0"(ret_valid), "1"(ret_invalid)
-                         : "memory"
-                         );
-
-    if (ret_valid) {
-       return VMX_FAIL_VALID;
-    } else if (ret_invalid) {
-       return VMX_FAIL_INVALID;
-    }
+                VMWRITE_OPCODE
+                EAX_ECX_MODRM
+                "seteb %0;" // fail valid (ZF=1)
+                "setnaeb %1;" // fail invalid (CF=1)
+                : "=q" (ret_valid), "=q" (ret_invalid)
+                : "a" (vmcs_index), "c"(value), "0"(ret_valid), "1"(ret_invalid)
+                : "memory");
+
+    CHECK_VMXFAIL(ret_valid, ret_invalid);
 
     return VMX_SUCCESS;
 }
 
+static inline int vmx_off() {
+    uint8_t ret_valid = 0;
+    uint8_t ret_invalid = 0;
 
+    __asm__ __volatile__ (
+                VMXOFF_OPCODE
+                "seteb %0;"
+                "setnaeb %1;"
+                : "=q"(ret_valid), "=q"(ret_invalid)
+                : "0"(ret_valid), "1"(ret_invalid)
+                : "memory");
+
+    CHECK_VMXFAIL(ret_valid, ret_invalid);
 
+    return VMX_SUCCESS;
+}
 
 #endif