#ifdef __V3VEE__
+#include <palacios/vmcs.h>
#define VMX_SUCCESS 0
#define VMX_FAIL_INVALID 1
/* Opcode definitions for all the VM instructions */
-#define VMCLEAR_OPCODE ".byte 0x66,0xf,0x67;" /* reg=/6 */
+#define VMCLEAR_OPCODE ".byte 0x66,0xf,0xc7;" /* reg=/6 */
#define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3;"
#define VMPTRLD_OPCODE ".byte 0x0f,0xc7;" /* reg=/6 */
#define VMPTRST_OPCODE ".byte 0x0f,0xc7;" /* reg=/7 */
-static inline int v3_enable_vmx(uint64_t host_state) {
- uint8_t ret_invalid = 0;
-
- __asm__ __volatile__ (
- VMXON_OPCODE
- EAX_06_MODRM
- "setnaeb %0;" // fail invalid (CF=1)
- : "=q"(ret_invalid)
- : "a"(&host_state),"0"(ret_invalid)
- : "memory");
-
- if (ret_invalid) {
- return VMX_FAIL_INVALID;
- } else {
- return VMX_SUCCESS;
- }
-}
-// No vmcall necessary - is only executed by the guest
-static inline int vmcs_clear(uint64_t addr) {
+static inline int vmcs_clear(addr_t vmcs_ptr) {
+ uint64_t vmcs_ptr_64 __attribute__ ((aligned(8))) = (uint64_t)vmcs_ptr;
uint8_t ret_valid = 0;
uint8_t ret_invalid = 0;
"seteb %0;" // fail valid (ZF=1)
"setnaeb %1;" // fail invalid (CF=1)
: "=q"(ret_valid), "=q"(ret_invalid)
- : "a"(&addr), "0"(ret_valid), "1"(ret_invalid)
+ : "a"(&vmcs_ptr_64), "0"(ret_valid), "1"(ret_invalid)
: "memory");
CHECK_VMXFAIL(ret_valid, ret_invalid);
return VMX_SUCCESS;
}
-
-static inline int vmcs_resume() {
- uint8_t ret_valid = 0;
- uint8_t ret_invalid = 0;
-
- __asm__ __volatile__ (
- VMRESUME_OPCODE
- "seteb %0;"
- "setnaeb %1;"
- : "=q"(ret_valid), "=q"(ret_invalid)
- : "0"(ret_valid), "1"(ret_invalid)
- : "memory");
-
- CHECK_VMXFAIL(ret_valid, ret_invalid);
-
- return VMX_SUCCESS;
-}
-
-
-static inline int vmcs_load(vmcs_t * vmcs_ptr) {
- uint64_t addr = (uint64_t)vmcs_ptr;
+static inline int vmcs_load(addr_t vmcs_ptr) {
+ uint64_t vmcs_ptr_64 = (uint64_t)vmcs_ptr;
uint8_t ret_valid = 0;
uint8_t ret_invalid = 0;
"seteb %0;" // fail valid (ZF=1)
"setnaeb %1;" // fail invalid (CF=1)
: "=q"(ret_valid), "=q"(ret_invalid)
- : "a"(&addr), "0"(ret_valid), "1"(ret_invalid)
+ : "a"(&vmcs_ptr_64), "0"(ret_valid), "1"(ret_invalid)
: "memory");
CHECK_VMXFAIL(ret_valid, ret_invalid);
return VMX_SUCCESS;
}
-static inline int vmcs_store(vmcs_t * vmcs_ptr) {
- uint64_t addr = (uint64_t)vmcs_ptr;
+static inline uint64_t vmcs_store() {
+ uint64_t vmcs_ptr = 0;
__asm__ __volatile__ (
- VMPTRSRT_OPCODE
+ VMPTRST_OPCODE
EAX_07_MODRM
:
- : "a"(&addr)
+ : "a"(&vmcs_ptr)
: "memory");
- return VMX_SUCCESS;
+ return vmcs_ptr;
}
-/* According to Intel, vmread will return an architecure sized type - be sure that
- * dst is at least 64-bits in IA-32e and 32 otherwise */
-static inline int vmcs_read(addr_t vmcs_index, void * dst, int len) {
+static inline int vmcs_read(vmcs_field_t vmcs_field, void * dst) {
addr_t val = 0;
uint8_t ret_valid = 0;
uint8_t ret_invalid = 0;
__asm__ __volatile__ (
VMREAD_OPCODE
EAX_ECX_MODRM
- "seteb %0;" // fail valid
+ "seteb %1;" // fail valid
"setnaeb %1;" // fail invalid
- : "=q"(ret_valid), "=q"(ret_invalid), "=c"(val) // Use ECX
- : "a" (vmcs_index), "0"(ret_valid), "1"(ret_invalid)
+ : "=c"(val), "=d"(ret_valid) //, "=r"(ret_invalid) // Use ECX
+ : "a" (vmcs_field), "0"(0), "1"(ret_valid)
: "memory"
);
CHECK_VMXFAIL(ret_valid, ret_invalid);
- // TODO: Fix this, will have to do a cast because dst will be variable length
- *dst = val;
+ switch(v3_vmcs_get_field_len(vmcs_field))
+ {
+ case 2:
+ *((uint16_t*)dst) = (uint16_t)val;
+ break;
+ case 4:
+ *((uint32_t*)dst) = (uint32_t)val;
+ break;
+ case 8:
+ *((uint64_t*)dst) = (uint64_t)val;
+ break;
+ default:
+ return -1;
+ }
+
return VMX_SUCCESS;
}
-static inline int vmcs_write(addr_t vmcs_index, addr_t value) {
+static inline int vmcs_write(vmcs_field_t vmcs_field, addr_t value) {
uint8_t ret_valid = 0;
uint8_t ret_invalid = 0;
"seteb %0;" // fail valid (ZF=1)
"setnaeb %1;" // fail invalid (CF=1)
: "=q" (ret_valid), "=q" (ret_invalid)
- : "a" (vmcs_index), "c"(value), "0"(ret_valid), "1"(ret_invalid)
+ : "a" (vmcs_field), "c"(value)
: "memory");
CHECK_VMXFAIL(ret_valid, ret_invalid);
return VMX_SUCCESS;
}
+
+static inline int vmx_on(addr_t vmxon_ptr) {
+ uint64_t vmxon_ptr_64 __attribute__((aligned(8))) = (uint64_t)vmxon_ptr;
+ uint8_t ret_invalid = 0;
+
+ __asm__ __volatile__ (
+ VMXON_OPCODE
+ EAX_06_MODRM
+ "setnaeb %0;" // fail invalid (CF=1)
+ : "=q"(ret_invalid)
+ : "a"(&vmxon_ptr_64),"0"(ret_invalid)
+ : "memory");
+
+ if (ret_invalid) {
+ return VMX_FAIL_INVALID;
+ } else {
+ return VMX_SUCCESS;
+ }
+}
+
/* VMXOFF wrapper: intended to leave VMX operation.
 * NOTE(review): only the flag declarations and the return are visible
 * in this hunk — the VMXOFF asm invocation appears to have been elided
 * by the diff, leaving ret_valid/ret_invalid unused here. Confirm
 * against the full file before relying on this function. */
static inline int vmx_off() {
    uint8_t ret_valid = 0;
    uint8_t ret_invalid = 0;
    /* Falls through to success; no failure path is visible in this hunk. */
    return VMX_SUCCESS;
}
+
/* Set the control-register bits required before VMXON can execute:
 *   CR4 bit 13 (0x2000) -- VMXE, enables VMX instructions
 *   CR0 bit 5  (0x0020) -- NE, numeric error; must be 1 in VMX operation
 *
 * Always returns 0. Must run at CPL 0 (writes CR0/CR4).
 */
static inline int enable_vmx() {
#ifdef __V3_64BIT__
    __asm__ __volatile__ (
        "movq %%cr4, %%rcx;"
        "orq $0x00002000, %%rcx;"  /* CR4.VMXE */
        "movq %%rcx, %%cr4;"
        :
        :
        : "%rcx"
    );

    __asm__ __volatile__ (
        "movq %%cr0, %%rcx; "
        "orq $0x00000020,%%rcx; "  /* CR0.NE */
        "movq %%rcx, %%cr0;"
        :
        :
        : "%rcx"
    );
#elif defined(__V3_32BIT__)
    /* Fix: `#elif __V3_32BIT__` is a preprocessor error if the macro is
       defined empty, and silently false if it is undefined;
       defined(...) tests presence robustly. */
    __asm__ __volatile__ (
        "movl %%cr4, %%ecx;"
        "orl $0x00002000, %%ecx;"  /* CR4.VMXE */
        "movl %%ecx, %%cr4;"
        :
        :
        : "%ecx"
    );

    __asm__ __volatile__ (
        "movl %%cr0, %%ecx; "
        "orl $0x00000020,%%ecx; "  /* CR0.NE */
        "movl %%ecx, %%cr0;"
        :
        :
        : "%ecx"
    );
#endif

    return 0;
}
+
+
+
+
+
#endif
#endif