X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?p=palacios.git;a=blobdiff_plain;f=palacios%2Finclude%2Fpalacios%2Fvmx_lowlevel.h;h=01bca797049e79a70dfa70e4e93f7b43ef94d58a;hp=e4a878f263e4108c96299378bd5f9b7dc8eb4d8d;hb=6541ca672276d841db22cc18a003303cf517ea89;hpb=2f761543eabfddd12578ed902d7139edbb37df97

diff --git a/palacios/include/palacios/vmx_lowlevel.h b/palacios/include/palacios/vmx_lowlevel.h
index e4a878f..01bca79 100644
--- a/palacios/include/palacios/vmx_lowlevel.h
+++ b/palacios/include/palacios/vmx_lowlevel.h
@@ -22,88 +22,179 @@
 #ifdef __V3VEE__
 
+#include <palacios/vmcs.h>
+
 #define VMX_SUCCESS 0
 #define VMX_FAIL_INVALID 1
 #define VMX_FAIL_VALID 2
 
+// vmfail macro
+#define CHECK_VMXFAIL(ret_valid, ret_invalid) \
+    if (ret_valid) { \
+        return VMX_FAIL_VALID; \
+    } else if (ret_invalid) { \
+        return VMX_FAIL_INVALID; \
+    }
+
+/* Opcode definitions for all the VM instructions */
+
+#define VMCLEAR_OPCODE  ".byte 0x66,0xf,0xc7;" /* reg=/6 */
+#define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3;"
+#define VMPTRLD_OPCODE  ".byte 0x0f,0xc7;" /* reg=/6 */
+#define VMPTRST_OPCODE  ".byte 0x0f,0xc7;" /* reg=/7 */
+#define VMREAD_OPCODE   ".byte 0x0f,0x78;"
+#define VMWRITE_OPCODE  ".byte 0x0f,0x79;"
+#define VMXOFF_OPCODE   ".byte 0x0f,0x01,0xc4;"
+#define VMXON_OPCODE    ".byte 0xf3,0x0f,0xc7;" /* reg=/6 */
+
 
-#define VMWRITE_OP ".byte 0x0f,0x79,0xc1;" /* [eax],[ecx] */
-#define VMREAD_OP ".byte 0x0f,0x78,0xc1;" /* [eax],[ecx] */
-
-#define VMXON_OP ".byte 0xf3,0x0f,0xc7,0x30;" /* [eax] */
+/* Mod/rm definitions for intel registers/memory */
+#define EAX_ECX_MODRM ".byte 0xc1;"
+// %eax with /6 reg
+#define EAX_06_MODRM ".byte 0x30;"
+// %eax with /7 reg
+#define EAX_07_MODRM ".byte 0x38;"
 
-static int inline v3_enable_vmx(addr_t host_state) {
-    int ret;
+
+static inline int v3_enable_vmx(addr_t vmxon_ptr) {
+    uint64_t vmxon_ptr_64 __attribute__((aligned(8))) = (uint64_t)vmxon_ptr;
+    uint8_t ret_invalid = 0;
+
     __asm__ __volatile__ (
-        VMXON_OP
-        "setnaeb %0;"
-        : "=q"(ret)
-        : "a"(host_state), "0"(ret)
-        : "memory"
-    );
-
-    if (ret) {
-        return -1;
-    }
-
-    return 0;
+        VMXON_OPCODE
+        EAX_06_MODRM
+        "setnaeb %0;" // fail invalid (CF=1)
+        : "=q"(ret_invalid)
+        : "a"(&vmxon_ptr_64), "0"(ret_invalid)
+        : "memory");
+
+    if (ret_invalid) {
+        return VMX_FAIL_INVALID;
+    } else {
+        return VMX_SUCCESS;
+    }
 }
 
+static inline int vmcs_clear(addr_t vmcs_ptr) {
+    uint64_t vmcs_ptr_64 __attribute__ ((aligned(8))) = (uint64_t)vmcs_ptr;
+    uint8_t ret_valid = 0;
+    uint8_t ret_invalid = 0;
+    __asm__ __volatile__ (
+        VMCLEAR_OPCODE
+        EAX_06_MODRM
+        "seteb %0;" // fail valid (ZF=1)
+        "setnaeb %1;" // fail invalid (CF=1)
+        : "=q"(ret_valid), "=q"(ret_invalid)
+        : "a"(&vmcs_ptr_64), "0"(ret_valid), "1"(ret_invalid)
+        : "memory");
+
+    CHECK_VMXFAIL(ret_valid, ret_invalid);
+
+    return VMX_SUCCESS;
+}
+static inline int vmcs_load(addr_t vmcs_ptr) {
+    uint64_t vmcs_ptr_64 = (uint64_t)vmcs_ptr;
+    uint8_t ret_valid = 0;
+    uint8_t ret_invalid = 0;
+
+    __asm__ __volatile__ (
+        VMPTRLD_OPCODE
+        EAX_06_MODRM
+        "seteb %0;" // fail valid (ZF=1)
+        "setnaeb %1;" // fail invalid (CF=1)
+        : "=q"(ret_valid), "=q"(ret_invalid)
+        : "a"(&vmcs_ptr_64), "0"(ret_valid), "1"(ret_invalid)
+        : "memory");
+
+    CHECK_VMXFAIL(ret_valid, ret_invalid);
+    return VMX_SUCCESS;
+}
 
-static int inline vmcs_write(addr_t vmcs_index, addr_t value) {
-    int ret_valid = 0;
-    int ret_invalid = 0;
+static inline int vmcs_store(addr_t vmcs_ptr) {
+    uint64_t vmcs_ptr_64 = (uint64_t)vmcs_ptr;
 
     __asm__ __volatile__ (
-        VMWRITE_OP
-        "seteb %0;" // fail valid (ZF=1)
-        "setnaeb %1;" // fail invalid (CF=1)
-        : "=q"(ret_valid), "=q"(ret_invalid)
-        : "a"(vmcs_index), "c"(&value), "0"(ret_valid), "1"(ret_invalid)
-        : "memory"
-        );
-
-    if (ret_valid) {
-        return VMX_FAIL_VALID;
-    } else if (ret_invalid) {
-        return VMX_FAIL_INVALID;
-    }
+        VMPTRST_OPCODE
+        EAX_07_MODRM
+        :
+        : "a"(&vmcs_ptr_64)
+        : "memory");
 
     return VMX_SUCCESS;
 }
 
+static inline int vmcs_read(vmcs_field_t vmcs_field, void * dst) {
+    addr_t val = 0;
+    uint8_t ret_valid = 0;
+    uint8_t ret_invalid = 0;
+
+    __asm__ __volatile__ (
+        VMREAD_OPCODE
+        EAX_ECX_MODRM
+        "seteb %1;" // fail valid
+        "setnaeb %1;" // fail invalid
+        : "=c"(val), "=d"(ret_valid) //, "=r"(ret_invalid) // Use ECX
+        : "a" (vmcs_field), "0"(0), "1"(ret_valid)
+        : "memory"
+        );
+
+    CHECK_VMXFAIL(ret_valid, ret_invalid);
+
+    switch(v3_vmcs_get_field_len(vmcs_field))
+    {
+        case 2:
+            *((uint16_t*)dst) = (uint16_t)val;
+            break;
+        case 4:
+            *((uint32_t*)dst) = (uint32_t)val;
+            break;
+        case 8:
+            *((uint64_t*)dst) = (uint64_t)val;
+            break;
+    }
 
-static int inline vmcs_read(addr_t vmcs_index, void * dst, int len) {
-    addr_t val = 0;
-    int ret_valid = 0;
-    int ret_invalid = 0;
+    return VMX_SUCCESS;
+}
+
+static inline int vmcs_write(vmcs_field_t vmcs_field, addr_t value) {
+    uint8_t ret_valid = 0;
+    uint8_t ret_invalid = 0;
 
     __asm__ __volatile__ (
-        VMREAD_OP
-        "seteb %0;" // fail valid (ZF=1)
-        "setnaeb %1;" // fail invalid (CF=1)
-        : "=q"(ret_valid), "=q"(ret_invalid), "=c"(val)
-        : "a"(vmcs_index), "0"(ret_valid), "1"(ret_invalid)
-        : "memory"
-        );
-
-    if (ret_valid) {
-        return VMX_FAIL_VALID;
-    } else if (ret_invalid) {
-        return VMX_FAIL_INVALID;
-    }
+        VMWRITE_OPCODE
+        EAX_ECX_MODRM
+        "seteb %0;" // fail valid (ZF=1)
+        "setnaeb %1;" // fail invalid (CF=1)
+        : "=q" (ret_valid), "=q" (ret_invalid)
+        : "a" (vmcs_field), "c"(value)
+        : "memory");
+
+    CHECK_VMXFAIL(ret_valid, ret_invalid);
 
     return VMX_SUCCESS;
 }
 
+static inline int vmx_off() {
+    uint8_t ret_valid = 0;
+    uint8_t ret_invalid = 0;
+
+    __asm__ __volatile__ (
+        VMXOFF_OPCODE
+        "seteb %0;"
+        "setnaeb %1;"
+        : "=q"(ret_valid), "=q"(ret_invalid)
+        : "0"(ret_valid), "1"(ret_invalid)
+        : "memory");
+    CHECK_VMXFAIL(ret_valid, ret_invalid);
+    return VMX_SUCCESS;
+}
+
 #endif
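Note on the new vmcs_read(): as committed, both condition-code captures write operand %1 ("seteb %1;" and "setnaeb %1;"), only ret_valid is wired up as an asm output, and ret_invalid is never written, so a VMfailInvalid result (CF=1) can never be reported through CHECK_VMXFAIL. The other helpers (vmcs_clear, vmcs_load, vmcs_write) capture ZF and CF in separate flags. Below is a minimal illustrative sketch of that same pattern applied to VMREAD; it is not part of this commit, and the helper name vmcs_read_checked and the addr_t out-parameter are invented for the example.

/* Illustrative sketch only -- mirrors the ZF/CF handling used by vmcs_clear()/vmcs_write().
 * VMREAD with EAX_ECX_MODRM: field selector in %eax (reg), result written to %ecx (r/m). */
static inline int vmcs_read_checked(vmcs_field_t vmcs_field, addr_t * val) {
    uint8_t ret_valid = 0;
    uint8_t ret_invalid = 0;

    __asm__ __volatile__ (
        VMREAD_OPCODE
        EAX_ECX_MODRM
        "seteb %1;"   // fail valid (ZF=1)
        "setnaeb %2;" // fail invalid (CF=1)
        : "=c"(*val), "=q"(ret_valid), "=q"(ret_invalid)
        : "a"(vmcs_field)
        : "memory");

    CHECK_VMXFAIL(ret_valid, ret_invalid);

    return VMX_SUCCESS;
}

A caller would then narrow *val to the field's width using v3_vmcs_get_field_len(), exactly as the committed vmcs_read() does with its switch on the field length.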