2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #ifndef __VMX_LOWLEVEL_H__
21 #define __VMX_LOWLEVEL_H__
/* Error classes returned by the VMX instruction wrappers below.
 * The hardware reports two failure kinds (captured via setcc on the flags):
 *   CF=1 -> "fail invalid" (no usable VMCS)        -> VMX_FAIL_INVALID
 *   ZF=1 -> "fail valid"   (error code recorded)   -> VMX_FAIL_VALID
 * Success is indicated by both flags clear. */
#define VMX_FAIL_INVALID 1
#define VMX_FAIL_VALID 2
/* Convert the flag results captured after a VMX instruction into an early
 * return from the enclosing function:
 *   ret_valid   (set from ZF) -> return VMX_FAIL_VALID
 *   ret_invalid (set from CF) -> return VMX_FAIL_INVALID
 * Execution continues past the macro when neither flag was set.
 * (Comments are kept outside the macro: a // comment on a continuation
 * line would swallow the trailing backslash.) */
#define CHECK_VMXFAIL(ret_valid, ret_invalid) \
        return VMX_FAIL_VALID; \
    } else if (ret_invalid) { \
        return VMX_FAIL_INVALID; \
/* Opcode definitions for all the VM instructions */
/* Emitted as raw bytes so this header assembles even with toolchains whose
 * assemblers predate the VMX extensions.  Where "reg=/N" is noted, the
 * instruction requires a following mod/rm byte carrying opcode extension /N
 * (see the *_MODRM macros below).
 * NOTE: VMCLEAR is encoded 66 0F C7 /6 (Intel SDM); the previous value here
 * used 0x67 as the last byte, which encodes packsswb, not VMCLEAR. */
#define VMCLEAR_OPCODE ".byte 0x66,0xf,0xc7;" /* reg=/6 */
#define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3;"
#define VMPTRLD_OPCODE ".byte 0x0f,0xc7;" /* reg=/6 */
#define VMPTRST_OPCODE ".byte 0x0f,0xc7;" /* reg=/7 */
#define VMREAD_OPCODE ".byte 0x0f,0x78;"
#define VMWRITE_OPCODE ".byte 0x0f,0x79;"
#define VMXOFF_OPCODE ".byte 0x0f,0x01,0xc4;"
#define VMXON_OPCODE ".byte 0xf3,0x0f,0xc7;" /* reg=/6 */
/* Mod/rm definitions for intel registers/memory */
/* A mod/rm byte is mod(2 bits) | reg(3 bits) | rm(3 bits):
 *   0xc1 = 11 000 001 : register-direct, reg=EAX, rm=ECX
 *   0x30 = 00 110 000 : opcode extension /6, memory operand at [EAX]
 *   0x38 = 00 111 000 : opcode extension /7, memory operand at [EAX] */
#define EAX_ECX_MODRM ".byte 0xc1;"
#define EAX_06_MODRM ".byte 0x30;"
#define EAX_07_MODRM ".byte 0x38;"
// Enter VMX root operation (VMXON) using the region described by vmxon_ptr.
// Returns VMX_FAIL_INVALID when the instruction sets CF; otherwise falls
// through to the (elided) success path.
// NOTE(review): the SDM requires the *physical* address of the VMXON region;
// this takes whatever address is in vmxon_ptr -- confirm at the call sites.
static inline int v3_enable_vmx(struct vmcs_data * vmxon_ptr) {
    // The instruction takes a 64-bit memory operand, so the address is
    // staged in an 8-byte-aligned local and EAX points at that local.
    uint64_t vmxon_ptr_64 __attribute__((aligned(8))) = (uint64_t)vmxon_ptr;
    uint8_t ret_invalid = 0;

    __asm__ __volatile__ (
        "setnaeb %0;" // fail invalid (CF=1)
        : "a"(&vmxon_ptr_64),"0"(ret_invalid)

    return VMX_FAIL_INVALID;
78 // No vmcall necessary - is only executed by the guest
// Clear (VMCLEAR) the VMCS referenced by vmcs_ptr.
// The 64-bit address is passed indirectly: EAX holds &vmcs_ptr_64 and the
// instruction reads its m64 operand from there.  The two setcc instructions
// capture the failure flags; CHECK_VMXFAIL turns them into error returns,
// falling through on success.
static inline int vmcs_clear(struct vmcs_data* vmcs_ptr) {
    uint64_t vmcs_ptr_64 = (uint64_t)vmcs_ptr;
    uint8_t ret_valid = 0;
    uint8_t ret_invalid = 0;

    __asm__ __volatile__ (
        "seteb %0;" // fail valid (ZF=1)
        "setnaeb %1;" // fail invalid (CF=1)
        : "=q"(ret_valid), "=q"(ret_invalid)
        : "a"(&vmcs_ptr_64), "0"(ret_valid), "1"(ret_invalid)

    CHECK_VMXFAIL(ret_valid, ret_invalid);
// Resume guest execution (VMRESUME).
// The flag-capture code after the instruction only runs if VMRESUME fails
// and falls through; CHECK_VMXFAIL then maps ZF/CF into VMX_FAIL_* returns.
static inline int vmcs_resume() {
    uint8_t ret_valid = 0;
    uint8_t ret_invalid = 0;

    __asm__ __volatile__ (
        : "=q"(ret_valid), "=q"(ret_invalid)
        : "0"(ret_valid), "1"(ret_invalid)

    CHECK_VMXFAIL(ret_valid, ret_invalid);
// Load (VMPTRLD) vmcs_ptr as the current VMCS.
// Same calling pattern as vmcs_clear: EAX points at a local 64-bit copy of
// the address, and the ZF/CF outcomes are captured into ret_valid /
// ret_invalid for CHECK_VMXFAIL.
static inline int vmcs_load(struct vmcs_data* vmcs_ptr) {
    uint64_t vmcs_ptr_64 = (uint64_t)vmcs_ptr;
    uint8_t ret_valid = 0;
    uint8_t ret_invalid = 0;

    __asm__ __volatile__ (
        "seteb %0;" // fail valid (ZF=1)
        "setnaeb %1;" // fail invalid (CF=1)
        : "=q"(ret_valid), "=q"(ret_invalid)
        : "a"(&vmcs_ptr_64), "0"(ret_valid), "1"(ret_invalid)

    CHECK_VMXFAIL(ret_valid, ret_invalid);
// Store (VMPTRST) the current-VMCS pointer into the caller's buffer.
// NOTE(review): vmcs_ptr_64 is initialized from vmcs_ptr but the asm body
// presumably writes the current-VMCS address through it -- verify the full
// constraint list before relying on this.
static inline int vmcs_store(struct vmcs_data* vmcs_ptr) {
    uint64_t vmcs_ptr_64 = (uint64_t)vmcs_ptr;

    __asm__ __volatile__ (
// Read (VMREAD) the VMCS field whose encoding is vmcs_index.
// The field encoding goes in EAX and the result comes back in ECX ('val',
// declared in an elided line above the asm).  On error the ZF/CF captures
// feed CHECK_VMXFAIL; on success 'val' is stored into *dst with a width
// chosen from 'len' -- the 16/32/64-bit stores are the cases visible below.
static inline int vmcs_read(addr_t vmcs_index, void * dst, int len) {
    uint8_t ret_valid = 0;
    uint8_t ret_invalid = 0;

    __asm__ __volatile__ (
        "seteb %0;" // fail valid
        "setnaeb %1;" // fail invalid
        : "=q"(ret_valid), "=q"(ret_invalid), "=c"(val) // Use ECX
        : "a" (vmcs_index), "0"(ret_valid), "1"(ret_invalid)

    CHECK_VMXFAIL(ret_valid, ret_invalid);

    // TODO: Fix this, will have to do a cast because dst will be variable length
        *((uint16_t*)dst) = (uint16_t)val;
        *((uint32_t*)dst) = (uint32_t)val;
        *((uint64_t*)dst) = (uint64_t)val;
// Write (VMWRITE) 'value' into the VMCS field whose encoding is vmcs_index.
// The encoding is placed in EAX and the value in ECX; ZF/CF are captured
// and mapped to VMX_FAIL_VALID / VMX_FAIL_INVALID by CHECK_VMXFAIL, with
// fallthrough on success.
static inline int vmcs_write(addr_t vmcs_index, addr_t value) {
    uint8_t ret_valid = 0;
    uint8_t ret_invalid = 0;

    __asm__ __volatile__ (
        "seteb %0;" // fail valid (ZF=1)
        "setnaeb %1;" // fail invalid (CF=1)
        : "=q" (ret_valid), "=q" (ret_invalid)
        : "a" (vmcs_index), "c"(value), "0"(ret_valid), "1"(ret_invalid)

    CHECK_VMXFAIL(ret_valid, ret_invalid);
// Leave VMX root operation (VMXOFF).
// Captures ZF/CF after the instruction and maps them to VMX_FAIL_* via
// CHECK_VMXFAIL, falling through when the instruction succeeds.
static inline int vmx_off() {
    uint8_t ret_valid = 0;
    uint8_t ret_invalid = 0;

    __asm__ __volatile__ (
        : "=q"(ret_valid), "=q"(ret_invalid)
        : "0"(ret_valid), "1"(ret_invalid)

    CHECK_VMXFAIL(ret_valid, ret_invalid);