Palacios Public Git Repository

To checkout Palacios execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


added alignment to vmxon pointer
[palacios.git] / palacios / include / palacios / vmx_lowlevel.h
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20 #ifndef __VMX_LOWLEVEL_H__
21 #define __VMX_LOWLEVEL_H__
22
23 #ifdef __V3VEE__
24
25
/* Return codes for the VMX instruction wrappers below, mirroring the
 * three VMX outcome classes captured by the asm stubs:
 *   VMsucceed            -> VMX_SUCCESS
 *   VMfailInvalid (CF=1) -> VMX_FAIL_INVALID
 *   VMfailValid   (ZF=1) -> VMX_FAIL_VALID
 */
#define VMX_SUCCESS         0
#define VMX_FAIL_INVALID    1
#define VMX_FAIL_VALID      2

/* vmfail macro: translate the seteb/setnaeb flag captures into a return
 * from the enclosing wrapper on failure.  Wrapped in do { } while (0) so
 * the multi-statement body acts as a single statement and cannot steal an
 * `else` when used inside an unbraced if. */
#define CHECK_VMXFAIL(ret_valid, ret_invalid)   \
    do {                                        \
        if (ret_valid) {                        \
            return VMX_FAIL_VALID;              \
        } else if (ret_invalid) {               \
            return VMX_FAIL_INVALID;            \
        }                                       \
    } while (0)
37
/* Opcode definitions for all the VM instructions, emitted as raw .byte
 * sequences because older assemblers do not know the VMX mnemonics.
 * Instructions with a memory/register operand get their mod/rm byte
 * appended separately (EAX_*_MODRM macros).
 *
 * Fix: VMCLEAR was encoded as 66 0F 67, which is PACKUSWB, not VMCLEAR.
 * Per the Intel SDM, VMCLEAR m64 is 66 0F C7 /6 (matching the /6 note
 * and the 0F C7 base shared with VMPTRLD/VMPTRST/VMXON below). */

#define VMCLEAR_OPCODE  ".byte 0x66,0x0f,0xc7;" /* reg=/6 */
#define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3;"
#define VMPTRLD_OPCODE  ".byte 0x0f,0xc7;" /* reg=/6 */
#define VMPTRST_OPCODE  ".byte 0x0f,0xc7;" /* reg=/7 */
#define VMREAD_OPCODE   ".byte 0x0f,0x78;"
#define VMWRITE_OPCODE  ".byte 0x0f,0x79;"
#define VMXOFF_OPCODE   ".byte 0x0f,0x01,0xc4;"
#define VMXON_OPCODE    ".byte 0xf3,0x0f,0xc7;" /* reg=/6 */
48
49
/* Mod/rm definitions for intel registers/memory */
/* 0xc1 = mod 11, reg 000 (%eax), rm 001 (%ecx): register form used by
 * VMREAD/VMWRITE (field index in %eax, value in %ecx -- see vmcs_read /
 * vmcs_write below). */
#define EAX_ECX_MODRM   ".byte 0xc1;"
// %eax with /6 reg  (0x30 = mod 00, reg 110, rm 000: memory operand at [%eax])
#define EAX_06_MODRM    ".byte 0x30;"
// %eax with /7 reg  (0x38 = mod 00, reg 111, rm 000: memory operand at [%eax])
#define EAX_07_MODRM    ".byte 0x38;"
56
57
58
/* Enter VMX root operation via VMXON.
 *
 * vmxon_ptr: pointer to the VMXON region -- presumably its physical
 *            address, as VMXON requires (TODO confirm with callers).
 *            Its value is copied into an 8-byte-aligned local so the
 *            instruction's m64 operand can be addressed through %eax.
 *
 * Returns VMX_SUCCESS, or VMX_FAIL_INVALID if VMXON raised CF.
 */
static inline int v3_enable_vmx(struct vmcs_data * vmxon_ptr) {
    // VMXON reads an 8-byte memory operand holding the region address.
    uint64_t vmxon_ptr_64 __attribute__((aligned(8))) = (uint64_t)vmxon_ptr;
    uint8_t ret_invalid = 0;

    __asm__ __volatile__ (
                VMXON_OPCODE
                EAX_06_MODRM      // m64 operand at [%eax] = &vmxon_ptr_64
                "setnaeb %0;" // fail invalid (CF=1)
                : "=q"(ret_invalid)
                : "a"(&vmxon_ptr_64),"0"(ret_invalid)
                : "memory");

    if (ret_invalid) {
        return VMX_FAIL_INVALID;
    } else {
        return VMX_SUCCESS;
    }
}
77
78 // No vmcall necessary - is only executed by the guest
79
/* Execute VMCLEAR on the VMCS referenced by vmcs_ptr.
 *
 * vmcs_ptr: VMCS region pointer -- presumably a physical address, as the
 *           instruction requires (NOTE(review): confirm with callers).
 *
 * Returns VMX_SUCCESS, VMX_FAIL_VALID (ZF=1), or VMX_FAIL_INVALID (CF=1).
 */
static inline int vmcs_clear(struct vmcs_data* vmcs_ptr) {
    // VMCLEAR takes an m64 operand holding the region address.
    uint64_t vmcs_ptr_64 = (uint64_t)vmcs_ptr;
    uint8_t ret_valid = 0;
    uint8_t ret_invalid = 0;

    __asm__ __volatile__ (
            VMCLEAR_OPCODE
            EAX_06_MODRM          // m64 operand at [%eax] = &vmcs_ptr_64
            "seteb %0;" // fail valid (ZF=1)
            "setnaeb %1;" // fail invalid (CF=1)
            : "=q"(ret_valid), "=q"(ret_invalid)
            : "a"(&vmcs_ptr_64), "0"(ret_valid), "1"(ret_invalid)
            : "memory");

    CHECK_VMXFAIL(ret_valid, ret_invalid);
  
    return VMX_SUCCESS;
}
98
99
/* Execute VMRESUME using the current VMCS.
 *
 * Returns VMX_SUCCESS, VMX_FAIL_VALID (ZF=1), or VMX_FAIL_INVALID (CF=1).
 * NOTE(review): per the VMX ISA a successful VMRESUME transfers control
 * to the guest rather than falling through, so the flag checks only run
 * when the instruction fails immediately -- verify the host re-entry
 * path before relying on the VMX_SUCCESS return.
 */
static inline int vmcs_resume() {
    uint8_t ret_valid = 0;
    uint8_t ret_invalid = 0;

    __asm__ __volatile__ (
                VMRESUME_OPCODE
                "seteb %0;"   // fail valid (ZF=1)
                "setnaeb %1;" // fail invalid (CF=1)
                : "=q"(ret_valid), "=q"(ret_invalid)
                : "0"(ret_valid), "1"(ret_invalid)
                : "memory");

    CHECK_VMXFAIL(ret_valid, ret_invalid);

    return VMX_SUCCESS;
}
116
117
/* Execute VMPTRLD to make the VMCS referenced by vmcs_ptr current.
 *
 * vmcs_ptr: VMCS region pointer -- presumably a physical address, as the
 *           instruction requires (NOTE(review): confirm with callers).
 *
 * Returns VMX_SUCCESS, VMX_FAIL_VALID (ZF=1), or VMX_FAIL_INVALID (CF=1).
 */
static inline int vmcs_load(struct vmcs_data* vmcs_ptr) {
    // VMPTRLD takes an m64 operand holding the region address.
    uint64_t vmcs_ptr_64 = (uint64_t)vmcs_ptr;
    uint8_t ret_valid = 0;
    uint8_t ret_invalid = 0;
    
    __asm__ __volatile__ (
                VMPTRLD_OPCODE
                EAX_06_MODRM      // m64 operand at [%eax] = &vmcs_ptr_64
                "seteb %0;" // fail valid (ZF=1)
                "setnaeb %1;"  // fail invalid (CF=1)
                : "=q"(ret_valid), "=q"(ret_invalid)
                : "a"(&vmcs_ptr_64), "0"(ret_valid), "1"(ret_invalid)
                : "memory");
    
    CHECK_VMXFAIL(ret_valid, ret_invalid);

    return VMX_SUCCESS;
}
136
137 static inline int vmcs_store(struct vmcs_data* vmcs_ptr) {
138     uint64_t vmcs_ptr_64 = (uint64_t)vmcs_ptr;
139
140     __asm__ __volatile__ (
141                VMPTRST_OPCODE
142                EAX_07_MODRM
143                :
144                : "a"(&vmcs_ptr_64)
145                : "memory");
146
147     return VMX_SUCCESS;
148 }
149
/* Read a VMCS field via VMREAD and narrow the result into *dst.
 *
 * vmcs_index: VMCS field encoding (placed in %eax).
 * dst:        buffer that receives the field value.
 * len:        width of *dst in bytes; must be 2, 4, or 8.
 *
 * Returns VMX_SUCCESS, VMX_FAIL_VALID (ZF=1), or VMX_FAIL_INVALID (CF=1).
 * NOTE(review): if len is not 2/4/8 the switch matches nothing, *dst is
 * left untouched, and VMX_SUCCESS is still returned -- callers must pass
 * a valid width.
 */
static inline int vmcs_read(addr_t vmcs_index, void * dst, int len) {
    uint64_t val = 0;
    uint8_t ret_valid = 0;
    uint8_t ret_invalid = 0;

    __asm__ __volatile__ (  
                VMREAD_OPCODE
                EAX_ECX_MODRM     // field index in %eax, value out in %ecx
                "seteb %0;" // fail valid
                "setnaeb %1;" // fail invalid
                : "=q"(ret_valid), "=q"(ret_invalid), "=c"(val) // Use ECX
                : "a" (vmcs_index), "0"(ret_valid), "1"(ret_invalid)
                : "memory"
                );

    CHECK_VMXFAIL(ret_valid, ret_invalid);

    // TODO: Fix this, will have to do a cast because dst will be variable length
    // Truncate the full-width VMREAD result to the caller's requested width.
    switch(len)
    {
        case 2:
            *((uint16_t*)dst) = (uint16_t)val;
            break;
        case 4:
            *((uint32_t*)dst) = (uint32_t)val;
            break;
        case 8:
            *((uint64_t*)dst) = (uint64_t)val;
            break;
    }


    return VMX_SUCCESS;
}
184
/* Write a value into a VMCS field via VMWRITE.
 *
 * vmcs_index: VMCS field encoding (placed in %eax).
 * value:      value to write (placed in %ecx).
 *
 * Returns VMX_SUCCESS, VMX_FAIL_VALID (ZF=1), or VMX_FAIL_INVALID (CF=1).
 */
static inline int vmcs_write(addr_t vmcs_index, addr_t value) {
    uint8_t ret_valid = 0;
    uint8_t ret_invalid = 0;

    __asm__ __volatile__ (
                VMWRITE_OPCODE
                EAX_ECX_MODRM     // field index in %eax, value in %ecx
                "seteb %0;" // fail valid (ZF=1)
                "setnaeb %1;" // fail invalid (CF=1)
                : "=q" (ret_valid), "=q" (ret_invalid)
                : "a" (vmcs_index), "c"(value), "0"(ret_valid), "1"(ret_invalid)
                : "memory");

    CHECK_VMXFAIL(ret_valid, ret_invalid);

    return VMX_SUCCESS;
}
202
/* Leave VMX root operation via VMXOFF.
 *
 * Returns VMX_SUCCESS, VMX_FAIL_VALID (ZF=1), or VMX_FAIL_INVALID (CF=1).
 */
static inline int vmx_off() {
    uint8_t ret_valid = 0;
    uint8_t ret_invalid = 0;

    __asm__ __volatile__ (
                VMXOFF_OPCODE
                "seteb %0;"   // fail valid (ZF=1)
                "setnaeb %1;" // fail invalid (CF=1)
                : "=q"(ret_valid), "=q"(ret_invalid)
                : "0"(ret_valid), "1"(ret_invalid)
                : "memory");

    CHECK_VMXFAIL(ret_valid, ret_invalid);

    return VMX_SUCCESS;
}
219
220 #endif
221
222 #endif