Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are handled the same way.
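For example, assuming a remote release branch named Release-1.3 exists (the name here is only a placeholder; use git branch -r to list the actual branches):

  git branch -r
  git checkout --track -b Release-1.3 origin/Release-1.3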


Cleanup of linkage issues for non-Linux hosts
palacios/include/palacios/vmx_lowlevel.h
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#ifndef __VMX_LOWLEVEL_H__
#define __VMX_LOWLEVEL_H__

#ifdef __V3VEE__

#include <palacios/vmcs.h>

#define VMX_SUCCESS         0
#define VMX_FAIL_INVALID    1
#define VMX_FAIL_VALID      2

// vmfail macro
#define CHECK_VMXFAIL(ret_valid, ret_invalid)   \
    if (ret_valid) {                            \
        return VMX_FAIL_VALID;                  \
    } else if (ret_invalid) {                   \
        return VMX_FAIL_INVALID;                \
    }
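
/*
 * Note on error reporting: the VMX instructions signal their status through
 * RFLAGS.  VMfailInvalid sets CF (e.g. an invalid operand or no current
 * VMCS), VMfailValid sets ZF and deposits an error code in the
 * VM-instruction error field of the current VMCS, and success clears both
 * flags.  The seteb/setnaeb pairs below capture ZF and CF respectively so
 * that CHECK_VMXFAIL can turn them into the return codes above.
 */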

/* Opcode definitions for all the VM instructions */

#define VMCLEAR_OPCODE  ".byte 0x66,0xf,0xc7;" /* reg=/6 */
#define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3;"
#define VMPTRLD_OPCODE  ".byte 0x0f,0xc7;" /* reg=/6 */
#define VMPTRST_OPCODE  ".byte 0x0f,0xc7;" /* reg=/7 */
#define VMREAD_OPCODE   ".byte 0x0f,0x78;"
#define VMWRITE_OPCODE  ".byte 0x0f,0x79;"
#define VMXOFF_OPCODE   ".byte 0x0f,0x01,0xc4;"
#define VMXON_OPCODE    ".byte 0xf3,0x0f,0xc7;" /* reg=/6 */


/* Mod/rm definitions for intel registers/memory */
#define EAX_ECX_MODRM   ".byte 0xc1;"
// %eax with /6 reg
#define EAX_06_MODRM    ".byte 0x30;"
// %eax with /7 reg
#define EAX_07_MODRM    ".byte 0x38;"

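/*
 * For reference, the hand-assembled ModRM bytes decode (mod/reg/rm) as:
 *   0xc1 = 11 000 001  ->  reg = %eax, r/m = %ecx         (register form)
 *   0x30 = 00 110 000  ->  /6 opcode extension, r/m = memory at (%eax)
 *   0x38 = 00 111 000  ->  /7 opcode extension, r/m = memory at (%eax)
 *
 * With an assembler that knows the VMX mnemonics, the .byte sequences are
 * not needed; vmcs_clear() below, for instance, could be written roughly as
 * (illustrative sketch only):
 *
 *   __asm__ __volatile__ ("vmclear %0" : : "m"(vmcs_ptr_64) : "cc", "memory");
 *
 * The raw encodings are presumably kept so the code still assembles with
 * older binutils that predate the VMX instruction set.
 */
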
static inline int vmcs_clear(addr_t vmcs_ptr) {
    uint64_t vmcs_ptr_64 __attribute__ ((aligned(8))) = (uint64_t)vmcs_ptr;
    uint8_t ret_valid = 0;
    uint8_t ret_invalid = 0;

    __asm__ __volatile__ (
            VMCLEAR_OPCODE
            EAX_06_MODRM
            "seteb %0;"   // fail valid (ZF=1)
            "setnaeb %1;" // fail invalid (CF=1)
            : "=q"(ret_valid), "=q"(ret_invalid)
            : "a"(&vmcs_ptr_64), "0"(ret_valid), "1"(ret_invalid)
            : "memory");

    CHECK_VMXFAIL(ret_valid, ret_invalid);

    return VMX_SUCCESS;
}

static inline int vmcs_load(addr_t vmcs_ptr) {
    uint64_t vmcs_ptr_64 = (uint64_t)vmcs_ptr;
    uint8_t ret_valid = 0;
    uint8_t ret_invalid = 0;

    __asm__ __volatile__ (
                VMPTRLD_OPCODE
                EAX_06_MODRM
                "seteb %0;"   // fail valid (ZF=1)
                "setnaeb %1;" // fail invalid (CF=1)
                : "=q"(ret_valid), "=q"(ret_invalid)
                : "a"(&vmcs_ptr_64), "0"(ret_valid), "1"(ret_invalid)
                : "memory");

    CHECK_VMXFAIL(ret_valid, ret_invalid);

    return VMX_SUCCESS;
}

static inline uint64_t vmcs_store() {
    uint64_t vmcs_ptr = 0;

    __asm__ __volatile__ (
               VMPTRST_OPCODE
               EAX_07_MODRM
               :
               : "a"(&vmcs_ptr)
               : "memory");

    return vmcs_ptr;
}

static inline int vmcs_read(vmcs_field_t vmcs_field, void * dst) {
    addr_t val = 0;
    uint8_t ret_valid = 0;
    uint8_t ret_invalid = 0;

    __asm__ __volatile__ (
                VMREAD_OPCODE
                EAX_ECX_MODRM
                "seteb %1;"   // fail valid (ZF=1)
                "setnaeb %2;" // fail invalid (CF=1)
                : "=c"(val), "=q"(ret_valid), "=q"(ret_invalid)
                : "a" (vmcs_field), "0"(0), "1"(ret_valid), "2"(ret_invalid)
                : "memory"
                );

    CHECK_VMXFAIL(ret_valid, ret_invalid);

    switch (v3_vmcs_get_field_len(vmcs_field)) {
        case 2:
            *((uint16_t *)dst) = (uint16_t)val;
            break;
        case 4:
            *((uint32_t *)dst) = (uint32_t)val;
            break;
        case 8:
            *((uint64_t *)dst) = (uint64_t)val;
            break;
        default:
            return -1;
    }

    return VMX_SUCCESS;
}

static inline int vmcs_write(vmcs_field_t vmcs_field, addr_t value) {
    uint8_t ret_valid = 0;
    uint8_t ret_invalid = 0;

    __asm__ __volatile__ (
                VMWRITE_OPCODE
                EAX_ECX_MODRM
                "seteb %0;"   // fail valid (ZF=1)
                "setnaeb %1;" // fail invalid (CF=1)
                : "=q" (ret_valid), "=q" (ret_invalid)
                : "a" (vmcs_field), "c"(value)
                : "memory");

    CHECK_VMXFAIL(ret_valid, ret_invalid);

    return VMX_SUCCESS;
}


static inline int vmx_on(addr_t vmxon_ptr) {
    uint64_t vmxon_ptr_64 __attribute__((aligned(8))) = (uint64_t)vmxon_ptr;
    uint8_t ret_invalid = 0;

    __asm__ __volatile__ (
                VMXON_OPCODE
                EAX_06_MODRM
                "setnaeb %0;" // fail invalid (CF=1)
                : "=q"(ret_invalid)
                : "a"(&vmxon_ptr_64), "0"(ret_invalid)
                : "memory");

    if (ret_invalid) {
        return VMX_FAIL_INVALID;
    } else {
        return VMX_SUCCESS;
    }
}

static inline int vmx_off() {
    uint8_t ret_valid = 0;
    uint8_t ret_invalid = 0;

    __asm__ __volatile__ (
                VMXOFF_OPCODE
                "seteb %0;"
                "setnaeb %1;"
                : "=q"(ret_valid), "=q"(ret_invalid)
                : "0"(ret_valid), "1"(ret_invalid)
                : "memory");

    CHECK_VMXFAIL(ret_valid, ret_invalid);

    return VMX_SUCCESS;
}

static inline int enable_vmx() {
#ifdef __V3_64BIT__
    // Set CR4.VMXE (bit 13), required before VMXON can execute
    __asm__ __volatile__ (
                          "movq %%cr4, %%rcx;"
                          "orq  $0x00002000, %%rcx;"
                          "movq %%rcx, %%cr4;"
                          :
                          :
                          : "%rcx"
                          );

    // Set CR0.NE (bit 5), which must be 1 while in VMX operation
    __asm__ __volatile__ (
                          "movq %%cr0, %%rcx; "
                          "orq  $0x00000020,%%rcx; "
                          "movq %%rcx, %%cr0;"
                          :
                          :
                          : "%rcx"
                          );
#elif __V3_32BIT__
    // Set CR4.VMXE (bit 13)
    __asm__ __volatile__ (
                          "movl %%cr4, %%ecx;"
                          "orl  $0x00002000, %%ecx;"
                          "movl %%ecx, %%cr4;"
                          :
                          :
                          : "%ecx"
                          );

    // Set CR0.NE (bit 5)
    __asm__ __volatile__ (
                          "movl %%cr0, %%ecx; "
                          "orl  $0x00000020,%%ecx; "
                          "movl %%ecx, %%cr0;"
                          :
                          :
                          : "%ecx"
                          );
#endif

    return 0;
}


#endif /* __V3VEE__ */

#endif /* __VMX_LOWLEVEL_H__ */
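
For orientation, here is a minimal sketch of how a host layer might drive these helpers. It assumes the caller has already allocated a page-aligned VMXON region and a VMCS, written the VMCS revision identifier into each, and verified VMX support via CPUID and IA32_FEATURE_CONTROL; the names vmx_bringup_sketch, vmxon_region_pa, and vmcs_pa are illustrative, not part of the Palacios API.

#include <palacios/vmx_lowlevel.h>

/* Illustrative only -- not part of vmx_lowlevel.h */
static int vmx_bringup_sketch(addr_t vmxon_region_pa, addr_t vmcs_pa) {
    enable_vmx();                                  /* set CR4.VMXE and CR0.NE */

    if (vmx_on(vmxon_region_pa) != VMX_SUCCESS) {  /* enter VMX root operation */
        return -1;
    }

    if (vmcs_clear(vmcs_pa) != VMX_SUCCESS) {      /* put the VMCS in the clear state */
        return -1;
    }

    if (vmcs_load(vmcs_pa) != VMX_SUCCESS) {       /* make it the current VMCS */
        return -1;
    }

    /* vmcs_read()/vmcs_write() now operate on the current VMCS;
     * vmcs_store() returns its physical address as a sanity check. */
    if (vmcs_store() != (uint64_t)vmcs_pa) {
        vmx_off();
        return -1;
    }

    return 0;
}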