Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This gives you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute:

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way; see the example below.
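For example, to track a release branch (the branch name below is illustrative; run git branch -r to list the branches that actually exist on the remote):

  git branch -r
  git checkout --track -b Release-1.0 origin/Release-1.0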


VMX 64-bit guest support. Add exit handling for CR4 and EFER accesses.
palacios/src/palacios/vmx_ctrl_regs.c

/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Andy Gocke <agocke@gmail.com>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Andy Gocke <agocke@gmail.com>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmx_ctrl_regs.h>
#include <palacios/vmm.h>
#include <palacios/vmx_lowlevel.h>
#include <palacios/vmx.h>
#include <palacios/vmx_assist.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_direct_paging.h>
#include <palacios/vmm_ctrl_regs.h>

#ifndef V3_CONFIG_DEBUG_VMX
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif

static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual);
static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_val, struct vmx_exit_info * exit_info);
static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg);

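/* The VMX exit qualification for a control-register access encodes an access
 * type: 0 = MOV to CR, 1 = MOV from CR, 2 = CLTS, 3 = LMSW. The handlers
 * below only implement the two MOV forms, hence the access_type < 2 checks. */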
int v3_vmx_handle_cr0_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual, struct vmx_exit_info * exit_info) {

    if (cr_qual->access_type < 2) {
        v3_reg_t * reg = get_reg_ptr(info, cr_qual);

        if (cr_qual->access_type == 0) {

            if (handle_mov_to_cr0(info, reg, exit_info) != 0) {
                PrintError("Could not handle CR0 write\n");
                return -1;
            }
        } else {
            // Mov from cr
            PrintError("Mov From CR0 not handled\n");
            return -1;
        }

        return 0;
    }

    PrintError("Invalid CR0 Access type?? (type=%d)\n", cr_qual->access_type);
    return -1;
}

int v3_vmx_handle_cr3_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {

    if (cr_qual->access_type < 2) {
        v3_reg_t * reg = get_reg_ptr(info, cr_qual);

        if (cr_qual->access_type == 0) {
            return handle_mov_to_cr3(info, reg);
        } else {
            return handle_mov_from_cr3(info, reg);
        }
    }

    PrintError("Invalid CR3 Access type?? (type=%d)\n", cr_qual->access_type);
    return -1;
}

int v3_vmx_handle_cr4_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
    if (cr_qual->access_type < 2) {

        if (cr_qual->access_type == 0) {
            if (v3_handle_cr4_write(info) != 0) {
                PrintError("Could not handle CR4 write\n");
                return -1;
            }
            info->ctrl_regs.cr4 |= 0x2000; // keep CR4.VMXE (bit 13) set: hardware requires it while in VMX operation; the guest itself is not allowed to use VMX
        } else {
            if (v3_handle_cr4_read(info) != 0) {
                PrintError("Could not handle CR4 read\n");
                return -1;
            }
        }

        return 0;
    }

    PrintError("Invalid CR4 Access type?? (type=%d)\n", cr_qual->access_type);
    return -1;
}

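/* Under shadow paging, a guest CR3 write is only recorded in shdw_pg_state;
 * the hardware CR3 continues to point at the shadow page tables, which are
 * re-activated if the guest is already running with paging enabled. */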
static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {

    if (info->shdw_pg_mode == SHADOW_PAGING) {

        /*
        PrintDebug("Old Guest CR3=%p, Old Shadow CR3=%p\n",
                   (void *)info->ctrl_regs.cr3,
                   (void *)info->shdw_pg_state.guest_cr3);
        */

        if (info->cpu_mode == LONG) {
            info->shdw_pg_state.guest_cr3 = (uint64_t)*cr3_reg;
        } else {
            info->shdw_pg_state.guest_cr3 = (uint32_t)*cr3_reg;
        }


        if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
            if (v3_activate_shadow_pt(info) == -1) {
                PrintError("Failed to activate 32 bit shadow page table\n");
                return -1;
            }
        }
        /*
        PrintDebug("New guest CR3=%p, New shadow CR3=%p\n",
                   (void *)info->ctrl_regs.cr3,
                   (void *)info->shdw_pg_state.guest_cr3);
        */
    } else if (info->shdw_pg_mode == NESTED_PAGING) {
        PrintError("Nested paging not available in VMX right now!\n");
        return -1;
    }

    return 0;
}

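/* A guest CR3 read must return the guest's own CR3 value recorded above,
 * not the shadow page table address that the hardware is actually using. */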
static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {

    if (info->shdw_pg_mode == SHADOW_PAGING) {

        if ((v3_get_vm_cpu_mode(info) == LONG) ||
            (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {

            *cr3_reg = (uint64_t)info->shdw_pg_state.guest_cr3;
        } else {
            *cr3_reg = (uint32_t)info->shdw_pg_state.guest_cr3;
        }

    } else {
        PrintError("Unhandled paging mode\n");
        return -1;
    }

    return 0;
}

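/* CR0 writes split into two cases: a PE (protection enable) flip, which is
 * emulated by switching into or out of VMXASSIST, and all other writes, where
 * the shadow CR0 tracks the new value and a PG flip triggers a paging
 * transition (including enabling long mode when the guest has set EFER.LME). */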
static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_cr0, struct vmx_exit_info * exit_info) {
    struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
    struct cr0_32 * shdw_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
    struct cr0_32 * new_shdw_cr0 = (struct cr0_32 *)new_cr0;
    struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;
    uint_t paging_transition = 0;

    /*
      PrintDebug("Old shadow CR0: 0x%x, New shadow CR0: 0x%x\n",
      (uint32_t)info->shdw_pg_state.guest_cr0, (uint32_t)*new_cr0);
    */

    if (new_shdw_cr0->pe != shdw_cr0->pe) {
        /*
          PrintDebug("Guest CR0: 0x%x\n", *(uint32_t *)guest_cr0);
          PrintDebug("Old shadow CR0: 0x%x\n", *(uint32_t *)shdw_cr0);
          PrintDebug("New shadow CR0: 0x%x\n", *(uint32_t *)new_shdw_cr0);
        */

        if (v3_vmxassist_ctx_switch(info) != 0) {
            PrintError("Unable to execute VMXASSIST context switch!\n");
            return -1;
        }

        if (vmx_info->assist_state == VMXASSIST_ENABLED) {
            PrintDebug("Loading VMXASSIST at RIP: %p\n", (void *)(addr_t)info->rip);
        } else {
            PrintDebug("Leaving VMXASSIST and entering protected mode at RIP: %p\n",
                       (void *)(addr_t)info->rip);
        }

        // PE switches modify the RIP directly, so we clear the instr_len field to avoid catastrophe
        exit_info->instr_len = 0;

        //      v3_vmx_restore_vmcs(info);
        //      v3_print_vmcs(info);

    } else {

        if (new_shdw_cr0->pg != shdw_cr0->pg) {
            paging_transition = 1;
        }

        // The shadow always reflects the new value
        *shdw_cr0 = *new_shdw_cr0;

        // We don't care about most of the flags, so let's go for it
        // and set them to the guest values
        *guest_cr0 = *shdw_cr0;

        // Except PG, PE, and NE, which are always set
        guest_cr0->pe = 1;
        guest_cr0->pg = 1;
        guest_cr0->ne = 1;

        if (paging_transition) {
            // Paging transition

            if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
                struct efer_64 * vm_efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
                struct efer_64 * hw_efer = (struct efer_64 *)&(info->ctrl_regs.efer);

                if (vm_efer->lme) {
                    //     PrintDebug("Enabling long mode\n");

                    hw_efer->lma = 1;
                    hw_efer->lme = 1;

                    vmx_info->entry_ctrls.guest_ia32e = 1;
                }

                //            PrintDebug("Activating Shadow Page tables\n");

                if (info->shdw_pg_mode == SHADOW_PAGING) {
                    if (v3_activate_shadow_pt(info) == -1) {
                        PrintError("Failed to activate shadow page tables\n");
                        return -1;
                    }
                }

            } else {

                if (info->shdw_pg_mode == SHADOW_PAGING) {
                    if (v3_activate_passthrough_pt(info) == -1) {
                        PrintError("Failed to activate passthrough page tables\n");
                        return -1;
                    }
                } else {
                    // This is hideous... Let's hope that the 1to1 page table has not been nuked...
                    info->ctrl_regs.cr3 = VMXASSIST_1to1_PT;
                }
            }
        }
    }

    return 0;
}

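/* Map the GPR index from the exit qualification to the corresponding saved
 * guest register (0 = RAX ... 15 = R15). */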
static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
    v3_reg_t * reg = NULL;

    switch (cr_qual->gpr) {
        case 0:
            reg = &(info->vm_regs.rax);
            break;
        case 1:
            reg = &(info->vm_regs.rcx);
            break;
        case 2:
            reg = &(info->vm_regs.rdx);
            break;
        case 3:
            reg = &(info->vm_regs.rbx);
            break;
        case 4:
            reg = &(info->vm_regs.rsp);
            break;
        case 5:
            reg = &(info->vm_regs.rbp);
            break;
        case 6:
            reg = &(info->vm_regs.rsi);
            break;
        case 7:
            reg = &(info->vm_regs.rdi);
            break;
        case 8:
            reg = &(info->vm_regs.r8);
            break;
        case 9:
            reg = &(info->vm_regs.r9);
            break;
        case 10:
            reg = &(info->vm_regs.r10);
            break;
        case 11:
            reg = &(info->vm_regs.r11);
            break;
        case 12:
            reg = &(info->vm_regs.r12);
            break;
        case 13:
            reg = &(info->vm_regs.r13);
            break;
        case 14:
            reg = &(info->vm_regs.r14);
            break;
        case 15:
            reg = &(info->vm_regs.r15);
            break;
    }

    return reg;
}