Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches can be checked out the same way; an example follows.
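For example, to track a release branch (the branch name Release-1.2 below is illustrative; run "git branch -r" first to list the branches that actually exist on the remote):

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2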


Disallow startup with shadow paging + memory region outside 4GB boundary
palacios/src/palacios/vmx_ctrl_regs.c
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Andy Gocke <agocke@gmail.com>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Andy Gocke <agocke@gmail.com>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmx_ctrl_regs.h>
#include <palacios/vmm.h>
#include <palacios/vmx_lowlevel.h>
#include <palacios/vmx.h>
#include <palacios/vmx_assist.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_direct_paging.h>
#include <palacios/vmm_ctrl_regs.h>

#ifndef V3_CONFIG_DEBUG_VMX
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif

static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual);
static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_val, struct vmx_exit_info * exit_info);
static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg);

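/* Note: per the Intel SDM, the exit qualification's access_type field
 * encodes the kind of CR access: 0 = MOV to CR, 1 = MOV from CR,
 * 2 = CLTS, 3 = LMSW. The handlers below only service the two MOV forms. */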
int v3_vmx_handle_cr0_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual, struct vmx_exit_info * exit_info) {

    if (cr_qual->access_type < 2) {
        v3_reg_t * reg = get_reg_ptr(info, cr_qual);

        if (cr_qual->access_type == 0) {

            if (handle_mov_to_cr0(info, reg, exit_info) != 0) {
                PrintError("Could not handle CR0 write\n");
                return -1;
            }
        } else {
            // Mov from cr
            PrintError("Mov From CR0 not handled\n");
            return -1;
        }

        return 0;
    }

    PrintError("Invalid CR0 Access type?? (type=%d)\n", cr_qual->access_type);
    return -1;
}

int v3_vmx_handle_cr3_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {

    if (cr_qual->access_type < 2) {
        v3_reg_t * reg = get_reg_ptr(info, cr_qual);

        if (cr_qual->access_type == 0) {
            return handle_mov_to_cr3(info, reg);
        } else {
            return handle_mov_from_cr3(info, reg);
        }
    }

    PrintError("Invalid CR3 Access type?? (type=%d)\n", cr_qual->access_type);
    return -1;
}

int v3_vmx_handle_cr4_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
    if (cr_qual->access_type < 2) {

        if (cr_qual->access_type == 0) {
            if (v3_handle_cr4_write(info) != 0) {
                PrintError("Could not handle CR4 write\n");
                return -1;
            }
            info->ctrl_regs.cr4 |= 0x2000; // no VMX allowed in guest, so mask CR4.VMXE
        } else {
            if (v3_handle_cr4_read(info) != 0) {
                PrintError("Could not handle CR4 read\n");
                return -1;
            }
        }

        return 0;
    }

    PrintError("Invalid CR4 Access type?? (type=%d)\n", cr_qual->access_type);
    return -1;
}

int v3_vmx_handle_cr8_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
    if (cr_qual->access_type < 2) {

        if (cr_qual->access_type == 0) {
            if (v3_handle_cr8_write(info) != 0) {
                PrintError("Could not handle CR8 write\n");
                return -1;
            }
        } else {
            if (v3_handle_cr8_read(info) != 0) {
                PrintError("Could not handle CR8 read\n");
                return -1;
            }
        }

        return 0;
    }

    PrintError("Invalid CR8 Access type?? (type=%d)\n", cr_qual->access_type);
    return -1;
}

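/* Note: under shadow paging a guest write to CR3 is only recorded in
 * shdw_pg_state; the hardware CR3 keeps pointing at the shadow page
 * tables, which are re-activated below to pick up the new mappings. */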
static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {

    if (info->shdw_pg_mode == SHADOW_PAGING) {

        /*
        PrintDebug("Old Guest CR3=%p, Old Shadow CR3=%p\n",
                   (void *)info->ctrl_regs.cr3,
                   (void *)info->shdw_pg_state.guest_cr3);
        */

        if (info->cpu_mode == LONG) {
            info->shdw_pg_state.guest_cr3 = (uint64_t)*cr3_reg;
        } else {
            info->shdw_pg_state.guest_cr3 = (uint32_t)*cr3_reg;
        }

        if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
            if (v3_activate_shadow_pt(info) == -1) {
                PrintError("Failed to activate 32 bit shadow page table\n");
                return -1;
            }
        }
        /*
        PrintDebug("New guest CR3=%p, New shadow CR3=%p\n",
                   (void *)info->ctrl_regs.cr3,
                   (void *)info->shdw_pg_state.guest_cr3);
        */
    } else if (info->shdw_pg_mode == NESTED_PAGING) {
        PrintError("Nested paging not available in VMX right now!\n");
        return -1;
    }

    return 0;
}

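/* Note: a guest read of CR3 must return the value the guest last wrote,
 * not the shadow page table root actually loaded in hardware. */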
static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {

    if (info->shdw_pg_mode == SHADOW_PAGING) {

        if ((v3_get_vm_cpu_mode(info) == LONG) ||
            (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {

            *cr3_reg = (uint64_t)info->shdw_pg_state.guest_cr3;
        } else {
            *cr3_reg = (uint32_t)info->shdw_pg_state.guest_cr3;
        }

    } else {
        PrintError("Unhandled paging mode\n");
        return -1;
    }

    return 0;
}

static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_cr0, struct vmx_exit_info * exit_info) {
    struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
    struct cr0_32 * shdw_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
    struct cr0_32 * new_shdw_cr0 = (struct cr0_32 *)new_cr0;
    struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;
    uint_t paging_transition = 0;
    extern v3_cpu_arch_t v3_mach_type;

    PrintDebug("Mov to CR0\n");
    PrintDebug("Old shadow CR0: 0x%x, New shadow CR0: 0x%x\n",
               (uint32_t)info->shdw_pg_state.guest_cr0, (uint32_t)*new_cr0);

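    /* Note: a flip of CR0.PE means the guest is switching between real and
     * protected mode. Without unrestricted-guest support VMX cannot run
     * real-mode code directly, so VMXASSIST emulates the transition. */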
    if ((new_shdw_cr0->pe != shdw_cr0->pe) && (vmx_info->assist_state != VMXASSIST_DISABLED)) {
        /*
          PrintDebug("Guest CR0: 0x%x\n", *(uint32_t *)guest_cr0);
          PrintDebug("Old shadow CR0: 0x%x\n", *(uint32_t *)shdw_cr0);
          PrintDebug("New shadow CR0: 0x%x\n", *(uint32_t *)new_shdw_cr0);
        */

        if (v3_vmxassist_ctx_switch(info) != 0) {
            PrintError("Unable to execute VMXASSIST context switch!\n");
            return -1;
        }

        if (vmx_info->assist_state == VMXASSIST_ON) {
            PrintDebug("Loading VMXASSIST at RIP: %p\n", (void *)(addr_t)info->rip);
        } else {
            PrintDebug("Leaving VMXASSIST and entering protected mode at RIP: %p\n",
                       (void *)(addr_t)info->rip);
        }

        // PE switches modify the RIP directly, so we clear the instr_len field to avoid catastrophe
        exit_info->instr_len = 0;

        //      v3_vmx_restore_vmcs(info);
        //      v3_print_vmcs(info);

    } else {

        if (new_shdw_cr0->pg != shdw_cr0->pg) {
            paging_transition = 1;
        }

        // Except PG, PE, and NE, which are always set
        if ((info->shdw_pg_mode == SHADOW_PAGING) ||
            (v3_mach_type != V3_VMX_EPT_UG_CPU)) {

            // The shadow always reflects the new value
            *shdw_cr0 = *new_shdw_cr0;

            // We don't care about most of the flags, so let's go for it
            // and set them to the guest values
            *guest_cr0 = *shdw_cr0;

            guest_cr0->pe = 1;
            guest_cr0->pg = 1;
        } else {
            // Unrestricted guest
            //    *(uint32_t *)shdw_cr0 = (0x00000020 & *(uint32_t *)new_shdw_cr0);

            *guest_cr0 = *new_shdw_cr0;
        }

        guest_cr0->ne = 1;
        guest_cr0->et = 1;

        if (paging_transition) {
            // Paging transition

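            /* Note: if the guest turned paging on while EFER.LME is set, it
             * is entering long mode. On bare hardware EFER.LMA would be set
             * by the CPU itself; under VMX the VMM must set LMA and the
             * "IA-32e mode guest" VM-entry control by hand. */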
            if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
                struct efer_64 * vm_efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
                struct efer_64 * hw_efer = (struct efer_64 *)&(info->ctrl_regs.efer);

                if (vmx_info->assist_state != VMXASSIST_DISABLED) {
                    if (vm_efer->lme) {
                        PrintDebug("Enabling long mode\n");

                        hw_efer->lma = 1;
                        hw_efer->lme = 1;

                        vmx_info->entry_ctrls.guest_ia32e = 1;
                    }
                } else {
                    if (hw_efer->lme) {
                        PrintDebug("Enabling long mode\n");

                        hw_efer->lma = 1;

                        vmx_info->entry_ctrls.guest_ia32e = 1;
                    }
                }

                //            PrintDebug("Activating Shadow Page tables\n");

                if (info->shdw_pg_mode == SHADOW_PAGING) {
                    if (v3_activate_shadow_pt(info) == -1) {
                        PrintError("Failed to activate shadow page tables\n");
                        return -1;
                    }
                }

            } else {

                if (info->shdw_pg_mode == SHADOW_PAGING) {
                    if (v3_activate_passthrough_pt(info) == -1) {
                        PrintError("Failed to activate passthrough page tables\n");
                        return -1;
                    }
                } else {
                    // This is hideous... Let's hope that the 1to1 page table has not been nuked...
                    info->ctrl_regs.cr3 = VMXASSIST_1to1_PT;
                }
            }
        }
    }

    return 0;
}

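/* Note: this maps the GPR index from the VM-exit qualification (bits 11:8
 * for MOV-CR exits) to the corresponding saved guest register; the encoding
 * follows the standard x86-64 numbering, 0 = RAX through 15 = R15. */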
static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
    v3_reg_t * reg = NULL;

    switch (cr_qual->gpr) {
        case 0:
            reg = &(info->vm_regs.rax);
            break;
        case 1:
            reg = &(info->vm_regs.rcx);
            break;
        case 2:
            reg = &(info->vm_regs.rdx);
            break;
        case 3:
            reg = &(info->vm_regs.rbx);
            break;
        case 4:
            reg = &(info->vm_regs.rsp);
            break;
        case 5:
            reg = &(info->vm_regs.rbp);
            break;
        case 6:
            reg = &(info->vm_regs.rsi);
            break;
        case 7:
            reg = &(info->vm_regs.rdi);
            break;
        case 8:
            reg = &(info->vm_regs.r8);
            break;
        case 9:
            reg = &(info->vm_regs.r9);
            break;
        case 10:
            reg = &(info->vm_regs.r10);
            break;
        case 11:
            reg = &(info->vm_regs.r11);
            break;
        case 12:
            reg = &(info->vm_regs.r12); // was r11: copy/paste bug, case 12 must map to R12
            break;
        case 13:
            reg = &(info->vm_regs.r13);
            break;
        case 14:
            reg = &(info->vm_regs.r14);
            break;
        case 15:
            reg = &(info->vm_regs.r15);
            break;
    }

    return reg;
}