Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute:
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


vmx fixes
[palacios.git] / palacios / src / palacios / vmx_ctrl_regs.c
1
2 /*
3  * This file is part of the Palacios Virtual Machine Monitor developed
4  * by the V3VEE Project with funding from the United States National 
5  * Science Foundation and the Department of Energy.  
6  *
7  * The V3VEE Project is a joint project between Northwestern University
8  * and the University of New Mexico.  You can find out more at 
9  * http://www.v3vee.org
10  *
11  * Copyright (c) 2008, Andy Gocke <agocke@gmail.com>
12  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
13  * All rights reserved.
14  *
15  * Author: Andy Gocke <agocke@gmail.com>
16  *
17  * This is free software.  You are permitted to use,
18  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
19  */
20
21 #include <palacios/vmx_ctrl_regs.h>
22 #include <palacios/vmm.h>
23 #include <palacios/vmx_lowlevel.h>
24 #include <palacios/vmx.h>
25 #include <palacios/vmx_assist.h>
26 #include <palacios/vm_guest_mem.h>
27 #include <palacios/vmm_direct_paging.h>
28 #include <palacios/vmm_ctrl_regs.h>
29
30 #ifndef V3_CONFIG_DEBUG_VMX
31 #undef PrintDebug
32 #define PrintDebug(fmt, args...)
33 #endif
34
35 static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual);
36 static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_val, struct vmx_exit_info * exit_info);
37 static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
38 static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
39
40 int v3_vmx_handle_cr0_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual, struct vmx_exit_info * exit_info) {
41
42     if (cr_qual->access_type < 2) {
43         v3_reg_t * reg = get_reg_ptr(info, cr_qual);
44         
45         if (cr_qual->access_type == 0) {
46
47             if (handle_mov_to_cr0(info, reg, exit_info) != 0) {
48                 PrintError("Could not handle CR0 write\n");
49                 return -1;
50             }
51         } else {
52             // Mov from cr
53             PrintError("Mov From CR0 not handled\n");
54             return -1;
55         }
56
57         return 0;
58     }
59
60     PrintError("Invalid CR0 Access type?? (type=%d)\n", cr_qual->access_type);
61     return -1;
62 }
63
64 int v3_vmx_handle_cr3_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
65
66     if (cr_qual->access_type < 2) {
67         v3_reg_t * reg = get_reg_ptr(info, cr_qual);
68
69         if (cr_qual->access_type == 0) {
70             return handle_mov_to_cr3(info, reg);
71         } else {
72             return handle_mov_from_cr3(info, reg);
73         }
74     }
75
76     PrintError("Invalid CR3 Access type?? (type=%d)\n", cr_qual->access_type);
77     return -1;
78 }
79
80 int v3_vmx_handle_cr4_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
81     if (cr_qual->access_type < 2) {
82
83         if (cr_qual->access_type == 0) {
84             if (v3_handle_cr4_write(info) != 0) {
85                 PrintError("Could not handle CR4 write\n");
86                 return -1;
87             }
88             info->ctrl_regs.cr4 |= 0x2000; // no VMX allowed in guest, so mask CR4.VMXE
89         } else {
90             if (v3_handle_cr4_read(info) != 0) {
91                 PrintError("Could not handle CR4 read\n");
92                 return -1;
93             }
94         }
95
96         return 0;
97     }
98
99     PrintError("Invalid CR4 Access type?? (type=%d)\n", cr_qual->access_type);
100     return -1;
101 }
102
103 static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
104
105     if (info->shdw_pg_mode == SHADOW_PAGING) {
106
107         /*
108         PrintDebug("Old Guest CR3=%p, Old Shadow CR3=%p\n",
109                    (void *)info->ctrl_regs.cr3,
110                    (void *)info->shdw_pg_state.guest_cr3);
111         */
112
113         if (info->cpu_mode == LONG) {
114             info->shdw_pg_state.guest_cr3 = (uint64_t)*cr3_reg;
115         } else {
116             info->shdw_pg_state.guest_cr3 = (uint32_t)*cr3_reg;
117         }
118
119
120         if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
121             if (v3_activate_shadow_pt(info) == -1) {
122                 PrintError("Failed to activate 32 bit shadow page table\n");
123                 return -1;
124             }
125         }
126         /*
127         PrintDebug("New guest CR3=%p, New shadow CR3=%p\n",
128                    (void *)info->ctrl_regs.cr3,
129                    (void *)info->shdw_pg_state.guest_cr3);
130         */
131     } else if (info->shdw_pg_mode == NESTED_PAGING) {
132         PrintError("Nested paging not available in VMX right now!\n");
133         return -1;
134     }
135
136
137
138     return 0;
139 }
140
141 static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
142
143
144     if (info->shdw_pg_mode == SHADOW_PAGING) {
145
146         if ((v3_get_vm_cpu_mode(info) == LONG) ||
147             (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {
148
149             *cr3_reg = (uint64_t)info->shdw_pg_state.guest_cr3;
150         } else {
151             *cr3_reg = (uint32_t)info->shdw_pg_state.guest_cr3;
152         }
153
154     } else {
155         PrintError("Unhandled paging mode\n");
156         return -1;
157     }
158
159
160     return 0;
161 }
162
/*
 * Emulate a guest MOV-to-CR0. Three concerns are handled here:
 *
 *  1. PE bit flips (real <-> protected mode) while VMXASSIST is enabled:
 *     bounce through the VMXASSIST context switch, which rewrites RIP.
 *  2. Propagating the new CR0 into the shadow copy and the hardware copy,
 *     forcing PE/PG on when the CPU is not an unrestricted-guest EPT part.
 *  3. PG bit flips (paging transitions): set up EFER.LMA / IA-32e entry
 *     controls for long mode and (re)activate shadow or passthrough
 *     page tables as appropriate.
 *
 * Returns 0 on success, -1 on failure.
 */
static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_cr0, struct vmx_exit_info * exit_info) {
    struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
    struct cr0_32 * shdw_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
    struct cr0_32 * new_shdw_cr0 = (struct cr0_32 *)new_cr0;
    struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;
    uint_t paging_transition = 0;
    extern v3_cpu_arch_t v3_mach_type;


    PrintDebug("Mov to CR0\n");
    PrintDebug("Old shadow CR0: 0x%x, New shadow CR0: 0x%x\n",
               (uint32_t)info->shdw_pg_state.guest_cr0, (uint32_t)*new_cr0);

    // PE is flipping and VMXASSIST is in use: a real/protected mode switch.
    if ((new_shdw_cr0->pe != shdw_cr0->pe) && (vmx_info->assist_state != VMXASSIST_DISABLED)) {
        /*
          PrintDebug("Guest CR0: 0x%x\n", *(uint32_t *)guest_cr0);
          PrintDebug("Old shadow CR0: 0x%x\n", *(uint32_t *)shdw_cr0);
          PrintDebug("New shadow CR0: 0x%x\n", *(uint32_t *)new_shdw_cr0);
        */

        if (v3_vmxassist_ctx_switch(info) != 0) {
            PrintError("Unable to execute VMXASSIST context switch!\n");
            return -1;
        }
        
        if (vmx_info->assist_state == VMXASSIST_ON) {
            PrintDebug("Loading VMXASSIST at RIP: %p\n", (void *)(addr_t)info->rip);
        } else {
            PrintDebug("Leaving VMXASSIST and entering protected mode at RIP: %p\n",
                       (void *)(addr_t)info->rip);
        }

        // PE switches modify the RIP directly, so we clear the instr_len field to avoid catastrophe
        exit_info->instr_len = 0;

        //      v3_vmx_restore_vmcs(info);
        //      v3_print_vmcs(info);

    } else {

        // PG is flipping: remember so we can fix up paging state below.
        if (new_shdw_cr0->pg != shdw_cr0->pg) {
            paging_transition = 1;
        }

        
        // Except PG, PE, and NE, which are always set
        if ((info->shdw_pg_mode == SHADOW_PAGING) ||  
            (v3_mach_type != V3_VMX_EPT_UG_CPU)) {
            
            // The shadow always reflects the new value
            *shdw_cr0 = *new_shdw_cr0;
            

            // We don't care about most of the flags, so lets go for it 
            // and set them to the guest values
            *guest_cr0 = *shdw_cr0;
        
            // Non-unrestricted guests must always run with PE/PG on.
            guest_cr0->pe = 1;
            guest_cr0->pg = 1;
        } else {
            // Unrestricted guest 
            //    *(uint32_t *)shdw_cr0 = (0x00000020 & *(uint32_t *)new_shdw_cr0);

            *guest_cr0 = *new_shdw_cr0;
        }

        // NE and ET are unconditionally forced on in the hardware CR0.
        guest_cr0->ne = 1;
        guest_cr0->et = 1;

        
        if (paging_transition) {
            // Paging transition
            
            if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
                // Paging was just turned ON: handle long-mode activation.
                struct efer_64 * vm_efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
                struct efer_64 * hw_efer = (struct efer_64 *)&(info->ctrl_regs.efer);
                
                // With VMXASSIST, the guest's EFER.LME lives in the shadow
                // copy; without it, the hardware EFER is authoritative.
                if (vmx_info->assist_state != VMXASSIST_DISABLED) {
                    if (vm_efer->lme) {
                        PrintDebug("Enabling long mode\n");
                        
                        hw_efer->lma = 1;
                        hw_efer->lme = 1;
                        
                        vmx_info->entry_ctrls.guest_ia32e = 1;
                    }
                } else {
                    if (hw_efer->lme) {
                        PrintDebug("Enabling long mode\n");
                        
                        hw_efer->lma = 1;
                        
                        vmx_info->entry_ctrls.guest_ia32e = 1;
                    }
                }
                
                //            PrintDebug("Activating Shadow Page tables\n");
                
                if (info->shdw_pg_mode == SHADOW_PAGING) {
                    if (v3_activate_shadow_pt(info) == -1) {
                        PrintError("Failed to activate shadow page tables\n");
                        return -1;
                    }
                }
                
            } else {
                // Paging was just turned OFF.

                if (info->shdw_pg_mode == SHADOW_PAGING) {
                    if (v3_activate_passthrough_pt(info) == -1) {
                        PrintError("Failed to activate passthrough page tables\n");
                        return -1;
                    }
                } else {
                    // This is hideous... Let's hope that the 1to1 page table has not been nuked...
                    info->ctrl_regs.cr3 = VMXASSIST_1to1_PT;
                }
            }
        }
    }

    return 0;
}
285
286 static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
287     v3_reg_t * reg = NULL;
288
289     switch (cr_qual->gpr) {
290         case 0:
291             reg = &(info->vm_regs.rax);
292             break;
293         case 1:
294             reg = &(info->vm_regs.rcx);
295             break;
296         case 2:
297             reg = &(info->vm_regs.rdx);
298             break;
299         case 3:
300             reg = &(info->vm_regs.rbx);
301             break;
302         case 4:
303             reg = &(info->vm_regs.rsp);
304             break;
305         case 5:
306             reg = &(info->vm_regs.rbp);
307             break;
308         case 6:
309             reg = &(info->vm_regs.rsi);
310             break;
311         case 7:
312             reg = &(info->vm_regs.rdi);
313             break;
314         case 8:
315             reg = &(info->vm_regs.r8);
316             break;
317         case 9:
318             reg = &(info->vm_regs.r9);
319             break;
320         case 10:
321             reg = &(info->vm_regs.r10);
322             break;
323         case 11:
324             reg = &(info->vm_regs.r11);
325             break;
326         case 12:
327             reg = &(info->vm_regs.r11);
328             break;
329         case 13:
330             reg = &(info->vm_regs.r13);
331             break;
332         case 14:
333             reg = &(info->vm_regs.r14);
334             break;
335         case 15:
336             reg = &(info->vm_regs.r15);
337             break;
338     }
339
340     return reg;
341 }
342
343