Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This gives you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way; see the example below.
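
For example, to work on a release branch instead (the branch name here is illustrative; run git branch -r inside the clone to list the branches that actually exist):

  cd palacios
  git checkout --track -b Release-1.2 origin/Release-1.2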


palacios/src/palacios/vmx_ctrl_regs.c (at commit 0f10f7e739be34b737b5b727b5eef22490ac53cb)

/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Andy Gocke <agocke@gmail.com>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Andy Gocke <agocke@gmail.com>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmx_ctrl_regs.h>
#include <palacios/vmm.h>
#include <palacios/vmx_lowlevel.h>
#include <palacios/vmx.h>
#include <palacios/vmx_assist.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_direct_paging.h>
#include <palacios/vmm_ctrl_regs.h>

#ifndef V3_CONFIG_DEBUG_VMX
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
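
/* With V3_CONFIG_DEBUG_VMX unset, PrintDebug expands to nothing, so all
 * of the debug output in this file compiles away.
 */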

static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual);
static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_val, struct vmx_exit_info * exit_info);
static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg);

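/* A note on the exit qualification for CR accesses (Intel SDM Vol. 3,
 * "Exit Qualification for Control-Register Accesses"): access_type is
 * 0 for MOV to CR, 1 for MOV from CR, 2 for CLTS, and 3 for LMSW.
 * The handlers below only service the two MOV forms, which is why they
 * all check access_type < 2 before dispatching.
 */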
int v3_vmx_handle_cr0_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual, struct vmx_exit_info * exit_info) {

    if (cr_qual->access_type < 2) {
        v3_reg_t * reg = get_reg_ptr(info, cr_qual);

        if (cr_qual->access_type == 0) {

            if (handle_mov_to_cr0(info, reg, exit_info) != 0) {
                PrintError("Could not handle CR0 write\n");
                return -1;
            }
        } else {
            // Mov from cr
            PrintError("Mov From CR0 not handled\n");
            return -1;
        }

        return 0;
    }

    PrintError("Invalid CR0 Access type?? (type=%d)\n", cr_qual->access_type);
    return -1;
}

int v3_vmx_handle_cr3_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {

    if (cr_qual->access_type < 2) {
        v3_reg_t * reg = get_reg_ptr(info, cr_qual);

        if (cr_qual->access_type == 0) {
            return handle_mov_to_cr3(info, reg);
        } else {
            return handle_mov_from_cr3(info, reg);
        }
    }

    PrintError("Invalid CR3 Access type?? (type=%d)\n", cr_qual->access_type);
    return -1;
}

int v3_vmx_handle_cr4_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
    if (cr_qual->access_type < 2) {

        if (cr_qual->access_type == 0) {
            if (v3_handle_cr4_write(info) != 0) {
                PrintError("Could not handle CR4 write\n");
                return -1;
            }
            info->ctrl_regs.cr4 |= 0x2000; // the guest may not use VMX, but hardware requires CR4.VMXE (bit 13) to stay set while in VMX operation
        } else {
            if (v3_handle_cr4_read(info) != 0) {
                PrintError("Could not handle CR4 read\n");
                return -1;
            }
        }

        return 0;
    }

    PrintError("Invalid CR4 Access type?? (type=%d)\n", cr_qual->access_type);
    return -1;
}

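/* CR8 aliases the local APIC's task-priority register (TPR); MOV to/from
 * CR8 is only encodable in 64-bit mode.
 */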
int v3_vmx_handle_cr8_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
    if (cr_qual->access_type < 2) {

        if (cr_qual->access_type == 0) {
            if (v3_handle_cr8_write(info) != 0) {
                PrintError("Could not handle CR8 write\n");
                return -1;
            }
        } else {
            if (v3_handle_cr8_read(info) != 0) {
                PrintError("Could not handle CR8 read\n");
                return -1;
            }
        }

        return 0;
    }

    PrintError("Invalid CR8 Access type?? (type=%d)\n", cr_qual->access_type);
    return -1;
}

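/* With shadow paging, the value the guest writes to CR3 is only recorded
 * in shdw_pg_state.guest_cr3; the real CR3 keeps pointing at the shadow
 * page tables, which are reactivated below when the guest is already
 * running with paging enabled.
 */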
static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {

    if (info->shdw_pg_mode == SHADOW_PAGING) {

        /*
        PrintDebug("Old Guest CR3=%p, Old Shadow CR3=%p\n",
                   (void *)info->ctrl_regs.cr3,
                   (void *)info->shdw_pg_state.guest_cr3);
        */

        if (info->cpu_mode == LONG) {
            info->shdw_pg_state.guest_cr3 = (uint64_t)*cr3_reg;
        } else {
            info->shdw_pg_state.guest_cr3 = (uint32_t)*cr3_reg;
        }

        if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
            if (v3_activate_shadow_pt(info) == -1) {
                PrintError("Failed to activate 32 bit shadow page table\n");
                return -1;
            }
        }

        /*
        PrintDebug("New guest CR3=%p, New shadow CR3=%p\n",
                   (void *)info->ctrl_regs.cr3,
                   (void *)info->shdw_pg_state.guest_cr3);
        */
    } else if (info->shdw_pg_mode == NESTED_PAGING) {
        PrintError("Nested paging not available in VMX right now!\n");
        return -1;
    }

    return 0;
}

static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {

    if (info->shdw_pg_mode == SHADOW_PAGING) {

        if ((v3_get_vm_cpu_mode(info) == LONG) ||
            (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {

            *cr3_reg = (uint64_t)info->shdw_pg_state.guest_cr3;
        } else {
            *cr3_reg = (uint32_t)info->shdw_pg_state.guest_cr3;
        }

    } else {
        PrintError("Unhandled paging mode\n");
        return -1;
    }

    return 0;
}

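/* CR0 writes are the most involved case.  A toggle of CR0.PE while
 * VMXASSIST is in use means the guest is switching between real and
 * protected mode, so we context-switch into or out of the VMXASSIST
 * environment instead of applying the write.  Otherwise the new value is
 * folded into the shadow/guest copies of CR0, and a toggle of CR0.PG
 * triggers the paging-transition handling below (EFER.LMA/LME fixup and
 * shadow or passthrough page-table activation).
 */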
static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_cr0, struct vmx_exit_info * exit_info) {
    struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
    struct cr0_32 * shdw_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
    struct cr0_32 * new_shdw_cr0 = (struct cr0_32 *)new_cr0;
    struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;
    uint_t paging_transition = 0;
    extern v3_cpu_arch_t v3_mach_type;

    PrintDebug("Mov to CR0\n");
    PrintDebug("Old shadow CR0: 0x%x, New shadow CR0: 0x%x\n",
               (uint32_t)info->shdw_pg_state.guest_cr0, (uint32_t)*new_cr0);

    if ((new_shdw_cr0->pe != shdw_cr0->pe) && (vmx_info->assist_state != VMXASSIST_DISABLED)) {
        /*
          PrintDebug("Guest CR0: 0x%x\n", *(uint32_t *)guest_cr0);
          PrintDebug("Old shadow CR0: 0x%x\n", *(uint32_t *)shdw_cr0);
          PrintDebug("New shadow CR0: 0x%x\n", *(uint32_t *)new_shdw_cr0);
        */

        if (v3_vmxassist_ctx_switch(info) != 0) {
            PrintError("Unable to execute VMXASSIST context switch!\n");
            return -1;
        }

        if (vmx_info->assist_state == VMXASSIST_ON) {
            PrintDebug("Loading VMXASSIST at RIP: %p\n", (void *)(addr_t)info->rip);
        } else {
            PrintDebug("Leaving VMXASSIST and entering protected mode at RIP: %p\n",
                       (void *)(addr_t)info->rip);
        }

        // PE switches modify the RIP directly, so we clear the instr_len field to avoid catastrophe
        exit_info->instr_len = 0;

        //      v3_vmx_restore_vmcs(info);
        //      v3_print_vmcs(info);

    } else {

        if (new_shdw_cr0->pg != shdw_cr0->pg) {
            paging_transition = 1;
        }

        // Except PG, PE, and NE, which are always set
        if ((info->shdw_pg_mode == SHADOW_PAGING) ||
            (v3_mach_type != V3_VMX_EPT_UG_CPU)) {

            // The shadow always reflects the new value
            *shdw_cr0 = *new_shdw_cr0;

            // We don't care about most of the flags, so let's go for it
            // and set them to the guest values
            *guest_cr0 = *shdw_cr0;

            guest_cr0->pe = 1;
            guest_cr0->pg = 1;
        } else {
            // Unrestricted guest
            //    *(uint32_t *)shdw_cr0 = (0x00000020 & *(uint32_t *)new_shdw_cr0);

            *guest_cr0 = *new_shdw_cr0;

            guest_cr0->cd = 0;
        }

        guest_cr0->ne = 1;
        guest_cr0->et = 1;

        if (paging_transition) {
            // Paging transition

            if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
                struct efer_64 * vm_efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
                struct efer_64 * hw_efer = (struct efer_64 *)&(info->ctrl_regs.efer);

                if (vmx_info->assist_state != VMXASSIST_DISABLED) {
                    if (vm_efer->lme) {
                        PrintDebug("Enabling long mode\n");

                        hw_efer->lma = 1;
                        hw_efer->lme = 1;

                        vmx_info->entry_ctrls.guest_ia32e = 1;
                    }
                } else {
                    if (hw_efer->lme) {
                        PrintDebug("Enabling long mode\n");

                        hw_efer->lma = 1;

                        vmx_info->entry_ctrls.guest_ia32e = 1;
                    }
                }

                //            PrintDebug("Activating Shadow Page tables\n");

                if (info->shdw_pg_mode == SHADOW_PAGING) {
                    if (v3_activate_shadow_pt(info) == -1) {
                        PrintError("Failed to activate shadow page tables\n");
                        return -1;
                    }
                }

            } else {

                if (info->shdw_pg_mode == SHADOW_PAGING) {
                    if (v3_activate_passthrough_pt(info) == -1) {
                        PrintError("Failed to activate passthrough page tables\n");
                        return -1;
                    }
                } else {
                    // This is hideous... Let's hope that the 1to1 page table has not been nuked...
                    info->ctrl_regs.cr3 = VMXASSIST_1to1_PT;
                }
            }
        }
    }

    return 0;
}

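/* Map the 4-bit GPR field of the exit qualification (bits 11:8) to the
 * corresponding saved guest register.  The encoding follows the standard
 * x86-64 register numbering: 0 = RAX, 1 = RCX, 2 = RDX, 3 = RBX,
 * 4 = RSP, 5 = RBP, 6 = RSI, 7 = RDI, 8-15 = R8-R15.
 */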
static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
    v3_reg_t * reg = NULL;

    switch (cr_qual->gpr) {
        case 0:
            reg = &(info->vm_regs.rax);
            break;
        case 1:
            reg = &(info->vm_regs.rcx);
            break;
        case 2:
            reg = &(info->vm_regs.rdx);
            break;
        case 3:
            reg = &(info->vm_regs.rbx);
            break;
        case 4:
            reg = &(info->vm_regs.rsp);
            break;
        case 5:
            reg = &(info->vm_regs.rbp);
            break;
        case 6:
            reg = &(info->vm_regs.rsi);
            break;
        case 7:
            reg = &(info->vm_regs.rdi);
            break;
        case 8:
            reg = &(info->vm_regs.r8);
            break;
        case 9:
            reg = &(info->vm_regs.r9);
            break;
        case 10:
            reg = &(info->vm_regs.r10);
            break;
        case 11:
            reg = &(info->vm_regs.r11);
            break;
        case 12:
            reg = &(info->vm_regs.r12);
            break;
        case 13:
            reg = &(info->vm_regs.r13);
            break;
        case 14:
            reg = &(info->vm_regs.r14);
            break;
        case 15:
            reg = &(info->vm_regs.r15);
            break;
    }

    return reg;
}