Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches can be checked out the same way; see the example below.
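For example, to list the available remote branches and track one of the release branches (the branch name Release-1.2 below is only illustrative; use whichever names git reports):

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2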


palacios/src/palacios/vmx_ctrl_regs.c:

/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Andy Gocke <agocke@gmail.com>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Andy Gocke <agocke@gmail.com>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmx_ctrl_regs.h>
#include <palacios/vmm.h>
#include <palacios/vmx_lowlevel.h>
#include <palacios/vmx.h>
#include <palacios/vmx_assist.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_direct_paging.h>
#include <palacios/vmm_ctrl_regs.h>

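/* Unless VMX debugging is enabled in the build configuration,
 * compile the PrintDebug calls in this file away to nothing. */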
#ifndef V3_CONFIG_DEBUG_VMX
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif

static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual);
static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_val, struct vmx_exit_info * exit_info);
static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg);

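/* Per the Intel SDM, the access type field of the CR-access exit
 * qualification is: 0 = MOV to CR, 1 = MOV from CR, 2 = CLTS, 3 = LMSW.
 * The handlers below only deal with the two MOV forms (access_type < 2). */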
int v3_vmx_handle_cr0_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual, struct vmx_exit_info * exit_info) {

    if (cr_qual->access_type < 2) {
        v3_reg_t * reg = get_reg_ptr(info, cr_qual);

        if (cr_qual->access_type == 0) {

            if (handle_mov_to_cr0(info, reg, exit_info) != 0) {
                PrintError("Could not handle CR0 write\n");
                return -1;
            }
        } else {
            // Mov from cr
            PrintError("Mov From CR0 not handled\n");
            return -1;
        }

        return 0;
    }

    PrintError("Invalid CR0 Access type?? (type=%d)\n", cr_qual->access_type);
    return -1;
}

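/* CR3 reads and writes both cause exits while shadow paging is active;
 * dispatch on the MOV direction. */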
int v3_vmx_handle_cr3_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {

    if (cr_qual->access_type < 2) {
        v3_reg_t * reg = get_reg_ptr(info, cr_qual);

        if (cr_qual->access_type == 0) {
            return handle_mov_to_cr3(info, reg);
        } else {
            return handle_mov_from_cr3(info, reg);
        }
    }

    PrintError("Invalid CR3 Access type?? (type=%d)\n", cr_qual->access_type);
    return -1;
}

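/* CR4 accesses are decoded and emulated by the generic handlers behind
 * v3_handle_cr4_write()/v3_handle_cr4_read(); the VMX-specific part here
 * is keeping CR4.VMXE set. */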
int v3_vmx_handle_cr4_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
    if (cr_qual->access_type < 2) {

        if (cr_qual->access_type == 0) {
            if (v3_handle_cr4_write(info) != 0) {
                PrintError("Could not handle CR4 write\n");
                return -1;
            }
            info->ctrl_regs.cr4 |= 0x2000; // the guest may not use VMX, but hardware requires CR4.VMXE (bit 13) to stay set while in VMX operation
        } else {
            if (v3_handle_cr4_read(info) != 0) {
                PrintError("Could not handle CR4 read\n");
                return -1;
            }
        }

        return 0;
    }

    PrintError("Invalid CR4 Access type?? (type=%d)\n", cr_qual->access_type);
    return -1;
}

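/* With shadow paging the guest's CR3 value is only recorded here; the
 * hardware CR3 continues to point at the shadow page tables, which are
 * rebuilt if the guest is already running with paging enabled. */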
static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {

    if (info->shdw_pg_mode == SHADOW_PAGING) {

        /*
        PrintDebug("Old Guest CR3=%p, Old Shadow CR3=%p\n",
                   (void *)info->ctrl_regs.cr3,
                   (void *)info->shdw_pg_state.guest_cr3);
        */

        if (info->cpu_mode == LONG) {
            info->shdw_pg_state.guest_cr3 = (uint64_t)*cr3_reg;
        } else {
            info->shdw_pg_state.guest_cr3 = (uint32_t)*cr3_reg;
        }


        if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
            if (v3_activate_shadow_pt(info) == -1) {
                PrintError("Failed to activate shadow page tables\n");
                return -1;
            }
        }
        /*
        PrintDebug("New guest CR3=%p, New shadow CR3=%p\n",
                   (void *)info->ctrl_regs.cr3,
                   (void *)info->shdw_pg_state.guest_cr3);
        */
    } else if (info->shdw_pg_mode == NESTED_PAGING) {
        PrintError("Nested paging not available in VMX right now!\n");
        return -1;
    }

    return 0;
}

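/* Return the guest's virtualized CR3, truncated to 32 bits unless the
 * guest is in long mode or 32-bit compatibility mode. */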
static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {

    if (info->shdw_pg_mode == SHADOW_PAGING) {

        if ((v3_get_vm_cpu_mode(info) == LONG) ||
            (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {

            *cr3_reg = (uint64_t)info->shdw_pg_state.guest_cr3;
        } else {
            *cr3_reg = (uint32_t)info->shdw_pg_state.guest_cr3;
        }

    } else {
        PrintError("Unhandled paging mode\n");
        return -1;
    }

    return 0;
}

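/* A CR0.PE flip while VMXASSIST is in use means the guest is switching
 * between real and protected mode: VMXASSIST emulates real mode, so we
 * context-switch into or out of it instead of applying the write directly. */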
static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_cr0, struct vmx_exit_info * exit_info) {
    struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
    struct cr0_32 * shdw_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
    struct cr0_32 * new_shdw_cr0 = (struct cr0_32 *)new_cr0;
    struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;
    uint_t paging_transition = 0;
    extern v3_cpu_arch_t v3_mach_type;

    V3_Print("Mov to CR0\n");
    V3_Print("Old shadow CR0: 0x%x, New shadow CR0: 0x%x\n",
             (uint32_t)info->shdw_pg_state.guest_cr0, (uint32_t)*new_cr0);

    if ((new_shdw_cr0->pe != shdw_cr0->pe) && (vmx_info->assist_state != VMXASSIST_DISABLED)) {
        /*
          PrintDebug("Guest CR0: 0x%x\n", *(uint32_t *)guest_cr0);
          PrintDebug("Old shadow CR0: 0x%x\n", *(uint32_t *)shdw_cr0);
          PrintDebug("New shadow CR0: 0x%x\n", *(uint32_t *)new_shdw_cr0);
        */

        if (v3_vmxassist_ctx_switch(info) != 0) {
            PrintError("Unable to execute VMXASSIST context switch!\n");
            return -1;
        }

        if (vmx_info->assist_state == VMXASSIST_ON) {
            PrintDebug("Loading VMXASSIST at RIP: %p\n", (void *)(addr_t)info->rip);
        } else {
            PrintDebug("Leaving VMXASSIST and entering protected mode at RIP: %p\n",
                       (void *)(addr_t)info->rip);
        }

        // PE switches modify the RIP directly, so we clear the instr_len field to avoid catastrophe
        exit_info->instr_len = 0;

        //      v3_vmx_restore_vmcs(info);
        //      v3_print_vmcs(info);

    } else {

        if (new_shdw_cr0->pg != shdw_cr0->pg) {
            paging_transition = 1;
        }

        // Except PG, PE, and NE, which are always set
        if ((info->shdw_pg_mode == SHADOW_PAGING) ||
            (v3_mach_type != V3_VMX_EPT_UG_CPU)) {

            // The shadow always reflects the new value
            *shdw_cr0 = *new_shdw_cr0;

            // We don't care about most of the flags, so let's just
            // copy the new values through to the guest CR0
            *guest_cr0 = *shdw_cr0;

            guest_cr0->pe = 1;
            guest_cr0->pg = 1;
            guest_cr0->ne = 1;
        } else {
            // Unrestricted guest
            *(uint32_t *)shdw_cr0 = (0x00000020 & *(uint32_t *)new_shdw_cr0);

            *guest_cr0 = *new_shdw_cr0;
            guest_cr0->ne = 1;
        }

        if (paging_transition) {
            // Paging transition

            if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
                struct efer_64 * vm_efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
                struct efer_64 * hw_efer = (struct efer_64 *)&(info->ctrl_regs.efer);

                if (vm_efer->lme) {
                    //     PrintDebug("Enabling long mode\n");

                    hw_efer->lma = 1;
                    hw_efer->lme = 1;

                    vmx_info->entry_ctrls.guest_ia32e = 1;
                }

                //            PrintDebug("Activating Shadow Page tables\n");

                if (info->shdw_pg_mode == SHADOW_PAGING) {
                    if (v3_activate_shadow_pt(info) == -1) {
                        PrintError("Failed to activate shadow page tables\n");
                        return -1;
                    }
                }

            } else {

                if (info->shdw_pg_mode == SHADOW_PAGING) {
                    if (v3_activate_passthrough_pt(info) == -1) {
                        PrintError("Failed to activate passthrough page tables\n");
                        return -1;
                    }
                } else {
                    // This is hideous... Let's hope that the 1to1 page table has not been nuked...
                    info->ctrl_regs.cr3 = VMXASSIST_1to1_PT;
                }
            }
        }
    }

    return 0;
}

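/* Map the GPR index from the exit qualification (bits 11:8 per the
 * Intel SDM) to the corresponding register in the guest save area. */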
static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
    v3_reg_t * reg = NULL;

    switch (cr_qual->gpr) {
        case 0:
            reg = &(info->vm_regs.rax);
            break;
        case 1:
            reg = &(info->vm_regs.rcx);
            break;
        case 2:
            reg = &(info->vm_regs.rdx);
            break;
        case 3:
            reg = &(info->vm_regs.rbx);
            break;
        case 4:
            reg = &(info->vm_regs.rsp);
            break;
        case 5:
            reg = &(info->vm_regs.rbp);
            break;
        case 6:
            reg = &(info->vm_regs.rsi);
            break;
        case 7:
            reg = &(info->vm_regs.rdi);
            break;
        case 8:
            reg = &(info->vm_regs.r8);
            break;
        case 9:
            reg = &(info->vm_regs.r9);
            break;
        case 10:
            reg = &(info->vm_regs.r10);
            break;
        case 11:
            reg = &(info->vm_regs.r11);
            break;
        case 12:
            reg = &(info->vm_regs.r12);
            break;
        case 13:
            reg = &(info->vm_regs.r13);
            break;
        case 14:
            reg = &(info->vm_regs.r14);
            break;
        case 15:
            reg = &(info->vm_regs.r15);
            break;
    }

    return reg;
}