Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This gives you the master branch. You will probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute:
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are checked out the same way; substitute the branch name you want.
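You can list the available remote branches with "git branch -r". As a sketch, assuming a release branch named Release-1.3 exists in the repository (actual release branch names may differ), you would check it out from within the palacios directory with
  git checkout --track -b Release-1.3 origin/Release-1.3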


palacios/src/palacios/vmx_ctrl_regs.c

/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Andy Gocke <agocke@gmail.com>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Andy Gocke <agocke@gmail.com>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmx_ctrl_regs.h>
#include <palacios/vmm.h>
#include <palacios/vmx_lowlevel.h>
#include <palacios/vmx.h>
#include <palacios/vmx_assist.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_direct_paging.h>
#include <palacios/vmm_ctrl_regs.h>

#ifndef V3_CONFIG_DEBUG_VMX
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif

static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual);
static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_val, struct vmx_exit_info * exit_info);
static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg);

int v3_vmx_handle_cr0_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual, struct vmx_exit_info * exit_info) {

    if (cr_qual->access_type < 2) {
        v3_reg_t * reg = get_reg_ptr(info, cr_qual);

        if (cr_qual->access_type == 0) {

            if (handle_mov_to_cr0(info, reg, exit_info) != 0) {
                PrintError("Could not handle CR0 write\n");
                return -1;
            }
        } else {
            // Mov from cr
            PrintError("Mov From CR0 not handled\n");
            return -1;
        }

        return 0;
    }

    PrintError("Invalid CR0 Access type?? (type=%d)\n", cr_qual->access_type);
    return -1;
}

int v3_vmx_handle_cr3_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {

    if (cr_qual->access_type < 2) {
        v3_reg_t * reg = get_reg_ptr(info, cr_qual);

        if (cr_qual->access_type == 0) {
            return handle_mov_to_cr3(info, reg);
        } else {
            return handle_mov_from_cr3(info, reg);
        }
    }

    PrintError("Invalid CR3 Access type?? (type=%d)\n", cr_qual->access_type);
    return -1;
}

static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {

    if (info->shdw_pg_mode == SHADOW_PAGING) {

        /*
        PrintDebug("Old Guest CR3=%p, Old Shadow CR3=%p\n",
                   (void *)info->ctrl_regs.cr3,
                   (void *)info->shdw_pg_state.guest_cr3);
        */

        if (info->cpu_mode == LONG) {
            info->shdw_pg_state.guest_cr3 = (uint64_t)*cr3_reg;
        } else {
            info->shdw_pg_state.guest_cr3 = (uint32_t)*cr3_reg;
        }

        if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
            if (v3_activate_shadow_pt(info) == -1) {
                PrintError("Failed to activate 32 bit shadow page table\n");
                return -1;
            }
        }
        /*
        PrintDebug("New guest CR3=%p, New shadow CR3=%p\n",
                   (void *)info->ctrl_regs.cr3,
                   (void *)info->shdw_pg_state.guest_cr3);
        */
    } else if (info->shdw_pg_mode == NESTED_PAGING) {
        PrintError("Nested paging not available in VMX right now!\n");
        return -1;
    }

    return 0;
}

static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {

    if (info->shdw_pg_mode == SHADOW_PAGING) {

        if ((v3_get_vm_cpu_mode(info) == LONG) ||
            (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {

            *cr3_reg = (uint64_t)info->shdw_pg_state.guest_cr3;
        } else {
            *cr3_reg = (uint32_t)info->shdw_pg_state.guest_cr3;
        }

    } else {
        PrintError("Unhandled paging mode\n");
        return -1;
    }

    return 0;
}

static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_cr0, struct vmx_exit_info * exit_info) {
    struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
    struct cr0_32 * shdw_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
    struct cr0_32 * new_shdw_cr0 = (struct cr0_32 *)new_cr0;
    struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;
    uint_t paging_transition = 0;

    /*
      PrintDebug("Old shadow CR0: 0x%x, New shadow CR0: 0x%x\n",
      (uint32_t)info->shdw_pg_state.guest_cr0, (uint32_t)*new_cr0);
    */

    if (new_shdw_cr0->pe != shdw_cr0->pe) {
        /*
          PrintDebug("Guest CR0: 0x%x\n", *(uint32_t *)guest_cr0);
          PrintDebug("Old shadow CR0: 0x%x\n", *(uint32_t *)shdw_cr0);
          PrintDebug("New shadow CR0: 0x%x\n", *(uint32_t *)new_shdw_cr0);
        */

        if (v3_vmxassist_ctx_switch(info) != 0) {
            PrintError("Unable to execute VMXASSIST context switch!\n");
            return -1;
        }

        if (vmx_info->assist_state == VMXASSIST_ENABLED) {
            PrintDebug("Loading VMXASSIST at RIP: %p\n", (void *)(addr_t)info->rip);
        } else {
            PrintDebug("Leaving VMXASSIST and entering protected mode at RIP: %p\n",
                       (void *)(addr_t)info->rip);
        }

        // PE switches modify the RIP directly, so we clear the instr_len field to avoid catastrophe
        exit_info->instr_len = 0;

        //      v3_vmx_restore_vmcs(info);
        //      v3_print_vmcs(info);

    } else {

        if (new_shdw_cr0->pg != shdw_cr0->pg) {
            paging_transition = 1;
        }

        // The shadow always reflects the new value
        *shdw_cr0 = *new_shdw_cr0;

        // We don't care about most of the flags, so let's go ahead
        // and set them to the guest values
        *guest_cr0 = *shdw_cr0;

        // Except PG, PE, and NE, which are always set
        guest_cr0->pe = 1;
        guest_cr0->pg = 1;
        guest_cr0->ne = 1;

        if (paging_transition) {
            // Paging transition

            if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
                struct efer_64 * guest_efer = (struct efer_64 *)&(info->ctrl_regs.efer);

                if (guest_efer->lme == 1) {
                    //     PrintDebug("Enabling long mode\n");

                    guest_efer->lma = 1;
                    guest_efer->lme = 1;

                    vmx_info->entry_ctrls.guest_ia32e = 1;
                }

                //            PrintDebug("Activating Shadow Page tables\n");

                if (info->shdw_pg_mode == SHADOW_PAGING) {
                    if (v3_activate_shadow_pt(info) == -1) {
                        PrintError("Failed to activate shadow page tables\n");
                        return -1;
                    }
                }

            } else {

                if (info->shdw_pg_mode == SHADOW_PAGING) {
                    if (v3_activate_passthrough_pt(info) == -1) {
                        PrintError("Failed to activate passthrough page tables\n");
                        return -1;
                    }
                } else {
                    // This is hideous... Let's hope that the 1to1 page table has not been nuked...
                    info->ctrl_regs.cr3 = VMXASSIST_1to1_PT;
                }
            }
        }
    }

    return 0;
}

static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
    v3_reg_t * reg = NULL;

    // Map the GPR index from the CR-access exit qualification to the saved guest register
    switch (cr_qual->gpr) {
        case 0:
            reg = &(info->vm_regs.rax);
            break;
        case 1:
            reg = &(info->vm_regs.rcx);
            break;
        case 2:
            reg = &(info->vm_regs.rdx);
            break;
        case 3:
            reg = &(info->vm_regs.rbx);
            break;
        case 4:
            reg = &(info->vm_regs.rsp);
            break;
        case 5:
            reg = &(info->vm_regs.rbp);
            break;
        case 6:
            reg = &(info->vm_regs.rsi);
            break;
        case 7:
            reg = &(info->vm_regs.rdi);
            break;
        case 8:
            reg = &(info->vm_regs.r8);
            break;
        case 9:
            reg = &(info->vm_regs.r9);
            break;
        case 10:
            reg = &(info->vm_regs.r10);
            break;
        case 11:
            reg = &(info->vm_regs.r11);
            break;
        case 12:
            reg = &(info->vm_regs.r12);
            break;
        case 13:
            reg = &(info->vm_regs.r13);
            break;
        case 14:
            reg = &(info->vm_regs.r14);
            break;
        case 15:
            reg = &(info->vm_regs.r15);
            break;
    }

    return reg;
}