Palacios Public Git Repository

To checkout Palacios execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute:
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


685de90e0341ae1853610748139c7116b6dfa6b4
[palacios.git] / palacios / src / palacios / vmx_ctrl_regs.c
1
2 /*
3  * This file is part of the Palacios Virtual Machine Monitor developed
4  * by the V3VEE Project with funding from the United States National 
5  * Science Foundation and the Department of Energy.  
6  *
7  * The V3VEE Project is a joint project between Northwestern University
8  * and the University of New Mexico.  You can find out more at 
9  * http://www.v3vee.org
10  *
11  * Copyright (c) 2008, Andy Gocke <agocke@gmail.com>
12  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
13  * All rights reserved.
14  *
15  * Author: Andy Gocke <agocke@gmail.com>
16  *
17  * This is free software.  You are permitted to use,
18  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
19  */
20
21 #include <palacios/vmx_ctrl_regs.h>
22 #include <palacios/vmm.h>
23 #include <palacios/vmx_lowlevel.h>
24 #include <palacios/vmx.h>
25 #include <palacios/vmx_assist.h>
26 #include <palacios/vm_guest_mem.h>
27 #include <palacios/vmm_direct_paging.h>
28 #include <palacios/vmm_ctrl_regs.h>
29
30 #ifndef CONFIG_DEBUG_VMX
31 #undef PrintDebug
32 #define PrintDebug(fmt, args...)
33 #endif
34
35 static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual);
36 static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_val, struct vmx_exit_info * exit_info);
37 static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
38 static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
39
40 int v3_vmx_handle_cr0_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual, struct vmx_exit_info * exit_info) {
41
42     if (cr_qual->access_type < 2) {
43         v3_reg_t * reg = get_reg_ptr(info, cr_qual);
44         
45         if (cr_qual->access_type == 0) {
46
47             if (handle_mov_to_cr0(info, reg, exit_info) != 0) {
48                 PrintError("Could not handle CR0 write\n");
49                 return -1;
50             }
51         } else {
52             // Mov from cr
53             PrintError("Mov From CR0 not handled\n");
54             return -1;
55         }
56
57         return 0;
58     }
59
60     PrintError("Invalid CR0 Access type?? (type=%d)\n", cr_qual->access_type);
61     return -1;
62 }
63
64 int v3_vmx_handle_cr3_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
65
66     if (cr_qual->access_type < 2) {
67         v3_reg_t * reg = get_reg_ptr(info, cr_qual);
68
69         if (cr_qual->access_type == 0) {
70             return handle_mov_to_cr3(info, reg);
71         } else {
72             return handle_mov_from_cr3(info, reg);
73         }
74     }
75
76     PrintError("Invalid CR3 Access type?? (type=%d)\n", cr_qual->access_type);
77     return -1;
78 }
79
80 static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
81
82     if (info->shdw_pg_mode == SHADOW_PAGING) {
83
84         /*
85         PrintDebug("Old Guest CR3=%p, Old Shadow CR3=%p\n",
86                    (void *)info->ctrl_regs.cr3,
87                    (void *)info->shdw_pg_state.guest_cr3);
88         */
89
90         if (info->cpu_mode == LONG) {
91             info->shdw_pg_state.guest_cr3 = (uint64_t)*cr3_reg;
92         } else {
93             info->shdw_pg_state.guest_cr3 = (uint32_t)*cr3_reg;
94         }
95
96
97         if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
98             if (v3_activate_shadow_pt(info) == -1) {
99                 PrintError("Failed to activate 32 bit shadow page table\n");
100                 return -1;
101             }
102         }
103         /*
104         PrintDebug("New guest CR3=%p, New shadow CR3=%p\n",
105                    (void *)info->ctrl_regs.cr3,
106                    (void *)info->shdw_pg_state.guest_cr3);
107         */
108     } else if (info->shdw_pg_mode == NESTED_PAGING) {
109         PrintError("Nested paging not available in VMX right now!\n");
110         return -1;
111     }
112
113
114
115     return 0;
116 }
117
118 static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
119
120
121     if (info->shdw_pg_mode == SHADOW_PAGING) {
122
123         if ((v3_get_vm_cpu_mode(info) == LONG) ||
124             (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {
125
126             *cr3_reg = (uint64_t)info->shdw_pg_state.guest_cr3;
127         } else {
128             *cr3_reg = (uint32_t)info->shdw_pg_state.guest_cr3;
129         }
130
131     } else {
132         PrintError("Unhandled paging mode\n");
133         return -1;
134     }
135
136
137     return 0;
138 }
139
/* Handle a guest MOV to CR0.
 *
 * Two distinct cases:
 *
 *  1) The PE (protection enable) bit is changing: the guest is switching
 *     between real and protected mode.  Classic VMX cannot run real-mode
 *     guest code directly, so instead of applying the write we context
 *     switch into/out of the VMXASSIST emulation environment.  That
 *     switch rewrites RIP itself, so instr_len is zeroed to stop the
 *     exit handler from advancing RIP again afterwards.
 *
 *  2) Otherwise the new value becomes the guest-visible (shadow) CR0 and
 *     is copied to the hardware guest CR0 with PE, PG, and NE forced on
 *     (required for the VMX host-controlled configuration).  If the PG
 *     bit flipped, the paging machinery is reconfigured: shadow page
 *     tables when the guest now runs with paging (including the jump to
 *     IA-32e mode when EFER.LME is set), passthrough tables otherwise.
 *
 * Returns 0 on success, -1 on failure.
 */
static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_cr0, struct vmx_exit_info * exit_info) {
    struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
    struct cr0_32 * shdw_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
    struct cr0_32 * new_shdw_cr0 = (struct cr0_32 *)new_cr0;
    struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;
    uint_t paging_transition = 0;

    /*
      PrintDebug("Old shadow CR0: 0x%x, New shadow CR0: 0x%x\n",
      (uint32_t)info->shdw_pg_state.guest_cr0, (uint32_t)*new_cr0);
    */

    if (new_shdw_cr0->pe != shdw_cr0->pe) {
        /*
          PrintDebug("Guest CR0: 0x%x\n", *(uint32_t *)guest_cr0);
          PrintDebug("Old shadow CR0: 0x%x\n", *(uint32_t *)shdw_cr0);
          PrintDebug("New shadow CR0: 0x%x\n", *(uint32_t *)new_shdw_cr0);
        */

        // Real <-> protected mode transition: hand control to/from VMXASSIST.
        if (v3_vmxassist_ctx_switch(info) != 0) {
            PrintError("Unable to execute VMXASSIST context switch!\n");
            return -1;
        }
        
        if (vmx_info->assist_state == VMXASSIST_ENABLED) {
            PrintDebug("Loading VMXASSIST at RIP: %p\n", (void *)(addr_t)info->rip);
        } else {
            PrintDebug("Leaving VMXASSIST and entering protected mode at RIP: %p\n",
                       (void *)(addr_t)info->rip);
        }

        // PE switches modify the RIP directly, so we clear the instr_len field to avoid catastrophe
        exit_info->instr_len = 0;

        //      v3_vmx_restore_vmcs(info);
        //      v3_print_vmcs(info);

    } else {

        // Remember whether PG flipped; pagetable setup happens after the
        // shadow/guest CR0 values are committed below.
        if (new_shdw_cr0->pg != shdw_cr0->pg) {
            paging_transition = 1;
        }
        
        // The shadow always reflects the new value
        *shdw_cr0 = *new_shdw_cr0;
        
        // We don't care about most of the flags, so lets go for it 
        // and set them to the guest values
        *guest_cr0 = *shdw_cr0;
        
        // Except PG, PE, and NE, which are always set
        guest_cr0->pe = 1;
        guest_cr0->pg = 1;
        guest_cr0->ne = 1;
        
        if (paging_transition) {
            // Paging transition
            
            if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
                struct efer_64 * guest_efer = (struct efer_64 *)&(info->ctrl_regs.efer);
                
                // Guest enabled paging with EFER.LME set: complete the
                // switch into IA-32e (long) mode via the VM-entry controls.
                if (guest_efer->lme == 1) {
                    //     PrintDebug("Enabling long mode\n");
                    
                    guest_efer->lma = 1;
                    guest_efer->lme = 1;
                    
                    vmx_info->entry_ctrls.guest_ia32e = 1;
                }
                
                //            PrintDebug("Activating Shadow Page tables\n");
                
                if (v3_activate_shadow_pt(info) == -1) {
                    PrintError("Failed to activate shadow page tables\n");
                    return -1;
                }
                
            } else if (v3_activate_passthrough_pt(info) == -1) {
                PrintError("Failed to activate passthrough page tables\n");
                return -1;
            }
        }
    }

    return 0;
}
226
227 static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
228     v3_reg_t * reg = NULL;
229
230     switch (cr_qual->gpr) {
231         case 0:
232             reg = &(info->vm_regs.rax);
233             break;
234         case 1:
235             reg = &(info->vm_regs.rcx);
236             break;
237         case 2:
238             reg = &(info->vm_regs.rdx);
239             break;
240         case 3:
241             reg = &(info->vm_regs.rbx);
242             break;
243         case 4:
244             reg = &(info->vm_regs.rsp);
245             break;
246         case 5:
247             reg = &(info->vm_regs.rbp);
248             break;
249         case 6:
250             reg = &(info->vm_regs.rsi);
251             break;
252         case 7:
253             reg = &(info->vm_regs.rdi);
254             break;
255         case 8:
256             reg = &(info->vm_regs.r8);
257             break;
258         case 9:
259             reg = &(info->vm_regs.r9);
260             break;
261         case 10:
262             reg = &(info->vm_regs.r10);
263             break;
264         case 11:
265             reg = &(info->vm_regs.r11);
266             break;
267         case 12:
268             reg = &(info->vm_regs.r11);
269             break;
270         case 13:
271             reg = &(info->vm_regs.r13);
272             break;
273         case 14:
274             reg = &(info->vm_regs.r14);
275             break;
276         case 15:
277             reg = &(info->vm_regs.r15);
278             break;
279     }
280
281     return reg;
282 }
283
284