Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute:

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way, as in the example below.
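For example, to work on a release branch you can list the remote branches and track the one you want (the branch name below is only a placeholder; substitute a name from the listing):

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2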


palacios/src/palacios/vmx_ctrl_regs.c (commit: "can now boot linux (slowly) with EPT enabled"):

/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Andy Gocke <agocke@gmail.com>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Andy Gocke <agocke@gmail.com>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmx_ctrl_regs.h>
#include <palacios/vmm.h>
#include <palacios/vmx_lowlevel.h>
#include <palacios/vmx.h>
#include <palacios/vmx_assist.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_direct_paging.h>
#include <palacios/vmm_ctrl_regs.h>

#if 0
#ifndef CONFIG_DEBUG_VMX
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
#endif

static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual);
static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_val, struct vmx_exit_info * exit_info);
static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg);

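/* Top-level handler for CR0 VM exits.  The access type in the exit
 * qualification distinguishes MOV-to-CR0 (0) from MOV-from-CR0 (1);
 * only writes are handled, and CLTS/LMSW (types 2 and 3) are rejected. */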
int v3_vmx_handle_cr0_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual, struct vmx_exit_info * exit_info) {

    if (cr_qual->access_type < 2) {
        v3_reg_t * reg = get_reg_ptr(info, cr_qual);

        if (cr_qual->access_type == 0) {

            if (handle_mov_to_cr0(info, reg, exit_info) != 0) {
                PrintError("Could not handle CR0 write\n");
                return -1;
            }
        } else {
            // Mov from cr
            PrintError("Mov From CR0 not handled\n");
            return -1;
        }

        return 0;
    }

    PrintError("Invalid CR0 Access type?? (type=%d)\n", cr_qual->access_type);
    return -1;
}

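/* Top-level handler for CR3 VM exits: dispatches MOV-to-CR3 and
 * MOV-from-CR3 based on the access type in the exit qualification. */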
int v3_vmx_handle_cr3_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {

    if (cr_qual->access_type < 2) {
        v3_reg_t * reg = get_reg_ptr(info, cr_qual);

        if (cr_qual->access_type == 0) {
            return handle_mov_to_cr3(info, reg);
        } else {
            return handle_mov_from_cr3(info, reg);
        }
    }

    PrintError("Invalid CR3 Access type?? (type=%d)\n", cr_qual->access_type);
    return -1;
}

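/* Guest write to CR3.  Under shadow paging the value is recorded as the
 * guest's CR3 (truncated to 32 bits outside of long mode), and a new
 * shadow page table is activated if the guest already has paging on.
 * Nested paging is not supported by this path. */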
static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {

    if (info->shdw_pg_mode == SHADOW_PAGING) {

        /*
        PrintDebug("Old Guest CR3=%p, Old Shadow CR3=%p\n",
                   (void *)info->ctrl_regs.cr3,
                   (void *)info->shdw_pg_state.guest_cr3);
        */

        if (info->cpu_mode == LONG) {
            info->shdw_pg_state.guest_cr3 = (uint64_t)*cr3_reg;
        } else {
            info->shdw_pg_state.guest_cr3 = (uint32_t)*cr3_reg;
        }

        if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
            if (v3_activate_shadow_pt(info) == -1) {
                PrintError("Failed to activate 32 bit shadow page table\n");
                return -1;
            }
        }
        /*
        PrintDebug("New guest CR3=%p, New shadow CR3=%p\n",
                   (void *)info->ctrl_regs.cr3,
                   (void *)info->shdw_pg_state.guest_cr3);
        */
    } else if (info->shdw_pg_mode == NESTED_PAGING) {
        PrintError("Nested paging not available in VMX right now!\n");
        return -1;
    }

    return 0;
}

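/* Guest read of CR3.  Under shadow paging the guest is given back the
 * CR3 value it last wrote (shdw_pg_state.guest_cr3), not the shadow CR3
 * actually loaded into the hardware. */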
static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {

    if (info->shdw_pg_mode == SHADOW_PAGING) {

        if ((v3_get_vm_cpu_mode(info) == LONG) ||
            (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {

            *cr3_reg = (uint64_t)info->shdw_pg_state.guest_cr3;
        } else {
            *cr3_reg = (uint32_t)info->shdw_pg_state.guest_cr3;
        }

    } else {
        PrintError("Unhandled paging mode\n");
        return -1;
    }

    return 0;
}

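/* Guest write to CR0.  If the PE bit is changing, the write is part of a
 * real-mode <-> protected-mode transition, so control is handed to the
 * VMXASSIST context switch and the exit's instruction length is cleared
 * (RIP has already been redirected).  Otherwise the new value is stored
 * in the shadow CR0 and copied to the guest CR0 with PE, PG, and NE
 * forced on, and a PG transition activates either the shadow page
 * tables, the passthrough tables, or the VMXASSIST 1-to-1 page table. */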
static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_cr0, struct vmx_exit_info * exit_info) {
    struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
    struct cr0_32 * shdw_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
    struct cr0_32 * new_shdw_cr0 = (struct cr0_32 *)new_cr0;
    struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;
    uint_t paging_transition = 0;

    /*
      PrintDebug("Old shadow CR0: 0x%x, New shadow CR0: 0x%x\n",
      (uint32_t)info->shdw_pg_state.guest_cr0, (uint32_t)*new_cr0);
    */

    if (new_shdw_cr0->pe != shdw_cr0->pe) {
        /*
          PrintDebug("Guest CR0: 0x%x\n", *(uint32_t *)guest_cr0);
          PrintDebug("Old shadow CR0: 0x%x\n", *(uint32_t *)shdw_cr0);
          PrintDebug("New shadow CR0: 0x%x\n", *(uint32_t *)new_shdw_cr0);
        */

        if (v3_vmxassist_ctx_switch(info) != 0) {
            PrintError("Unable to execute VMXASSIST context switch!\n");
            return -1;
        }

        if (vmx_info->assist_state == VMXASSIST_ENABLED) {
            PrintDebug("Loading VMXASSIST at RIP: %p\n", (void *)(addr_t)info->rip);
        } else {
            PrintDebug("Leaving VMXASSIST and entering protected mode at RIP: %p\n",
                       (void *)(addr_t)info->rip);
        }

        // PE switches modify the RIP directly, so we clear the instr_len field to avoid catastrophe
        exit_info->instr_len = 0;

        //      v3_vmx_restore_vmcs(info);
        //      v3_print_vmcs(info);

    } else {

        if (new_shdw_cr0->pg != shdw_cr0->pg) {
            paging_transition = 1;
        }

        // The shadow always reflects the new value
        *shdw_cr0 = *new_shdw_cr0;

        // We don't care about most of the flags, so let's go for it
        // and set them to the guest values
        *guest_cr0 = *shdw_cr0;

        // Except PG, PE, and NE, which are always set
        guest_cr0->pe = 1;
        guest_cr0->pg = 1;
        guest_cr0->ne = 1;

        if (paging_transition) {
            // Paging transition

            if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
                struct efer_64 * guest_efer = (struct efer_64 *)&(info->ctrl_regs.efer);

                if (guest_efer->lme == 1) {
                    //     PrintDebug("Enabling long mode\n");

                    guest_efer->lma = 1;
                    guest_efer->lme = 1;

                    vmx_info->entry_ctrls.guest_ia32e = 1;
                }

                //            PrintDebug("Activating Shadow Page tables\n");

                if (info->shdw_pg_mode == SHADOW_PAGING) {
                    if (v3_activate_shadow_pt(info) == -1) {
                        PrintError("Failed to activate shadow page tables\n");
                        return -1;
                    }
                }

            } else {

                if (info->shdw_pg_mode == SHADOW_PAGING) {
                    if (v3_activate_passthrough_pt(info) == -1) {
                        PrintError("Failed to activate passthrough page tables\n");
                        return -1;
                    }
                } else {
                    // This is hideous... Let's hope that the 1to1 page table has not been nuked...
                    info->ctrl_regs.cr3 = VMXASSIST_1to1_PT;
                }
            }
        }
    }

    return 0;
}

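/* Maps the GPR index from the MOV-CR exit qualification to a pointer
 * into the saved guest register file (0 = RAX ... 15 = R15). */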
static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
    v3_reg_t * reg = NULL;

    switch (cr_qual->gpr) {
        case 0:
            reg = &(info->vm_regs.rax);
            break;
        case 1:
            reg = &(info->vm_regs.rcx);
            break;
        case 2:
            reg = &(info->vm_regs.rdx);
            break;
        case 3:
            reg = &(info->vm_regs.rbx);
            break;
        case 4:
            reg = &(info->vm_regs.rsp);
            break;
        case 5:
            reg = &(info->vm_regs.rbp);
            break;
        case 6:
            reg = &(info->vm_regs.rsi);
            break;
        case 7:
            reg = &(info->vm_regs.rdi);
            break;
        case 8:
            reg = &(info->vm_regs.r8);
            break;
        case 9:
            reg = &(info->vm_regs.r9);
            break;
        case 10:
            reg = &(info->vm_regs.r10);
            break;
        case 11:
            reg = &(info->vm_regs.r11);
            break;
        case 12:
            reg = &(info->vm_regs.r12);
            break;
        case 13:
            reg = &(info->vm_regs.r13);
            break;
        case 14:
            reg = &(info->vm_regs.r14);
            break;
        case 15:
            reg = &(info->vm_regs.r15);
            break;
    }

    return reg;
}