Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


c7bf6e3a6269906213e0e1c5ca0b2720f71bb836
[palacios.git] / palacios / src / palacios / vmx_ctrl_regs.c
1
2 /*
3  * This file is part of the Palacios Virtual Machine Monitor developed
4  * by the V3VEE Project with funding from the United States National 
5  * Science Foundation and the Department of Energy.  
6  *
7  * The V3VEE Project is a joint project between Northwestern University
8  * and the University of New Mexico.  You can find out more at 
9  * http://www.v3vee.org
10  *
11  * Copyright (c) 2008, Andy Gocke <agocke@gmail.com>
12  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
13  * All rights reserved.
14  *
15  * Author: Andy Gocke <agocke@gmail.com>
16  *
17  * This is free software.  You are permitted to use,
18  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
19  */
20
21 #include <palacios/vmx_ctrl_regs.h>
22 #include <palacios/vmm.h>
23 #include <palacios/vmx_lowlevel.h>
24 #include <palacios/vmx.h>
25 #include <palacios/vmx_assist.h>
26 #include <palacios/vm_guest_mem.h>
27 #include <palacios/vmm_direct_paging.h>
28 #include <palacios/vmm_ctrl_regs.h>
29
30 static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual);
31 static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_val, struct vmx_exit_info * exit_info);
32 static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
33 static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
34
35 int v3_vmx_handle_cr0_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual, struct vmx_exit_info * exit_info) {
36
37     if (cr_qual->access_type < 2) {
38         v3_reg_t * reg = get_reg_ptr(info, cr_qual);
39         
40         if (cr_qual->access_type == 0) {
41
42             if (handle_mov_to_cr0(info, reg, exit_info) != 0) {
43                 PrintError("Could not handle CR0 write\n");
44                 return -1;
45             }
46         } else {
47             // Mov from cr
48             PrintError("Mov From CR0 not handled\n");
49             return -1;
50         }
51
52         return 0;
53     }
54
55     PrintError("Invalid CR0 Access type?? (type=%d)\n", cr_qual->access_type);
56     return -1;
57 }
58
59 int v3_vmx_handle_cr3_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
60
61     if (cr_qual->access_type < 2) {
62         v3_reg_t * reg = get_reg_ptr(info, cr_qual);
63
64         if (cr_qual->access_type == 0) {
65             return handle_mov_to_cr3(info, reg);
66         } else {
67             return handle_mov_from_cr3(info, reg);
68         }
69     }
70
71     PrintError("Invalid CR3 Access type?? (type=%d)\n", cr_qual->access_type);
72     return -1;
73 }
74
75 static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
76
77     if (info->shdw_pg_mode == SHADOW_PAGING) {
78
79         /*
80         PrintDebug("Old Guest CR3=%p, Old Shadow CR3=%p\n",
81                    (void *)info->ctrl_regs.cr3,
82                    (void *)info->shdw_pg_state.guest_cr3);
83         */
84
85         if (info->cpu_mode == LONG) {
86             info->shdw_pg_state.guest_cr3 = (uint64_t)*cr3_reg;
87         } else {
88             info->shdw_pg_state.guest_cr3 = (uint32_t)*cr3_reg;
89         }
90
91
92         if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
93             if (v3_activate_shadow_pt(info) == -1) {
94                 PrintError("Failed to activate 32 bit shadow page table\n");
95                 return -1;
96             }
97         }
98         /*
99         PrintDebug("New guest CR3=%p, New shadow CR3=%p\n",
100                    (void *)info->ctrl_regs.cr3,
101                    (void *)info->shdw_pg_state.guest_cr3);
102         */
103     } else if (info->shdw_pg_mode == NESTED_PAGING) {
104         PrintError("Nested paging not available in VMX right now!\n");
105         return -1;
106     }
107
108
109
110     return 0;
111 }
112
113 static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
114
115
116     if (info->shdw_pg_mode == SHADOW_PAGING) {
117
118         if ((v3_get_vm_cpu_mode(info) == LONG) ||
119             (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {
120
121             *cr3_reg = (uint64_t)info->shdw_pg_state.guest_cr3;
122         } else {
123             *cr3_reg = (uint32_t)info->shdw_pg_state.guest_cr3;
124         }
125
126     } else {
127         PrintError("Unhandled paging mode\n");
128         return -1;
129     }
130
131
132     return 0;
133 }
134
/* Handle a guest write to CR0.
 *
 * Two distinct paths exist:
 *  1. A PE (protection-enable) bit flip: real<->protected mode transition.
 *     This is delegated to VMXASSIST, which takes over/releases control of
 *     the guest; the instruction length is zeroed because the context switch
 *     rewrites RIP directly.
 *  2. Any other change: the shadow CR0 absorbs the new value, the guest's
 *     hardware CR0 mirrors it with PG/PE/NE forced on, and a PG bit flip
 *     triggers (de)activation of shadow or passthrough page tables, enabling
 *     long mode via EFER.LMA when the guest has set EFER.LME.
 *
 * Returns 0 on success, -1 on failure.
 */
static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_cr0, struct vmx_exit_info * exit_info) {
    // Three views of CR0: the value hardware runs with (guest_cr0), the
    // value the guest believes it set (shdw_cr0), and the incoming value.
    struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
    struct cr0_32 * shdw_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
    struct cr0_32 * new_shdw_cr0 = (struct cr0_32 *)new_cr0;
    struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;
    uint_t paging_transition = 0;

    /*
      PrintDebug("Old shadow CR0: 0x%x, New shadow CR0: 0x%x\n",
      (uint32_t)info->shdw_pg_state.guest_cr0, (uint32_t)*new_cr0);
    */

    if (new_shdw_cr0->pe != shdw_cr0->pe) {
        /*
          PrintDebug("Guest CR0: 0x%x\n", *(uint32_t *)guest_cr0);
          PrintDebug("Old shadow CR0: 0x%x\n", *(uint32_t *)shdw_cr0);
          PrintDebug("New shadow CR0: 0x%x\n", *(uint32_t *)new_shdw_cr0);
        */

        // PE is being toggled: hand the real/protected mode switch to VMXASSIST
        if (v3_vmxassist_ctx_switch(info) != 0) {
            PrintError("Unable to execute VMXASSIST context switch!\n");
            return -1;
        }
        
        if (vmx_info->state == VMXASSIST_ENABLED) {
            PrintDebug("Loading VMXASSIST at RIP: %p\n", (void *)info->rip);
        } else {
            PrintDebug("Leaving VMXASSIST and entering protected mode at RIP: %p\n",
                       (void *)info->rip);
        }

        // PE switches modify the RIP directly, so we clear the instr_len field to avoid catastrophe
        exit_info->instr_len = 0;

        //      v3_vmx_restore_vmcs(info);
        //      v3_print_vmcs(info);

    } else {

        // A PG flip means paging is being turned on or off
        if (new_shdw_cr0->pg != shdw_cr0->pg) {
            paging_transition = 1;
        }
        
        // The shadow always reflects the new value
        *shdw_cr0 = *new_shdw_cr0;
        
        // We don't care about most of the flags, so lets go for it 
        // and set them to the guest values
        *guest_cr0 = *shdw_cr0;
        
        // Except PG, PE, and NE, which are always set
        guest_cr0->pe = 1;
        guest_cr0->pg = 1;
        guest_cr0->ne = 1;
        
        if (paging_transition) {
            // Paging transition
            
            if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
                // Guest just enabled paging: check for a pending long-mode entry
                struct efer_64 * guest_efer = (struct efer_64 *)&(info->ctrl_regs.efer);
                
                if (guest_efer->lme == 1) {
                    //     PrintDebug("Enabling long mode\n");
                    
                    // LME + PG => long mode is now active; mirror that in the
                    // VMCS entry controls so VM entry switches to IA-32e mode
                    guest_efer->lma = 1;
                    guest_efer->lme = 1;
                    
                    vmx_info->entry_ctrls.guest_ia32e = 1;
                }
                
                //            PrintDebug("Activating Shadow Page tables\n");
                
                if (v3_activate_shadow_pt(info) == -1) {
                    PrintError("Failed to activate shadow page tables\n");
                    return -1;
                }
                
            } else if (v3_activate_passthrough_pt(info) == -1) {
                // Guest turned paging off: fall back to passthrough tables
                PrintError("Failed to activate passthrough page tables\n");
                return -1;
            }
        }
    }

    return 0;
}
221
222 static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
223     v3_reg_t * reg = NULL;
224
225     switch (cr_qual->gpr) {
226         case 0:
227             reg = &(info->vm_regs.rax);
228             break;
229         case 1:
230             reg = &(info->vm_regs.rcx);
231             break;
232         case 2:
233             reg = &(info->vm_regs.rdx);
234             break;
235         case 3:
236             reg = &(info->vm_regs.rbx);
237             break;
238         case 4:
239             reg = &(info->vm_regs.rsp);
240             break;
241         case 5:
242             reg = &(info->vm_regs.rbp);
243             break;
244         case 6:
245             reg = &(info->vm_regs.rsi);
246             break;
247         case 7:
248             reg = &(info->vm_regs.rdi);
249             break;
250         case 8:
251             reg = &(info->vm_regs.r8);
252             break;
253         case 9:
254             reg = &(info->vm_regs.r9);
255             break;
256         case 10:
257             reg = &(info->vm_regs.r10);
258             break;
259         case 11:
260             reg = &(info->vm_regs.r11);
261             break;
262         case 12:
263             reg = &(info->vm_regs.r11);
264             break;
265         case 13:
266             reg = &(info->vm_regs.r13);
267             break;
268         case 14:
269             reg = &(info->vm_regs.r14);
270             break;
271         case 15:
272             reg = &(info->vm_regs.r15);
273             break;
274     }
275
276     return reg;
277 }
278
279