Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute:
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


00fd7e9fcddddf247369200b6bf77227f24db5c3
[palacios.git] / palacios / src / palacios / vmx_ctrl_regs.c
1
2 /*
3  * This file is part of the Palacios Virtual Machine Monitor developed
4  * by the V3VEE Project with funding from the United States National 
5  * Science Foundation and the Department of Energy.  
6  *
7  * The V3VEE Project is a joint project between Northwestern University
8  * and the University of New Mexico.  You can find out more at 
9  * http://www.v3vee.org
10  *
11  * Copyright (c) 2008, Andy Gocke <agocke@gmail.com>
12  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
13  * All rights reserved.
14  *
15  * Author: Andy Gocke <agocke@gmail.com>
16  *
17  * This is free software.  You are permitted to use,
18  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
19  */
20
21 #include <palacios/vmx_ctrl_regs.h>
22 #include <palacios/vmm.h>
23 #include <palacios/vmx_lowlevel.h>
24 #include <palacios/vmx.h>
25 #include <palacios/vmx_assist.h>
26 #include <palacios/vm_guest_mem.h>
27 #include <palacios/vmm_direct_paging.h>
28 #include <palacios/vmx_handler.h>
29
30 static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual cr_qual);
31 static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_val);
32 static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
33 static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
34
35 int v3_vmx_handle_cr0_access(struct guest_info * info) {
36     struct vmx_exit_cr_qual cr_qual;
37     
38     vmcs_read(VMCS_EXIT_QUAL, &(cr_qual.value));
39
40     if (cr_qual.access_type < 2) {
41         v3_reg_t * reg = get_reg_ptr(info, cr_qual);
42         
43         if (cr_qual.access_type == 0) {
44
45             if (handle_mov_to_cr0(info, reg) != 0) {
46                 PrintError("Could not handle CR0 write\n");
47                 return -1;
48             }
49         } else {
50             // Mov from cr
51             PrintError("Mov From CR0 not handled\n");
52             return -1;
53         }
54
55         return 0;
56     }
57
58     PrintError("Invalid CR0 Access type?? (type=%d)\n", cr_qual.access_type);
59     return -1;
60 }
61
62 int v3_vmx_handle_cr3_access(struct guest_info * info) {
63     struct vmx_exit_cr_qual cr_qual;
64
65     vmcs_read(VMCS_EXIT_QUAL, &(cr_qual.value));
66
67     if (cr_qual.access_type < 2) {
68         v3_reg_t * reg = get_reg_ptr(info, cr_qual);
69
70         if (cr_qual.access_type == 0) {
71             return handle_mov_to_cr3(info, reg);
72         } else {
73             return handle_mov_from_cr3(info, reg);
74         }
75     }
76
77     PrintError("Invalid CR3 Access type?? (type=%d)\n", cr_qual.access_type);
78     return -1;
79 }
80
81 static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
82     int instr_len = 0;
83
84     if (info->shdw_pg_mode == SHADOW_PAGING) {
85
86         /*
87         PrintDebug("Old Guest CR3=%p, Old Shadow CR3=%p\n",
88                    (void *)info->ctrl_regs.cr3,
89                    (void *)info->shdw_pg_state.guest_cr3);
90         */
91         if (info->cpu_mode == LONG) {
92             info->shdw_pg_state.guest_cr3 = (uint64_t)*cr3_reg;
93         } else {
94             info->shdw_pg_state.guest_cr3 = (uint32_t)*cr3_reg;
95         }
96
97
98         if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
99             if (v3_activate_shadow_pt(info) == -1) {
100                 PrintError("Failed to activate 32 bit shadow page table\n");
101                 return -1;
102             }
103         }
104         /*
105         PrintDebug("New guest CR3=%p, New shadow CR3=%p\n",
106                    (void *)info->ctrl_regs.cr3,
107                    (void *)info->shdw_pg_state.guest_cr3);
108         */
109     } else if (info->shdw_pg_mode == NESTED_PAGING) {
110         PrintError("Nested paging not available in VMX right now!\n");
111         return -1;
112     }
113
114
115     vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
116     info->rip += instr_len;
117
118     return 0;
119 }
120
121 static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
122     int instr_len = 0;
123
124     if (info->shdw_pg_mode == SHADOW_PAGING) {
125
126         if ((v3_get_vm_cpu_mode(info) == LONG) ||
127             (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {
128
129             *cr3_reg = (uint64_t)info->shdw_pg_state.guest_cr3;
130         } else {
131             *cr3_reg = (uint32_t)info->shdw_pg_state.guest_cr3;
132         }
133
134     } else {
135         PrintError("Unhandled paging mode\n");
136         return -1;
137     }
138
139
140     vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
141     info->rip += instr_len;
142
143     return 0;
144 }
145
/* Emulate a guest MOV to CR0.
 *
 * Three cases, in order:
 *   1. The write flips CR0.PE (real <-> protected mode): hand control to
 *      VMXASSIST, which performs the mode switch and sets the CR values
 *      itself; we return without touching CR0 here.
 *   2. The write flips CR0.PG: record the new value, then activate shadow
 *      or passthrough page tables to match the new paging state.
 *   3. Any other write: just propagate the new value.
 *
 * info->shdw_pg_state.guest_cr0 holds the CR0 the guest believes it has;
 * info->ctrl_regs.cr0 appears to be the hardware-visible copy, since PE,
 * PG, and NE are forced on there regardless of the guest value
 * (NOTE(review): confirm against the VMCS load path).
 *
 * Returns 0 on success, -1 on failure. Advances RIP past the instruction
 * except on the VMXASSIST path, which loads its own RIP.
 */
static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_cr0) {
    struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
    struct cr0_32 * shdw_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
    struct cr0_32 * new_shdw_cr0 = (struct cr0_32 *)new_cr0;
    struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;
    uint_t paging_transition = 0;
    int instr_len = 0;

    /*
    PrintDebug("Old shadow CR0: 0x%x, New shadow CR0: 0x%x\n",
               (uint32_t)info->shdw_pg_state.guest_cr0, (uint32_t)*new_cr0);
    */

    // Case 1: PE transition -- delegate the whole mode switch to VMXASSIST
    if (new_shdw_cr0->pe != shdw_cr0->pe) {
        /*
        PrintDebug("Guest CR0: 0x%x\n", *(uint32_t *)guest_cr0);
        PrintDebug("Old shadow CR0: 0x%x\n", *(uint32_t *)shdw_cr0);
        PrintDebug("New shadow CR0: 0x%x\n", *(uint32_t *)new_shdw_cr0);
        */
        if (v3_vmxassist_ctx_switch(info) != 0) {
            PrintError("Unable to execute VMXASSIST context switch!\n");
            return -1;
        }

        // Refresh local guest state from the VMCS after the context switch
        v3_load_vmcs_guest_state(info);

        if (vmx_info->state == VMXASSIST_ENABLED) {
            PrintDebug("Loading VMXASSIST at RIP: %p\n", (void *)info->rip);
        } else {
            PrintDebug("Leaving VMXASSIST and entering protected mode at RIP: %p\n",
                       (void *)info->rip);
        }

        // vmx assist sets the new cr values itself
        return 0;
    }

    // Case 2: remember whether PG is being toggled (handled after the copy)
    if (new_shdw_cr0->pg != shdw_cr0->pg) {
        paging_transition = 1;
    }
 
    // The shadow always reflects the new value
    *shdw_cr0 = *new_shdw_cr0;

    // We don't care about most of the flags, so lets go for it 
    // and set them to the guest values
    *guest_cr0 = *shdw_cr0;

    // Except PG, PE, and NE, which are always set
    guest_cr0->pe = 1;
    guest_cr0->pg = 1;
    guest_cr0->ne = 1;

    if (paging_transition) {
        // Paging transition

        if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
            // Guest turned paging ON: check for a simultaneous long-mode entry
            struct efer_64 * guest_efer = (struct efer_64 *)&(info->ctrl_regs.efer);

            if (guest_efer->lme == 1) {
                //     PrintDebug("Enabling long mode\n");

                // Paging + LME => long mode is now active (LMA)
                guest_efer->lma = 1;
                guest_efer->lme = 1;

                // Tell VMX to enter the guest in IA-32e mode
                vmx_info->entry_ctrls.guest_ia32e = 1;
            }

            //            PrintDebug("Activating Shadow Page tables\n");

            if (v3_activate_shadow_pt(info) == -1) {
                PrintError("Failed to activate shadow page tables\n");
                return -1;
            }

        } else if (v3_activate_passthrough_pt(info) == -1) {
            // Guest turned paging OFF: fall back to passthrough tables
            PrintError("Failed to activate passthrough page tables\n");
            return -1;
        }
    }
   
    // PE loads its own RIP, otherwise we need to skip ahead an instruction

    vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
    info->rip += instr_len;
   
    return 0;
}
234
235 static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual cr_qual) {
236     v3_reg_t * reg = NULL;
237
238     switch (cr_qual.gpr) {
239         case 0:
240             reg = &(info->vm_regs.rax);
241             break;
242         case 1:
243             reg = &(info->vm_regs.rcx);
244             break;
245         case 2:
246             reg = &(info->vm_regs.rdx);
247             break;
248         case 3:
249             reg = &(info->vm_regs.rbx);
250             break;
251         case 4:
252             reg = &(info->vm_regs.rsp);
253             break;
254         case 5:
255             reg = &(info->vm_regs.rbp);
256             break;
257         case 6:
258             reg = &(info->vm_regs.rsi);
259             break;
260         case 7:
261             reg = &(info->vm_regs.rdi);
262             break;
263         case 8:
264             reg = &(info->vm_regs.r8);
265             break;
266         case 9:
267             reg = &(info->vm_regs.r9);
268             break;
269         case 10:
270             reg = &(info->vm_regs.r10);
271             break;
272         case 11:
273             reg = &(info->vm_regs.r11);
274             break;
275         case 12:
276             reg = &(info->vm_regs.r11);
277             break;
278         case 13:
279             reg = &(info->vm_regs.r13);
280             break;
281         case 14:
282             reg = &(info->vm_regs.r14);
283             break;
284         case 15:
285             reg = &(info->vm_regs.r15);
286             break;
287     }
288
289     return reg;
290 }
291
292