Palacios Public Git Repository

To checkout Palacios execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This gives you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute:
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


VMX is working for a 32-bit Linux kernel. It should also work for a 64-bit kernel...
[palacios.git] / palacios / src / palacios / vmx_ctrl_regs.c
1
2 /*
3  * This file is part of the Palacios Virtual Machine Monitor developed
4  * by the V3VEE Project with funding from the United States National 
5  * Science Foundation and the Department of Energy.  
6  *
7  * The V3VEE Project is a joint project between Northwestern University
8  * and the University of New Mexico.  You can find out more at 
9  * http://www.v3vee.org
10  *
11  * Copyright (c) 2008, Andy Gocke <agocke@gmail.com>
12  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
13  * All rights reserved.
14  *
15  * Author: Andy Gocke <agocke@gmail.com>
16  *
17  * This is free software.  You are permitted to use,
18  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
19  */
20
21 #include <palacios/vmx_ctrl_regs.h>
22 #include <palacios/vmm.h>
23 #include <palacios/vmx_lowlevel.h>
24 #include <palacios/vmx.h>
25 #include <palacios/vmx_assist.h>
26 #include <palacios/vm_guest_mem.h>
27 #include <palacios/vmm_direct_paging.h>
28 #include <palacios/vmx_handler.h>
29
30 static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual cr_qual);
31 static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_val);
32 static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
33 static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
34
35 int v3_vmx_handle_cr0_access(struct guest_info * info) {
36     struct vmx_exit_cr_qual cr_qual;
37     
38     vmcs_read(VMCS_EXIT_QUAL, &(cr_qual.value));
39
40     if (cr_qual.access_type < 2) {
41         v3_reg_t * reg = get_reg_ptr(info, cr_qual);
42         
43         if (cr_qual.access_type == 0) {
44
45             if (handle_mov_to_cr0(info, reg) != 0) {
46                 PrintError("Could not handle CR0 write\n");
47                 return -1;
48             }
49         } else {
50             // Mov from cr
51             PrintError("Mov From CR0 not handled\n");
52             return -1;
53         }
54
55         return 0;
56     }
57
58     PrintError("Invalid CR0 Access type?? (type=%d)\n", cr_qual.access_type);
59     return -1;
60 }
61
62 int v3_vmx_handle_cr3_access(struct guest_info * info) {
63     struct vmx_exit_cr_qual cr_qual;
64
65     vmcs_read(VMCS_EXIT_QUAL, &(cr_qual.value));
66
67     if (cr_qual.access_type < 2) {
68         v3_reg_t * reg = get_reg_ptr(info, cr_qual);
69
70         if (cr_qual.access_type == 0) {
71             return handle_mov_to_cr3(info, reg);
72         } else {
73             return handle_mov_from_cr3(info, reg);
74         }
75     }
76
77     PrintError("Invalid CR3 Access type?? (type=%d)\n", cr_qual.access_type);
78     return -1;
79 }
80
81 static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
82     int instr_len = 0;
83
84     if (info->shdw_pg_mode == SHADOW_PAGING) {
85
86         PrintDebug("Old Guest CR3=%p, Old Shadow CR3=%p\n",
87                    (void *)info->ctrl_regs.cr3,
88                    (void *)info->shdw_pg_state.guest_cr3);
89
90         if (info->cpu_mode == LONG) {
91             info->shdw_pg_state.guest_cr3 = (uint64_t)*cr3_reg;
92         } else {
93             info->shdw_pg_state.guest_cr3 = (uint32_t)*cr3_reg;
94         }
95
96
97         if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
98             if (v3_activate_shadow_pt(info) == -1) {
99                 PrintError("Failed to activate 32 bit shadow page table\n");
100                 return -1;
101             }
102         }
103
104         PrintDebug("New guest CR3=%p, New shadow CR3=%p\n",
105                    (void *)info->ctrl_regs.cr3,
106                    (void *)info->shdw_pg_state.guest_cr3);
107
108     } else if (info->shdw_pg_mode == NESTED_PAGING) {
109         PrintError("Nested paging not available in VMX right now!\n");
110         return -1;
111     }
112
113
114     vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
115     info->rip += instr_len;
116
117     return 0;
118 }
119
120 static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
121     int instr_len = 0;
122
123     if (info->shdw_pg_mode == SHADOW_PAGING) {
124
125         if ((v3_get_vm_cpu_mode(info) == LONG) ||
126             (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {
127
128             *cr3_reg = (uint64_t)info->shdw_pg_state.guest_cr3;
129         } else {
130             *cr3_reg = (uint32_t)info->shdw_pg_state.guest_cr3;
131         }
132
133     } else {
134         PrintError("Unhandled paging mode\n");
135         return -1;
136     }
137
138
139     vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
140     info->rip += instr_len;
141
142     return 0;
143 }
144
/* Emulate a guest "mov to CR0".
 *
 * Three cases:
 *  1. PE bit flips -> real/protected mode transition, delegated to the
 *     VMXASSIST context switch (which sets the CR values itself).
 *  2. PG bit flips -> paging transition: shadow or passthrough page
 *     tables are (re)activated, and long mode is enabled if EFER.LME is set.
 *  3. Otherwise -> just propagate the new value into the shadow/guest CR0.
 * Returns 0 on success, -1 on failure. */
static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_cr0) {
    struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
    struct cr0_32 * shdw_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
    struct cr0_32 * new_shdw_cr0 = (struct cr0_32 *)new_cr0;
    struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;
    uint_t paging_transition = 0;
    int instr_len = 0;

    PrintDebug("Old shadow CR0: 0x%x, New shadow CR0: 0x%x\n",
               (uint32_t)info->shdw_pg_state.guest_cr0, (uint32_t)*new_cr0);


    /* PE flip: switching between real and protected mode. */
    if (new_shdw_cr0->pe != shdw_cr0->pe) {
        PrintDebug("Guest CR0: 0x%x\n", *(uint32_t *)guest_cr0);
        PrintDebug("Old shadow CR0: 0x%x\n", *(uint32_t *)shdw_cr0);
        PrintDebug("New shadow CR0: 0x%x\n", *(uint32_t *)new_shdw_cr0);

        if (v3_vmxassist_ctx_switch(info) != 0) {
            PrintError("Unable to execute VMXASSIST context switch!\n");
            return -1;
        }

        /* Reload guest state modified by the context switch into the VMCS. */
        v3_load_vmcs_guest_state(info);

        if (vmx_info->state == VMXASSIST_ENABLED) {
            PrintDebug("Loading VMXASSIST at RIP: %p\n", (void *)info->rip);
        } else {
            PrintDebug("Leaving VMXASSIST and entering protected mode at RIP: %p\n",
                       (void *)info->rip);
        }

        // vmx assist sets the new cr values itself
        return 0;
    }

    /* PG flip: paging is being turned on or off. */
    if (new_shdw_cr0->pg != shdw_cr0->pg) {
        paging_transition = 1;
    }
 
    // The shadow always reflects the new value
    *shdw_cr0 = *new_shdw_cr0;

    // We don't care about most of the flags, so lets go for it 
    // and set them to the guest values
    *guest_cr0 = *shdw_cr0;

    // Except PG, PE, and NE, which are always set
    guest_cr0->pe = 1;
    guest_cr0->pg = 1;
    guest_cr0->ne = 1;

    if (paging_transition) {
        // Paging transition

        if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
            struct efer_64 * guest_efer = (struct efer_64 *)&(info->ctrl_regs.efer);

            /* Guest enabled paging with EFER.LME set: complete the switch
             * into long mode (set LMA and the VM-entry IA-32e mode control). */
            if (guest_efer->lme == 1) {
                PrintDebug("Enabling long mode\n");

                guest_efer->lma = 1;
                guest_efer->lme = 1;

                vmx_info->entry_ctrls.guest_ia32e = 1;
            }

            PrintDebug("Activating Shadow Page tables\n");

            if (v3_activate_shadow_pt(info) == -1) {
                PrintError("Failed to activate shadow page tables\n");
                return -1;
            }

        } else if (v3_activate_passthrough_pt(info) == -1) {
            PrintError("Failed to activate passthrough page tables\n");
            return -1;
        }
    }
   
    // PE loads its own RIP, otherwise we need to skip ahead an instruction

    vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
    info->rip += instr_len;
   
    return 0;
}
231
232 static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual cr_qual) {
233     v3_reg_t * reg = NULL;
234
235     switch (cr_qual.gpr) {
236         case 0:
237             reg = &(info->vm_regs.rax);
238             break;
239         case 1:
240             reg = &(info->vm_regs.rcx);
241             break;
242         case 2:
243             reg = &(info->vm_regs.rdx);
244             break;
245         case 3:
246             reg = &(info->vm_regs.rbx);
247             break;
248         case 4:
249             reg = &(info->vm_regs.rsp);
250             break;
251         case 5:
252             reg = &(info->vm_regs.rbp);
253             break;
254         case 6:
255             reg = &(info->vm_regs.rsi);
256             break;
257         case 7:
258             reg = &(info->vm_regs.rdi);
259             break;
260         case 8:
261             reg = &(info->vm_regs.r8);
262             break;
263         case 9:
264             reg = &(info->vm_regs.r9);
265             break;
266         case 10:
267             reg = &(info->vm_regs.r10);
268             break;
269         case 11:
270             reg = &(info->vm_regs.r11);
271             break;
272         case 12:
273             reg = &(info->vm_regs.r11);
274             break;
275         case 13:
276             reg = &(info->vm_regs.r13);
277             break;
278         case 14:
279             reg = &(info->vm_regs.r14);
280             break;
281         case 15:
282             reg = &(info->vm_regs.r15);
283             break;
284     }
285
286     return reg;
287 }
288
289