Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches can be checked out the same way, as in the example below.
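For example, to track a remote release branch (the branch name Release-1.2 below is only a placeholder; run the first command to see which branches actually exist):

  git branch -r                                            # list the available remote branches
  git checkout --track -b Release-1.2 origin/Release-1.2   # Release-1.2 is a hypothetical branch name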


Successful transition to vmxassist, then to the BIOS, where it dies in keyboard init.
palacios/src/palacios/vmx_handler.c
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmx_handler.h>
#include <palacios/vmm_types.h>
#include <palacios/vmm.h>
#include <palacios/vmcs.h>
#include <palacios/vmx_lowlevel.h>
#include <palacios/vmx_io.h>
#include <palacios/vmx.h>
#include <palacios/vmm_ctrl_regs.h>

/* Write a VMCS field and log any VMWRITE failure */
static int inline check_vmcs_write(vmcs_field_t field, addr_t val)
{
    int ret = 0;
    ret = vmcs_write(field, val);

    if (ret != VMX_SUCCESS) {
        PrintError("VMWRITE error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
        return 1;
    }

    return 0;
}

/* Read a VMCS field and log any VMREAD failure */
static int inline check_vmcs_read(vmcs_field_t field, void * val)
{
    int ret = 0;
    ret = vmcs_read(field, val);

    if (ret != VMX_SUCCESS) {
        PrintError("VMREAD error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
        return ret;
    }

    return 0;
}

/* Copy the access-rights bits of a VMCS segment descriptor into the
 * generic v3_segment representation */
static void inline translate_access_to_v3_seg(struct vmcs_segment_access * access,
        struct v3_segment * v3_seg)
{
    v3_seg->type = access->type;
    v3_seg->system = access->desc_type;
    v3_seg->dpl = access->dpl;
    v3_seg->present = access->present;
    v3_seg->avail = access->avail;
    v3_seg->long_mode = access->long_mode;
    v3_seg->db = access->db;
    v3_seg->granularity = access->granularity;
}

/* Refresh the guest_info state (RIP, RSP, control registers, and all
 * segment registers) from the current VMCS after an exit */
static void load_vmcs_guest_state(struct guest_info * info)
{
    check_vmcs_read(VMCS_GUEST_RIP, &(info->rip));
    check_vmcs_read(VMCS_GUEST_RSP, &(info->vm_regs.rsp));
    check_vmcs_read(VMCS_GUEST_CR0, &(info->ctrl_regs.cr0));
    check_vmcs_read(VMCS_GUEST_CR3, &(info->ctrl_regs.cr3));
    check_vmcs_read(VMCS_GUEST_CR4, &(info->ctrl_regs.cr4));

    struct vmcs_segment_access access;

    memset(&access, 0, sizeof(access));

    /* CS Segment */
    check_vmcs_read(VMCS_GUEST_CS_BASE, &(info->segments.cs.base));
    check_vmcs_read(VMCS_GUEST_CS_SELECTOR, &(info->segments.cs.selector));
    check_vmcs_read(VMCS_GUEST_CS_LIMIT, &(info->segments.cs.limit));
    check_vmcs_read(VMCS_GUEST_CS_ACCESS, &(access.value));

    translate_access_to_v3_seg(&access, &(info->segments.cs));

    /* SS Segment */
    check_vmcs_read(VMCS_GUEST_SS_BASE, &(info->segments.ss.base));
    check_vmcs_read(VMCS_GUEST_SS_SELECTOR, &(info->segments.ss.selector));
    check_vmcs_read(VMCS_GUEST_SS_LIMIT, &(info->segments.ss.limit));
    check_vmcs_read(VMCS_GUEST_SS_ACCESS, &(access.value));

    translate_access_to_v3_seg(&access, &(info->segments.ss));

    /* DS Segment */
    check_vmcs_read(VMCS_GUEST_DS_BASE, &(info->segments.ds.base));
    check_vmcs_read(VMCS_GUEST_DS_SELECTOR, &(info->segments.ds.selector));
    check_vmcs_read(VMCS_GUEST_DS_LIMIT, &(info->segments.ds.limit));
    check_vmcs_read(VMCS_GUEST_DS_ACCESS, &(access.value));

    translate_access_to_v3_seg(&access, &(info->segments.ds));

    /* ES Segment */
    check_vmcs_read(VMCS_GUEST_ES_BASE, &(info->segments.es.base));
    check_vmcs_read(VMCS_GUEST_ES_SELECTOR, &(info->segments.es.selector));
    check_vmcs_read(VMCS_GUEST_ES_LIMIT, &(info->segments.es.limit));
    check_vmcs_read(VMCS_GUEST_ES_ACCESS, &(access.value));

    translate_access_to_v3_seg(&access, &(info->segments.es));

    /* FS Segment */
    check_vmcs_read(VMCS_GUEST_FS_BASE, &(info->segments.fs.base));
    check_vmcs_read(VMCS_GUEST_FS_SELECTOR, &(info->segments.fs.selector));
    check_vmcs_read(VMCS_GUEST_FS_LIMIT, &(info->segments.fs.limit));
    check_vmcs_read(VMCS_GUEST_FS_ACCESS, &(access.value));

    translate_access_to_v3_seg(&access, &(info->segments.fs));

    /* GS Segment */
    check_vmcs_read(VMCS_GUEST_GS_BASE, &(info->segments.gs.base));
    check_vmcs_read(VMCS_GUEST_GS_SELECTOR, &(info->segments.gs.selector));
    check_vmcs_read(VMCS_GUEST_GS_LIMIT, &(info->segments.gs.limit));
    check_vmcs_read(VMCS_GUEST_GS_ACCESS, &(access.value));

    translate_access_to_v3_seg(&access, &(info->segments.gs));

    /* LDTR Segment */
    check_vmcs_read(VMCS_GUEST_LDTR_BASE, &(info->segments.ldtr.base));
    check_vmcs_read(VMCS_GUEST_LDTR_SELECTOR, &(info->segments.ldtr.selector));
    check_vmcs_read(VMCS_GUEST_LDTR_LIMIT, &(info->segments.ldtr.limit));
    check_vmcs_read(VMCS_GUEST_LDTR_ACCESS, &(access.value));

    translate_access_to_v3_seg(&access, &(info->segments.ldtr));

    /* TR Segment */
    check_vmcs_read(VMCS_GUEST_TR_BASE, &(info->segments.tr.base));
    check_vmcs_read(VMCS_GUEST_TR_SELECTOR, &(info->segments.tr.selector));
    check_vmcs_read(VMCS_GUEST_TR_LIMIT, &(info->segments.tr.limit));
    check_vmcs_read(VMCS_GUEST_TR_ACCESS, &(access.value));

    translate_access_to_v3_seg(&access, &(info->segments.tr));

    /* GDTR */
    check_vmcs_read(VMCS_GUEST_GDTR_BASE, &(info->segments.gdtr.base));
    check_vmcs_read(VMCS_GUEST_GDTR_LIMIT, &(info->segments.gdtr.limit));

    /* IDTR */
    check_vmcs_read(VMCS_GUEST_IDTR_BASE, &(info->segments.idtr.base));
    check_vmcs_read(VMCS_GUEST_IDTR_LIMIT, &(info->segments.idtr.limit));
}

/* Configure the guest for virtual-8086 mode execution of the real-mode
 * BIOS under vmxassist.  CS is set so that CS.base + RIP mirrors the
 * real-mode BIOS entry point at F000:FFF0 (linear 0xffff0). */
static void setup_v8086_mode_for_boot(struct guest_info * info)
{
    ((struct vmx_data *)info->vmm_data)->state = VMXASSIST_V8086_BIOS;
    struct rflags * flags = (struct rflags *)&(info->ctrl_regs.rflags);
    flags->rsvd1 = 1;
    flags->vm = 1;
    flags->iopl = 3;

    info->rip = 0xfff0;
    //info->vm_regs.rsp = 0x0;

    /* Zero the segment registers */
    memset(&(info->segments), 0, sizeof(struct v3_segment) * 6);

    info->segments.cs.selector = 0xf000;
    info->segments.cs.base = 0xf000 << 4;
    info->segments.cs.limit = 0xffff;
    info->segments.cs.type = 3;
    info->segments.cs.system = 1;
    info->segments.cs.dpl = 3;
    info->segments.cs.present = 1;
    info->segments.cs.granularity = 0;

    int i;

    /* Set values for selectors ds through ss */
    struct v3_segment * seg_ptr = (struct v3_segment *)&(info->segments);
    for (i = 1; i < 6; i++) {
        seg_ptr[i].selector = 0x0000;
        seg_ptr[i].base = 0x00000;
        seg_ptr[i].limit = 0xffff;
        seg_ptr[i].type = 3;
        seg_ptr[i].system = 1;
        seg_ptr[i].dpl = 3;
        seg_ptr[i].present = 1;
        seg_ptr[i].granularity = 0;
    }

    PrintDebug("END INFO!\n");
#if 0
    for (i = 6; i < 10; i++) {
        seg_ptr[i].base = 0x0;
        seg_ptr[i].limit = 0xffff;
    }

    info->segments.ldtr.type = 2;
    info->segments.ldtr.system = 0;
    info->segments.ldtr.present = 1;
    info->segments.ldtr.granularity = 0;

    info->segments.tr.type = 3;
    info->segments.tr.system = 0;
    info->segments.tr.present = 1;
    info->segments.tr.granularity = 0;
#endif
}

/* Handle a control-register access exit.  Only the MOV-to-CR0 that clears
 * CR0.PE during VMXASSIST startup is handled; it drops the guest into
 * virtual-8086 mode so the BIOS can run. */
static int inline handle_cr_access(struct guest_info * info, ulong_t exit_qual)
{
    struct vmexit_cr_qual * cr_qual = (struct vmexit_cr_qual *)&exit_qual;

    if (cr_qual->access_type < 2) {
        ulong_t reg = 0;

        /* Fetch the general-purpose register named in the exit qualification */
        switch (cr_qual->gpr) {
            case 0:
                reg = info->vm_regs.rax;
                break;
            case 1:
                reg = info->vm_regs.rcx;
                break;
            case 2:
                reg = info->vm_regs.rdx;
                break;
            case 3:
                reg = info->vm_regs.rbx;
                break;
            case 4:
                reg = info->vm_regs.rsp;
                break;
            case 5:
                reg = info->vm_regs.rbp;
                break;
            case 6:
                reg = info->vm_regs.rsi;
                break;
            case 7:
                reg = info->vm_regs.rdi;
                break;
            case 8:
                reg = info->vm_regs.r8;
                break;
            case 9:
                reg = info->vm_regs.r9;
                break;
            case 10:
                reg = info->vm_regs.r10;
                break;
            case 11:
                reg = info->vm_regs.r11;
                break;
            case 12:
                reg = info->vm_regs.r12;
                break;
            case 13:
                reg = info->vm_regs.r13;
                break;
            case 14:
                reg = info->vm_regs.r14;
                break;
            case 15:
                reg = info->vm_regs.r15;
                break;
        }
        PrintDebug("RAX: %p\n", (void *)info->vm_regs.rax);

        if (cr_qual->cr_id == 0
                && (~reg & CR0_PE)
                && ((struct vmx_data *)info->vmm_data)->state == VMXASSIST_STARTUP) {
            setup_v8086_mode_for_boot(info);
            info->shdw_pg_state.guest_cr0 = 0x0;
            v3_update_vmcs_guest_state(info);
            return 0;
        }
    }
    PrintError("Unhandled CR access\n");
    return -1;
}

/* Top-level VMX exit handler: read the exit reason and qualification,
 * refresh the cached guest state from the VMCS, dispatch on the exit
 * reason, and write any modified guest state back before resuming. */
int v3_handle_vmx_exit(struct v3_gprs * gprs, struct guest_info * info)
{
    uint32_t exit_reason;
    ulong_t exit_qual;

    check_vmcs_read(VMCS_EXIT_REASON, &exit_reason);
    check_vmcs_read(VMCS_EXIT_QUAL, &exit_qual);

    PrintDebug("VMX Exit taken, id-qual: %u-%lu\n", exit_reason, exit_qual);

    /* Update guest state */
    load_vmcs_guest_state(info);

    switch (exit_reason) {
        case VMEXIT_INFO_EXCEPTION_OR_NMI: {
            uint32_t int_info;
            pf_error_t error_code;

            check_vmcs_read(VMCS_EXIT_INT_INFO, &int_info);
            check_vmcs_read(VMCS_EXIT_INT_ERR, &error_code);

            /* The low byte of the interruption info holds the vector;
             * vector 0x0e is a page fault */
            if ((uint8_t)int_info == 0x0e) {
                PrintDebug("Page Fault at %p\n", (void *)exit_qual);

                if (info->shdw_pg_mode == SHADOW_PAGING) {
                    if (v3_handle_shadow_pagefault(info, (addr_t)exit_qual, error_code) == -1) {
                        return -1;
                    }
                } else {
                    PrintError("Page fault in unimplemented paging mode\n");
                    return -1;
                }
            } else {
                PrintDebug("Unknown exception: 0x%x\n", (uint8_t)int_info);
                v3_print_GPRs(info);
                return -1;
            }
            break;
        }

        case VMEXIT_IO_INSTR: {
            struct vmexit_io_qual * io_qual = (struct vmexit_io_qual *)&exit_qual;

            /* Direction bit: 0 is an OUT, 1 is an IN */
            if (io_qual->dir == 0) {
                if (io_qual->string) {
                    if (v3_handle_vmx_io_outs(info) == -1) {
                        return -1;
                    }
                } else {
                    if (v3_handle_vmx_io_out(info) == -1) {
                        return -1;
                    }
                }
            } else {
                if (io_qual->string) {
                    if (v3_handle_vmx_io_ins(info) == -1) {
                        return -1;
                    }
                } else {
                    if (v3_handle_vmx_io_in(info) == -1) {
                        return -1;
                    }
                }
            }
            break;
        }

        case VMEXIT_CR_REG_ACCESSES:
            if (handle_cr_access(info, exit_qual) != 0) {
                return -1;
            }
            break;

        default:
            PrintError("Unhandled VMEXIT\n");
            return -1;
    }

    /* Write any modified guest state back into the VMCS */
    check_vmcs_write(VMCS_GUEST_CR0, info->ctrl_regs.cr0);
    check_vmcs_write(VMCS_GUEST_CR3, info->ctrl_regs.cr3);
    check_vmcs_write(VMCS_GUEST_CR4, info->ctrl_regs.cr4);
    check_vmcs_write(VMCS_GUEST_RIP, info->rip);
    check_vmcs_write(VMCS_GUEST_RSP, info->vm_regs.rsp);

    check_vmcs_write(VMCS_CR0_READ_SHDW, info->shdw_pg_state.guest_cr0);

    return 0;
}