Palacios Public Git Repository

To checkout Palacios execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


Updated symbiotic interfaces for multicore support
[palacios.git] / palacios / src / palacios / vmm_sym_iface.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20
21 #include <palacios/vmm.h>
22 #include <palacios/vmm_msr.h>
23 #include <palacios/vmm_mem.h>
24 #include <palacios/vmm_hypercall.h>
25 #include <palacios/vm_guest.h>
26 #include <palacios/vmm_sprintf.h>
27
28
29 #define SYMSPY_GLOBAL_MSR 0x534
30 #define SYMSPY_LOCAL_MSR 0x535
31
32 #define SYM_CPUID_NUM 0x90000000
33
// A successful symcall returns via the RET_HCALL, with the return values in registers
35 // A symcall error returns via the ERR_HCALL with the error code in rbx
36 #define SYM_CALL_RET_HCALL 0x535
37 #define SYM_CALL_ERR_HCALL 0x536
38
39
40 /* Notes: We use a combination of SYSCALL and SYSENTER Semantics 
41  * SYSCALL just sets an EIP, CS/SS seg, and GS seg via swapgs
42  * the RSP is loaded via the structure pointed to by GS
43  * This is safe because it assumes that system calls are guaranteed to be made with an empty kernel stack.
44  * We cannot make that assumption with a symcall, so we have to have our own stack area somewhere.
 * SYSENTER does not really use the GS base MSRs, but we do to map to 64 bit kernels
46  */
47
48 #define SYMCALL_RIP_MSR 0x536
49 #define SYMCALL_RSP_MSR 0x537
50 #define SYMCALL_CS_MSR  0x538
51 #define SYMCALL_GS_MSR  0x539
52 #define SYMCALL_FS_MSR  0x540
53
54 static int symspy_msr_read(struct guest_info * core, uint_t msr, 
55                     struct v3_msr * dst, void * priv_data) {
56     struct v3_sym_global_state * global_state = &(core->vm_info->sym_global_state);
57     struct v3_sym_local_state * local_state = &(core->sym_local_state);
58
59     switch (msr) {
60         case SYMSPY_GLOBAL_MSR:
61             dst->value = global_state->global_guest_pa;
62             break;
63         case SYMSPY_LOCAL_MSR:
64             dst->value = local_state->local_guest_pa;
65             break;
66         default:
67             return -1;
68     }
69
70     return 0;
71 }
72
73 static int symcall_msr_read(struct guest_info * core, uint_t msr, 
74                             struct v3_msr * dst, void * priv_data) {
75     struct v3_symcall_state * state = &(core->sym_local_state.symcall_state);
76
77     switch (msr) {
78         case SYMCALL_RIP_MSR:
79             dst->value = state->sym_call_rip;
80             break;
81         case SYMCALL_RSP_MSR:
82             dst->value = state->sym_call_rsp;
83             break;
84         case SYMCALL_CS_MSR:
85             dst->value = state->sym_call_cs;
86             break;
87         case SYMCALL_GS_MSR:
88             dst->value = state->sym_call_gs;
89             break;
90         case SYMCALL_FS_MSR:
91             dst->value = state->sym_call_fs;
92             break;
93         default:
94             return -1;
95     }
96
97     return 0;
98 }
99
100 static int symspy_msr_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data) {
101
102     if (msr == SYMSPY_GLOBAL_MSR) {
103         struct v3_sym_global_state * global_state = &(core->vm_info->sym_global_state);
104
105         PrintDebug("Symbiotic Glbal MSR write for page %p\n", (void *)(addr_t)src.value);
106
107         if (global_state->active == 1) {
108             // unmap page
109             struct v3_shadow_region * old_reg = v3_get_shadow_region(core->vm_info, core->cpu_id, 
110                                                                      (addr_t)global_state->global_guest_pa);
111
112             if (old_reg == NULL) {
113                 PrintError("Could not find previously active symbiotic page (%p)\n", 
114                            (void *)(addr_t)global_state->global_guest_pa);
115                 return -1;
116             }
117
118             v3_delete_shadow_region(core->vm_info, old_reg);
119         }
120
121         global_state->global_guest_pa = src.value;
122         global_state->global_guest_pa &= ~0xfffLL;
123
124         global_state->active = 1;
125
126         // map page
127         v3_add_shadow_mem(core->vm_info, V3_MEM_CORE_ANY, (addr_t)global_state->global_guest_pa, 
128                           (addr_t)(global_state->global_guest_pa + PAGE_SIZE_4KB - 1), 
129                           global_state->global_page_pa);
130     } else if (msr == SYMSPY_LOCAL_MSR) {
131         struct v3_sym_local_state * local_state = &(core->sym_local_state);
132
133         PrintDebug("Symbiotic Local MSR write for page %p\n", (void *)(addr_t)src.value);
134
135         if (local_state->active == 1) {
136             // unmap page
137             struct v3_shadow_region * old_reg = v3_get_shadow_region(core->vm_info, core->cpu_id,
138                                                                      (addr_t)local_state->local_guest_pa);
139
140             if (old_reg == NULL) {
141                 PrintError("Could not find previously active symbiotic page (%p)\n", 
142                            (void *)(addr_t)local_state->local_guest_pa);
143                 return -1;
144             }
145
146             v3_delete_shadow_region(core->vm_info, old_reg);
147         }
148
149         local_state->local_guest_pa = src.value;
150         local_state->local_guest_pa &= ~0xfffLL;
151
152         local_state->active = 1;
153
154         // map page
155         v3_add_shadow_mem(core->vm_info, core->cpu_id, (addr_t)local_state->local_guest_pa, 
156                           (addr_t)(local_state->local_guest_pa + PAGE_SIZE_4KB - 1), 
157                           local_state->local_page_pa);
158     } else {
159         PrintError("Invalid Symbiotic MSR write (0x%x)\n", msr);
160         return -1;
161     }
162
163     return 0;
164 }
165
166
167 static int symcall_msr_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data) {
168     struct v3_symcall_state * state = &(core->sym_local_state.symcall_state);
169
170     switch (msr) {
171         case SYMCALL_RIP_MSR:
172             state->sym_call_rip = src.value;
173             break;
174         case SYMCALL_RSP_MSR:
175             state->sym_call_rsp = src.value;
176             break;
177         case SYMCALL_CS_MSR:
178             state->sym_call_cs = src.value;
179             break;
180         case SYMCALL_GS_MSR:
181             state->sym_call_gs = src.value;
182             break;
183         case SYMCALL_FS_MSR:
184             state->sym_call_fs = src.value;
185             break;
186         default:
187             PrintError("Invalid Symbiotic MSR write (0x%x)\n", msr);
188             return -1;
189     }
190     return 0;
191 }
192
193 static int cpuid_fn(struct guest_info * core, uint32_t cpuid, 
194                     uint32_t * eax, uint32_t * ebx,
195                     uint32_t * ecx, uint32_t * edx,
196                     void * private_data) {
197     extern v3_cpu_arch_t v3_cpu_types[];
198
199     *eax = *(uint32_t *)"V3V";
200
201     if ((v3_cpu_types[core->cpu_id] == V3_SVM_CPU) || 
202         (v3_cpu_types[core->cpu_id] == V3_SVM_REV3_CPU)) {
203         *ebx = *(uint32_t *)"SVM";
204     } else if ((v3_cpu_types[core->cpu_id] == V3_VMX_CPU) || 
205                (v3_cpu_types[core->cpu_id] == V3_VMX_EPT_CPU)) {
206         *ebx = *(uint32_t *)"VMX";
207     }
208
209
210     return 0;
211 }
212
213
214 static int sym_call_ret(struct guest_info * info, uint_t hcall_id, void * private_data);
215 static int sym_call_err(struct guest_info * info, uint_t hcall_id, void * private_data);
216
217
218
219 int v3_init_sym_iface(struct v3_vm_info * vm) {
220     struct v3_sym_global_state * global_state = &(vm->sym_global_state);
221     memset(global_state, 0, sizeof(struct v3_sym_global_state));
222
223     global_state->global_page_pa = (addr_t)V3_AllocPages(1);
224     global_state->sym_page = (struct v3_sym_global_page *)V3_VAddr((void *)global_state->global_page_pa);
225     memset(global_state->sym_page, 0, PAGE_SIZE_4KB);
226
227     memcpy(&(global_state->sym_page->magic), "V3V", 3);
228
229     v3_hook_msr(vm, SYMSPY_LOCAL_MSR, symspy_msr_read, symspy_msr_write, NULL);
230     v3_hook_msr(vm, SYMSPY_GLOBAL_MSR, symspy_msr_read, symspy_msr_write, NULL);
231
232     v3_hook_cpuid(vm, SYM_CPUID_NUM, cpuid_fn, NULL);
233
234     v3_hook_msr(vm, SYMCALL_RIP_MSR, symcall_msr_read, symcall_msr_write, NULL);
235     v3_hook_msr(vm, SYMCALL_RSP_MSR, symcall_msr_read, symcall_msr_write, NULL);
236     v3_hook_msr(vm, SYMCALL_CS_MSR, symcall_msr_read, symcall_msr_write, NULL);
237     v3_hook_msr(vm, SYMCALL_GS_MSR, symcall_msr_read, symcall_msr_write, NULL);
238     v3_hook_msr(vm, SYMCALL_FS_MSR, symcall_msr_read, symcall_msr_write, NULL);
239
240     v3_register_hypercall(vm, SYM_CALL_RET_HCALL, sym_call_ret, NULL);
241     v3_register_hypercall(vm, SYM_CALL_ERR_HCALL, sym_call_err, NULL);
242
243     return 0;
244 }
245
246
247 int v3_init_sym_core(struct guest_info * core) {
248     struct v3_sym_local_state * local_state = &(core->sym_local_state);
249     memset(local_state, 0, sizeof(struct v3_sym_local_state));
250
251     local_state->local_page_pa = (addr_t)V3_AllocPages(1);
252     local_state->local_page = (struct v3_sym_local_page *)V3_VAddr((void *)local_state->local_page_pa);
253     memset(local_state->local_page, 0, PAGE_SIZE_4KB);
254
255     snprintf((uint8_t *)&(local_state->local_page->magic), 8, "V3V.%d", core->cpu_id);
256
257     return 0;
258 }
259
260
261 int v3_sym_map_pci_passthrough(struct v3_vm_info * vm, uint_t bus, uint_t dev, uint_t fn) {
262     struct v3_sym_global_state * global_state = &(vm->sym_global_state);
263     uint_t dev_index = (bus << 8) + (dev << 3) + fn;
264     uint_t major = dev_index / 8;
265     uint_t minor = dev_index % 8;
266
267     if (bus > 3) {
268         PrintError("Invalid PCI bus %d\n", bus);
269         return -1;
270     }
271
272     PrintDebug("Setting passthrough pci map for index=%d\n", dev_index);
273
274     global_state->sym_page->pci_pt_map[major] |= 0x1 << minor;
275
276     PrintDebug("pt_map entry=%x\n",   global_state->sym_page->pci_pt_map[major]);
277
278     PrintDebug("pt map vmm addr=%p\n", global_state->sym_page->pci_pt_map);
279
280     return 0;
281 }
282
283 int v3_sym_unmap_pci_passthrough(struct v3_vm_info * vm, uint_t bus, uint_t dev, uint_t fn) {
284     struct v3_sym_global_state * global_state = &(vm->sym_global_state);
285     uint_t dev_index = (bus << 8) + (dev << 3) + fn;
286     uint_t major = dev_index / 8;
287     uint_t minor = dev_index % 8;
288
289     if (bus > 3) {
290         PrintError("Invalid PCI bus %d\n", bus);
291         return -1;
292     }
293
294     global_state->sym_page->pci_pt_map[major] &= ~(0x1 << minor);
295
296     return 0;
297 }
298
299
300 static int sym_call_err(struct guest_info * core, uint_t hcall_id, void * private_data) {
301     struct v3_symcall_state * state = (struct v3_symcall_state *)&(core->sym_local_state.symcall_state);
302
303     PrintError("sym call error\n");
304
305     state->sym_call_errno = (int)core->vm_regs.rbx;
306     v3_print_guest_state(core);
307     v3_print_mem_map(core->vm_info);
308
309     // clear sym flags
310     state->sym_call_error = 1;
311     state->sym_call_returned = 1;
312
313     return -1;
314 }
315
316 static int sym_call_ret(struct guest_info * core, uint_t hcall_id, void * private_data) {
317     struct v3_symcall_state * state = (struct v3_symcall_state *)&(core->sym_local_state.symcall_state);
318
319     //    PrintError("Return from sym call (ID=%x)\n", hcall_id);
320     //   v3_print_guest_state(info);
321
322     state->sym_call_returned = 1;
323
324     return 0;
325 }
326
327 static int execute_symcall(struct guest_info * core) {
328     struct v3_symcall_state * state = (struct v3_symcall_state *)&(core->sym_local_state.symcall_state);
329
330     while (state->sym_call_returned == 0) {
331         if (v3_vm_enter(core) == -1) {
332             PrintError("Error in Sym call\n");
333             return -1;
334         }
335     }
336
337     return 0;
338 }
339
340
341 int v3_sym_call(struct guest_info * core, 
342                 uint64_t call_num, sym_arg_t * arg0, 
343                 sym_arg_t * arg1, sym_arg_t * arg2,
344                 sym_arg_t * arg3, sym_arg_t * arg4) {
345     struct v3_sym_local_state * sym_state = (struct v3_sym_local_state *)&(core->sym_local_state);
346     struct v3_symcall_state * state = (struct v3_symcall_state *)&(sym_state->symcall_state);
347     struct v3_sym_cpu_context * old_ctx = (struct v3_sym_cpu_context *)&(state->old_ctx);
348     struct v3_segment sym_cs;
349     struct v3_segment sym_ss;
350     uint64_t trash_args[5] = { [0 ... 4] = 0 };
351
352     //   PrintDebug("Making Sym call\n");
353     //    v3_print_guest_state(info);
354
355     if ((sym_state->local_page->sym_call_enabled == 0) ||
356         (state->sym_call_active == 1)) {
357         return -1;
358     }
359     
360     if (!arg0) arg0 = &trash_args[0];
361     if (!arg1) arg1 = &trash_args[1];
362     if (!arg2) arg2 = &trash_args[2];
363     if (!arg3) arg3 = &trash_args[3];
364     if (!arg4) arg4 = &trash_args[4];
365
366     // Save the old context
367     memcpy(&(old_ctx->vm_regs), &(core->vm_regs), sizeof(struct v3_gprs));
368     memcpy(&(old_ctx->cs), &(core->segments.cs), sizeof(struct v3_segment));
369     memcpy(&(old_ctx->ss), &(core->segments.ss), sizeof(struct v3_segment));
370     old_ctx->gs_base = core->segments.gs.base;
371     old_ctx->fs_base = core->segments.fs.base;
372     old_ctx->rip = core->rip;
373     old_ctx->cpl = core->cpl;
374     old_ctx->flags = core->ctrl_regs.rflags;
375
376     // Setup the sym call context
377     core->rip = state->sym_call_rip;
378     core->vm_regs.rsp = state->sym_call_rsp; // old contest rsp is saved in vm_regs
379
380     v3_translate_segment(core, state->sym_call_cs, &sym_cs);
381     memcpy(&(core->segments.cs), &sym_cs, sizeof(struct v3_segment));
382  
383     v3_translate_segment(core, state->sym_call_cs + 8, &sym_ss);
384     memcpy(&(core->segments.ss), &sym_ss, sizeof(struct v3_segment));
385
386     core->segments.gs.base = state->sym_call_gs;
387     core->segments.fs.base = state->sym_call_fs;
388     core->cpl = 0;
389
390     core->vm_regs.rax = call_num;
391     core->vm_regs.rbx = *arg0;
392     core->vm_regs.rcx = *arg1;
393     core->vm_regs.rdx = *arg2;
394     core->vm_regs.rsi = *arg3;
395     core->vm_regs.rdi = *arg4;
396
397     // Mark sym call as active
398     state->sym_call_active = 1;
399     state->sym_call_returned = 0;
400
401     //    PrintDebug("Sym state\n");
402     //  v3_print_guest_state(core);
403
404     // Do the sym call entry
405     if (execute_symcall(core) == -1) {
406         PrintError("SYMCALL error\n");
407         return -1;
408     }
409
410     // clear sym flags
411     state->sym_call_active = 0;
412
413     *arg0 = core->vm_regs.rbx;
414     *arg1 = core->vm_regs.rcx;
415     *arg2 = core->vm_regs.rdx;
416     *arg3 = core->vm_regs.rsi;
417     *arg4 = core->vm_regs.rdi;
418
419     // restore guest state
420     memcpy(&(core->vm_regs), &(old_ctx->vm_regs), sizeof(struct v3_gprs));
421     memcpy(&(core->segments.cs), &(old_ctx->cs), sizeof(struct v3_segment));
422     memcpy(&(core->segments.ss), &(old_ctx->ss), sizeof(struct v3_segment));
423     core->segments.gs.base = old_ctx->gs_base;
424     core->segments.fs.base = old_ctx->fs_base;
425     core->rip = old_ctx->rip;
426     core->cpl = old_ctx->cpl;
427     core->ctrl_regs.rflags = old_ctx->flags;
428
429
430
431     //    PrintError("restoring guest state\n");
432     //    v3_print_guest_state(core);
433
434     return 0;
435 }
436
437