Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute:
  cd palacios
  git checkout --track -b devel origin/devel
The other branches work the same way; see the example below.
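For example, to track a release branch (the branch name Release-1.2 below is only illustrative; list the remote branches first to see which ones actually exist):
  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2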


Minor fix
palacios/src/palacios/vmm_symcall.c
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vm_guest.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_symcall.h>
#include <palacios/vmm_symspy.h>
#include <palacios/vmm_msr.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/vmm_debug.h>

// A successful symcall returns via the RET_HCALL, with the return values in registers.
// A symcall error returns via the ERR_HCALL, with the error code in rbx.


/* Notes: We use a combination of SYSCALL and SYSENTER semantics.
 * SYSCALL just sets the RIP, CS/SS seg, and GS seg via swapgs;
 * the RSP is loaded via the structure pointed to by GS.
 * That is safe for SYSCALL because system calls are guaranteed to be made with an empty kernel stack.
 * We cannot make that assumption for a symcall, so we have to have our own stack area somewhere.
 * SYSENTER does not really use the GS base MSRs, but we do, in order to map to 64-bit kernels.
 */

#define SYMCALL_RIP_MSR 0x536
#define SYMCALL_RSP_MSR 0x537
#define SYMCALL_CS_MSR  0x538
#define SYMCALL_GS_MSR  0x539
#define SYMCALL_FS_MSR  0x540


static int symcall_msr_read(struct guest_info * core, uint_t msr,
                            struct v3_msr * dst, void * priv_data) {
    struct v3_symcall_state * state = &(core->sym_core_state.symcall_state);

    switch (msr) {
        case SYMCALL_RIP_MSR:
            dst->value = state->sym_call_rip;
            break;
        case SYMCALL_RSP_MSR:
            dst->value = state->sym_call_rsp;
            break;
        case SYMCALL_CS_MSR:
            dst->value = state->sym_call_cs;
            break;
        case SYMCALL_GS_MSR:
            dst->value = state->sym_call_gs;
            break;
        case SYMCALL_FS_MSR:
            dst->value = state->sym_call_fs;
            break;
        default:
            return -1;
    }

    return 0;
}

static int symcall_msr_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data) {
    struct v3_symcall_state * state = &(core->sym_core_state.symcall_state);

    switch (msr) {
        case SYMCALL_RIP_MSR:
            state->sym_call_rip = src.value;
            break;
        case SYMCALL_RSP_MSR:
            state->sym_call_rsp = src.value;
            break;
        case SYMCALL_CS_MSR:
            state->sym_call_cs = src.value;
            break;
        case SYMCALL_GS_MSR:
            state->sym_call_gs = src.value;
            break;
        case SYMCALL_FS_MSR:
            state->sym_call_fs = src.value;
            break;
        default:
            PrintError(core->vm_info, core, "Invalid Symbiotic MSR write (0x%x)\n", msr);
            return -1;
    }
    return 0;
}


static int sym_call_ret(struct guest_info * info, uint_t hcall_id, void * private_data);
static int sym_call_err(struct guest_info * info, uint_t hcall_id, void * private_data);




int v3_init_symcall_vm(struct v3_vm_info * vm) {

    v3_hook_msr(vm, SYMCALL_RIP_MSR, symcall_msr_read, symcall_msr_write, NULL);
    v3_hook_msr(vm, SYMCALL_RSP_MSR, symcall_msr_read, symcall_msr_write, NULL);
    v3_hook_msr(vm, SYMCALL_CS_MSR, symcall_msr_read, symcall_msr_write, NULL);
    v3_hook_msr(vm, SYMCALL_GS_MSR, symcall_msr_read, symcall_msr_write, NULL);
    v3_hook_msr(vm, SYMCALL_FS_MSR, symcall_msr_read, symcall_msr_write, NULL);

    v3_register_hypercall(vm, SYMCALL_RET_HCALL, sym_call_ret, NULL);
    v3_register_hypercall(vm, SYMCALL_ERR_HCALL, sym_call_err, NULL);


    return 0;
}





static int sym_call_err(struct guest_info * core, uint_t hcall_id, void * private_data) {
    struct v3_symcall_state * state = (struct v3_symcall_state *)&(core->sym_core_state.symcall_state);

    PrintError(core->vm_info, core, "sym call error\n");

    state->sym_call_errno = (int)core->vm_regs.rbx;
    v3_print_guest_state(core);
    v3_print_mem_map(core->vm_info);

    // flag the sym call as having returned with an error
    state->sym_call_error = 1;
    state->sym_call_returned = 1;

    return -1;
}

static int sym_call_ret(struct guest_info * core, uint_t hcall_id, void * private_data) {
    struct v3_symcall_state * state = (struct v3_symcall_state *)&(core->sym_core_state.symcall_state);

    //    PrintError(info->vm_info, info, "Return from sym call (ID=%x)\n", hcall_id);
    //   v3_print_guest_state(info);

    state->sym_call_returned = 1;

    return 0;
}

static int execute_symcall(struct guest_info * core) {
    struct v3_symcall_state * state = (struct v3_symcall_state *)&(core->sym_core_state.symcall_state);

    // Re-enter the guest until the symcall completes via the RET_HCALL or ERR_HCALL handlers
    while (state->sym_call_returned == 0) {
        if (v3_vm_enter(core) == -1) {
            PrintError(core->vm_info, core, "Error in Sym call\n");
            return -1;
        }
    }

    return 0;
}


//
// We don't handle those fancy 64 bit system segments...
//
static int translate_segment(struct guest_info * info, uint16_t selector, struct v3_segment * seg) {
    struct v3_segment * gdt = &(info->segments.gdtr);
    addr_t gdt_addr = 0;
    uint16_t seg_offset = (selector & ~0x7);
    addr_t seg_addr = 0;
    struct gen_segment * gen_seg = NULL;
    struct seg_selector sel;

    memset(seg, 0, sizeof(struct v3_segment));

    sel.value = selector;

    if (sel.ti == 1) {
        PrintError(info->vm_info, info, "LDT translations not supported\n");
        return -1;
    }

    if (v3_gva_to_hva(info, gdt->base, &gdt_addr) == -1) {
        PrintError(info->vm_info, info, "Unable to translate GDT address\n");
        return -1;
    }

    seg_addr = gdt_addr + seg_offset;
    gen_seg = (struct gen_segment *)seg_addr;

    //translate
    seg->selector = selector;

    seg->limit = gen_seg->limit_hi;
    seg->limit <<= 16;
    seg->limit += gen_seg->limit_lo;

    seg->base = gen_seg->base_hi;
    seg->base <<= 24;
    seg->base += gen_seg->base_lo;

    if (gen_seg->granularity == 1) {
        seg->limit <<= 12;
        seg->limit |= 0xfff;
    }

    seg->type = gen_seg->type;
    seg->system = gen_seg->system;
    seg->dpl = gen_seg->dpl;
    seg->present = gen_seg->present;
    seg->avail = gen_seg->avail;
    seg->long_mode = gen_seg->long_mode;
    seg->db = gen_seg->db;
    seg->granularity = gen_seg->granularity;

    return 0;
}



int v3_sym_call(struct guest_info * core,
                uint64_t call_num, sym_arg_t * arg0,
                sym_arg_t * arg1, sym_arg_t * arg2,
                sym_arg_t * arg3, sym_arg_t * arg4) {
    struct v3_symcall_state * state = (struct v3_symcall_state *)&(core->sym_core_state.symcall_state);
    struct v3_symspy_local_state * symspy_state = (struct v3_symspy_local_state *)&(core->sym_core_state.symspy_state);
    struct v3_sym_cpu_context * old_ctx = (struct v3_sym_cpu_context *)&(state->old_ctx);
    struct v3_segment sym_cs;
    struct v3_segment sym_ss;
    uint64_t trash_args[5] = { [0 ... 4] = 0 };

    //   PrintDebug(core->vm_info, core, "Making Sym call\n");
    //    v3_print_guest_state(info);

    if ((symspy_state->local_page->sym_call_enabled == 0) ||
        (symspy_state->local_page->sym_call_active == 1)) {
        return -1;
    }

    if (!arg0) arg0 = &trash_args[0];
    if (!arg1) arg1 = &trash_args[1];
    if (!arg2) arg2 = &trash_args[2];
    if (!arg3) arg3 = &trash_args[3];
    if (!arg4) arg4 = &trash_args[4];

    // Save the old context
    memcpy(&(old_ctx->vm_regs), &(core->vm_regs), sizeof(struct v3_gprs));
    memcpy(&(old_ctx->cs), &(core->segments.cs), sizeof(struct v3_segment));
    memcpy(&(old_ctx->ss), &(core->segments.ss), sizeof(struct v3_segment));
    old_ctx->gs_base = core->segments.gs.base;
    old_ctx->fs_base = core->segments.fs.base;
    old_ctx->rip = core->rip;
    old_ctx->cpl = core->cpl;
    old_ctx->flags = core->ctrl_regs.rflags;

    // Setup the sym call context
    core->rip = state->sym_call_rip;
    core->vm_regs.rsp = state->sym_call_rsp; // old context rsp is saved in vm_regs

    translate_segment(core, state->sym_call_cs, &sym_cs);
    memcpy(&(core->segments.cs), &sym_cs, sizeof(struct v3_segment));

    translate_segment(core, state->sym_call_cs + 8, &sym_ss);
    memcpy(&(core->segments.ss), &sym_ss, sizeof(struct v3_segment));

    core->segments.gs.base = state->sym_call_gs;
    core->segments.fs.base = state->sym_call_fs;
    core->cpl = 0;

    core->vm_regs.rax = call_num;
    core->vm_regs.rbx = *arg0;
    core->vm_regs.rcx = *arg1;
    core->vm_regs.rdx = *arg2;
    core->vm_regs.rsi = *arg3;
    core->vm_regs.rdi = *arg4;

    // Mark sym call as active
    state->sym_call_active = 1;
    state->sym_call_returned = 0;

    //    PrintDebug(core->vm_info, core, "Sym state\n");
    //  v3_print_guest_state(core);

    // Do the sym call entry
    if (execute_symcall(core) == -1) {
        PrintError(core->vm_info, core, "SYMCALL error\n");
        return -1;
    }

    // clear sym flags
    state->sym_call_active = 0;

    *arg0 = core->vm_regs.rbx;
    *arg1 = core->vm_regs.rcx;
    *arg2 = core->vm_regs.rdx;
    *arg3 = core->vm_regs.rsi;
    *arg4 = core->vm_regs.rdi;

    // restore guest state
    memcpy(&(core->vm_regs), &(old_ctx->vm_regs), sizeof(struct v3_gprs));
    memcpy(&(core->segments.cs), &(old_ctx->cs), sizeof(struct v3_segment));
    memcpy(&(core->segments.ss), &(old_ctx->ss), sizeof(struct v3_segment));
    core->segments.gs.base = old_ctx->gs_base;
    core->segments.fs.base = old_ctx->fs_base;
    core->rip = old_ctx->rip;
    core->cpl = old_ctx->cpl;
    core->ctrl_regs.rflags = old_ctx->flags;



    //    PrintError(core->vm_info, core, "restoring guest state\n");
    //    v3_print_guest_state(core);

    return 0;
}

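For reference, here is a minimal sketch of how VMM-side code might invoke the entry point above. The call number SYMCALL_TEST and the function do_test_symcall are hypothetical, for illustration only; real call numbers come from the symbiotic interface headers, not from this file. Arguments are passed by pointer and, per the convention noted at the top of the file, come back holding the values the guest left in rbx/rcx/rdx/rsi/rdi; NULL argument slots are redirected to scratch storage inside v3_sym_call.

#include <palacios/vm_guest.h>
#include <palacios/vmm_symcall.h>

#define SYMCALL_TEST 0x1   /* hypothetical call number, for illustration only */

static int do_test_symcall(struct guest_info * core) {
    sym_arg_t arg0 = 42;   /* placed in rbx on entry; overwritten with the rbx value the guest returns */
    sym_arg_t arg1 = 0;    /* rcx in, rcx out */

    /* Unused argument slots may be passed as NULL; v3_sym_call points them at scratch storage. */
    if (v3_sym_call(core, SYMCALL_TEST, &arg0, &arg1, NULL, NULL, NULL) == -1) {
        return -1;
    }

    return (int)arg0;
}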