2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
21 #include <palacios/vmm.h>
22 #include <palacios/vmm_msr.h>
23 #include <palacios/vmm_mem.h>
24 #include <palacios/vmm_hypercall.h>
25 #include <palacios/vm_guest.h>
26 #include <palacios/vmm_sprintf.h>
29 #define SYMSPY_GLOBAL_MSR 0x534
30 #define SYMSPY_LOCAL_MSR 0x535
32 #define SYM_CPUID_NUM 0x90000000
34 // A successful symcall returns via the RET_HCALL, with the return values in registers
35 // A symcall error returns via the ERR_HCALL with the error code in rbx
36 #define SYM_CALL_RET_HCALL 0x535
37 #define SYM_CALL_ERR_HCALL 0x536
40 /* Notes: We use a combination of SYSCALL and SYSENTER Semantics
41 * SYSCALL just sets an EIP, CS/SS seg, and GS seg via swapgs
42 * the RSP is loaded via the structure pointed to by GS
43 * This is safe because it assumes that system calls are guaranteed to be made with an empty kernel stack.
44 * We cannot make that assumption with a symcall, so we have to have our own stack area somewhere.
45 * SYSENTER does not really use the GS base MSRs, but we do to map to 64 bit kernels
48 #define SYMCALL_RIP_MSR 0x536
49 #define SYMCALL_RSP_MSR 0x537
50 #define SYMCALL_CS_MSR 0x538
51 #define SYMCALL_GS_MSR 0x539
52 #define SYMCALL_FS_MSR 0x540
/* MSR read handler for the two SymSpy page MSRs: reports back to the guest the
 * guest-physical address it previously registered for the global (VM-wide) or
 * local (per-core) symbiotic page.
 * NOTE(review): the switch(msr) header, break statements, default case, and
 * return appear to be missing from this excerpt — confirm against the full
 * source.
 */
54 static int symspy_msr_read(struct guest_info * core, uint_t msr,
55 struct v3_msr * dst, void * priv_data) {
56 struct v3_sym_global_state * global_state = &(core->vm_info->sym_global_state);
57 struct v3_sym_local_state * local_state = &(core->sym_local_state);
60 case SYMSPY_GLOBAL_MSR:
   /* guest PA of the VM-wide SymSpy page */
61 dst->value = global_state->global_guest_pa;
63 case SYMSPY_LOCAL_MSR:
   /* guest PA of this core's private SymSpy page */
64 dst->value = local_state->local_guest_pa;
/* MSR read handler for the SymCall configuration MSRs: returns the entry
 * RIP/RSP, CS selector, and GS/FS base values the guest previously programmed
 * for symbiotic upcalls.
 * NOTE(review): the switch(msr) header, case labels, and breaks are missing
 * from this excerpt — each assignment presumably sits under its matching
 * SYMCALL_*_MSR case.
 */
73 static int symcall_msr_read(struct guest_info * core, uint_t msr,
74 struct v3_msr * dst, void * priv_data) {
75 struct v3_symcall_state * state = &(core->sym_local_state.symcall_state);
79 dst->value = state->sym_call_rip;
82 dst->value = state->sym_call_rsp;
85 dst->value = state->sym_call_cs;
88 dst->value = state->sym_call_gs;
91 dst->value = state->sym_call_fs;
/* MSR write handler for the SymSpy page MSRs. The guest writes a page-aligned
 * guest-physical address; we (re)map the corresponding host SymSpy page into
 * the guest at that address via a shadow memory region. A previously active
 * mapping is torn down first so the page can be relocated.
 * NOTE(review): several lines (closing braces, early returns after the error
 * paths) are missing from this excerpt — confirm against the full source.
 */
100 static int symspy_msr_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data) {
102 if (msr == SYMSPY_GLOBAL_MSR) {
103 struct v3_sym_global_state * global_state = &(core->vm_info->sym_global_state);
    /* NOTE(review): "Glbal" typo in the log message below — left as-is here */
105 PrintDebug("Symbiotic Glbal MSR write for page %p\n", (void *)(addr_t)src.value);
107 if (global_state->active == 1) {
    /* page is being moved: find and remove the old shadow mapping first */
109 struct v3_shadow_region * old_reg = v3_get_shadow_region(core->vm_info, core->cpu_id,
110 (addr_t)global_state->global_guest_pa);
112 if (old_reg == NULL) {
113 PrintError("Could not find previously active symbiotic page (%p)\n",
114 (void *)(addr_t)global_state->global_guest_pa);
118 v3_delete_shadow_region(core->vm_info, old_reg);
    /* record the new location, forced to 4KB page alignment */
121 global_state->global_guest_pa = src.value;
122 global_state->global_guest_pa &= ~0xfffLL;
124 global_state->active = 1;
    /* map the host-side global page into the guest at the new address,
     * visible to all cores (V3_MEM_CORE_ANY) */
127 v3_add_shadow_mem(core->vm_info, V3_MEM_CORE_ANY, (addr_t)global_state->global_guest_pa,
128 (addr_t)(global_state->global_guest_pa + PAGE_SIZE_4KB - 1),
129 global_state->global_page_pa);
130 } else if (msr == SYMSPY_LOCAL_MSR) {
131 struct v3_sym_local_state * local_state = &(core->sym_local_state);
133 PrintDebug("Symbiotic Local MSR write for page %p\n", (void *)(addr_t)src.value);
135 if (local_state->active == 1) {
    /* same relocation dance as the global page, but per-core */
137 struct v3_shadow_region * old_reg = v3_get_shadow_region(core->vm_info, core->cpu_id,
138 (addr_t)local_state->local_guest_pa);
140 if (old_reg == NULL) {
141 PrintError("Could not find previously active symbiotic page (%p)\n",
142 (void *)(addr_t)local_state->local_guest_pa);
146 v3_delete_shadow_region(core->vm_info, old_reg);
149 local_state->local_guest_pa = src.value;
150 local_state->local_guest_pa &= ~0xfffLL;
152 local_state->active = 1;
    /* local page is mapped only for this core (core->cpu_id) */
155 v3_add_shadow_mem(core->vm_info, core->cpu_id, (addr_t)local_state->local_guest_pa,
156 (addr_t)(local_state->local_guest_pa + PAGE_SIZE_4KB - 1),
157 local_state->local_page_pa);
    /* unknown MSR: reject the write */
159 PrintError("Invalid Symbiotic MSR write (0x%x)\n", msr);
/* MSR write handler for the SymCall configuration MSRs: stores the entry
 * RIP/RSP, CS selector, and GS/FS base values that v3_sym_call() later loads
 * into the guest context when making a symbiotic upcall.
 * NOTE(review): case labels for CS/GS/FS and the break statements are missing
 * from this excerpt — confirm against the full source.
 */
167 static int symcall_msr_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data) {
168 struct v3_symcall_state * state = &(core->sym_local_state.symcall_state);
171 case SYMCALL_RIP_MSR:
172 state->sym_call_rip = src.value;
174 case SYMCALL_RSP_MSR:
175 state->sym_call_rsp = src.value;
178 state->sym_call_cs = src.value;
181 state->sym_call_gs = src.value;
184 state->sym_call_fs = src.value;
    /* unknown MSR: reject the write */
187 PrintError("Invalid Symbiotic MSR write (0x%x)\n", msr);
/* CPUID handler for the symbiotic discovery leaf (SYM_CPUID_NUM). Reports the
 * "V3V" hypervisor signature in EAX and the virtualization backend ("SVM" or
 * "VMX") in EBX, so a symbiotic guest can detect it is running on Palacios.
 * The string literals are reinterpreted as 32-bit little-endian values.
 * NOTE(review): the tail of this function (ecx/edx handling, return) is
 * missing from this excerpt.
 */
193 static int cpuid_fn(struct guest_info * core, uint32_t cpuid,
194 uint32_t * eax, uint32_t * ebx,
195 uint32_t * ecx, uint32_t * edx,
196 void * private_data) {
197 extern v3_cpu_arch_t v3_cpu_types[];
    /* hypervisor signature: first 4 bytes of "V3V" (incl. NUL) */
199 *eax = *(uint32_t *)"V3V";
201 if ((v3_cpu_types[core->cpu_id] == V3_SVM_CPU) ||
202 (v3_cpu_types[core->cpu_id] == V3_SVM_REV3_CPU)) {
203 *ebx = *(uint32_t *)"SVM";
204 } else if ((v3_cpu_types[core->cpu_id] == V3_VMX_CPU) ||
205 (v3_cpu_types[core->cpu_id] == V3_VMX_EPT_CPU)) {
206 *ebx = *(uint32_t *)"VMX";
214 static int sym_call_ret(struct guest_info * info, uint_t hcall_id, void * private_data);
215 static int sym_call_err(struct guest_info * info, uint_t hcall_id, void * private_data);
/* VM-wide initialization of the symbiotic interface. Allocates and zeroes the
 * global SymSpy page, stamps its magic, and hooks every MSR, CPUID leaf, and
 * hypercall the interface uses.
 * NOTE(review): return values of V3_AllocPages / v3_hook_msr /
 * v3_register_hypercall are not checked here; the function's return statement
 * is missing from this excerpt.
 */
219 int v3_init_sym_iface(struct v3_vm_info * vm) {
220 struct v3_sym_global_state * global_state = &(vm->sym_global_state);
221 memset(global_state, 0, sizeof(struct v3_sym_global_state));
    /* host-side backing page for the VM-wide SymSpy page */
223 global_state->global_page_pa = (addr_t)V3_AllocPages(1);
224 global_state->sym_page = (struct v3_sym_global_page *)V3_VAddr((void *)global_state->global_page_pa);
225 memset(global_state->sym_page, 0, PAGE_SIZE_4KB);
    /* magic lets the guest verify the page is genuine ("V3V", 3 bytes) */
227 memcpy(&(global_state->sym_page->magic), "V3V", 3);
    /* SymSpy page registration MSRs (both routed to the same handlers) */
229 v3_hook_msr(vm, SYMSPY_LOCAL_MSR, symspy_msr_read, symspy_msr_write, NULL);
230 v3_hook_msr(vm, SYMSPY_GLOBAL_MSR, symspy_msr_read, symspy_msr_write, NULL);
    /* discovery CPUID leaf */
232 v3_hook_cpuid(vm, SYM_CPUID_NUM, cpuid_fn, NULL);
    /* SymCall entry-context MSRs */
234 v3_hook_msr(vm, SYMCALL_RIP_MSR, symcall_msr_read, symcall_msr_write, NULL);
235 v3_hook_msr(vm, SYMCALL_RSP_MSR, symcall_msr_read, symcall_msr_write, NULL);
236 v3_hook_msr(vm, SYMCALL_CS_MSR, symcall_msr_read, symcall_msr_write, NULL);
237 v3_hook_msr(vm, SYMCALL_GS_MSR, symcall_msr_read, symcall_msr_write, NULL);
238 v3_hook_msr(vm, SYMCALL_FS_MSR, symcall_msr_read, symcall_msr_write, NULL);
    /* hypercalls the guest uses to return from a symcall (success / error) */
240 v3_register_hypercall(vm, SYM_CALL_RET_HCALL, sym_call_ret, NULL);
241 v3_register_hypercall(vm, SYM_CALL_ERR_HCALL, sym_call_err, NULL);
/* Per-core initialization of the symbiotic interface: allocates and zeroes
 * this core's local SymSpy page and stamps a per-core magic ("V3V.<cpu_id>").
 * NOTE(review): the (uint8_t *) cast below is passed where snprintf expects a
 * char * — harmless in practice but worth cleaning up; the function's return
 * statement is missing from this excerpt.
 */
247 int v3_init_sym_core(struct guest_info * core) {
248 struct v3_sym_local_state * local_state = &(core->sym_local_state);
249 memset(local_state, 0, sizeof(struct v3_sym_local_state));
251 local_state->local_page_pa = (addr_t)V3_AllocPages(1);
252 local_state->local_page = (struct v3_sym_local_page *)V3_VAddr((void *)local_state->local_page_pa);
253 memset(local_state->local_page, 0, PAGE_SIZE_4KB);
    /* bounded write: snprintf truncates if cpu_id needs more than 8 bytes */
255 snprintf((uint8_t *)&(local_state->local_page->magic), 8, "V3V.%d", core->cpu_id);
/* Marks a PCI device (bus/dev/fn) as passthrough in the bitmap published to
 * the guest via the global SymSpy page. dev_index packs the BDF exactly like
 * a PCI config address (bus:8 | dev:5..3 | fn:2..0); major/minor select the
 * byte and bit within pci_pt_map.
 * NOTE(review): the `if` guard around the bus-range PrintError (and its early
 * return) is missing from this excerpt — confirm the validated bus range
 * against the full source.
 */
261 int v3_sym_map_pci_passthrough(struct v3_vm_info * vm, uint_t bus, uint_t dev, uint_t fn) {
262 struct v3_sym_global_state * global_state = &(vm->sym_global_state);
263 uint_t dev_index = (bus << 8) + (dev << 3) + fn;
264 uint_t major = dev_index / 8;
265 uint_t minor = dev_index % 8;
268 PrintError("Invalid PCI bus %d\n", bus);
272 PrintDebug("Setting passthrough pci map for index=%d\n", dev_index);
    /* set the device's bit in the shared bitmap */
274 global_state->sym_page->pci_pt_map[major] |= 0x1 << minor;
276 PrintDebug("pt_map entry=%x\n", global_state->sym_page->pci_pt_map[major]);
278 PrintDebug("pt map vmm addr=%p\n", global_state->sym_page->pci_pt_map);
/* Clears a PCI device's passthrough bit in the SymSpy bitmap — the inverse of
 * v3_sym_map_pci_passthrough, using the same BDF-packed index scheme.
 * NOTE(review): the `if` guard around the bus-range PrintError (and its early
 * return) is missing from this excerpt.
 */
283 int v3_sym_unmap_pci_passthrough(struct v3_vm_info * vm, uint_t bus, uint_t dev, uint_t fn) {
284 struct v3_sym_global_state * global_state = &(vm->sym_global_state);
285 uint_t dev_index = (bus << 8) + (dev << 3) + fn;
286 uint_t major = dev_index / 8;
287 uint_t minor = dev_index % 8;
290 PrintError("Invalid PCI bus %d\n", bus);
    /* clear the device's bit in the shared bitmap */
294 global_state->sym_page->pci_pt_map[major] &= ~(0x1 << minor);
/* Hypercall handler for SYM_CALL_ERR_HCALL: the guest invokes this when a
 * symcall fails, passing the error code in RBX. Records the errno, dumps
 * diagnostic state, and flags the call as returned (with error) so
 * execute_symcall()'s wait loop terminates.
 */
300 static int sym_call_err(struct guest_info * core, uint_t hcall_id, void * private_data) {
301 struct v3_symcall_state * state = (struct v3_symcall_state *)&(core->sym_local_state.symcall_state);
303 PrintError("sym call error\n");
    /* guest places its error code in RBX by convention */
305 state->sym_call_errno = (int)core->vm_regs.rbx;
306 v3_print_guest_state(core);
307 v3_print_mem_map(core->vm_info);
    /* both flags: returned terminates the wait loop, error marks the outcome */
310 state->sym_call_error = 1;
311 state->sym_call_returned = 1;
/* Hypercall handler for SYM_CALL_RET_HCALL: the guest invokes this on
 * successful completion of a symcall. Simply flags the call as returned so
 * execute_symcall()'s wait loop terminates; return values stay in the guest
 * GPRs for v3_sym_call() to harvest.
 */
316 static int sym_call_ret(struct guest_info * core, uint_t hcall_id, void * private_data) {
317 struct v3_symcall_state * state = (struct v3_symcall_state *)&(core->sym_local_state.symcall_state);
319 // PrintError("Return from sym call (ID=%x)\n", hcall_id);
320 // v3_print_guest_state(info);
322 state->sym_call_returned = 1;
/* Synchronously runs the guest until the in-flight symcall completes: keeps
 * re-entering the VM until one of the return hypercalls sets
 * sym_call_returned.
 * NOTE(review): the loop's closing brace and the function's return statements
 * are missing from this excerpt; presumably returns -1 on v3_vm_enter failure
 * and 0 on completion — confirm against the full source.
 */
327 static int execute_symcall(struct guest_info * core) {
328 struct v3_symcall_state * state = (struct v3_symcall_state *)&(core->sym_local_state.symcall_state);
330 while (state->sym_call_returned == 0) {
331 if (v3_vm_enter(core) == -1) {
332 PrintError("Error in Sym call\n");
/* Makes a synchronous symbiotic upcall into the guest kernel.
 *
 * Saves the core's current execution context, installs the symcall entry
 * context the guest registered via the SYMCALL_* MSRs (RIP, RSP, CS/SS,
 * GS/FS bases), loads the call number and up to five arguments into the
 * GPRs, runs the guest to completion via execute_symcall(), harvests the
 * results back through the arg pointers, and finally restores the saved
 * context. NULL arg pointers are redirected to a scratch array so the
 * register load/store code needs no NULL checks.
 *
 * NOTE(review): several lines (early return on the inactive/busy check,
 * error-path handling after execute_symcall, the final return, and the
 * closing brace) are missing from this excerpt — the function continues
 * beyond what is shown here.
 * NOTE(review): &trash_args[i] (uint64_t *) is assigned to sym_arg_t *
 * pointers — assumes sym_arg_t is uint64_t; confirm the typedef.
 */
341 int v3_sym_call(struct guest_info * core,
342 uint64_t call_num, sym_arg_t * arg0,
343 sym_arg_t * arg1, sym_arg_t * arg2,
344 sym_arg_t * arg3, sym_arg_t * arg4) {
345 struct v3_sym_local_state * sym_state = (struct v3_sym_local_state *)&(core->sym_local_state);
346 struct v3_symcall_state * state = (struct v3_symcall_state *)&(sym_state->symcall_state);
347 struct v3_sym_cpu_context * old_ctx = (struct v3_sym_cpu_context *)&(state->old_ctx);
348 struct v3_segment sym_cs;
349 struct v3_segment sym_ss;
    /* scratch slots standing in for NULL argument pointers */
350 uint64_t trash_args[5] = { [0 ... 4] = 0 };
352 // PrintDebug("Making Sym call\n");
353 // v3_print_guest_state(info);
    /* refuse if the guest never enabled symcalls, or one is already running
     * (symcalls are not reentrant) */
355 if ((sym_state->local_page->sym_call_enabled == 0) ||
356 (state->sym_call_active == 1)) {
    /* redirect NULL arg pointers at scratch space */
360 if (!arg0) arg0 = &trash_args[0];
361 if (!arg1) arg1 = &trash_args[1];
362 if (!arg2) arg2 = &trash_args[2];
363 if (!arg3) arg3 = &trash_args[3];
364 if (!arg4) arg4 = &trash_args[4];
366 // Save the old context
367 memcpy(&(old_ctx->vm_regs), &(core->vm_regs), sizeof(struct v3_gprs));
368 memcpy(&(old_ctx->cs), &(core->segments.cs), sizeof(struct v3_segment));
369 memcpy(&(old_ctx->ss), &(core->segments.ss), sizeof(struct v3_segment));
370 old_ctx->gs_base = core->segments.gs.base;
371 old_ctx->fs_base = core->segments.fs.base;
372 old_ctx->rip = core->rip;
373 old_ctx->cpl = core->cpl;
374 old_ctx->flags = core->ctrl_regs.rflags;
376 // Setup the sym call context
377 core->rip = state->sym_call_rip;
378 core->vm_regs.rsp = state->sym_call_rsp; // old contest rsp is saved in vm_regs
    /* resolve the guest-registered CS selector into a full segment;
     * SS is conventionally the next GDT descriptor (selector + 8) */
380 v3_translate_segment(core, state->sym_call_cs, &sym_cs);
381 memcpy(&(core->segments.cs), &sym_cs, sizeof(struct v3_segment));
383 v3_translate_segment(core, state->sym_call_cs + 8, &sym_ss);
384 memcpy(&(core->segments.ss), &sym_ss, sizeof(struct v3_segment));
386 core->segments.gs.base = state->sym_call_gs;
387 core->segments.fs.base = state->sym_call_fs;
    /* argument-passing convention: RAX = call number, RBX..RDI = args 0..4 */
390 core->vm_regs.rax = call_num;
391 core->vm_regs.rbx = *arg0;
392 core->vm_regs.rcx = *arg1;
393 core->vm_regs.rdx = *arg2;
394 core->vm_regs.rsi = *arg3;
395 core->vm_regs.rdi = *arg4;
397 // Mark sym call as active
398 state->sym_call_active = 1;
399 state->sym_call_returned = 0;
401 // PrintDebug("Sym state\n");
402 // v3_print_guest_state(core);
404 // Do the sym call entry
405 if (execute_symcall(core) == -1) {
406 PrintError("SYMCALL error\n");
411 state->sym_call_active = 0;
    /* harvest return values from the same registers the args went in */
413 *arg0 = core->vm_regs.rbx;
414 *arg1 = core->vm_regs.rcx;
415 *arg2 = core->vm_regs.rdx;
416 *arg3 = core->vm_regs.rsi;
417 *arg4 = core->vm_regs.rdi;
419 // restore guest state
420 memcpy(&(core->vm_regs), &(old_ctx->vm_regs), sizeof(struct v3_gprs));
421 memcpy(&(core->segments.cs), &(old_ctx->cs), sizeof(struct v3_segment));
422 memcpy(&(core->segments.ss), &(old_ctx->ss), sizeof(struct v3_segment));
423 core->segments.gs.base = old_ctx->gs_base;
424 core->segments.fs.base = old_ctx->fs_base;
425 core->rip = old_ctx->rip;
426 core->cpl = old_ctx->cpl;
427 core->ctrl_regs.rflags = old_ctx->flags;
431 // PrintError("restoring guest state\n");
432 // v3_print_guest_state(core);