/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
21 #include <palacios/vmm.h>
22 #include <palacios/vmm_msr.h>
23 #include <palacios/vmm_mem.h>
24 #include <palacios/vmm_hypercall.h>
25 #include <palacios/vm_guest.h>
26 #include <palacios/vmm_sprintf.h>
29 #define SYMSPY_GLOBAL_MSR 0x534
30 #define SYMSPY_LOCAL_MSR 0x535
32 #define SYM_CPUID_NUM 0x90000000
// A successful symcall returns via the RET_HCALL, with the return values in registers
// A symcall error returns via the ERR_HCALL with the error code in rbx
/* Notes: We use a combination of SYSCALL and SYSENTER semantics.
 * SYSCALL just sets an EIP, CS/SS seg, and GS seg via swapgs;
 * the RSP is loaded via the structure pointed to by GS.
 * This is safe because it assumes that system calls are guaranteed to be made with an empty kernel stack.
 * We cannot make that assumption with a symcall, so we have to have our own stack area somewhere.
 * SYSENTER does not really use the GS base MSRs, but we do to map to 64 bit kernels.
 */
46 #define SYMCALL_RIP_MSR 0x536
47 #define SYMCALL_RSP_MSR 0x537
48 #define SYMCALL_CS_MSR 0x538
49 #define SYMCALL_GS_MSR 0x539
50 #define SYMCALL_FS_MSR 0x540
52 static int symspy_msr_read(struct guest_info * core, uint_t msr,
53 struct v3_msr * dst, void * priv_data) {
54 struct v3_sym_global_state * global_state = &(core->vm_info->sym_global_state);
55 struct v3_sym_local_state * local_state = &(core->sym_local_state);
58 case SYMSPY_GLOBAL_MSR:
59 dst->value = global_state->global_guest_pa;
61 case SYMSPY_LOCAL_MSR:
62 dst->value = local_state->local_guest_pa;
71 static int symcall_msr_read(struct guest_info * core, uint_t msr,
72 struct v3_msr * dst, void * priv_data) {
73 struct v3_symcall_state * state = &(core->sym_local_state.symcall_state);
77 dst->value = state->sym_call_rip;
80 dst->value = state->sym_call_rsp;
83 dst->value = state->sym_call_cs;
86 dst->value = state->sym_call_gs;
89 dst->value = state->sym_call_fs;
98 static int symspy_msr_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data) {
100 if (msr == SYMSPY_GLOBAL_MSR) {
101 struct v3_sym_global_state * global_state = &(core->vm_info->sym_global_state);
103 PrintDebug("Symbiotic Glbal MSR write for page %p\n", (void *)(addr_t)src.value);
105 if (global_state->active == 1) {
107 struct v3_shadow_region * old_reg = v3_get_shadow_region(core->vm_info, core->cpu_id,
108 (addr_t)global_state->global_guest_pa);
110 if (old_reg == NULL) {
111 PrintError("Could not find previously active symbiotic page (%p)\n",
112 (void *)(addr_t)global_state->global_guest_pa);
116 v3_delete_shadow_region(core->vm_info, old_reg);
119 global_state->global_guest_pa = src.value;
120 global_state->global_guest_pa &= ~0xfffLL;
122 global_state->active = 1;
125 v3_add_shadow_mem(core->vm_info, V3_MEM_CORE_ANY, (addr_t)global_state->global_guest_pa,
126 (addr_t)(global_state->global_guest_pa + PAGE_SIZE_4KB - 1),
127 global_state->global_page_pa);
128 } else if (msr == SYMSPY_LOCAL_MSR) {
129 struct v3_sym_local_state * local_state = &(core->sym_local_state);
131 PrintDebug("Symbiotic Local MSR write for page %p\n", (void *)(addr_t)src.value);
133 if (local_state->active == 1) {
135 struct v3_shadow_region * old_reg = v3_get_shadow_region(core->vm_info, core->cpu_id,
136 (addr_t)local_state->local_guest_pa);
138 if (old_reg == NULL) {
139 PrintError("Could not find previously active symbiotic page (%p)\n",
140 (void *)(addr_t)local_state->local_guest_pa);
144 v3_delete_shadow_region(core->vm_info, old_reg);
147 local_state->local_guest_pa = src.value;
148 local_state->local_guest_pa &= ~0xfffLL;
150 local_state->active = 1;
153 v3_add_shadow_mem(core->vm_info, core->cpu_id, (addr_t)local_state->local_guest_pa,
154 (addr_t)(local_state->local_guest_pa + PAGE_SIZE_4KB - 1),
155 local_state->local_page_pa);
157 PrintError("Invalid Symbiotic MSR write (0x%x)\n", msr);
165 static int symcall_msr_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data) {
166 struct v3_symcall_state * state = &(core->sym_local_state.symcall_state);
169 case SYMCALL_RIP_MSR:
170 state->sym_call_rip = src.value;
172 case SYMCALL_RSP_MSR:
173 state->sym_call_rsp = src.value;
176 state->sym_call_cs = src.value;
179 state->sym_call_gs = src.value;
182 state->sym_call_fs = src.value;
185 PrintError("Invalid Symbiotic MSR write (0x%x)\n", msr);
191 static int cpuid_fn(struct guest_info * core, uint32_t cpuid,
192 uint32_t * eax, uint32_t * ebx,
193 uint32_t * ecx, uint32_t * edx,
194 void * private_data) {
195 extern v3_cpu_arch_t v3_cpu_types[];
197 *eax = *(uint32_t *)"V3V";
199 if ((v3_cpu_types[core->cpu_id] == V3_SVM_CPU) ||
200 (v3_cpu_types[core->cpu_id] == V3_SVM_REV3_CPU)) {
201 *ebx = *(uint32_t *)"SVM";
202 } else if ((v3_cpu_types[core->cpu_id] == V3_VMX_CPU) ||
203 (v3_cpu_types[core->cpu_id] == V3_VMX_EPT_CPU)) {
204 *ebx = *(uint32_t *)"VMX";
212 static int sym_call_ret(struct guest_info * info, uint_t hcall_id, void * private_data);
213 static int sym_call_err(struct guest_info * info, uint_t hcall_id, void * private_data);
217 int v3_init_sym_iface(struct v3_vm_info * vm) {
218 struct v3_sym_global_state * global_state = &(vm->sym_global_state);
219 memset(global_state, 0, sizeof(struct v3_sym_global_state));
221 global_state->global_page_pa = (addr_t)V3_AllocPages(1);
222 global_state->sym_page = (struct v3_sym_global_page *)V3_VAddr((void *)global_state->global_page_pa);
223 memset(global_state->sym_page, 0, PAGE_SIZE_4KB);
225 memcpy(&(global_state->sym_page->magic), "V3V", 3);
227 v3_hook_msr(vm, SYMSPY_LOCAL_MSR, symspy_msr_read, symspy_msr_write, NULL);
228 v3_hook_msr(vm, SYMSPY_GLOBAL_MSR, symspy_msr_read, symspy_msr_write, NULL);
230 v3_hook_cpuid(vm, SYM_CPUID_NUM, cpuid_fn, NULL);
232 v3_hook_msr(vm, SYMCALL_RIP_MSR, symcall_msr_read, symcall_msr_write, NULL);
233 v3_hook_msr(vm, SYMCALL_RSP_MSR, symcall_msr_read, symcall_msr_write, NULL);
234 v3_hook_msr(vm, SYMCALL_CS_MSR, symcall_msr_read, symcall_msr_write, NULL);
235 v3_hook_msr(vm, SYMCALL_GS_MSR, symcall_msr_read, symcall_msr_write, NULL);
236 v3_hook_msr(vm, SYMCALL_FS_MSR, symcall_msr_read, symcall_msr_write, NULL);
238 v3_register_hypercall(vm, SYMCALL_RET_HCALL, sym_call_ret, NULL);
239 v3_register_hypercall(vm, SYMCALL_ERR_HCALL, sym_call_err, NULL);
245 int v3_init_sym_core(struct guest_info * core) {
246 struct v3_sym_local_state * local_state = &(core->sym_local_state);
247 memset(local_state, 0, sizeof(struct v3_sym_local_state));
249 local_state->local_page_pa = (addr_t)V3_AllocPages(1);
250 local_state->local_page = (struct v3_sym_local_page *)V3_VAddr((void *)local_state->local_page_pa);
251 memset(local_state->local_page, 0, PAGE_SIZE_4KB);
253 snprintf((uint8_t *)&(local_state->local_page->magic), 8, "V3V.%d", core->cpu_id);
259 int v3_sym_map_pci_passthrough(struct v3_vm_info * vm, uint_t bus, uint_t dev, uint_t fn) {
260 struct v3_sym_global_state * global_state = &(vm->sym_global_state);
261 uint_t dev_index = (bus << 8) + (dev << 3) + fn;
262 uint_t major = dev_index / 8;
263 uint_t minor = dev_index % 8;
266 PrintError("Invalid PCI bus %d\n", bus);
270 PrintDebug("Setting passthrough pci map for index=%d\n", dev_index);
272 global_state->sym_page->pci_pt_map[major] |= 0x1 << minor;
274 PrintDebug("pt_map entry=%x\n", global_state->sym_page->pci_pt_map[major]);
276 PrintDebug("pt map vmm addr=%p\n", global_state->sym_page->pci_pt_map);
281 int v3_sym_unmap_pci_passthrough(struct v3_vm_info * vm, uint_t bus, uint_t dev, uint_t fn) {
282 struct v3_sym_global_state * global_state = &(vm->sym_global_state);
283 uint_t dev_index = (bus << 8) + (dev << 3) + fn;
284 uint_t major = dev_index / 8;
285 uint_t minor = dev_index % 8;
288 PrintError("Invalid PCI bus %d\n", bus);
292 global_state->sym_page->pci_pt_map[major] &= ~(0x1 << minor);
298 static int sym_call_err(struct guest_info * core, uint_t hcall_id, void * private_data) {
299 struct v3_symcall_state * state = (struct v3_symcall_state *)&(core->sym_local_state.symcall_state);
301 PrintError("sym call error\n");
303 state->sym_call_errno = (int)core->vm_regs.rbx;
304 v3_print_guest_state(core);
305 v3_print_mem_map(core->vm_info);
308 state->sym_call_error = 1;
309 state->sym_call_returned = 1;
314 static int sym_call_ret(struct guest_info * core, uint_t hcall_id, void * private_data) {
315 struct v3_symcall_state * state = (struct v3_symcall_state *)&(core->sym_local_state.symcall_state);
317 // PrintError("Return from sym call (ID=%x)\n", hcall_id);
318 // v3_print_guest_state(info);
320 state->sym_call_returned = 1;
325 static int execute_symcall(struct guest_info * core) {
326 struct v3_symcall_state * state = (struct v3_symcall_state *)&(core->sym_local_state.symcall_state);
328 while (state->sym_call_returned == 0) {
329 if (v3_vm_enter(core) == -1) {
330 PrintError("Error in Sym call\n");
339 int v3_sym_call(struct guest_info * core,
340 uint64_t call_num, sym_arg_t * arg0,
341 sym_arg_t * arg1, sym_arg_t * arg2,
342 sym_arg_t * arg3, sym_arg_t * arg4) {
343 struct v3_sym_local_state * sym_state = (struct v3_sym_local_state *)&(core->sym_local_state);
344 struct v3_symcall_state * state = (struct v3_symcall_state *)&(sym_state->symcall_state);
345 struct v3_sym_cpu_context * old_ctx = (struct v3_sym_cpu_context *)&(state->old_ctx);
346 struct v3_segment sym_cs;
347 struct v3_segment sym_ss;
348 uint64_t trash_args[5] = { [0 ... 4] = 0 };
350 // PrintDebug("Making Sym call\n");
351 // v3_print_guest_state(info);
353 if ((sym_state->local_page->sym_call_enabled == 0) ||
354 (state->sym_call_active == 1)) {
358 if (!arg0) arg0 = &trash_args[0];
359 if (!arg1) arg1 = &trash_args[1];
360 if (!arg2) arg2 = &trash_args[2];
361 if (!arg3) arg3 = &trash_args[3];
362 if (!arg4) arg4 = &trash_args[4];
364 // Save the old context
365 memcpy(&(old_ctx->vm_regs), &(core->vm_regs), sizeof(struct v3_gprs));
366 memcpy(&(old_ctx->cs), &(core->segments.cs), sizeof(struct v3_segment));
367 memcpy(&(old_ctx->ss), &(core->segments.ss), sizeof(struct v3_segment));
368 old_ctx->gs_base = core->segments.gs.base;
369 old_ctx->fs_base = core->segments.fs.base;
370 old_ctx->rip = core->rip;
371 old_ctx->cpl = core->cpl;
372 old_ctx->flags = core->ctrl_regs.rflags;
374 // Setup the sym call context
375 core->rip = state->sym_call_rip;
376 core->vm_regs.rsp = state->sym_call_rsp; // old contest rsp is saved in vm_regs
378 v3_translate_segment(core, state->sym_call_cs, &sym_cs);
379 memcpy(&(core->segments.cs), &sym_cs, sizeof(struct v3_segment));
381 v3_translate_segment(core, state->sym_call_cs + 8, &sym_ss);
382 memcpy(&(core->segments.ss), &sym_ss, sizeof(struct v3_segment));
384 core->segments.gs.base = state->sym_call_gs;
385 core->segments.fs.base = state->sym_call_fs;
388 core->vm_regs.rax = call_num;
389 core->vm_regs.rbx = *arg0;
390 core->vm_regs.rcx = *arg1;
391 core->vm_regs.rdx = *arg2;
392 core->vm_regs.rsi = *arg3;
393 core->vm_regs.rdi = *arg4;
395 // Mark sym call as active
396 state->sym_call_active = 1;
397 state->sym_call_returned = 0;
399 // PrintDebug("Sym state\n");
400 // v3_print_guest_state(core);
402 // Do the sym call entry
403 if (execute_symcall(core) == -1) {
404 PrintError("SYMCALL error\n");
409 state->sym_call_active = 0;
411 *arg0 = core->vm_regs.rbx;
412 *arg1 = core->vm_regs.rcx;
413 *arg2 = core->vm_regs.rdx;
414 *arg3 = core->vm_regs.rsi;
415 *arg4 = core->vm_regs.rdi;
417 // restore guest state
418 memcpy(&(core->vm_regs), &(old_ctx->vm_regs), sizeof(struct v3_gprs));
419 memcpy(&(core->segments.cs), &(old_ctx->cs), sizeof(struct v3_segment));
420 memcpy(&(core->segments.ss), &(old_ctx->ss), sizeof(struct v3_segment));
421 core->segments.gs.base = old_ctx->gs_base;
422 core->segments.fs.base = old_ctx->fs_base;
423 core->rip = old_ctx->rip;
424 core->cpl = old_ctx->cpl;
425 core->ctrl_regs.rflags = old_ctx->flags;
429 // PrintError("restoring guest state\n");
430 // v3_print_guest_state(core);