void v3_print_ctrl_regs(struct guest_info * info);
void v3_print_GPRs(struct guest_info * info);
+void v3_print_stack(struct guest_info * info);
+
#endif // ! __V3VEE__
#endif
uint8_t pci_pt_map[(4 * 256) / 8]; // we're hardcoding this: (4 busses, 256 max devs)
- uint64_t sym_call_rip;
- uint64_t sym_call_cs;
- uint64_t sym_call_rsp;
- uint64_t sym_call_gs;
- uint64_t sym_call_ret_fn;
+
} __attribute__((packed));
struct v3_sym_context old_ctx;
uint64_t args[6];
+
+ uint64_t sym_call_rip;
+ uint64_t sym_call_cs;
+ uint64_t sym_call_rsp;
+ uint64_t sym_call_gs;
+ uint64_t sym_call_fs;
+ uint64_t sym_call_ret_fn;
+
int (*notifier)(struct guest_info * info, void * private_data);
void * private_data;
int v3_sym_unmap_pci_passthrough(struct guest_info * info, uint_t bus, uint_t dev, uint_t fn);
+/* Symcall numbers */
+#define SYMCALL_TEST 1
+#define SYMCALL_MEM_LOOKUP 10
+/* ** */
+
int v3_sym_call(struct guest_info * info,
- uint64_t arg0, uint64_t arg1,
- uint64_t arg2, uint64_t arg3,
- uint64_t arg4, uint64_t arg5,
+ uint64_t call_num, uint64_t arg0,
+ uint64_t arg1, uint64_t arg2,
+ uint64_t arg3, uint64_t arg4,
int (*notifier)(struct guest_info * info, void * private_data),
void * private_data);
// PrintGuestPageTables(info, info->shdw_pg_state.guest_cr3);
} else if (evt->scan_code == 0x43) { // F9 Sym test
PrintDebug("Testing sym call\n");
- v3_sym_call0(info, 0, NULL, NULL);
+ v3_sym_call5(info, SYMCALL_TEST, 0x1111, 0x2222, 0x3333, 0x4444, 0x5555, NULL, NULL);
+ } else if (evt->scan_code == 0x42) { // F8 Sym test2
+ PrintDebug("Testing sym call\n");
+ v3_sym_call1(info, SYMCALL_MEM_LOOKUP, 0, NULL, NULL);
}
+
+
addr_t irq_state = v3_lock_irqsave(state->kb_lock);
if ( (state->status.enabled == 1) // onboard is enabled
#include <palacios/vmm_config.h>
#include <palacios/svm_io.h>
+#include <palacios/vmm_sprintf.h>
// This is a global pointer to the host's VMCB
vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
addr_t host_addr;
addr_t linear_addr = 0;
-
+
info->run_state = VM_ERROR;
PrintDebug("SVM ERROR!!\n");
PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
v3_dump_mem((uint8_t *)host_addr, 15);
+
+ v3_print_stack(info);
+
+
break;
}
}
/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int v3_is_svm_capable() {
- // Dinda
uint_t vm_cr_low = 0, vm_cr_high = 0;
uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
#ifdef CONFIG_SYMBIOTIC
if (0) {
+ // if (sym_started == 1) {
// ignore interrupt injection if we just started a symcall
PrintDebug("SVM Exit: %s (rip=%p) (info1=%p) (info2=%p)\n", vmexit_code_to_str(exit_code),
(void *)(addr_t)info->rip, (void *)(addr_t)guest_ctrl->exit_info1,
(void *)(addr_t)guest_ctrl->exit_info2);
- if (exit_code == VMEXIT_EXCP14) {
- PrintGuestPageTree(info, guest_ctrl->exit_info2, info->shdw_pg_state.guest_cr3);
- }
+ /* if (exit_code == VMEXIT_EXCP14) {
+ PrintGuestPageTree(info, guest_ctrl->exit_info2, info->shdw_pg_state.guest_cr3);
+ }*/
+
}
#endif
seg_ptr=(struct v3_segment *)segs;
char *seg_names[] = {"CS", "DS" , "ES", "FS", "GS", "SS" , "LDTR", "GDTR", "IDTR", "TR", NULL};
- PrintDebug("Segments\n");
+ V3_Print("Segments\n");
for (i = 0; seg_names[i] != NULL; i++) {
- PrintDebug("\t%s: Sel=%x, base=%p, limit=%x (long_mode=%d, db=%d)\n", seg_names[i], seg_ptr[i].selector,
+ V3_Print("\t%s: Sel=%x, base=%p, limit=%x (long_mode=%d, db=%d)\n", seg_names[i], seg_ptr[i].selector,
(void *)(addr_t)seg_ptr[i].base, seg_ptr[i].limit,
seg_ptr[i].long_mode, seg_ptr[i].db);
reg_ptr = (v3_reg_t *)regs;
- PrintDebug("32 bit Ctrl Regs:\n");
+ V3_Print("32 bit Ctrl Regs:\n");
for (i = 0; reg_names[i] != NULL; i++) {
- PrintDebug("\t%s=0x%p\n", reg_names[i], (void *)(addr_t)reg_ptr[i]);
+ V3_Print("\t%s=0x%p\n", reg_names[i], (void *)(addr_t)reg_ptr[i]);
}
- PrintDebug("\tEFER=0x%p\n", (void*)(addr_t)(guest_state->efer));
+ V3_Print("\tEFER=0x%p\n", (void*)(addr_t)(guest_state->efer));
}
/* Dump the guest's full architectural state to the host console:
 * RIP (raw and CS-linear), segment registers, control registers,
 * shadow-paging guest registers (if shadow paging is active), and GPRs.
 * Uses V3_Print so the output appears even in non-debug builds. */
void v3_print_guest_state(struct guest_info * info) {
    addr_t linear_addr = 0;

    V3_Print("RIP: %p\n", (void *)(addr_t)(info->rip));
    linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
    V3_Print("RIP Linear: %p\n", (void *)linear_addr);

    v3_print_segments(info);
    v3_print_ctrl_regs(info);

    if (info->shdw_pg_mode == SHADOW_PAGING) {
	V3_Print("Shadow Paging Guest Registers:\n");
	V3_Print("\tGuest CR0=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr0));
	V3_Print("\tGuest CR3=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr3));
	V3_Print("\tGuest EFER=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_efer.value));
	// CR4
    }

    v3_print_GPRs(info);
}
+void v3_print_stack(struct guest_info * info) {
+ addr_t linear_addr = 0;
+ addr_t host_addr = 0;
+ int i = 0;
+ v3_cpu_mode_t cpu_mode = v3_get_vm_cpu_mode(info);
+
+
+ linear_addr = get_addr_linear(info, info->vm_regs.rsp, &(info->segments.ss));
+
+ if (info->mem_mode == PHYSICAL_MEM) {
+ guest_pa_to_host_va(info, linear_addr, &host_addr);
+ } else if (info->mem_mode == VIRTUAL_MEM) {
+ guest_va_to_host_va(info, linear_addr, &host_addr);
+ }
+
+ V3_Print("Host Address of rsp = 0x%p\n", (void *)host_addr);
+ V3_Print("Stack at %p:\n", (void *)host_addr);
+
+ // We start i at one because the current stack pointer points to an unused stack element
+ for (i = 0; i <= 24; i++) {
+ if (cpu_mode == LONG) {
+ V3_Print("\t%p\n", (void *)*(uint64_t *)(host_addr + (i * 8)));
+ } else if (cpu_mode == REAL) {
+ V3_Print("Don't currently handle 16 bit stacks... \n");
+ } else {
+ // 32 bit stacks...
+ V3_Print("\t%.8x\n", *(uint32_t *)(host_addr + (i * 4)));
+ }
+ }
+
+}
+
#ifdef __V3_32BIT__
void v3_print_GPRs(struct guest_info * info) {
reg_ptr= (v3_reg_t *)regs;
- PrintDebug("32 bit GPRs:\n");
+ V3_Print("32 bit GPRs:\n");
for (i = 0; reg_names[i] != NULL; i++) {
- PrintDebug("\t%s=0x%p\n", reg_names[i], (void *)(addr_t)reg_ptr[i]);
+ V3_Print("\t%s=0x%p\n", reg_names[i], (void *)(addr_t)reg_ptr[i]);
}
}
char * reg_names[] = { "RDI", "RSI", "RBP", "RSP", "RBX", "RDX", "RCX", "RAX", \
"R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15", NULL};
- reg_ptr= (v3_reg_t *)regs;
+ reg_ptr = (v3_reg_t *)regs;
- PrintDebug("64 bit GPRs:\n");
+ V3_Print("64 bit GPRs:\n");
for (i = 0; reg_names[i] != NULL; i++) {
- PrintDebug("\t%s=0x%p\n", reg_names[i], (void *)(addr_t)reg_ptr[i]);
+ V3_Print("\t%s=0x%p\n", reg_names[i], (void *)(addr_t)reg_ptr[i]);
}
}
struct hypercall * hcall = get_hypercall(info, hypercall_id);
if (!hcall) {
- PrintError("Invalid Hypercall (%d not registered)\n", hypercall_id);
+ PrintError("Invalid Hypercall (%d(0x%x) not registered)\n",
+ hypercall_id, hypercall_id);
return -1;
}
#define SYM_CALL_RET_HCALL 0x535
+/* Notes: We use a combination of SYSCALL and SYSENTER Semantics
+ * SYSCALL just sets an EIP, CS/SS seg, and GS seg via swapgs
+ * the RSP is loaded via the structure pointed to by GS
+ * This is safe because it assumes that system calls are guaranteed to be made with an empty kernel stack.
+ * We cannot make that assumption with a symcall, so we have to have our own stack area somewhere.
+ * SYSENTER does not really use the GS base MSRs, but we do to map to 64 bit kernels
+ */
+
+#define SYMCALL_RIP_MSR 0x536
+#define SYMCALL_RSP_MSR 0x537
+#define SYMCALL_CS_MSR 0x538
+#define SYMCALL_GS_MSR 0x539
+#define SYMCALL_FS_MSR 0x540
+
+
/* Read handler for the symbiotic MSRs (hooked via v3_hook_msr).
 * Returns the cached symcall setup values written earlier by the guest.
 * Returns 0 on success, -1 for an MSR this handler does not own. */
static int msr_read(uint_t msr, struct v3_msr * dst, void * priv_data) {
    struct guest_info * info = (struct guest_info *)priv_data;
    struct v3_sym_state * state = &(info->sym_state);

    switch (msr) {
	case SYM_PAGE_MSR:
	    dst->value = state->guest_pg_addr;
	    break;
	case SYMCALL_RIP_MSR:
	    dst->value = state->sym_call_rip;
	    break;
	case SYMCALL_RSP_MSR:
	    dst->value = state->sym_call_rsp;
	    break;
	case SYMCALL_CS_MSR:
	    dst->value = state->sym_call_cs;
	    break;
	case SYMCALL_GS_MSR:
	    dst->value = state->sym_call_gs;
	    break;
	case SYMCALL_FS_MSR:
	    dst->value = state->sym_call_fs;
	    break;
	default:
	    // Log like the write path does, so bad reads are diagnosable.
	    PrintError("Invalid Symbiotic MSR read (0x%x)\n", msr);
	    return -1;
    }

    return 0;
}
struct guest_info * info = (struct guest_info *)priv_data;
struct v3_sym_state * state = &(info->sym_state);
+ if (msr == SYM_PAGE_MSR) {
+ PrintDebug("Symbiotic MSR write for page %p\n", (void *)src.value);
- PrintDebug("Symbiotic MSR write for page %p\n", (void *)src.value);
+ if (state->active == 1) {
+ // unmap page
+ struct v3_shadow_region * old_reg = v3_get_shadow_region(info, (addr_t)state->guest_pg_addr);
- if (state->active == 1) {
- // unmap page
- struct v3_shadow_region * old_reg = v3_get_shadow_region(info, (addr_t)state->guest_pg_addr);
+ if (old_reg == NULL) {
+ PrintError("Could not find previously active symbiotic page (%p)\n", (void *)state->guest_pg_addr);
+ return -1;
+ }
- if (old_reg == NULL) {
- PrintError("Could not find previously active symbiotic page (%p)\n", (void *)state->guest_pg_addr);
- return -1;
+ v3_delete_shadow_region(info, old_reg);
}
- v3_delete_shadow_region(info, old_reg);
+ state->guest_pg_addr = src.value;
+ state->guest_pg_addr &= ~0xfffLL;
+
+ state->active = 1;
+
+ // map page
+ v3_add_shadow_mem(info, (addr_t)state->guest_pg_addr,
+ (addr_t)(state->guest_pg_addr + PAGE_SIZE_4KB - 1),
+ state->sym_page_pa);
+
+
+ } else if (msr == SYMCALL_RIP_MSR) {
+ state->sym_call_rip = src.value;
+ } else if (msr == SYMCALL_RSP_MSR) {
+ state->sym_call_rsp = src.value;
+ } else if (msr == SYMCALL_CS_MSR) {
+ state->sym_call_cs = src.value;
+ } else if (msr == SYMCALL_GS_MSR) {
+ state->sym_call_gs = src.value;
+ } else if (msr == SYMCALL_FS_MSR) {
+ state->sym_call_fs = src.value;
+ } else {
+ PrintError("Invalid Symbiotic MSR write (0x%x)\n", msr);
+ return -1;
}
- state->guest_pg_addr = src.value;
- state->guest_pg_addr &= ~0xfffLL;
-
- state->active = 1;
-
- // map page
- v3_add_shadow_mem(info, (addr_t)state->guest_pg_addr,
- (addr_t)(state->guest_pg_addr + PAGE_SIZE_4KB - 1),
- state->sym_page_pa);
-
return 0;
}
uint32_t * eax, uint32_t * ebx,
uint32_t * ecx, uint32_t * edx,
void * private_data) {
+ extern v3_cpu_arch_t v3_cpu_types[];
+
+ *eax = *(uint32_t *)"V3V";
+
+ if ((v3_cpu_types[info->cpu_id] == V3_SVM_CPU) ||
+ (v3_cpu_types[info->cpu_id] == V3_SVM_REV3_CPU)) {
+ *ebx = *(uint32_t *)"SVM";
+ } else if ((v3_cpu_types[info->cpu_id] == V3_VMX_CPU) ||
+ (v3_cpu_types[info->cpu_id] == V3_VMX_EPT_CPU)) {
+ *ebx = *(uint32_t *)"VMX";
+ }
- memset(eax, 0, sizeof(uint32_t));
- memcpy(eax, "V3V", 3);
return 0;
}
v3_hook_cpuid(info, SYM_CPUID_NUM, cpuid_fn, info);
+ v3_hook_msr(info, SYMCALL_RIP_MSR, msr_read, msr_write, info);
+ v3_hook_msr(info, SYMCALL_RSP_MSR, msr_read, msr_write, info);
+ v3_hook_msr(info, SYMCALL_CS_MSR, msr_read, msr_write, info);
+ v3_hook_msr(info, SYMCALL_GS_MSR, msr_read, msr_write, info);
+ v3_hook_msr(info, SYMCALL_FS_MSR, msr_read, msr_write, info);
v3_register_hypercall(info, SYM_CALL_RET_HCALL, sym_call_ret, NULL);
int v3_sym_call(struct guest_info * info,
- uint64_t arg0, uint64_t arg1,
- uint64_t arg2, uint64_t arg3,
- uint64_t arg4, uint64_t arg5,
+ uint64_t call_num, uint64_t arg0,
+ uint64_t arg1, uint64_t arg2,
+ uint64_t arg3, uint64_t arg4,
int (*notifier)(struct guest_info * info, void * private_data),
void * private_data) {
struct v3_sym_state * state = (struct v3_sym_state *)&(info->sym_state);
return -1;
}
- state->args[0] = arg0;
- state->args[1] = arg1;
- state->args[2] = arg2;
- state->args[3] = arg3;
- state->args[4] = arg4;
- state->args[5] = arg5;
+ state->args[0] = call_num;
+ state->args[1] = arg0;
+ state->args[2] = arg1;
+ state->args[3] = arg2;
+ state->args[4] = arg3;
+ state->args[5] = arg4;
state->notifier = notifier;
state->private_data = private_data;
// Setup the sym call context
- info->rip = state->sym_page->sym_call_rip;
- info->vm_regs.rsp = state->sym_page->sym_call_rsp;
+ info->rip = state->sym_call_rip;
+ info->vm_regs.rsp = state->sym_call_rsp;
- v3_translate_segment(info, state->sym_page->sym_call_cs, &sym_cs);
+ v3_translate_segment(info, state->sym_call_cs, &sym_cs);
memcpy(&(info->segments.cs), &sym_cs, sizeof(struct v3_segment));
- v3_translate_segment(info, state->sym_page->sym_call_cs + 8, &sym_ss);
+ v3_translate_segment(info, state->sym_call_cs + 8, &sym_ss);
memcpy(&(info->segments.ss), &sym_ss, sizeof(struct v3_segment));
- info->segments.gs.base = state->sym_page->sym_call_gs;
- info->segments.fs.base = 0;
+ info->segments.gs.base = state->sym_call_gs;
+ info->segments.fs.base = state->sym_call_fs;
info->cpl = 0;
info->vm_regs.rax = state->args[0];