void v3_init_svm_hooks(struct v3_ctrl_ops * vmm_ops);
int v3_is_svm_capable();
+int v3_svm_enter(struct guest_info * info);
+
#endif
/******************************************/
-int v3_handle_svm_exit(struct guest_info * info);
+int v3_handle_svm_exit(struct guest_info * info, addr_t exit_code,
+ addr_t exit_info1, addr_t exit_info2);
const char * vmexit_code_to_str(uint_t exit_code);
int v3_init_svm_io_map(struct guest_info * info);
-int v3_handle_svm_io_in(struct guest_info * info);
-int v3_handle_svm_io_ins(struct guest_info * info);
-int v3_handle_svm_io_out(struct guest_info * info);
-int v3_handle_svm_io_outs(struct guest_info * info);
+int v3_handle_svm_io_in(struct guest_info * info, struct svm_io_info * io_info);
+int v3_handle_svm_io_ins(struct guest_info * info, struct svm_io_info * io_info);
+int v3_handle_svm_io_out(struct guest_info * info, struct svm_io_info * io_info);
+int v3_handle_svm_io_outs(struct guest_info * info, struct svm_io_info * io_info);
#endif // !__V3VEE__
void v3_interrupt_cpu(struct guest_info * vm, int logical_cpu);
+int v3_vm_enter(struct guest_info * info);
+
#endif //!__V3VEE__
uint64_t gs_base;
uint64_t fs_base;
uint64_t rip;
+ uint64_t flags;
uint8_t cpl;
};
uint64_t guest_pg_addr;
struct {
- uint_t active : 1;
- uint_t call_pending : 1;
- uint_t call_active : 1;
+ uint_t active : 1; // activated when symbiotic page MSR is written
+	uint_t sym_call_active : 1;    // a symcall is currently executing on this core
+	uint_t sym_call_returned : 1;  // set by the symcall return/error hypercall handlers
+	uint_t sym_call_error : 1;     // the guest signaled an error (see sym_call_errno)
} __attribute__((packed));
struct v3_sym_context old_ctx;
- uint64_t args[6];
+
+    int sym_call_errno;    // error code passed back by the guest (in rbx) via the ERR hypercall
uint64_t sym_call_rip;
uint64_t sym_call_cs;
uint64_t sym_call_rsp;
uint64_t sym_call_gs;
uint64_t sym_call_fs;
- uint64_t sym_call_ret_fn;
-
- int (*notifier)(struct guest_info * info, void * private_data);
-
- void * private_data;
-
};
int v3_init_sym_iface(struct guest_info * info);
+typedef uint64_t sym_arg_t;
-#define v3_sym_call0(info, call_num, cb, priv) \
- v3_sym_call(info, call_num, 0, 0, 0, 0, 0, cb, priv)
-#define v3_sym_call1(info, call_num, arg1, cb, priv) \
- v3_sym_call(info, call_num, arg1, 0, 0, 0, 0, cb, priv)
-#define v3_sym_call2(info, call_num, arg1, arg2, cb, priv) \
- v3_sym_call(info, call_num, arg1, arg2, 0, 0, 0, cb, priv)
-#define v3_sym_call3(info, call_num, arg1, arg2, arg3, cb, priv) \
- v3_sym_call(info, call_num, arg1, arg2, arg3, 0, 0, cb, priv)
-#define v3_sym_call4(info, call_num, arg1, arg2, arg3, arg4, cb, priv) \
- v3_sym_call(info, call_num, arg1, arg2, arg3, arg4, 0, cb, priv)
-#define v3_sym_call5(info, call_num, arg1, arg2, arg3, arg4, arg5, cb, priv) \
- v3_sym_call(info, call_num, arg1, arg2, arg3, arg4, arg5, cb, priv)
+#define v3_sym_call0(info, call_num) \
+ v3_sym_call(info, call_num, 0, 0, 0, 0, 0)
+#define v3_sym_call1(info, call_num, arg1) \
+ v3_sym_call(info, call_num, arg1, 0, 0, 0, 0)
+#define v3_sym_call2(info, call_num, arg1, arg2) \
+ v3_sym_call(info, call_num, arg1, arg2, 0, 0, 0)
+#define v3_sym_call3(info, call_num, arg1, arg2, arg3) \
+ v3_sym_call(info, call_num, arg1, arg2, arg3, 0, 0)
+#define v3_sym_call4(info, call_num, arg1, arg2, arg3, arg4) \
+ v3_sym_call(info, call_num, arg1, arg2, arg3, arg4, 0)
+#define v3_sym_call5(info, call_num, arg1, arg2, arg3, arg4, arg5) \
+ v3_sym_call(info, call_num, arg1, arg2, arg3, arg4, arg5)
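+
+/*
+ * The argument slots are in/out: callers pass pointers to sym_arg_t values,
+ * and the values written back by the guest are read from the same locations
+ * when the call returns. Unused slots may be passed as 0 (NULL); v3_sym_call()
+ * backs them with scratch storage.
+ *
+ * A minimal host-side usage sketch:
+ *
+ *   sym_arg_t addr = 0;
+ *   if (v3_sym_call1(info, SYMCALL_MEM_LOOKUP, &addr) == -1) {
+ *       PrintError("symcall failed\n");
+ *   }
+ *   // addr now holds the value returned by the guest handler
+ */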
/* ** */
int v3_sym_call(struct guest_info * info,
- uint64_t call_num, uint64_t arg0,
- uint64_t arg1, uint64_t arg2,
- uint64_t arg3, uint64_t arg4,
- int (*notifier)(struct guest_info * info, void * private_data),
- void * private_data);
+ uint64_t call_num, sym_arg_t * arg0,
+ sym_arg_t * arg1, sym_arg_t * arg2,
+ sym_arg_t * arg3, sym_arg_t * arg4);
-int v3_activate_sym_call(struct guest_info * info);
#endif
int v3_swap_in_notify(struct guest_info * info, int pg_index, int dev_index);
+int v3_sym_get_addr_info(struct guest_info * info, addr_t vaddr,
+ int (*cb)(struct guest_info * info));
+
addr_t v3_get_swapped_pg_addr(struct guest_info * info, pte32_t * shadow_pte, pte32_t * guest_pte);
// PrintGuestPageTables(info, info->shdw_pg_state.guest_cr3);
} else if (evt->scan_code == 0x43) { // F9 Sym test
PrintDebug("Testing sym call\n");
- v3_sym_call5(info, SYMCALL_TEST, 0x1111, 0x2222, 0x3333, 0x4444, 0x5555, NULL, NULL);
+ sym_arg_t a0 = 0x1111;
+ sym_arg_t a1 = 0x2222;
+ sym_arg_t a2 = 0x3333;
+ sym_arg_t a3 = 0x4444;
+ sym_arg_t a4 = 0x5555;
+
+ v3_sym_call5(info, SYMCALL_TEST, &a0, &a1, &a2, &a3, &a4);
+
+ V3_Print("Symcall Test Returned arg0=%x, arg1=%x, arg2=%x, arg3=%x, arg4=%x\n",
+ (uint32_t)a0, (uint32_t)a1, (uint32_t)a2, (uint32_t)a3, (uint32_t)a4);
+
} else if (evt->scan_code == 0x42) { // F8 Sym test2
PrintDebug("Testing sym call\n");
- v3_sym_call1(info, SYMCALL_MEM_LOOKUP, 0, NULL, NULL);
+ sym_arg_t addr = 0;
+ v3_sym_call1(info, SYMCALL_MEM_LOOKUP, &addr);
}
// This is a global pointer to the host's VMCB
-static addr_t host_vmcbs[CONFIG_MAX_CPUS] = {0};
+static addr_t host_vmcbs[CONFIG_MAX_CPUS] = { [0 ... CONFIG_MAX_CPUS - 1] = 0};
-static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
+static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * vm_info) {
vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
uint_t i;
//
- guest_state->rsp = 0x00;
- guest_state->rip = 0xfff0;
- guest_state->cpl = 0;
-
- guest_state->efer |= EFER_MSR_svm_enable;
-
-
- guest_state->rflags = 0x00000002; // The reserved bit is always 1
ctrl_area->svm_instrs.VMRUN = 1;
ctrl_area->svm_instrs.VMMCALL = 1;
ctrl_area->svm_instrs.VMLOAD = 1;
ctrl_area->instrs.PAUSE = 1;
ctrl_area->instrs.shutdown_evts = 1;
+
+ /* DEBUG FOR RETURN CODE */
+ ctrl_area->exit_code = 1;
+
+
+ /* Setup Guest Machine state */
+
+ vm_info->vm_regs.rsp = 0x00;
+ vm_info->rip = 0xfff0;
+
vm_info->vm_regs.rdx = 0x00000f00;
- guest_state->cr0 = 0x60010010; // Set the WP flag so the memory hooks work in real-mode
+ vm_info->cpl = 0;
+ vm_info->ctrl_regs.rflags = 0x00000002; // The reserved bit is always 1
+ vm_info->ctrl_regs.cr0 = 0x60010010; // Set the WP flag so the memory hooks work in real-mode
+ vm_info->ctrl_regs.efer |= EFER_MSR_svm_enable;
- guest_state->cs.selector = 0xf000;
- guest_state->cs.limit = 0xffff;
- guest_state->cs.base = 0x0000000f0000LL;
- guest_state->cs.attrib.raw = 0xf3;
- /* DEBUG FOR RETURN CODE */
- ctrl_area->exit_code = 1;
- struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds),
- &(guest_state->es), &(guest_state->fs),
- &(guest_state->gs), NULL};
+ vm_info->segments.cs.selector = 0xf000;
+ vm_info->segments.cs.limit = 0xffff;
+ vm_info->segments.cs.base = 0x0000000f0000LL;
+
+ // (raw attributes = 0xf3)
+ vm_info->segments.cs.type = 0x3;
+ vm_info->segments.cs.system = 0x1;
+ vm_info->segments.cs.dpl = 0x3;
+ vm_info->segments.cs.present = 1;
+
+
+
+ struct v3_segment * segregs [] = {&(vm_info->segments.ss), &(vm_info->segments.ds),
+ &(vm_info->segments.es), &(vm_info->segments.fs),
+ &(vm_info->segments.gs), NULL};
for ( i = 0; segregs[i] != NULL; i++) {
- struct vmcb_selector * seg = segregs[i];
+ struct v3_segment * seg = segregs[i];
seg->selector = 0x0000;
// seg->base = seg->selector << 4;
seg->base = 0x00000000;
- seg->attrib.raw = 0xf3;
seg->limit = ~0u;
+
+ // (raw attributes = 0xf3)
+ seg->type = 0x3;
+ seg->system = 0x1;
+ seg->dpl = 0x3;
+ seg->present = 1;
}
- guest_state->gdtr.limit = 0x0000ffff;
- guest_state->gdtr.base = 0x0000000000000000LL;
- guest_state->idtr.limit = 0x0000ffff;
- guest_state->idtr.base = 0x0000000000000000LL;
+ vm_info->segments.gdtr.limit = 0x0000ffff;
+ vm_info->segments.gdtr.base = 0x0000000000000000LL;
+ vm_info->segments.idtr.limit = 0x0000ffff;
+ vm_info->segments.idtr.base = 0x0000000000000000LL;
- guest_state->ldtr.selector = 0x0000;
- guest_state->ldtr.limit = 0x0000ffff;
- guest_state->ldtr.base = 0x0000000000000000LL;
- guest_state->tr.selector = 0x0000;
- guest_state->tr.limit = 0x0000ffff;
- guest_state->tr.base = 0x0000000000000000LL;
+ vm_info->segments.ldtr.selector = 0x0000;
+ vm_info->segments.ldtr.limit = 0x0000ffff;
+ vm_info->segments.ldtr.base = 0x0000000000000000LL;
+ vm_info->segments.tr.selector = 0x0000;
+ vm_info->segments.tr.limit = 0x0000ffff;
+ vm_info->segments.tr.base = 0x0000000000000000LL;
- guest_state->dr6 = 0x00000000ffff0ff0LL;
- guest_state->dr7 = 0x0000000000000400LL;
+ vm_info->dbg_regs.dr6 = 0x00000000ffff0ff0LL;
+ vm_info->dbg_regs.dr7 = 0x0000000000000400LL;
v3_init_svm_io_map(vm_info);
vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
PrintDebug("Created\n");
- guest_state->cr3 = vm_info->direct_map_pt;
+ vm_info->ctrl_regs.cr0 |= 0x80000000;
+ vm_info->ctrl_regs.cr3 = vm_info->direct_map_pt;
ctrl_area->cr_reads.cr0 = 1;
ctrl_area->cr_writes.cr0 = 1;
guest_state->g_pat = 0x7040600070406ULL;
- guest_state->cr0 |= 0x80000000;
+
} else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
// Flush the TLB on entries/exits
return 0;
}
-static int start_svm_guest(struct guest_info *info) {
- // vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
- // vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
- uint_t num_exits = 0;
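+
+/* Post-exit interrupt bookkeeping, called from v3_svm_enter() before global
+ * interrupts are re-enabled: detect a completed interrupt acknowledge cycle,
+ * and whether a previously injected interrupt was fully vectored by the guest.
+ */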
+static int update_irq_state_atomic(struct guest_info * info) {
+ vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
- PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
- //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
-
- info->run_state = VM_RUNNING;
- rdtscll(info->yield_start_cycle);
+ if ((info->intr_state.irq_pending == 1) && (guest_ctrl->guest_ctrl.V_IRQ == 0)) {
+
+#ifdef CONFIG_DEBUG_INTERRUPTS
+ PrintDebug("INTAK cycle completed for irq %d\n", info->intr_state.irq_vector);
+#endif
+ info->intr_state.irq_started = 1;
+ info->intr_state.irq_pending = 0;
- while (1) {
- ullong_t tmp_tsc;
-
- // Conditionally yield the CPU if the timeslice has expired
- v3_yield_cond(info);
+ v3_injecting_intr(info, info->intr_state.irq_vector, V3_EXTERNAL_IRQ);
+ }
- /*
- PrintDebug("SVM Entry to CS=%p rip=%p...\n",
- (void *)(addr_t)info->segments.cs.base,
- (void *)(addr_t)info->rip);
- */
+ if ((info->intr_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 0)) {
+#ifdef CONFIG_DEBUG_INTERRUPTS
+ PrintDebug("Interrupt %d taken by guest\n", info->intr_state.irq_vector);
+#endif
+
+ // Interrupt was taken fully vectored
+ info->intr_state.irq_started = 0;
+
+ } else {
+#ifdef CONFIG_DEBUG_INTERRUPTS
+ PrintDebug("EXIT INT INFO is set (vec=%d)\n", guest_ctrl->exit_int_info.vector);
+#endif
+ }
- // disable global interrupts for vm state transition
- v3_clgi();
+ return 0;
+}
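+
+/* Decide what event, if any, to queue for injection on the next entry:
+ * pending exceptions take priority, then an IRQ whose injection was already
+ * started, then any newly pending interrupt source.
+ */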
+static int update_irq_state(struct guest_info * info) {
+ vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
- rdtscll(info->time_state.cached_host_tsc);
- // guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;
+ if (v3_excp_pending(info)) {
+ uint_t excp = v3_get_excp_number(info);
- v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[info->cpu_id]);
+ guest_ctrl->EVENTINJ.type = SVM_INJECTION_EXCEPTION;
- rdtscll(tmp_tsc);
-
+ if (info->excp_state.excp_error_code_valid) {
+ guest_ctrl->EVENTINJ.error_code = info->excp_state.excp_error_code;
+ guest_ctrl->EVENTINJ.ev = 1;
+#ifdef CONFIG_DEBUG_INTERRUPTS
+ PrintDebug("Injecting exception %d with error code %x\n", excp, guest_ctrl->EVENTINJ.error_code);
+#endif
+ }
+
+ guest_ctrl->EVENTINJ.vector = excp;
- //PrintDebug("SVM Returned\n");
+ guest_ctrl->EVENTINJ.valid = 1;
+
+ PrintDebug("<%d> Injecting Exception %d (CR2=%p) (EIP=%p)\n",
+ (int)info->num_exits,
+ guest_ctrl->EVENTINJ.vector,
+ (void *)(addr_t)info->ctrl_regs.cr2,
+ (void *)(addr_t)info->rip);
+
- // reenable global interrupts after vm exit
- v3_stgi();
+#ifdef CONFIG_DEBUG_INTERRUPTS
+ PrintDebug("Injecting Exception %d (EIP=%p)\n",
+ guest_ctrl->EVENTINJ.vector,
+ (void *)(addr_t)info->rip);
+#endif
+ v3_injecting_excp(info, excp);
- // Conditionally yield the CPU if the timeslice has expired
- v3_yield_cond(info);
+ } else if (info->intr_state.irq_started == 1) {
+#ifdef CONFIG_DEBUG_INTERRUPTS
+ PrintDebug("IRQ pending from previous injection\n");
+#endif
+ guest_ctrl->guest_ctrl.V_IRQ = 1;
+ guest_ctrl->guest_ctrl.V_INTR_VECTOR = info->intr_state.irq_vector;
+ guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
+ guest_ctrl->guest_ctrl.V_INTR_PRIO = 0xf;
+ } else {
+ switch (v3_intr_pending(info)) {
+ case V3_EXTERNAL_IRQ: {
+ uint32_t irq = v3_get_intr(info);
+
+ guest_ctrl->guest_ctrl.V_IRQ = 1;
+ guest_ctrl->guest_ctrl.V_INTR_VECTOR = irq;
+ guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
+ guest_ctrl->guest_ctrl.V_INTR_PRIO = 0xf;
+
+#ifdef CONFIG_DEBUG_INTERRUPTS
+ PrintDebug("Injecting Interrupt %d (EIP=%p)\n",
+ guest_ctrl->guest_ctrl.V_INTR_VECTOR,
+ (void *)(addr_t)info->rip);
+#endif
- v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
- num_exits++;
+ info->intr_state.irq_pending = 1;
+ info->intr_state.irq_vector = irq;
+
+ break;
+ }
+ case V3_NMI:
+ guest_ctrl->EVENTINJ.type = SVM_INJECTION_NMI;
+ break;
+ case V3_SOFTWARE_INTR:
+ guest_ctrl->EVENTINJ.type = SVM_INJECTION_SOFT_INTR;
+ break;
+ case V3_VIRTUAL_IRQ:
+ guest_ctrl->EVENTINJ.type = SVM_INJECTION_IRQ;
+ break;
+
+ case V3_INVALID_INTR:
+ default:
+ break;
+ }
- if ((num_exits % 5000) == 0) {
- PrintDebug("SVM Exit number %d\n", num_exits);
+ }
+
+ return 0;
+}
+
+
+/*
+ * CAUTION and DANGER!!!
+ *
+ * The VMCB CANNOT(!!) be accessed outside of the clgi/stgi region inside this function.
+ * When executing a symbiotic call, the VMCB WILL be overwritten, so any dependencies
+ * on its contents will cause things to break. The contents at the time of the exit WILL
+ * change before the exit handler is executed.
+ */
+int v3_svm_enter(struct guest_info * info) {
+ vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
+ vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
+ ullong_t tmp_tsc;
+ addr_t exit_code = 0, exit_info1 = 0, exit_info2 = 0;
+
+ // Conditionally yield the CPU if the timeslice has expired
+ v3_yield_cond(info);
+
+ // disable global interrupts for vm state transition
+ v3_clgi();
+
+ // Synchronize the guest state to the VMCB
+ guest_state->cr0 = info->ctrl_regs.cr0;
+ guest_state->cr2 = info->ctrl_regs.cr2;
+ guest_state->cr3 = info->ctrl_regs.cr3;
+ guest_state->cr4 = info->ctrl_regs.cr4;
+ guest_state->dr6 = info->dbg_regs.dr6;
+ guest_state->dr7 = info->dbg_regs.dr7;
+ guest_ctrl->guest_ctrl.V_TPR = info->ctrl_regs.cr8 & 0xff;
+ guest_state->rflags = info->ctrl_regs.rflags;
+ guest_state->efer = info->ctrl_regs.efer;
+
+ guest_state->cpl = info->cpl;
+
+ v3_set_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments));
+
+ guest_state->rax = info->vm_regs.rax;
+ guest_state->rip = info->rip;
+ guest_state->rsp = info->vm_regs.rsp;
+ /* ** */
+
+ /*
+ PrintDebug("SVM Entry to CS=%p rip=%p...\n",
+ (void *)(addr_t)info->segments.cs.base,
+ (void *)(addr_t)info->rip);
+ */
+
+#ifdef CONFIG_SYMBIOTIC
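+    // Sanity check: an interrupt should never be injected while a symbiotic call is active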
+ if (info->sym_state.sym_call_active == 1) {
+ if (guest_ctrl->guest_ctrl.V_IRQ == 1) {
+ V3_Print("!!! Injecting Interrupt during Sym call !!!\n");
}
+ }
+#endif
+
- if (v3_handle_svm_exit(info) != 0) {
+ rdtscll(info->time_state.cached_host_tsc);
+ // guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;
+
+ v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[info->cpu_id]);
+
+ rdtscll(tmp_tsc);
+
+ //PrintDebug("SVM Returned\n");
+
+ info->num_exits++;
+
+ v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
+
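+    // Skip interrupt bookkeeping while a symbiotic call is in flight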
+#ifdef CONFIG_SYMBIOTIC
+ if (info->sym_state.sym_call_active == 0) {
+ update_irq_state_atomic(info);
+ }
+#else
+ update_irq_state_atomic(info);
+#endif
+
+ // Save Guest state from VMCB
+ info->rip = guest_state->rip;
+ info->vm_regs.rsp = guest_state->rsp;
+ info->vm_regs.rax = guest_state->rax;
+
+ info->cpl = guest_state->cpl;
+
+ info->ctrl_regs.cr0 = guest_state->cr0;
+ info->ctrl_regs.cr2 = guest_state->cr2;
+ info->ctrl_regs.cr3 = guest_state->cr3;
+ info->ctrl_regs.cr4 = guest_state->cr4;
+ info->dbg_regs.dr6 = guest_state->dr6;
+ info->dbg_regs.dr7 = guest_state->dr7;
+ info->ctrl_regs.cr8 = guest_ctrl->guest_ctrl.V_TPR;
+ info->ctrl_regs.rflags = guest_state->rflags;
+ info->ctrl_regs.efer = guest_state->efer;
+
+ v3_get_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments));
+ info->cpu_mode = v3_get_vm_cpu_mode(info);
+ info->mem_mode = v3_get_vm_mem_mode(info);
+ /* ** */
+
+
+ // save exit info here
+ exit_code = guest_ctrl->exit_code;
+ exit_info1 = guest_ctrl->exit_info1;
+ exit_info2 = guest_ctrl->exit_info2;
+
+
+ // reenable global interrupts after vm exit
+ v3_stgi();
+
+
+ // Conditionally yield the CPU if the timeslice has expired
+ v3_yield_cond(info);
+
+
+ if (v3_handle_svm_exit(info, exit_code, exit_info1, exit_info2) != 0) {
+ PrintError("Error in SVM exit handler\n");
+ return -1;
+ }
+
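+    // Likewise, defer queuing new event injections until any active symbiotic call completes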
+#ifdef CONFIG_SYMBIOTIC
+ if (info->sym_state.sym_call_active == 0) {
+ update_irq_state(info);
+ }
+#else
+ update_irq_state(info);
+#endif
+
+ return 0;
+}
+
+
+static int start_svm_guest(struct guest_info *info) {
+ // vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
+ // vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
+
+
+
+ PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
+ //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
+
+ info->run_state = VM_RUNNING;
+ rdtscll(info->yield_start_cycle);
+
+
+ while (1) {
+ if (v3_svm_enter(info) == -1) {
vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
addr_t host_addr;
addr_t linear_addr = 0;
-
+
info->run_state = VM_ERROR;
- PrintDebug("SVM ERROR!!\n");
-
+ V3_Print("SVM ERROR!!\n");
+
v3_print_guest_state(info);
-
- PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code);
-
- PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
- PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
-
- PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
- PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
-
+
+ V3_Print("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code);
+
+ V3_Print("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
+ V3_Print("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
+
+ V3_Print("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
+ V3_Print("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
+
linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
-
+
if (info->mem_mode == PHYSICAL_MEM) {
guest_pa_to_host_va(info, linear_addr, &host_addr);
} else if (info->mem_mode == VIRTUAL_MEM) {
guest_va_to_host_va(info, linear_addr, &host_addr);
}
-
- PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);
-
- PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
+
+ V3_Print("Host Address of rip = 0x%p\n", (void *)host_addr);
+
+ V3_Print("Instr (15 bytes) at %p:\n", (void *)host_addr);
v3_dump_mem((uint8_t *)host_addr, 15);
-
-
+
v3_print_stack(info);
-
break;
}
+
+ if ((info->num_exits % 5000) == 0) {
+ V3_Print("SVM Exit number %d\n", (uint32_t)info->num_exits);
+ }
+
+
+
+
}
return 0;
}
PrintDebug("CPUID_EXT_FEATURE_IDS_ecx=0x%x\n", ecx);
if ((ecx & CPUID_EXT_FEATURE_IDS_ecx_svm_avail) == 0) {
- PrintDebug("SVM Not Available\n");
+ V3_Print("SVM Not Available\n");
return 0;
} else {
v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);
if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
- PrintDebug("SVM is available but is disabled.\n");
+ V3_Print("SVM is available but is disabled.\n");
v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);
if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
- PrintDebug("SVM BIOS Disabled, not unlockable\n");
+ V3_Print("SVM BIOS Disabled, not unlockable\n");
} else {
- PrintDebug("SVM is locked with a key\n");
+ V3_Print("SVM is locked with a key\n");
}
return 0;
} else {
- PrintDebug("SVM is available and enabled.\n");
+ V3_Print("SVM is available and enabled.\n");
v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_eax=0x%x\n", eax);
PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ecx=0x%x\n", ecx);
PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);
-
- if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
- PrintDebug("SVM Nested Paging not supported\n");
- } else {
- PrintDebug("SVM Nested Paging supported\n");
- }
-
return 1;
}
}
//PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);
if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
- PrintDebug("SVM Nested Paging not supported\n");
+ V3_Print("SVM Nested Paging not supported\n");
return 0;
} else {
- PrintDebug("SVM Nested Paging supported\n");
+ V3_Print("SVM Nested Paging supported\n");
return 1;
}
}
msr.e_reg.low |= EFER_MSR_svm_enable;
v3_set_msr(EFER_MSR, 0, msr.e_reg.low);
- PrintDebug("SVM Enabled\n");
+ V3_Print("SVM Enabled\n");
// Setup the host state save area
host_vmcbs[cpu_id] = (addr_t)V3_AllocPages(4);
#include <palacios/vmm_cpuid.h>
#include <palacios/vmm_direct_paging.h>
-#ifdef CONFIG_SYMBIOTIC
-#include <palacios/vmm_sym_iface.h>
-#endif
#ifdef CONFIG_TELEMETRY
#include <palacios/vmm_telemetry.h>
#endif
-int v3_handle_svm_exit(struct guest_info * info) {
- vmcb_ctrl_t * guest_ctrl = 0;
- vmcb_saved_state_t * guest_state = 0;
- ulong_t exit_code = 0;
-
-#ifdef CONFIG_SYMBIOTIC
- static int sym_started = 0;
-#endif
-
-
- guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
- guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
-
- // Update the high level state
- info->rip = guest_state->rip;
- info->vm_regs.rsp = guest_state->rsp;
- info->vm_regs.rax = guest_state->rax;
-
- info->cpl = guest_state->cpl;
-
- info->ctrl_regs.cr0 = guest_state->cr0;
- info->ctrl_regs.cr2 = guest_state->cr2;
- info->ctrl_regs.cr3 = guest_state->cr3;
- info->ctrl_regs.cr4 = guest_state->cr4;
- info->dbg_regs.dr6 = guest_state->dr6;
- info->dbg_regs.dr7 = guest_state->dr7;
- info->ctrl_regs.cr8 = guest_ctrl->guest_ctrl.V_TPR;
- info->ctrl_regs.rflags = guest_state->rflags;
- info->ctrl_regs.efer = guest_state->efer;
-
- v3_get_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments));
- info->cpu_mode = v3_get_vm_cpu_mode(info);
- info->mem_mode = v3_get_vm_mem_mode(info);
-
- exit_code = guest_ctrl->exit_code;
-
-
-
-#ifdef CONFIG_SYMBIOTIC
- if (0) {
- // if (sym_started == 1) {
- // ignore interrupt injection if we just started a symcall
- PrintDebug("SVM Exit: %s (rip=%p) (info1=%p) (info2=%p)\n", vmexit_code_to_str(exit_code),
- (void *)(addr_t)info->rip, (void *)(addr_t)guest_ctrl->exit_info1,
- (void *)(addr_t)guest_ctrl->exit_info2);
-
- /* if (exit_code == VMEXIT_EXCP14) {
- PrintGuestPageTree(info, guest_ctrl->exit_info2, info->shdw_pg_state.guest_cr3);
- }*/
-
- }
-#endif
-
-
- if ((info->intr_state.irq_pending == 1) && (guest_ctrl->guest_ctrl.V_IRQ == 0)) {
-
-#ifdef CONFIG_DEBUG_INTERRUPTS
- PrintDebug("INTAK cycle completed for irq %d\n", info->intr_state.irq_vector);
-#endif
-
- info->intr_state.irq_started = 1;
- info->intr_state.irq_pending = 0;
-
- v3_injecting_intr(info, info->intr_state.irq_vector, V3_EXTERNAL_IRQ);
- }
-
- if ((info->intr_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 0)) {
-#ifdef CONFIG_DEBUG_INTERRUPTS
- PrintDebug("Interrupt %d taken by guest\n", info->intr_state.irq_vector);
-#endif
-
- // Interrupt was taken fully vectored
- info->intr_state.irq_started = 0;
-
- } else {
-#ifdef CONFIG_DEBUG_INTERRUPTS
- PrintDebug("EXIT INT INFO is set (vec=%d)\n", guest_ctrl->exit_int_info.vector);
-#endif
- }
-
+int v3_handle_svm_exit(struct guest_info * info, addr_t exit_code, addr_t exit_info1, addr_t exit_info2) {
#ifdef CONFIG_TELEMETRY
if (info->enable_telemetry) {
switch (exit_code) {
case VMEXIT_IOIO: {
- struct svm_io_info * io_info = (struct svm_io_info *)&(guest_ctrl->exit_info1);
+ struct svm_io_info * io_info = (struct svm_io_info *)&(exit_info1);
if (io_info->type == 0) {
if (io_info->str) {
- if (v3_handle_svm_io_outs(info) == -1 ) {
+ if (v3_handle_svm_io_outs(info, io_info) == -1 ) {
return -1;
}
} else {
- if (v3_handle_svm_io_out(info) == -1) {
+ if (v3_handle_svm_io_out(info, io_info) == -1) {
return -1;
}
}
} else {
if (io_info->str) {
- if (v3_handle_svm_io_ins(info) == -1) {
+ if (v3_handle_svm_io_ins(info, io_info) == -1) {
return -1;
}
} else {
- if (v3_handle_svm_io_in(info) == -1) {
+ if (v3_handle_svm_io_in(info, io_info) == -1) {
return -1;
}
}
}
+
+ info->rip = exit_info2;
+
break;
}
case VMEXIT_MSR:
- if (guest_ctrl->exit_info1 == 0) {
+ if (exit_info1 == 0) {
if (v3_handle_msr_read(info) == -1) {
return -1;
}
- } else if (guest_ctrl->exit_info1 == 1) {
+ } else if (exit_info1 == 1) {
if (v3_handle_msr_write(info) == -1) {
return -1;
}
}
break;
case VMEXIT_EXCP14: {
- addr_t fault_addr = guest_ctrl->exit_info2;
- pf_error_t * error_code = (pf_error_t *)&(guest_ctrl->exit_info1);
+ addr_t fault_addr = exit_info2;
+ pf_error_t * error_code = (pf_error_t *)&(exit_info1);
#ifdef CONFIG_DEBUG_SHADOW_PAGING
PrintDebug("PageFault at %p (error=%d)\n",
(void *)fault_addr, *(uint_t *)error_code);
break;
}
case VMEXIT_NPF: {
- addr_t fault_addr = guest_ctrl->exit_info2;
- pf_error_t * error_code = (pf_error_t *)&(guest_ctrl->exit_info1);
+ addr_t fault_addr = exit_info2;
+ pf_error_t * error_code = (pf_error_t *)&(exit_info1);
if (info->shdw_pg_mode == NESTED_PAGING) {
if (v3_handle_nested_pagefault(info, fault_addr, *error_code) == -1) {
PrintDebug("Unhandled SVM Exit: %s\n", vmexit_code_to_str(exit_code));
- rip_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));
+ rip_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
PrintError("SVM Returned:(VMCB=%p)\n", (void *)(info->vmm_data));
- PrintError("RIP: %p\n", (void *)(addr_t)(guest_state->rip));
+ PrintError("RIP: %p\n", (void *)(addr_t)(info->rip));
PrintError("RIP Linear: %p\n", (void *)(addr_t)(rip_addr));
PrintError("SVM Returned: Exit Code: %p\n", (void *)(addr_t)exit_code);
- PrintError("io_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
- PrintError("io_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
+ PrintError("io_info1 low = 0x%.8x\n", *(uint_t*)&(exit_info1));
+ PrintError("io_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(exit_info1)) + 4));
- PrintError("io_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
- PrintError("io_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
+ PrintError("io_info2 low = 0x%.8x\n", *(uint_t*)&(exit_info2));
+ PrintError("io_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(exit_info2)) + 4));
if (info->shdw_pg_mode == SHADOW_PAGING) {
#endif
-
-#ifdef CONFIG_SYMBIOTIC
- v3_activate_sym_call(info);
-#endif
-
- guest_state->cr0 = info->ctrl_regs.cr0;
- guest_state->cr2 = info->ctrl_regs.cr2;
- guest_state->cr3 = info->ctrl_regs.cr3;
- guest_state->cr4 = info->ctrl_regs.cr4;
- guest_state->dr6 = info->dbg_regs.dr6;
- guest_state->dr7 = info->dbg_regs.dr7;
- guest_ctrl->guest_ctrl.V_TPR = info->ctrl_regs.cr8 & 0xff;
- guest_state->rflags = info->ctrl_regs.rflags;
- guest_state->efer = info->ctrl_regs.efer;
-
- guest_state->cpl = info->cpl;
-
- v3_set_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments));
-
- guest_state->rax = info->vm_regs.rax;
- guest_state->rip = info->rip;
- guest_state->rsp = info->vm_regs.rsp;
-
-
-
-
- if (v3_excp_pending(info)) {
- uint_t excp = v3_get_excp_number(info);
-
- guest_ctrl->EVENTINJ.type = SVM_INJECTION_EXCEPTION;
-
- if (info->excp_state.excp_error_code_valid) {
- guest_ctrl->EVENTINJ.error_code = info->excp_state.excp_error_code;
- guest_ctrl->EVENTINJ.ev = 1;
-#ifdef CONFIG_DEBUG_INTERRUPTS
- PrintDebug("Injecting exception %d with error code %x\n", excp, guest_ctrl->EVENTINJ.error_code);
-#endif
- }
-
- guest_ctrl->EVENTINJ.vector = excp;
-
- guest_ctrl->EVENTINJ.valid = 1;
-
- PrintDebug("Injecting Exception %d (EIP=%p)\n",
- guest_ctrl->EVENTINJ.vector,
- (void *)(addr_t)info->rip);
-
-
-
-#ifdef CONFIG_DEBUG_INTERRUPTS
- PrintDebug("Injecting Exception %d (EIP=%p)\n",
- guest_ctrl->EVENTINJ.vector,
- (void *)(addr_t)info->rip);
-#endif
- v3_injecting_excp(info, excp);
-
-#ifdef CONFIG_SYMBIOTIC
- } else if (info->sym_state.call_active == 1) {
- // ignore interrupt injection if we just started a symcall
- PrintDebug("Symcall active\n");
- sym_started = 1;
-#endif
-
- } else if (info->intr_state.irq_started == 1) {
-#ifdef CONFIG_DEBUG_INTERRUPTS
- PrintDebug("IRQ pending from previous injection\n");
-#endif
- guest_ctrl->guest_ctrl.V_IRQ = 1;
- guest_ctrl->guest_ctrl.V_INTR_VECTOR = info->intr_state.irq_vector;
- guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
- guest_ctrl->guest_ctrl.V_INTR_PRIO = 0xf;
-
- } else {
- switch (v3_intr_pending(info)) {
- case V3_EXTERNAL_IRQ: {
- uint32_t irq = v3_get_intr(info);
-
- guest_ctrl->guest_ctrl.V_IRQ = 1;
- guest_ctrl->guest_ctrl.V_INTR_VECTOR = irq;
- guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
- guest_ctrl->guest_ctrl.V_INTR_PRIO = 0xf;
-
-#ifdef CONFIG_DEBUG_INTERRUPTS
- PrintDebug("Injecting Interrupt %d (EIP=%p)\n",
- guest_ctrl->guest_ctrl.V_INTR_VECTOR,
- (void *)(addr_t)info->rip);
-#endif
-
- info->intr_state.irq_pending = 1;
- info->intr_state.irq_vector = irq;
-
- break;
- }
- case V3_NMI:
- guest_ctrl->EVENTINJ.type = SVM_INJECTION_NMI;
- break;
- case V3_SOFTWARE_INTR:
- guest_ctrl->EVENTINJ.type = SVM_INJECTION_SOFT_INTR;
- break;
- case V3_VIRTUAL_IRQ:
- guest_ctrl->EVENTINJ.type = SVM_INJECTION_IRQ;
- break;
-
- case V3_INVALID_INTR:
- default:
- break;
- }
-
- }
-
-
-
if (exit_code == VMEXIT_INTR) {
//PrintDebug("INTR ret IP = %x\n", guest_state->rip);
}
// This should package up an IO request and call vmm_handle_io
-int v3_handle_svm_io_in(struct guest_info * info) {
- vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA((vmcb_t *)(info->vmm_data));
- // vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
- struct svm_io_info * io_info = (struct svm_io_info *)&(ctrl_area->exit_info1);
-
+int v3_handle_svm_io_in(struct guest_info * info, struct svm_io_info * io_info) {
struct v3_io_hook * hook = v3_get_io_hook(info, io_info->port);
int read_size = 0;
return -1;
}
- info->rip = ctrl_area->exit_info2;
-
return 0;
}
/* We might not handle wrap around of the RDI register correctly...
 * In that case, if we do wrap around, the effect will manifest in the higher bits of the register
*/
-int v3_handle_svm_io_ins(struct guest_info * info) {
- vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA((vmcb_t *)(info->vmm_data));
- vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
-
- struct svm_io_info * io_info = (struct svm_io_info *)&(ctrl_area->exit_info1);
-
+int v3_handle_svm_io_ins(struct guest_info * info, struct svm_io_info * io_info) {
struct v3_io_hook * hook = v3_get_io_hook(info, io_info->port);
int read_size = 0;
addr_t dst_addr = 0;
// direction can equal either 1 or -1
// We will multiply the final added offset by this value to go the correct direction
int direction = 1;
- struct rflags * flags = (struct rflags *)&(guest_state->rflags);
+ struct rflags * flags = (struct rflags *)&(info->ctrl_regs.rflags);
if (flags->df) {
direction = -1;
rep_num--;
}
-
- info->rip = ctrl_area->exit_info2;
-
return 0;
}
-int v3_handle_svm_io_out(struct guest_info * info) {
- vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA((vmcb_t *)(info->vmm_data));
- // vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
- struct svm_io_info * io_info = (struct svm_io_info *)&(ctrl_area->exit_info1);
-
+int v3_handle_svm_io_out(struct guest_info * info, struct svm_io_info * io_info) {
struct v3_io_hook * hook = v3_get_io_hook(info, io_info->port);
int write_size = 0;
return -1;
}
- info->rip = ctrl_area->exit_info2;
-
return 0;
}
 * In that case, if we do wrap around, the effect will manifest in the higher bits of the register
*/
-int v3_handle_svm_io_outs(struct guest_info * info) {
- vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA((vmcb_t *)(info->vmm_data));
- vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
-
-
- struct svm_io_info * io_info = (struct svm_io_info *)&(ctrl_area->exit_info1);
-
+int v3_handle_svm_io_outs(struct guest_info * info, struct svm_io_info * io_info) {
+
struct v3_io_hook * hook = v3_get_io_hook(info, io_info->port);
int write_size = 0;
addr_t dst_addr = 0;
// direction can equal either 1 or -1
// We will multiply the final added offset by this value to go the correct direction
int direction = 1;
- struct rflags * flags = (struct rflags *)&(guest_state->rflags);
+ struct rflags * flags = (struct rflags *)&(info->ctrl_regs.rflags);
if (flags->df) {
direction = -1;
rep_num--;
}
-
- info->rip = ctrl_area->exit_info2;
-
-
return 0;
}
linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
V3_Print("RIP Linear: %p\n", (void *)linear_addr);
+ V3_Print("NumExits: %u\n", (uint32_t)info->num_exits);
+
v3_print_segments(info);
v3_print_ctrl_regs(info);
// CR4
}
v3_print_GPRs(info);
+
+ v3_print_stack(info);
}
guest_va_to_host_va(info, linear_addr, &host_addr);
}
- V3_Print("Host Address of rsp = 0x%p\n", (void *)host_addr);
+ V3_Print("Host Address of rsp = 0x%p\n", (void *)host_addr);
V3_Print("Stack at %p:\n", (void *)host_addr);
// We start i at one because the current stack pointer points to an unused stack element
(os_hooks)->interrupt_cpu(info, logical_cpu);
}
}
+
+
+
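+
+/* Architecture-independent entry point: dispatch to the SVM (or, eventually,
+ * VMX) entry path based on the CPU type detected for this core.
+ */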
+int v3_vm_enter(struct guest_info * info) {
+ switch (v3_cpu_types[info->cpu_id]) {
+#ifdef CONFIG_SVM
+ case V3_SVM_CPU:
+ case V3_SVM_REV3_CPU:
+ v3_svm_enter(info);
+ break;
+#endif
+#if CONFIG_VMX && 0
+ case V3_VMX_CPU:
+ case V3_VMX_EPT_CPU:
+ v3_vmx_enter(info);
+ break;
+#endif
+ default:
+	    PrintError("Attempting to enter a guest on an invalid CPU\n");
+ return -1;
+ }
+
+ return 0;
+}
return 0;
}
+#ifdef CONFIG_SYMBIOTIC_SWAP
+
+static int sym_swap_callback(struct guest_info * info) {
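+    // Placeholder: intended to run once v3_sym_get_addr_info() completes the symbiotic address lookup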
+
+ return 0;
+}
+
+#endif
static int handle_pte_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
pte32_t * shadow_pt, pte32_t * guest_pt) {
info->swap_state.write_faults++;
}
#endif
- if (error_code.write == 0) {
- V3_Print("Page fault on swapped out page (vaddr=%p) (pte=%x) (error_code=%x)\n",
- (void *)fault_addr, *(uint32_t *)guest_pte, *(uint32_t *)&error_code);
+
+ // This will trigger a callback...
+ v3_sym_get_addr_info(info, fault_addr, sym_swap_callback);
+
+ return 0;
+
+ /*
+ if (error_code.write == 0) {
+ V3_Print("Page fault on swapped out page (vaddr=%p) (pte=%x) (error_code=%x)\n",
+ (void *)fault_addr, *(uint32_t *)guest_pte, *(uint32_t *)&error_code);
- addr_t swp_pg_addr = v3_get_swapped_pg_addr(info, shadow_pte, guest_pte);
+ addr_t swp_pg_addr = v3_get_swapped_pg_addr(info, shadow_pte, guest_pte);
- if (swp_pg_addr == 0) {
- if (inject_guest_pf(info, fault_addr, error_code) == -1) {
- PrintError("Could not inject guest page fault\n");
- return -1;
- }
- } else {
- /*
- * Setup shadow paging state
- */
-
- /* We need some way to check permissions.... */
+ if (swp_pg_addr == 0) {
+ if (inject_guest_pf(info, fault_addr, error_code) == -1) {
+ PrintError("Could not inject guest page fault\n");
+ return -1;
+ }
+ } else {
+
- shadow_pte->accessed = 1;
- shadow_pte->writable = 0;
+ shadow_pte->accessed = 1;
+ shadow_pte->writable = 0;
- if ((fault_addr & 0xc0000000) == 0xc0000000) {
- shadow_pte->user_page = 0;
- } else {
- shadow_pte->user_page = 1;
- }
+ if ((fault_addr & 0xc0000000) == 0xc0000000) {
+ shadow_pte->user_page = 0;
+ } else {
+ shadow_pte->user_page = 1;
+ }
- shadow_pte->write_through = 0;
- shadow_pte->cache_disable = 0;
- shadow_pte->global_page = 0;
+ shadow_pte->write_through = 0;
+ shadow_pte->cache_disable = 0;
+ shadow_pte->global_page = 0;
- shadow_pte->present = 1;
+ shadow_pte->present = 1;
- shadow_pte->page_base_addr = swp_pg_addr;
- }
+ shadow_pte->page_base_addr = swp_pg_addr;
+ }
- return 0;
- }
+ return 0;
+ }
+ */
}
#endif
#define SYM_CPUID_NUM 0x90000000
+// A successful symcall returns via the RET_HCALL, with the return values in registers
+// A symcall error returns via the ERR_HCALL with the error code in rbx
#define SYM_CALL_RET_HCALL 0x535
+#define SYM_CALL_ERR_HCALL 0x536
/* Notes: We use a combination of SYSCALL and SYSENTER Semantics
static int sym_call_ret(struct guest_info * info, uint_t hcall_id, void * private_data);
+static int sym_call_err(struct guest_info * info, uint_t hcall_id, void * private_data);
v3_hook_msr(info, SYMCALL_FS_MSR, msr_read, msr_write, info);
v3_register_hypercall(info, SYM_CALL_RET_HCALL, sym_call_ret, NULL);
+ v3_register_hypercall(info, SYM_CALL_ERR_HCALL, sym_call_err, NULL);
return 0;
}
}
-
-static int sym_call_ret(struct guest_info * info, uint_t hcall_id, void * private_data) {
+static int sym_call_err(struct guest_info * info, uint_t hcall_id, void * private_data) {
struct v3_sym_state * state = (struct v3_sym_state *)&(info->sym_state);
- struct v3_sym_context * old_ctx = (struct v3_sym_context *)&(state->old_ctx);
+ PrintError("sym call error\n");
- PrintError("Return from sym call\n");
+ state->sym_call_errno = (int)info->vm_regs.rbx;
v3_print_guest_state(info);
v3_print_mem_map(info);
+ // clear sym flags
+ state->sym_call_error = 1;
+ state->sym_call_returned = 1;
- if (state->notifier != NULL) {
- if (state->notifier(info, state->private_data) == -1) {
- PrintError("Error in return from symcall.\n");
- return -1;
- }
- }
-
-
- // restore guest state
- memcpy(&(info->vm_regs), &(old_ctx->vm_regs), sizeof(struct v3_gprs));
- memcpy(&(info->segments.cs), &(old_ctx->cs), sizeof(struct v3_segment));
- memcpy(&(info->segments.ss), &(old_ctx->ss), sizeof(struct v3_segment));
- info->segments.gs.base = old_ctx->gs_base;
- info->segments.fs.base = old_ctx->fs_base;
- info->rip = old_ctx->rip;
- info->cpl = old_ctx->cpl;
+ return -1;
+}
+static int sym_call_ret(struct guest_info * info, uint_t hcall_id, void * private_data) {
+ struct v3_sym_state * state = (struct v3_sym_state *)&(info->sym_state);
- PrintDebug("restoring guest state\n");
+ PrintError("Return from sym call\n");
v3_print_guest_state(info);
+ v3_print_mem_map(info);
- // clear sym flags
- state->call_active = 0;
-
+ state->sym_call_returned = 1;
return 0;
}
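+
+/* Re-enter the guest repeatedly until the symcall return (or error)
+ * hypercall marks the call as returned.
+ */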
+static int execute_symcall(struct guest_info * info) {
-int v3_sym_call(struct guest_info * info,
- uint64_t call_num, uint64_t arg0,
- uint64_t arg1, uint64_t arg2,
- uint64_t arg3, uint64_t arg4,
- int (*notifier)(struct guest_info * info, void * private_data),
- void * private_data) {
- struct v3_sym_state * state = (struct v3_sym_state *)&(info->sym_state);
-
-
- PrintDebug("Making Sym call\n");
-
- if ((state->sym_page->sym_call_enabled == 0) ||
- (state->call_active == 1) ||
- (state->call_pending == 1)) {
- return -1;
+ while (info->sym_state.sym_call_returned == 0) {
+ if (v3_vm_enter(info) == -1) {
+ PrintError("Error in Sym call\n");
+ return -1;
+ }
}
- state->args[0] = call_num;
- state->args[1] = arg0;
- state->args[2] = arg1;
- state->args[3] = arg2;
- state->args[4] = arg3;
- state->args[5] = arg4;
-
- state->notifier = notifier;
- state->private_data = private_data;
-
- state->call_pending = 1;
-
return 0;
}
-
-int v3_activate_sym_call(struct guest_info * info) {
+int v3_sym_call(struct guest_info * info,
+ uint64_t call_num, sym_arg_t * arg0,
+ sym_arg_t * arg1, sym_arg_t * arg2,
+ sym_arg_t * arg3, sym_arg_t * arg4) {
struct v3_sym_state * state = (struct v3_sym_state *)&(info->sym_state);
struct v3_sym_context * old_ctx = (struct v3_sym_context *)&(state->old_ctx);
struct v3_segment sym_cs;
struct v3_segment sym_ss;
+ uint64_t trash_args[5] = { [0 ... 4] = 0 };
-
- if ((state->sym_page->sym_call_enabled == 0) ||
- (state->call_pending == 0)) {
- // Unable to make sym call or none pending
- if (state->call_active == 1) {
- PrintError("handled exit while in symcall\n");
- }
- return 0;
- }
-
-
- PrintDebug("Activating Symbiotic call\n");
+ PrintDebug("Making Sym call\n");
v3_print_guest_state(info);
+ if ((state->sym_page->sym_call_enabled == 0) ||
+ (state->sym_call_active == 1)) {
+ return -1;
+ }
+
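+    // NULL argument slots are redirected to scratch storage so every slot has a valid in/out location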
+ if (!arg0) arg0 = &trash_args[0];
+ if (!arg1) arg1 = &trash_args[1];
+ if (!arg2) arg2 = &trash_args[2];
+ if (!arg3) arg3 = &trash_args[3];
+ if (!arg4) arg4 = &trash_args[4];
// Save the old context
memcpy(&(old_ctx->vm_regs), &(info->vm_regs), sizeof(struct v3_gprs));
old_ctx->fs_base = info->segments.fs.base;
old_ctx->rip = info->rip;
old_ctx->cpl = info->cpl;
+ old_ctx->flags = info->ctrl_regs.rflags;
-
-
// Setup the sym call context
info->rip = state->sym_call_rip;
- info->vm_regs.rsp = state->sym_call_rsp;
+    info->vm_regs.rsp = state->sym_call_rsp; // old context rsp is saved in vm_regs
v3_translate_segment(info, state->sym_call_cs, &sym_cs);
memcpy(&(info->segments.cs), &sym_cs, sizeof(struct v3_segment));
info->segments.fs.base = state->sym_call_fs;
info->cpl = 0;
- info->vm_regs.rax = state->args[0];
- info->vm_regs.rbx = state->args[1];
- info->vm_regs.rcx = state->args[2];
- info->vm_regs.rdx = state->args[3];
- info->vm_regs.rsi = state->args[4];
- info->vm_regs.rdi = state->args[5];
+ info->vm_regs.rax = call_num;
+ info->vm_regs.rbx = *arg0;
+ info->vm_regs.rcx = *arg1;
+ info->vm_regs.rdx = *arg2;
+ info->vm_regs.rsi = *arg3;
+ info->vm_regs.rdi = *arg4;
// Mark sym call as active
- state->call_pending = 0;
- state->call_active = 1;
-
+ state->sym_call_active = 1;
+ state->sym_call_returned = 0;
PrintDebug("Sym state\n");
v3_print_guest_state(info);
- return 1;
+ // Do the sym call entry
+ if (execute_symcall(info) == -1) {
+ PrintError("SYMCALL error\n");
+ return -1;
+ }
+
+ // clear sym flags
+ state->sym_call_active = 0;
+
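+    // The guest hands back results in the same GPRs used to pass the arguments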
+ *arg0 = info->vm_regs.rbx;
+ *arg1 = info->vm_regs.rcx;
+ *arg2 = info->vm_regs.rdx;
+ *arg3 = info->vm_regs.rsi;
+ *arg4 = info->vm_regs.rdi;
+
+ // restore guest state
+ memcpy(&(info->vm_regs), &(old_ctx->vm_regs), sizeof(struct v3_gprs));
+ memcpy(&(info->segments.cs), &(old_ctx->cs), sizeof(struct v3_segment));
+ memcpy(&(info->segments.ss), &(old_ctx->ss), sizeof(struct v3_segment));
+ info->segments.gs.base = old_ctx->gs_base;
+ info->segments.fs.base = old_ctx->fs_base;
+ info->rip = old_ctx->rip;
+ info->cpl = old_ctx->cpl;
+ info->ctrl_regs.rflags = old_ctx->flags;
+
+
+
+ PrintDebug("restoring guest state\n");
+ v3_print_guest_state(info);
+
+ return 0;
}
+
+
}
+int v3_sym_get_addr_info(struct guest_info * info, addr_t vaddr,
+ int (*cb)(struct guest_info * info)) {
+ return 0;
+
+}
+
+
+
addr_t v3_get_swapped_pg_addr(struct guest_info * info, pte32_t * shadow_pte, pte32_t * guest_pte) {
struct list_head * shdw_ptr_list = NULL;
struct v3_sym_swap_state * swap_state = &(info->swap_state);