X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fsvm.c;h=35eef9cbf952f9104d378dad5163b20176ee42b7;hb=6a686aa0cf80126e77d4f0f3a0eb882a7d302ae0;hp=7a1c73755324c696a242e034af1c9018e10eddef;hpb=cd012ba87f57c1c694038f1a17f249f90f9a2be8;p=palacios.git diff --git a/palacios/src/palacios/svm.c b/palacios/src/palacios/svm.c index 7a1c737..35eef9c 100644 --- a/palacios/src/palacios/svm.c +++ b/palacios/src/palacios/svm.c @@ -39,14 +39,15 @@ #include #include -#include #include #include +uint32_t v3_last_exit; + // This is a global pointer to the host's VMCB -static addr_t host_vmcbs[CONFIG_MAX_CPUS] = {0}; +static addr_t host_vmcbs[CONFIG_MAX_CPUS] = { [0 ... CONFIG_MAX_CPUS - 1] = 0}; @@ -66,23 +67,15 @@ static vmcb_t * Allocate_VMCB() { -static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) { +static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) { vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb); vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb); uint_t i; // - guest_state->rsp = 0x00; - guest_state->rip = 0xfff0; - - - guest_state->cpl = 0; - guest_state->efer |= EFER_MSR_svm_enable; - - guest_state->rflags = 0x00000002; // The reserved bit is always 1 ctrl_area->svm_instrs.VMRUN = 1; ctrl_area->svm_instrs.VMMCALL = 1; ctrl_area->svm_instrs.VMLOAD = 1; @@ -125,61 +118,82 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) { ctrl_area->instrs.PAUSE = 1; ctrl_area->instrs.shutdown_evts = 1; - vm_info->vm_regs.rdx = 0x00000f00; + /* DEBUG FOR RETURN CODE */ + ctrl_area->exit_code = 1; - guest_state->cr0 = 0x60010010; // Set the WP flag so the memory hooks work in real-mode + /* Setup Guest Machine state */ - guest_state->cs.selector = 0xf000; - guest_state->cs.limit = 0xffff; - guest_state->cs.base = 0x0000000f0000LL; - guest_state->cs.attrib.raw = 0xf3; + core->vm_regs.rsp = 0x00; + core->rip = 0xfff0; + core->vm_regs.rdx = 0x00000f00; + + + core->cpl = 0; + + core->ctrl_regs.rflags = 0x00000002; // The reserved bit is always 1 + core->ctrl_regs.cr0 = 0x60010010; // Set the WP flag so the memory hooks work in real-mode + core->ctrl_regs.efer |= EFER_MSR_svm_enable; - /* DEBUG FOR RETURN CODE */ - ctrl_area->exit_code = 1; - struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), - &(guest_state->es), &(guest_state->fs), - &(guest_state->gs), NULL}; + + + core->segments.cs.selector = 0xf000; + core->segments.cs.limit = 0xffff; + core->segments.cs.base = 0x0000000f0000LL; + + // (raw attributes = 0xf3) + core->segments.cs.type = 0x3; + core->segments.cs.system = 0x1; + core->segments.cs.dpl = 0x3; + core->segments.cs.present = 1; + + + + struct v3_segment * segregs [] = {&(core->segments.ss), &(core->segments.ds), + &(core->segments.es), &(core->segments.fs), + &(core->segments.gs), NULL}; for ( i = 0; segregs[i] != NULL; i++) { - struct vmcb_selector * seg = segregs[i]; + struct v3_segment * seg = segregs[i]; seg->selector = 0x0000; // seg->base = seg->selector << 4; seg->base = 0x00000000; - seg->attrib.raw = 0xf3; seg->limit = ~0u; + + // (raw attributes = 0xf3) + seg->type = 0x3; + seg->system = 0x1; + seg->dpl = 0x3; + seg->present = 1; } - guest_state->gdtr.limit = 0x0000ffff; - guest_state->gdtr.base = 0x0000000000000000LL; - guest_state->idtr.limit = 0x0000ffff; - guest_state->idtr.base = 0x0000000000000000LL; + core->segments.gdtr.limit = 0x0000ffff; + core->segments.gdtr.base = 0x0000000000000000LL; + core->segments.idtr.limit = 0x0000ffff; + 
core->segments.idtr.base = 0x0000000000000000LL; - guest_state->ldtr.selector = 0x0000; - guest_state->ldtr.limit = 0x0000ffff; - guest_state->ldtr.base = 0x0000000000000000LL; - guest_state->tr.selector = 0x0000; - guest_state->tr.limit = 0x0000ffff; - guest_state->tr.base = 0x0000000000000000LL; + core->segments.ldtr.selector = 0x0000; + core->segments.ldtr.limit = 0x0000ffff; + core->segments.ldtr.base = 0x0000000000000000LL; + core->segments.tr.selector = 0x0000; + core->segments.tr.limit = 0x0000ffff; + core->segments.tr.base = 0x0000000000000000LL; - guest_state->dr6 = 0x00000000ffff0ff0LL; - guest_state->dr7 = 0x0000000000000400LL; + core->dbg_regs.dr6 = 0x00000000ffff0ff0LL; + core->dbg_regs.dr7 = 0x0000000000000400LL; - v3_init_svm_io_map(vm_info); - ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(vm_info->io_map.arch_data); + ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(core->vm_info->io_map.arch_data); ctrl_area->instrs.IOIO_PROT = 1; - - - v3_init_svm_msr_map(vm_info); - ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(vm_info->msr_map.arch_data); - ctrl_area->instrs.MSR_PROT = 1; + + ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(core->vm_info->msr_map.arch_data); + ctrl_area->instrs.MSR_PROT = 1; PrintDebug("Exiting on interrupts\n"); @@ -187,7 +201,7 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) { ctrl_area->instrs.INTR = 1; - if (vm_info->shdw_pg_mode == SHADOW_PAGING) { + if (core->shdw_pg_mode == SHADOW_PAGING) { PrintDebug("Creating initial shadow page table\n"); /* JRL: This is a performance killer, and a simplistic solution */ @@ -196,16 +210,17 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) { ctrl_area->guest_ASID = 1; - if (v3_init_passthrough_pts(vm_info) == -1) { + if (v3_init_passthrough_pts(core) == -1) { PrintError("Could not initialize passthrough page tables\n"); return ; } - vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL; + core->shdw_pg_state.guest_cr0 = 0x0000000000000010LL; PrintDebug("Created\n"); - guest_state->cr3 = vm_info->direct_map_pt; + core->ctrl_regs.cr0 |= 0x80000000; + core->ctrl_regs.cr3 = core->direct_map_pt; ctrl_area->cr_reads.cr0 = 1; ctrl_area->cr_writes.cr0 = 1; @@ -214,10 +229,10 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) { ctrl_area->cr_reads.cr3 = 1; ctrl_area->cr_writes.cr3 = 1; - v3_hook_msr(vm_info, EFER_MSR, + v3_hook_msr(core->vm_info, EFER_MSR, &v3_handle_efer_read, &v3_handle_efer_write, - vm_info); + core); ctrl_area->instrs.INVLPG = 1; @@ -225,9 +240,9 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) { guest_state->g_pat = 0x7040600070406ULL; - guest_state->cr0 |= 0x80000000; - } else if (vm_info->shdw_pg_mode == NESTED_PAGING) { + + } else if (core->shdw_pg_mode == NESTED_PAGING) { // Flush the TLB on entries/exits ctrl_area->TLB_CONTROL = 1; ctrl_area->guest_ASID = 1; @@ -238,129 +253,392 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) { PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE)); // Set the Nested Page Table pointer - if (v3_init_passthrough_pts(vm_info) == -1) { + if (v3_init_passthrough_pts(core) == -1) { PrintError("Could not initialize Nested page tables\n"); return ; } - ctrl_area->N_CR3 = vm_info->direct_map_pt; + ctrl_area->N_CR3 = core->direct_map_pt; guest_state->g_pat = 0x7040600070406ULL; } } -static int init_svm_guest(struct guest_info * info, struct v3_vm_config * config_ptr) { - - - v3_pre_config_guest(info, config_ptr); +int v3_init_svm_vmcb(struct guest_info * 
info, v3_vm_class_t vm_class) { PrintDebug("Allocating VMCB\n"); info->vmm_data = (void*)Allocate_VMCB(); + + if (vm_class == V3_PC_VM) { + PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data); + Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info); + } else { + PrintError("Invalid VM class\n"); + return -1; + } + + return 0; +} + + - PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data); - Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info); +static int update_irq_exit_state(struct guest_info * info) { + vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data)); - v3_post_config_guest(info, config_ptr); + // Fix for QEMU bug using EVENTINJ as an internal cache + guest_ctrl->EVENTINJ.valid = 0; + + if ((info->intr_core_state.irq_pending == 1) && (guest_ctrl->guest_ctrl.V_IRQ == 0)) { + +#ifdef CONFIG_DEBUG_INTERRUPTS + PrintDebug("INTAK cycle completed for irq %d\n", info->intr_core_state.irq_vector); +#endif + + info->intr_core_state.irq_started = 1; + info->intr_core_state.irq_pending = 0; + + v3_injecting_intr(info, info->intr_core_state.irq_vector, V3_EXTERNAL_IRQ); + } + + if ((info->intr_core_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 0)) { +#ifdef CONFIG_DEBUG_INTERRUPTS + PrintDebug("Interrupt %d taken by guest\n", info->intr_core_state.irq_vector); +#endif + + // Interrupt was taken fully vectored + info->intr_core_state.irq_started = 0; + + } else if ((info->intr_core_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 1)) { +#ifdef CONFIG_DEBUG_INTERRUPTS + PrintDebug("EXIT INT INFO is set (vec=%d)\n", guest_ctrl->exit_int_info.vector); +#endif + } return 0; } -static int start_svm_guest(struct guest_info *info) { - // vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data)); - // vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data)); - uint_t num_exits = 0; +static int update_irq_entry_state(struct guest_info * info) { + vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data)); - PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data); - //PrintDebugVMCB((vmcb_t*)(info->vmm_data)); + if (info->intr_core_state.irq_pending == 0) { + guest_ctrl->guest_ctrl.V_IRQ = 0; + guest_ctrl->guest_ctrl.V_INTR_VECTOR = 0; + } - info->run_state = VM_RUNNING; - rdtscll(info->yield_start_cycle); + if (v3_excp_pending(info)) { + uint_t excp = v3_get_excp_number(info); + + guest_ctrl->EVENTINJ.type = SVM_INJECTION_EXCEPTION; + + if (info->excp_state.excp_error_code_valid) { + guest_ctrl->EVENTINJ.error_code = info->excp_state.excp_error_code; + guest_ctrl->EVENTINJ.ev = 1; +#ifdef CONFIG_DEBUG_INTERRUPTS + PrintDebug("Injecting exception %d with error code %x\n", excp, guest_ctrl->EVENTINJ.error_code); +#endif + } + + guest_ctrl->EVENTINJ.vector = excp; + + guest_ctrl->EVENTINJ.valid = 1; + +#ifdef CONFIG_DEBUG_INTERRUPTS + PrintDebug("<%d> Injecting Exception %d (CR2=%p) (EIP=%p)\n", + (int)info->num_exits, + guest_ctrl->EVENTINJ.vector, + (void *)(addr_t)info->ctrl_regs.cr2, + (void *)(addr_t)info->rip); +#endif + v3_injecting_excp(info, excp); + } else if (info->intr_core_state.irq_started == 1) { +#ifdef CONFIG_DEBUG_INTERRUPTS + PrintDebug("IRQ pending from previous injection\n"); +#endif + guest_ctrl->guest_ctrl.V_IRQ = 1; + guest_ctrl->guest_ctrl.V_INTR_VECTOR = info->intr_core_state.irq_vector; + guest_ctrl->guest_ctrl.V_IGN_TPR = 1; + guest_ctrl->guest_ctrl.V_INTR_PRIO = 0xf; - while (1) { - ullong_t tmp_tsc; + } else { + switch 
(v3_intr_pending(info)) { + case V3_EXTERNAL_IRQ: { + uint32_t irq = v3_get_intr(info); + + guest_ctrl->guest_ctrl.V_IRQ = 1; + guest_ctrl->guest_ctrl.V_INTR_VECTOR = irq; + guest_ctrl->guest_ctrl.V_IGN_TPR = 1; + guest_ctrl->guest_ctrl.V_INTR_PRIO = 0xf; + +#ifdef CONFIG_DEBUG_INTERRUPTS + PrintDebug("Injecting Interrupt %d (EIP=%p)\n", + guest_ctrl->guest_ctrl.V_INTR_VECTOR, + (void *)(addr_t)info->rip); +#endif + + info->intr_core_state.irq_pending = 1; + info->intr_core_state.irq_vector = irq; + + break; + } + case V3_NMI: + guest_ctrl->EVENTINJ.type = SVM_INJECTION_NMI; + break; + case V3_SOFTWARE_INTR: + guest_ctrl->EVENTINJ.type = SVM_INJECTION_SOFT_INTR; + break; + case V3_VIRTUAL_IRQ: + guest_ctrl->EVENTINJ.type = SVM_INJECTION_IRQ; + break; + + case V3_INVALID_INTR: + default: + break; + } - // Conditionally yield the CPU if the timeslice has expired - v3_yield_cond(info); + } - /* - PrintDebug("SVM Entry to CS=%p rip=%p...\n", - (void *)(addr_t)info->segments.cs.base, - (void *)(addr_t)info->rip); - */ + return 0; +} - // disable global interrupts for vm state transition - v3_clgi(); +/* + * CAUTION and DANGER!!! + * + * The VMCB CANNOT(!!) be accessed outside of the clgi/stgi calls inside this function + * When exectuing a symbiotic call, the VMCB WILL be overwritten, so any dependencies + * on its contents will cause things to break. The contents at the time of the exit WILL + * change before the exit handler is executed. + */ +int v3_svm_enter(struct guest_info * info) { + vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data)); + vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data)); + ullong_t tmp_tsc; + addr_t exit_code = 0, exit_info1 = 0, exit_info2 = 0; + + // Conditionally yield the CPU if the timeslice has expired + v3_yield_cond(info); + + // disable global interrupts for vm state transition + v3_clgi(); + + // Synchronize the guest state to the VMCB + guest_state->cr0 = info->ctrl_regs.cr0; + guest_state->cr2 = info->ctrl_regs.cr2; + guest_state->cr3 = info->ctrl_regs.cr3; + guest_state->cr4 = info->ctrl_regs.cr4; + guest_state->dr6 = info->dbg_regs.dr6; + guest_state->dr7 = info->dbg_regs.dr7; + guest_ctrl->guest_ctrl.V_TPR = info->ctrl_regs.cr8 & 0xff; + guest_state->rflags = info->ctrl_regs.rflags; + guest_state->efer = info->ctrl_regs.efer; + + guest_state->cpl = info->cpl; + v3_set_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments)); - rdtscll(info->time_state.cached_host_tsc); - // guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc; - - v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[info->cpu_id]); - - rdtscll(tmp_tsc); + guest_state->rax = info->vm_regs.rax; + guest_state->rip = info->rip; + guest_state->rsp = info->vm_regs.rsp; + +#ifdef CONFIG_SYMCALL + if (info->sym_core_state.symcall_state.sym_call_active == 0) { + update_irq_entry_state(info); + } +#else + update_irq_entry_state(info); +#endif - - //PrintDebug("SVM Returned\n"); - // reenable global interrupts after vm exit - v3_stgi(); + /* ** */ + + /* + PrintDebug("SVM Entry to CS=%p rip=%p...\n", + (void *)(addr_t)info->segments.cs.base, + (void *)(addr_t)info->rip); + */ +#ifdef CONFIG_SYMCALL + if (info->sym_core_state.symcall_state.sym_call_active == 1) { + if (guest_ctrl->guest_ctrl.V_IRQ == 1) { + V3_Print("!!! 
Injecting Interrupt during Sym call !!!\n"); + } + } +#endif - // Conditionally yield the CPU if the timeslice has expired - v3_yield_cond(info); + rdtscll(tmp_tsc); + v3_update_time(info, (tmp_tsc - info->time_state.cached_host_tsc)); + rdtscll(info->time_state.cached_host_tsc); + // guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc; - v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc); - num_exits++; + //V3_Print("Calling v3_svm_launch\n"); - if ((num_exits % 5000) == 0) { - PrintDebug("SVM Exit number %d\n", num_exits); + v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[info->cpu_id]); + + //V3_Print("SVM Returned: Exit Code: %x, guest_rip=%lx\n", (uint32_t)(guest_ctrl->exit_code), (unsigned long)guest_state->rip); + + + v3_last_exit = (uint32_t)(guest_ctrl->exit_code); + + //rdtscll(tmp_tsc); + // v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc); + + //PrintDebug("SVM Returned\n"); + + info->num_exits++; + + + + + // Save Guest state from VMCB + info->rip = guest_state->rip; + info->vm_regs.rsp = guest_state->rsp; + info->vm_regs.rax = guest_state->rax; + + info->cpl = guest_state->cpl; + + info->ctrl_regs.cr0 = guest_state->cr0; + info->ctrl_regs.cr2 = guest_state->cr2; + info->ctrl_regs.cr3 = guest_state->cr3; + info->ctrl_regs.cr4 = guest_state->cr4; + info->dbg_regs.dr6 = guest_state->dr6; + info->dbg_regs.dr7 = guest_state->dr7; + info->ctrl_regs.cr8 = guest_ctrl->guest_ctrl.V_TPR; + info->ctrl_regs.rflags = guest_state->rflags; + info->ctrl_regs.efer = guest_state->efer; + + v3_get_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments)); + info->cpu_mode = v3_get_vm_cpu_mode(info); + info->mem_mode = v3_get_vm_mem_mode(info); + /* ** */ + + + // save exit info here + exit_code = guest_ctrl->exit_code; + exit_info1 = guest_ctrl->exit_info1; + exit_info2 = guest_ctrl->exit_info2; + + +#ifdef CONFIG_SYMCALL + if (info->sym_core_state.symcall_state.sym_call_active == 0) { + update_irq_exit_state(info); + } +#else + update_irq_exit_state(info); +#endif + + + // reenable global interrupts after vm exit + v3_stgi(); + + + // Conditionally yield the CPU if the timeslice has expired + v3_yield_cond(info); + + + if (v3_handle_svm_exit(info, exit_code, exit_info1, exit_info2) != 0) { + PrintError("Error in SVM exit handler\n"); + return -1; + } + + + return 0; +} + + +int v3_start_svm_guest(struct guest_info *info) { + // vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data)); + // vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data)); + + + PrintDebug("Starting SVM core %u\n",info->cpu_id); + if (info->cpu_mode==INIT) { + PrintDebug("SVM core %u: I am an AP in INIT mode, waiting for that to change\n",info->cpu_id); + while (info->cpu_mode==INIT) { + v3_yield(info); + //PrintDebug("SVM core %u: still waiting for INIT\n",info->cpu_id); + } + PrintDebug("SVM core %u: I am out of INIT\n",info->cpu_id); + if (info->cpu_mode==SIPI) { + PrintDebug("SVM core %u: I am waiting on a SIPI to set my starting address\n",info->cpu_id); + while (info->cpu_mode==SIPI) { + v3_yield(info); + //PrintDebug("SVM core %u: still waiting for SIPI\n",info->cpu_id); + } } + PrintDebug("SVM core %u: I have my SIPI\n", info->cpu_id); + } + + if (info->cpu_mode!=REAL) { + PrintError("SVM core %u: I am not in REAL mode at launch! 
Huh?!\n", info->cpu_id); + return -1; + } + + PrintDebug("SVM core %u: I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n", + info->cpu_id, info->segments.cs.selector, (void*)(info->segments.cs.base), + info->segments.cs.limit,(void*)(info->rip)); + - if (v3_handle_svm_exit(info) != 0) { + + PrintDebug("SVM core %u: Launching SVM VM (vmcb=%p)\n", info->cpu_id, (void *)info->vmm_data); + //PrintDebugVMCB((vmcb_t*)(info->vmm_data)); + + info->vm_info->run_state = VM_RUNNING; + rdtscll(info->yield_start_cycle); + + + while (1) { + if (v3_svm_enter(info) == -1) { vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data)); addr_t host_addr; addr_t linear_addr = 0; - - info->run_state = VM_ERROR; - PrintDebug("SVM ERROR!!\n"); - + info->vm_info->run_state = VM_ERROR; + + V3_Print("SVM core %u: SVM ERROR!!\n", info->cpu_id); + v3_print_guest_state(info); - - PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code); - - PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1)); - PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4)); - - PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2)); - PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4)); - + + V3_Print("SVM core %u: SVM Exit Code: %p\n", info->cpu_id, (void *)(addr_t)guest_ctrl->exit_code); + + V3_Print("SVM core %u: exit_info1 low = 0x%.8x\n", info->cpu_id, *(uint_t*)&(guest_ctrl->exit_info1)); + V3_Print("SVM core %u: exit_info1 high = 0x%.8x\n", info->cpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4)); + + V3_Print("SVM core %u: exit_info2 low = 0x%.8x\n", info->cpu_id, *(uint_t*)&(guest_ctrl->exit_info2)); + V3_Print("SVM core %u: exit_info2 high = 0x%.8x\n", info->cpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4)); + linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs)); - + if (info->mem_mode == PHYSICAL_MEM) { - guest_pa_to_host_va(info, linear_addr, &host_addr); + v3_gpa_to_hva(info, linear_addr, &host_addr); } else if (info->mem_mode == VIRTUAL_MEM) { - guest_va_to_host_va(info, linear_addr, &host_addr); + v3_gva_to_hva(info, linear_addr, &host_addr); } - - PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr); - - PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr); + + V3_Print("SVM core %u: Host Address of rip = 0x%p\n", info->cpu_id, (void *)host_addr); + + V3_Print("SVM core %u: Instr (15 bytes) at %p:\n", info->cpu_id, (void *)host_addr); v3_dump_mem((uint8_t *)host_addr, 15); - - + v3_print_stack(info); - break; } + +/* + if ((info->num_exits % 5000) == 0) { + V3_Print("SVM Exit number %d\n", (uint32_t)info->num_exits); + } +*/ + } + + // Need to take down the other cores on error... 
+ return 0; } @@ -379,7 +657,7 @@ int v3_is_svm_capable() { PrintDebug("CPUID_EXT_FEATURE_IDS_ecx=0x%x\n", ecx); if ((ecx & CPUID_EXT_FEATURE_IDS_ecx_svm_avail) == 0) { - PrintDebug("SVM Not Available\n"); + V3_Print("SVM Not Available\n"); return 0; } else { v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low); @@ -387,21 +665,21 @@ int v3_is_svm_capable() { PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low); if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) { - PrintDebug("SVM is available but is disabled.\n"); + V3_Print("SVM is available but is disabled.\n"); v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx); PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx); if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) { - PrintDebug("SVM BIOS Disabled, not unlockable\n"); + V3_Print("SVM BIOS Disabled, not unlockable\n"); } else { - PrintDebug("SVM is locked with a key\n"); + V3_Print("SVM is locked with a key\n"); } return 0; } else { - PrintDebug("SVM is available and enabled.\n"); + V3_Print("SVM is available and enabled.\n"); v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx); PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_eax=0x%x\n", eax); @@ -409,13 +687,6 @@ int v3_is_svm_capable() { PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ecx=0x%x\n", ecx); PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx); - - if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) { - PrintDebug("SVM Nested Paging not supported\n"); - } else { - PrintDebug("SVM Nested Paging supported\n"); - } - return 1; } } @@ -429,10 +700,10 @@ static int has_svm_nested_paging() { //PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx); if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) { - PrintDebug("SVM Nested Paging not supported\n"); + V3_Print("SVM Nested Paging not supported\n"); return 0; } else { - PrintDebug("SVM Nested Paging supported\n"); + V3_Print("SVM Nested Paging supported\n"); return 1; } } @@ -447,7 +718,7 @@ void v3_init_svm_cpu(int cpu_id) { msr.e_reg.low |= EFER_MSR_svm_enable; v3_set_msr(EFER_MSR, 0, msr.e_reg.low); - PrintDebug("SVM Enabled\n"); + V3_Print("SVM Enabled\n"); // Setup the host state save area host_vmcbs[cpu_id] = (addr_t)V3_AllocPages(4); @@ -469,15 +740,6 @@ void v3_init_svm_cpu(int cpu_id) { } -void v3_init_svm_hooks(struct v3_ctrl_ops * vmm_ops) { - - // Setup the SVM specific vmm operations - vmm_ops->init_guest = &init_svm_guest; - vmm_ops->start_guest = &start_svm_guest; - vmm_ops->has_nested_paging = &has_svm_nested_paging; - - return; -}
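
For reference, the run path after this change is: v3_start_svm_guest() waits for the core to leave INIT/SIPI, then loops on v3_svm_enter(), which (between clgi and stgi) copies the software copy of guest state into the VMCB, launches the guest, copies the state and exit code back out, and only then dispatches to v3_handle_svm_exit(). The following is a minimal, self-contained sketch of that pattern only; the stub_* types and helpers are illustrative assumptions, not the Palacios API, and interrupt injection and time accounting are omitted.

/*
 * Sketch (not part of the patch) of the per-exit flow introduced by
 * v3_svm_enter(): sync guest state into the hardware control block,
 * run the guest with global interrupts disabled, sync back, dispatch.
 */
#include <stdint.h>

struct stub_vmcb { uint64_t rip, rsp, rax; uint32_t exit_code; };
struct stub_core { uint64_t rip, rsp, rax; struct stub_vmcb * vmcb; };

/* Stand-ins for clgi/stgi, the hardware launch, and the exit dispatcher. */
static void stub_clgi(void) { /* disable global interrupts */ }
static void stub_stgi(void) { /* re-enable global interrupts */ }
static void stub_launch(struct stub_vmcb * vmcb) { vmcb->exit_code = 0x60; /* e.g. external interrupt */ }
static int  stub_handle_exit(struct stub_core * c, uint32_t code) { (void)c; (void)code; return 0; }

static int stub_svm_enter(struct stub_core * core) {
    struct stub_vmcb * vmcb = core->vmcb;
    uint32_t exit_code;

    stub_clgi();                 /* no host interrupts while state is in flux */

    /* 1. Push the software copy of guest state into the VMCB. */
    vmcb->rip = core->rip;
    vmcb->rsp = core->rsp;
    vmcb->rax = core->rax;

    /* 2. Run the guest until the next #VMEXIT. */
    stub_launch(vmcb);

    /* 3. Pull guest state back and latch the exit reason *before*
     *    leaving the clgi/stgi window, since the VMCB contents may
     *    change before the exit handler runs. */
    core->rip = vmcb->rip;
    core->rsp = vmcb->rsp;
    core->rax = vmcb->rax;
    exit_code = vmcb->exit_code;

    stub_stgi();

    /* 4. Dispatch on the saved exit information. */
    return stub_handle_exit(core, exit_code);
}

int main(void) {
    struct stub_vmcb vmcb = {0};
    struct stub_core core = { .rip = 0xfff0, .vmcb = &vmcb };

    /* One iteration of the loop that v3_start_svm_guest() now wraps. */
    return stub_svm_enter(&core);
}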