X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fsvm.c;h=d23c56d8610115f7ef1ec256be06f1f30eb779da;hb=e5649c2775438bbb04baf9a8bd53fa70363c4235;hp=bb5b5250e767901054ae338f665e120b336c80ac;hpb=94f67717b6461df514dc225ed84f03b44c44061b;p=palacios-OLD.git

diff --git a/palacios/src/palacios/svm.c b/palacios/src/palacios/svm.c
index bb5b525..d23c56d 100644
--- a/palacios/src/palacios/svm.c
+++ b/palacios/src/palacios/svm.c
@@ -1,3 +1,4 @@
+
 /* 
  * This file is part of the Palacios Virtual Machine Monitor developed
  * by the V3VEE Project with funding from the United States National
@@ -462,7 +463,9 @@ int v3_svm_enter(struct guest_info * info) {
     // disable global interrupts for vm state transition
     v3_clgi();
 
-    // Update timer devices prior to entering VM.
+    // Update timer devices after being in the VM, with interrupts
+    // disabled, but before doing IRQ updates, so that any interrupts they
+    // raise get seen immediately.
     v3_update_timers(info);
 
     // Synchronize the guest state to the VMCB
@@ -547,13 +550,11 @@ int v3_svm_enter(struct guest_info * info) {
     info->mem_mode = v3_get_vm_mem_mode(info);
     /* ** */
 
-    // save exit info here
     exit_code = guest_ctrl->exit_code;
     exit_info1 = guest_ctrl->exit_info1;
     exit_info2 = guest_ctrl->exit_info2;
 
-
 
 #ifdef V3_CONFIG_SYMCALL
     if (info->sym_core_state.symcall_state.sym_call_active == 0) {
 	update_irq_exit_state(info);
@@ -562,20 +563,20 @@ int v3_svm_enter(struct guest_info * info) {
     update_irq_exit_state(info);
 #endif
 
-
    // reenable global interrupts after vm exit
    v3_stgi();
 
-
    // Conditionally yield the CPU if the timeslice has expired
    v3_yield_cond(info);
 
-
-
-    if (v3_handle_svm_exit(info, exit_code, exit_info1, exit_info2) != 0) {
-	PrintError("Error in SVM exit handler\n");
-	PrintError("  last exit was %d\n", v3_last_exit);
-	return -1;
+    {
+	int ret = v3_handle_svm_exit(info, exit_code, exit_info1, exit_info2);
+
+	if (ret != 0) {
+	    PrintError("Error in SVM exit handler (ret=%d)\n", ret);
+	    PrintError("  last Exit was %d (exit code=0x%llx)\n", v3_last_exit, (uint64_t) exit_code);
+	    return -1;
+	}
     }
 
 
@@ -587,29 +588,31 @@ int v3_start_svm_guest(struct guest_info * info) {
     // vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
     // vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
 
-    PrintDebug("Starting SVM core %u\n", info->cpu_id);
+    PrintDebug("Starting SVM core %u (on logical core %u)\n", info->vcpu_id, info->pcpu_id);
 
-    if (info->cpu_id == 0) {
+    if (info->vcpu_id == 0) {
 	info->core_run_state = CORE_RUNNING;
 	info->vm_info->run_state = VM_RUNNING;
     } else {
-	PrintDebug("SVM core %u: Waiting for core initialization\n", info->cpu_id);
+	PrintDebug("SVM core %u (on %u): Waiting for core initialization\n", info->vcpu_id, info->pcpu_id);
 
 	while (info->core_run_state == CORE_STOPPED) {
 	    v3_yield(info);
-	    //PrintDebug("SVM core %u: still waiting for INIT\n",info->cpu_id);
+	    //PrintDebug("SVM core %u: still waiting for INIT\n", info->vcpu_id);
 	}
 
-	PrintDebug("SVM core %u initialized\n", info->cpu_id);
+	PrintDebug("SVM core %u(on %u) initialized\n", info->vcpu_id, info->pcpu_id);
     }
 
-    PrintDebug("SVM core %u: I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n",
-	       info->cpu_id, info->segments.cs.selector, (void *)(info->segments.cs.base),
+    PrintDebug("SVM core %u(on %u): I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n",
+	       info->vcpu_id, info->pcpu_id,
+	       info->segments.cs.selector, (void *)(info->segments.cs.base),
 	       info->segments.cs.limit, (void *)(info->rip));
 
 
-    PrintDebug("SVM core %u: Launching SVM VM (vmcb=%p)\n", info->cpu_id, (void *)info->vmm_data);
+    PrintDebug("SVM core %u: Launching SVM VM (vmcb=%p) (on cpu %u)\n",
+	       info->vcpu_id, (void *)info->vmm_data, info->pcpu_id);
 
     //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
 
     v3_start_time(info);
@@ -628,17 +631,17 @@ int v3_start_svm_guest(struct guest_info * info) {
 
	    info->vm_info->run_state = VM_ERROR;
 
-	    V3_Print("SVM core %u: SVM ERROR!!\n", info->cpu_id);
+	    V3_Print("SVM core %u: SVM ERROR!!\n", info->vcpu_id);
 
	    v3_print_guest_state(info);
 
-	    V3_Print("SVM core %u: SVM Exit Code: %p\n", info->cpu_id, (void *)(addr_t)guest_ctrl->exit_code);
+	    V3_Print("SVM core %u: SVM Exit Code: %p\n", info->vcpu_id, (void *)(addr_t)guest_ctrl->exit_code);
 
-	    V3_Print("SVM core %u: exit_info1 low = 0x%.8x\n", info->cpu_id, *(uint_t*)&(guest_ctrl->exit_info1));
-	    V3_Print("SVM core %u: exit_info1 high = 0x%.8x\n", info->cpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
+	    V3_Print("SVM core %u: exit_info1 low = 0x%.8x\n", info->vcpu_id, *(uint_t*)&(guest_ctrl->exit_info1));
+	    V3_Print("SVM core %u: exit_info1 high = 0x%.8x\n", info->vcpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
 
-	    V3_Print("SVM core %u: exit_info2 low = 0x%.8x\n", info->cpu_id, *(uint_t*)&(guest_ctrl->exit_info2));
-	    V3_Print("SVM core %u: exit_info2 high = 0x%.8x\n", info->cpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
+	    V3_Print("SVM core %u: exit_info2 low = 0x%.8x\n", info->vcpu_id, *(uint_t*)&(guest_ctrl->exit_info2));
+	    V3_Print("SVM core %u: exit_info2 high = 0x%.8x\n", info->vcpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
 
 
	    linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
@@ -648,9 +651,9 @@ int v3_start_svm_guest(struct guest_info * info) {
	    v3_gva_to_hva(info, linear_addr, &host_addr);
	}
 
-	V3_Print("SVM core %u: Host Address of rip = 0x%p\n", info->cpu_id, (void *)host_addr);
+	V3_Print("SVM core %u: Host Address of rip = 0x%p\n", info->vcpu_id, (void *)host_addr);
 
-	V3_Print("SVM core %u: Instr (15 bytes) at %p:\n", info->cpu_id, (void *)host_addr);
+	V3_Print("SVM core %u: Instr (15 bytes) at %p:\n", info->vcpu_id, (void *)host_addr);
	v3_dump_mem((uint8_t *)host_addr, 15);
 
	v3_print_stack(info);
@@ -665,9 +668,11 @@ int v3_start_svm_guest(struct guest_info * info) {
	}
 
+
     /*
-      if ((info->num_exits % 5000) == 0) {
+      if ((info->num_exits % 50000) == 0) {
       V3_Print("SVM Exit number %d\n", (uint32_t)info->num_exits);
+      v3_print_guest_state(info);
       }
     */
 
 
@@ -681,6 +686,31 @@ int v3_start_svm_guest(struct guest_info * info) {
 
 
 
+int v3_reset_svm_vm_core(struct guest_info * core, addr_t rip) {
+    // init vmcb_bios
+
+    // Write the RIP, CS, and descriptor
+    // assume the rest is already good to go
+    //
+    // vector VV -> rip at 0
+    // CS = VV00
+    // This means we start executing at linear address VV000
+    //
+    // So the selector needs to be VV00
+    // and the base needs to be VV000
+    //
+    core->rip = 0;
+    core->segments.cs.selector = rip << 8;
+    core->segments.cs.limit = 0xffff;
+    core->segments.cs.base = rip << 12;
+
+    return 0;
+}
+
+
+
+
+
 /* Checks machine SVM capability */
 /* Implemented from: AMD Arch Manual 3, sect 15.4 */
 
@@ -730,11 +760,11 @@ int v3_is_svm_capable() {
 
 static int has_svm_nested_paging() {
     uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
-
+    
     v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
-
+    
     //PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);
-
+    
     if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
V3_Print("SVM Nested Paging not supported\n"); return 0; @@ -742,7 +772,8 @@ static int has_svm_nested_paging() { V3_Print("SVM Nested Paging supported\n"); return 1; } -} + } + void v3_init_svm_cpu(int cpu_id) {