Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way; an example follows below.
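For example, assuming a release branch named Release-1.3 exists in the repository (the branch name here is illustrative; substitute whichever release branch you want), the pattern is identical:

  git checkout --track -b Release-1.3 origin/Release-1.3

You can list the remote branches that actually exist with

  git branch -r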


enabled stopping a VM before the secondary cores have been initialized
diff --git a/palacios/src/palacios/svm.c b/palacios/src/palacios/svm.c
index f66d90e..f61fadf 100644
--- a/palacios/src/palacios/svm.c
+++ b/palacios/src/palacios/svm.c
 #include <palacios/svm_msr.h>
 
 #include <palacios/vmm_rbtree.h>
+#include <palacios/vmm_barrier.h>
+
+#ifdef V3_CONFIG_CHECKPOINT
+#include <palacios/vmm_checkpoint.h>
+#endif
 
 #include <palacios/vmm_direct_paging.h>
 
@@ -44,7 +49,7 @@
 #include <palacios/vmm_sprintf.h>
 
 
-#ifndef CONFIG_DEBUG_SVM
+#ifndef V3_CONFIG_DEBUG_SVM
 #undef PrintDebug
 #define PrintDebug(fmt, args...)
 #endif
@@ -53,7 +58,7 @@
 uint32_t v3_last_exit;
 
 // This is a global pointer to the host's VMCB
-static addr_t host_vmcbs[CONFIG_MAX_CPUS] = { [0 ... CONFIG_MAX_CPUS - 1] = 0};
+static addr_t host_vmcbs[V3_CONFIG_MAX_CPUS] = { [0 ... V3_CONFIG_MAX_CPUS - 1] = 0};
 
 
 
@@ -64,7 +69,15 @@ extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_
 
 
 static vmcb_t * Allocate_VMCB() {
-    vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));
+    vmcb_t * vmcb_page = NULL;
+    addr_t vmcb_pa = (addr_t)V3_AllocPages(1);
+
+    if ((void *)vmcb_pa == NULL) {
+       PrintError("Error allocating VMCB\n");
+       return NULL;
+    }
+
+    vmcb_page = (vmcb_t *)V3_VAddr((void *)vmcb_pa);
 
     memset(vmcb_page, 0, 4096);
 
@@ -72,6 +85,25 @@ static vmcb_t * Allocate_VMCB() {
 }
 
 
+static int v3_svm_handle_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data)
+{
+    int status;
+
+    // Call arch-independent handler
+    if ((status = v3_handle_efer_write(core, msr, src, priv_data)) != 0) {
+       return status;
+    }
+
+    // SVM-specific code
+    {
+       // Ensure that hardware visible EFER.SVME bit is set (SVM Enable)
+       struct efer_64 * hw_efer = (struct efer_64 *)&(core->ctrl_regs.efer);
+       hw_efer->svme = 1;
+    }
+
+    return 0;
+}
+
 
 static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
     vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
@@ -87,7 +119,6 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
     ctrl_area->svm_instrs.STGI = 1;
     ctrl_area->svm_instrs.CLGI = 1;
     ctrl_area->svm_instrs.SKINIT = 1;
-    ctrl_area->svm_instrs.RDTSCP = 1;
     ctrl_area->svm_instrs.ICEBP = 1;
     ctrl_area->svm_instrs.WBINVD = 1;
     ctrl_area->svm_instrs.MONITOR = 1;
@@ -98,9 +129,9 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
 
     ctrl_area->instrs.HLT = 1;
 
-#ifdef CONFIG_TIME_VIRTUALIZE_TSC
-    ctrl_area->instrs.rdtsc = 1;
-    ctrl_area->svm_instrs.rdtscp = 1;
+#ifdef V3_CONFIG_TIME_VIRTUALIZE_TSC
+    ctrl_area->instrs.RDTSC = 1;
+    ctrl_area->svm_instrs.RDTSCP = 1;
 #endif
 
     // guest_state->cr0 = 0x00000001;    // PE 
@@ -211,6 +242,11 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
     ctrl_area->instrs.INTR = 1;
 
 
+    v3_hook_msr(core->vm_info, EFER_MSR, 
+               &v3_handle_efer_read,
+               &v3_svm_handle_efer_write, 
+               core);
+
     if (core->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");
        
@@ -239,10 +275,7 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;
 
-       v3_hook_msr(core->vm_info, EFER_MSR, 
-                   &v3_handle_efer_read,
-                   &v3_handle_efer_write, 
-                   core);
+
 
        ctrl_area->instrs.INVLPG = 1;
 
@@ -272,17 +305,41 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
 
        guest_state->g_pat = 0x7040600070406ULL;
     }
+    
+    /* tell the guest that we don't support SVM */
+    v3_hook_msr(core->vm_info, SVM_VM_CR_MSR, 
+       &v3_handle_vm_cr_read,
+       &v3_handle_vm_cr_write, 
+       core);
+
+
+    {
+       v3_hook_msr(core->vm_info, IA32_STAR_MSR, NULL, NULL, NULL);
+       v3_hook_msr(core->vm_info, IA32_LSTAR_MSR, NULL, NULL, NULL);
+       v3_hook_msr(core->vm_info, IA32_FMASK_MSR, NULL, NULL, NULL);
+       v3_hook_msr(core->vm_info, IA32_KERN_GS_BASE_MSR, NULL, NULL, NULL);
+       v3_hook_msr(core->vm_info, IA32_CSTAR_MSR, NULL, NULL, NULL);
+
+       v3_hook_msr(core->vm_info, SYSENTER_CS_MSR, NULL, NULL, NULL);
+       v3_hook_msr(core->vm_info, SYSENTER_ESP_MSR, NULL, NULL, NULL);
+       v3_hook_msr(core->vm_info, SYSENTER_EIP_MSR, NULL, NULL, NULL);
+    }
 }
 
 
-int v3_init_svm_vmcb(struct guest_info * info, v3_vm_class_t vm_class) {
+int v3_init_svm_vmcb(struct guest_info * core, v3_vm_class_t vm_class) {
 
     PrintDebug("Allocating VMCB\n");
-    info->vmm_data = (void*)Allocate_VMCB();
+    core->vmm_data = (void *)Allocate_VMCB();
     
+    if (core->vmm_data == NULL) {
+       PrintError("Could not allocate VMCB, Exiting...\n");
+       return -1;
+    }
+
     if (vm_class == V3_PC_VM) {
-       PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);
-       Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);
+       PrintDebug("Initializing VMCB (addr=%p)\n", (void *)core->vmm_data);
+       Init_VMCB_BIOS((vmcb_t*)(core->vmm_data), core);
     } else {
        PrintError("Invalid VM class\n");
        return -1;
@@ -292,6 +349,32 @@ int v3_init_svm_vmcb(struct guest_info * info, v3_vm_class_t vm_class) {
 }
 
 
+int v3_deinit_svm_vmcb(struct guest_info * core) {
+    V3_FreePages(V3_PAddr(core->vmm_data), 1);
+    return 0;
+}
+
+
+#ifdef V3_CONFIG_CHECKPOINT
+int v3_svm_save_core(struct guest_info * core, void * ctx){
+
+    v3_chkpt_save_8(ctx, "cpl", &(core->cpl));
+    v3_chkpt_save(ctx, "vmcb_data", PAGE_SIZE, core->vmm_data);
+
+    return 0;
+}
+
+int v3_svm_load_core(struct guest_info * core, void * ctx){
+    
+    v3_chkpt_load_8(ctx, "cpl", &(core->cpl));
+
+    if (v3_chkpt_load(ctx, "vmcb_data", PAGE_SIZE, core->vmm_data) == -1) {
+       return -1;
+    }
+
+    return 0;
+}
+#endif
 
 static int update_irq_exit_state(struct guest_info * info) {
     vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
@@ -301,7 +384,7 @@ static int update_irq_exit_state(struct guest_info * info) {
 
     if ((info->intr_core_state.irq_pending == 1) && (guest_ctrl->guest_ctrl.V_IRQ == 0)) {
        
-#ifdef CONFIG_DEBUG_INTERRUPTS
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug("INTAK cycle completed for irq %d\n", info->intr_core_state.irq_vector);
 #endif
 
@@ -312,7 +395,7 @@ static int update_irq_exit_state(struct guest_info * info) {
     }
 
     if ((info->intr_core_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 0)) {
-#ifdef CONFIG_DEBUG_INTERRUPTS
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug("Interrupt %d taken by guest\n", info->intr_core_state.irq_vector);
 #endif
 
@@ -320,7 +403,7 @@ static int update_irq_exit_state(struct guest_info * info) {
        info->intr_core_state.irq_started = 0;
 
     } else if ((info->intr_core_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 1)) {
-#ifdef CONFIG_DEBUG_INTERRUPTS
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug("EXIT INT INFO is set (vec=%d)\n", guest_ctrl->exit_int_info.vector);
 #endif
     }
@@ -346,7 +429,7 @@ static int update_irq_entry_state(struct guest_info * info) {
        if (info->excp_state.excp_error_code_valid) {
            guest_ctrl->EVENTINJ.error_code = info->excp_state.excp_error_code;
            guest_ctrl->EVENTINJ.ev = 1;
-#ifdef CONFIG_DEBUG_INTERRUPTS
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
            PrintDebug("Injecting exception %d with error code %x\n", excp, guest_ctrl->EVENTINJ.error_code);
 #endif
        }
@@ -355,7 +438,7 @@ static int update_irq_entry_state(struct guest_info * info) {
        
        guest_ctrl->EVENTINJ.valid = 1;
 
-#ifdef CONFIG_DEBUG_INTERRUPTS
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug("<%d> Injecting Exception %d (CR2=%p) (EIP=%p)\n", 
                   (int)info->num_exits, 
                   guest_ctrl->EVENTINJ.vector, 
@@ -365,7 +448,7 @@ static int update_irq_entry_state(struct guest_info * info) {
 
        v3_injecting_excp(info, excp);
     } else if (info->intr_core_state.irq_started == 1) {
-#ifdef CONFIG_DEBUG_INTERRUPTS
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug("IRQ pending from previous injection\n");
 #endif
        guest_ctrl->guest_ctrl.V_IRQ = 1;
@@ -383,7 +466,7 @@ static int update_irq_entry_state(struct guest_info * info) {
                guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
                guest_ctrl->guest_ctrl.V_INTR_PRIO = 0xf;
 
-#ifdef CONFIG_DEBUG_INTERRUPTS
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
                PrintDebug("Injecting Interrupt %d (EIP=%p)\n", 
                           guest_ctrl->guest_ctrl.V_INTR_VECTOR, 
                           (void *)(addr_t)info->rip);
@@ -399,6 +482,18 @@ static int update_irq_entry_state(struct guest_info * info) {
                break;
            case V3_SOFTWARE_INTR:
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_SOFT_INTR;
+
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
+               PrintDebug("Injecting software interrupt --  type: %d, vector: %d\n", 
+                          SVM_INJECTION_SOFT_INTR, info->intr_core_state.swintr_vector);
+#endif
+               guest_ctrl->EVENTINJ.vector = info->intr_core_state.swintr_vector;
+               guest_ctrl->EVENTINJ.valid = 1;
+            
+               /* reset swintr state */
+               info->intr_core_state.swintr_posted = 0;
+               info->intr_core_state.swintr_vector = 0;
+               
                break;
            case V3_VIRTUAL_IRQ:
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_IRQ;
@@ -427,13 +522,22 @@ int v3_svm_enter(struct guest_info * info) {
     vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
     vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data)); 
     addr_t exit_code = 0, exit_info1 = 0, exit_info2 = 0;
+    sint64_t tsc_offset;
 
     // Conditionally yield the CPU if the timeslice has expired
     v3_yield_cond(info);
 
+    // Perform any additional yielding needed for time adjustment
+    v3_adjust_time(info);
+
     // disable global interrupts for vm state transition
     v3_clgi();
 
+    // Update timer devices after being in the VM, with interrupts
+    // disabled, but before doing IRQ updates, so that any interrupts they
+    // raise get seen immediately.
+    v3_update_timers(info);
+
     // Synchronize the guest state to the VMCB
     guest_state->cr0 = info->ctrl_regs.cr0;
     guest_state->cr2 = info->ctrl_regs.cr2;
@@ -453,7 +557,7 @@ int v3_svm_enter(struct guest_info * info) {
     guest_state->rip = info->rip;
     guest_state->rsp = info->vm_regs.rsp;
 
-#ifdef CONFIG_SYMCALL
+#ifdef V3_CONFIG_SYMCALL
     if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_entry_state(info);
     }
@@ -470,7 +574,7 @@ int v3_svm_enter(struct guest_info * info) {
       (void *)(addr_t)info->rip);
     */
 
-#ifdef CONFIG_SYMCALL
+#ifdef V3_CONFIG_SYMCALL
     if (info->sym_core_state.symcall_state.sym_call_active == 1) {
        if (guest_ctrl->guest_ctrl.V_IRQ == 1) {
            V3_Print("!!! Injecting Interrupt during Sym call !!!\n");
@@ -478,24 +582,22 @@ int v3_svm_enter(struct guest_info * info) {
     }
 #endif
 
-    v3_update_timers(info);
+    v3_time_enter_vm(info);
+    tsc_offset = v3_tsc_host_offset(&info->time_state);
+    guest_ctrl->TSC_OFFSET = tsc_offset;
 
-    /* If this guest is frequency-lagged behind host time, wait 
-     * for the appropriate host time before resuming the guest. */
-    v3_adjust_time(info);
-
-    guest_ctrl->TSC_OFFSET = v3_tsc_host_offset(&info->time_state);
 
     //V3_Print("Calling v3_svm_launch\n");
 
-    v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[info->cpu_id]);
+    v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[V3_Get_CPU()]);
 
     //V3_Print("SVM Returned: Exit Code: %x, guest_rip=%lx\n", (uint32_t)(guest_ctrl->exit_code), (unsigned long)guest_state->rip);
 
     v3_last_exit = (uint32_t)(guest_ctrl->exit_code);
 
-    //PrintDebug("SVM Returned\n");
-    
+    // Immediate exit from VM time bookkeeping
+    v3_time_exit_vm(info);
+
     info->num_exits++;
 
     // Save Guest state from VMCB
@@ -520,14 +622,12 @@ int v3_svm_enter(struct guest_info * info) {
     info->mem_mode = v3_get_vm_mem_mode(info);
     /* ** */
 
-
     // save exit info here
     exit_code = guest_ctrl->exit_code;
     exit_info1 = guest_ctrl->exit_info1;
     exit_info2 = guest_ctrl->exit_info2;
 
-
-#ifdef CONFIG_SYMCALL
+#ifdef V3_CONFIG_SYMCALL
     if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_exit_state(info);
     }
@@ -535,19 +635,20 @@ int v3_svm_enter(struct guest_info * info) {
     update_irq_exit_state(info);
 #endif
 
-
     // reenable global interrupts after vm exit
     v3_stgi();
-
  
     // Conditionally yield the CPU if the timeslice has expired
     v3_yield_cond(info);
 
-
-
-    if (v3_handle_svm_exit(info, exit_code, exit_info1, exit_info2) != 0) {
-       PrintError("Error in SVM exit handler\n");
-       return -1;
+    {
+       int ret = v3_handle_svm_exit(info, exit_code, exit_info1, exit_info2);
+       
+       if (ret != 0) {
+           PrintError("Error in SVM exit handler (ret=%d)\n", ret);
+           PrintError("  last Exit was %d (exit code=0x%llx)\n", v3_last_exit, (uint64_t) exit_code);
+           return -1;
+       }
     }
 
 
@@ -559,34 +660,51 @@ int v3_start_svm_guest(struct guest_info * info) {
     //    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
     //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
 
-    PrintDebug("Starting SVM core %u\n", info->cpu_id);
+    PrintDebug("Starting SVM core %u (on logical core %u)\n", info->vcpu_id, info->pcpu_id);
 
-    if (info->cpu_id == 0) {
+    if (info->vcpu_id == 0) {
        info->core_run_state = CORE_RUNNING;
        info->vm_info->run_state = VM_RUNNING;
     } else  { 
-       PrintDebug("SVM core %u: Waiting for core initialization\n", info->cpu_id);
+       PrintDebug("SVM core %u (on %u): Waiting for core initialization\n", info->vcpu_id, info->pcpu_id);
 
        while (info->core_run_state == CORE_STOPPED) {
+           
+           if (info->vm_info->run_state == VM_STOPPED) {
+               // The VM was stopped before this core was initialized. 
+               return 0;
+           }
+
            v3_yield(info);
-           //PrintDebug("SVM core %u: still waiting for INIT\n",info->cpu_id);
+           //PrintDebug("SVM core %u: still waiting for INIT\n", info->vcpu_id);
        }
 
-       PrintDebug("SVM core %u initialized\n", info->cpu_id);
+       PrintDebug("SVM core %u(on %u) initialized\n", info->vcpu_id, info->pcpu_id);
+
+       // We'll be paranoid about race conditions here
+       v3_wait_at_barrier(info);
     } 
 
-    PrintDebug("SVM core %u: I am starting at CS=0x%x (base=0x%p, limit=0x%x),  RIP=0x%p\n", 
-              info->cpu_id, info->segments.cs.selector, (void *)(info->segments.cs.base), 
+    PrintDebug("SVM core %u(on %u): I am starting at CS=0x%x (base=0x%p, limit=0x%x),  RIP=0x%p\n", 
+              info->vcpu_id, info->pcpu_id, 
+              info->segments.cs.selector, (void *)(info->segments.cs.base), 
               info->segments.cs.limit, (void *)(info->rip));
 
 
 
-    PrintDebug("SVM core %u: Launching SVM VM (vmcb=%p)\n", info->cpu_id, (void *)info->vmm_data);
+    PrintDebug("SVM core %u: Launching SVM VM (vmcb=%p) (on cpu %u)\n", 
+              info->vcpu_id, (void *)info->vmm_data, info->pcpu_id);
     //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
     
     v3_start_time(info);
 
     while (1) {
+
+       if (info->vm_info->run_state == VM_STOPPED) {
+           info->core_run_state = CORE_STOPPED;
+           break;
+       }
+       
        if (v3_svm_enter(info) == -1) {
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            addr_t host_addr;
@@ -594,17 +712,17 @@ int v3_start_svm_guest(struct guest_info * info) {
            
            info->vm_info->run_state = VM_ERROR;
            
-           V3_Print("SVM core %u: SVM ERROR!!\n", info->cpu_id); 
+           V3_Print("SVM core %u: SVM ERROR!!\n", info->vcpu_id); 
            
            v3_print_guest_state(info);
            
-           V3_Print("SVM core %u: SVM Exit Code: %p\n", info->cpu_id, (void *)(addr_t)guest_ctrl->exit_code); 
+           V3_Print("SVM core %u: SVM Exit Code: %p\n", info->vcpu_id, (void *)(addr_t)guest_ctrl->exit_code); 
            
-           V3_Print("SVM core %u: exit_info1 low = 0x%.8x\n", info->cpu_id, *(uint_t*)&(guest_ctrl->exit_info1));
-           V3_Print("SVM core %u: exit_info1 high = 0x%.8x\n", info->cpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
+           V3_Print("SVM core %u: exit_info1 low = 0x%.8x\n", info->vcpu_id, *(uint_t*)&(guest_ctrl->exit_info1));
+           V3_Print("SVM core %u: exit_info1 high = 0x%.8x\n", info->vcpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
            
-           V3_Print("SVM core %u: exit_info2 low = 0x%.8x\n", info->cpu_id, *(uint_t*)&(guest_ctrl->exit_info2));
-           V3_Print("SVM core %u: exit_info2 high = 0x%.8x\n", info->cpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
+           V3_Print("SVM core %u: exit_info2 low = 0x%.8x\n", info->vcpu_id, *(uint_t*)&(guest_ctrl->exit_info2));
+           V3_Print("SVM core %u: exit_info2 high = 0x%.8x\n", info->vcpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
            
            linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
            
@@ -614,19 +732,30 @@ int v3_start_svm_guest(struct guest_info * info) {
                v3_gva_to_hva(info, linear_addr, &host_addr);
            }
            
-           V3_Print("SVM core %u: Host Address of rip = 0x%p\n", info->cpu_id, (void *)host_addr);
+           V3_Print("SVM core %u: Host Address of rip = 0x%p\n", info->vcpu_id, (void *)host_addr);
            
-           V3_Print("SVM core %u: Instr (15 bytes) at %p:\n", info->cpu_id, (void *)host_addr);
+           V3_Print("SVM core %u: Instr (15 bytes) at %p:\n", info->vcpu_id, (void *)host_addr);
            v3_dump_mem((uint8_t *)host_addr, 15);
            
            v3_print_stack(info);
 
            break;
        }
+
+       v3_wait_at_barrier(info);
+
+
+       if (info->vm_info->run_state == VM_STOPPED) {
+           info->core_run_state = CORE_STOPPED;
+           break;
+       }
+
        
+
 /*
-       if ((info->num_exits % 5000) == 0) {
+       if ((info->num_exits % 50000) == 0) {
            V3_Print("SVM Exit number %d\n", (uint32_t)info->num_exits);
+           v3_print_guest_state(info);
        }
 */
        
@@ -640,6 +769,31 @@ int v3_start_svm_guest(struct guest_info * info) {
 
 
 
+int v3_reset_svm_vm_core(struct guest_info * core, addr_t rip) {
+    // init vmcb_bios
+
+    // Write the RIP, CS, and descriptor
+    // assume the rest is already good to go
+    //
+    // vector VV -> rip at 0
+    //              CS = VV00
+    //  This means we start executing at linear address VV000
+    //
+    // So the selector needs to be VV00
+    // and the base needs to be VV000
+    //
+    core->rip = 0;
+    core->segments.cs.selector = rip << 8;
+    core->segments.cs.limit = 0xffff;
+    core->segments.cs.base = rip << 12;
+
+    return 0;
+}
+
+
+
+
+
 
 /* Checks machine SVM capability */
 /* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
@@ -689,11 +843,11 @@ int v3_is_svm_capable() {
 
 static int has_svm_nested_paging() {
     uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
-
+    
     v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
-
+    
     //PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);
-
+    
     if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        V3_Print("SVM Nested Paging not supported\n");
        return 0;
@@ -701,7 +855,8 @@ static int has_svm_nested_paging() {
        V3_Print("SVM Nested Paging supported\n");
        return 1;
     }
-}
+ }
 
 
 void v3_init_svm_cpu(int cpu_id) {
@@ -736,7 +891,27 @@ void v3_init_svm_cpu(int cpu_id) {
 
 
 
+void v3_deinit_svm_cpu(int cpu_id) {
+    reg_ex_t msr;
+    extern v3_cpu_arch_t v3_cpu_types[];
+
+    // reset SVM_VM_HSAVE_PA_MSR
+    // Does setting it to NULL disable??
+    msr.r_reg = 0;
+    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);
 
+    // Disable SVM?
+    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
+    msr.e_reg.low &= ~EFER_MSR_svm_enable;
+    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);
+
+    v3_cpu_types[cpu_id] = V3_INVALID_CPU;
+
+    V3_FreePages((void *)host_vmcbs[cpu_id], 4);
+
+    V3_Print("Host CPU %d host area freed, and SVM disabled\n", cpu_id);
+    return;
+}