Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches can be checked out the same way; a sketch for a release branch is shown below.
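For example, to work from a release branch instead of devel, create a local tracking branch for it. The branch name Release-1.2 below is only illustrative; list the remote branches first to see which release branches actually exist.

  # show the branches available on the remote
  git branch -r

  # create and switch to a local branch tracking the (hypothetical) release branch
  git checkout --track -b Release-1.2 origin/Release-1.2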


Basic HRT startup for HVM, plus assorted cleanup
diff --git a/palacios/src/palacios/svm.c b/palacios/src/palacios/svm.c
index 55e591e..05fd183 100644
--- a/palacios/src/palacios/svm.c
+++ b/palacios/src/palacios/svm.c
@@ -18,6 +18,7 @@
  */
 
 
+
 #include <palacios/svm.h>
 #include <palacios/vmm.h>
 
 #include <palacios/svm_msr.h>
 
 #include <palacios/vmm_rbtree.h>
+#include <palacios/vmm_barrier.h>
+#include <palacios/vmm_debug.h>
+
+#include <palacios/vmm_perftune.h>
+
+
+#ifdef V3_CONFIG_CHECKPOINT
+#include <palacios/vmm_checkpoint.h>
+#endif
 
 #include <palacios/vmm_direct_paging.h>
 
 
 #include <palacios/vmm_sprintf.h>
 
+#ifdef V3_CONFIG_MEM_TRACK
+#include <palacios/vmm_mem_track.h>
+#endif 
+
+#ifdef V3_CONFIG_TM_FUNC
+#include <extensions/trans_mem.h>
+#endif
+
+#ifndef V3_CONFIG_DEBUG_SVM
+#undef PrintDebug
+#define PrintDebug(fmt, args...)
+#endif
+
+
 
 uint32_t v3_last_exit;
 
 // This is a global pointer to the host's VMCB
-static addr_t host_vmcbs[CONFIG_MAX_CPUS] = { [0 ... CONFIG_MAX_CPUS - 1] = 0};
+static addr_t host_vmcbs[V3_CONFIG_MAX_CPUS] = { [0 ... V3_CONFIG_MAX_CPUS - 1] = 0};
 
 
 
@@ -58,7 +82,15 @@ extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_
 
 
 static vmcb_t * Allocate_VMCB() {
-    vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));
+    vmcb_t * vmcb_page = NULL;
+    addr_t vmcb_pa = (addr_t)V3_AllocPages(1);   // need not be shadow safe, not exposed to guest
+
+    if ((void *)vmcb_pa == NULL) {
+      PrintError(VM_NONE, VCORE_NONE, "Error allocating VMCB\n");
+       return NULL;
+    }
+
+    vmcb_page = (vmcb_t *)V3_VAddr((void *)vmcb_pa);
 
     memset(vmcb_page, 0, 4096);
 
@@ -66,6 +98,25 @@ static vmcb_t * Allocate_VMCB() {
 }
 
 
+static int v3_svm_handle_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data)
+{
+    int status;
+
+    // Call arch-independent handler
+    if ((status = v3_handle_efer_write(core, msr, src, priv_data)) != 0) {
+       return status;
+    }
+
+    // SVM-specific code
+    {
+       // Ensure that hardware visible EFER.SVME bit is set (SVM Enable)
+       struct efer_64 * hw_efer = (struct efer_64 *)&(core->ctrl_regs.efer);
+       hw_efer->svme = 1;
+    }
+
+    return 0;
+}
+
 
 static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
     vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
@@ -74,8 +125,6 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
 
 
     //
-
-
     ctrl_area->svm_instrs.VMRUN = 1;
     ctrl_area->svm_instrs.VMMCALL = 1;
     ctrl_area->svm_instrs.VMLOAD = 1;
@@ -83,7 +132,6 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
     ctrl_area->svm_instrs.STGI = 1;
     ctrl_area->svm_instrs.CLGI = 1;
     ctrl_area->svm_instrs.SKINIT = 1;
-    ctrl_area->svm_instrs.RDTSCP = 1;
     ctrl_area->svm_instrs.ICEBP = 1;
     ctrl_area->svm_instrs.WBINVD = 1;
     ctrl_area->svm_instrs.MONITOR = 1;
@@ -93,6 +141,11 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
     ctrl_area->instrs.CPUID = 1;
 
     ctrl_area->instrs.HLT = 1;
+
+    /* Set at VMM launch as needed */
+    ctrl_area->instrs.RDTSC = 0;
+    ctrl_area->svm_instrs.RDTSCP = 0;
+
     // guest_state->cr0 = 0x00000001;    // PE 
   
     /*
@@ -110,12 +163,16 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
       
       ctrl_area->exceptions.nmi = 1;
     */
+
+#ifdef V3_CONFIG_TM_FUNC
+    v3_tm_set_excp_intercepts(ctrl_area);
+#endif
     
 
     ctrl_area->instrs.NMI = 1;
     ctrl_area->instrs.SMI = 0; // allow SMIs to run in guest
     ctrl_area->instrs.INIT = 1;
-    ctrl_area->instrs.PAUSE = 1;
+    //    ctrl_area->instrs.PAUSE = 1;
     ctrl_area->instrs.shutdown_evts = 1;
 
 
@@ -196,13 +253,30 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
     ctrl_area->instrs.MSR_PROT = 1;   
 
 
-    PrintDebug("Exiting on interrupts\n");
+    PrintDebug(core->vm_info, core, "Exiting on interrupts\n");
     ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
     ctrl_area->instrs.INTR = 1;
+    // The above also assures the TPR changes (CR8) are only virtual
 
 
+    // However, we need to see TPR writes since they will
+    // affect the virtual apic
+    // we reflect out cr8 to ctrl_regs->apic_tpr
+    ctrl_area->cr_reads.cr8 = 1;
+    ctrl_area->cr_writes.cr8 = 1;
+    // We will do all TPR comparisons in the virtual apic
+    // We also do not want the V_TPR to be able to mask the PIC
+    ctrl_area->guest_ctrl.V_IGN_TPR = 1;
+
+    
+
+    v3_hook_msr(core->vm_info, EFER_MSR, 
+               &v3_handle_efer_read,
+               &v3_svm_handle_efer_write, 
+               core);
+
     if (core->shdw_pg_mode == SHADOW_PAGING) {
-       PrintDebug("Creating initial shadow page table\n");
+       PrintDebug(core->vm_info, core, "Creating initial shadow page table\n");
        
        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
@@ -211,28 +285,26 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
        
        
        if (v3_init_passthrough_pts(core) == -1) {
-           PrintError("Could not initialize passthrough page tables\n");
+           PrintError(core->vm_info, core, "Could not initialize passthrough page tables\n");
            return ;
        }
 
 
        core->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
-       PrintDebug("Created\n");
+       PrintDebug(core->vm_info, core, "Created\n");
        
        core->ctrl_regs.cr0 |= 0x80000000;
-       core->ctrl_regs.cr3 = core->direct_map_pt;
+
+        v3_activate_passthrough_pt(core);
 
        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
-       //ctrl_area->cr_reads.cr4 = 1;
+       //intercept cr4 read so shadow pager can use PAE independently of guest
+       ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;
 
-       v3_hook_msr(core->vm_info, EFER_MSR, 
-                   &v3_handle_efer_read,
-                   &v3_handle_efer_write, 
-                   core);
 
        ctrl_area->instrs.INVLPG = 1;
 
@@ -241,7 +313,6 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
        guest_state->g_pat = 0x7040600070406ULL;
 
 
-
     } else if (core->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
@@ -250,11 +321,11 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;
 
-       PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));
+       PrintDebug(core->vm_info, core, "NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));
 
        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(core) == -1) {
-           PrintError("Could not initialize Nested page tables\n");
+           PrintError(core->vm_info, core, "Could not initialize Nested page tables\n");
            return ;
        }
 
@@ -262,26 +333,128 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
 
        guest_state->g_pat = 0x7040600070406ULL;
     }
+    
+    /* tell the guest that we don't support SVM */
+    v3_hook_msr(core->vm_info, SVM_VM_CR_MSR, 
+       &v3_handle_vm_cr_read,
+       &v3_handle_vm_cr_write, 
+       core);
+
+
+    {
+#define INT_PENDING_AMD_MSR            0xc0010055
+
+       v3_hook_msr(core->vm_info, IA32_STAR_MSR, NULL, NULL, NULL);
+       v3_hook_msr(core->vm_info, IA32_LSTAR_MSR, NULL, NULL, NULL);
+       v3_hook_msr(core->vm_info, IA32_FMASK_MSR, NULL, NULL, NULL);
+       v3_hook_msr(core->vm_info, IA32_KERN_GS_BASE_MSR, NULL, NULL, NULL);
+       v3_hook_msr(core->vm_info, IA32_CSTAR_MSR, NULL, NULL, NULL);
+
+       v3_hook_msr(core->vm_info, SYSENTER_CS_MSR, NULL, NULL, NULL);
+       v3_hook_msr(core->vm_info, SYSENTER_ESP_MSR, NULL, NULL, NULL);
+       v3_hook_msr(core->vm_info, SYSENTER_EIP_MSR, NULL, NULL, NULL);
+
+
+       v3_hook_msr(core->vm_info, FS_BASE_MSR, NULL, NULL, NULL);
+       v3_hook_msr(core->vm_info, GS_BASE_MSR, NULL, NULL, NULL);
+
+       // Passthrough read operations are ok.
+       v3_hook_msr(core->vm_info, INT_PENDING_AMD_MSR, NULL, v3_msr_unhandled_write, NULL);
+    }
 }
 
 
-int v3_init_svm_vmcb(struct guest_info * info, v3_vm_class_t vm_class) {
+int v3_init_svm_vmcb(struct guest_info * core, v3_vm_class_t vm_class) {
 
-    PrintDebug("Allocating VMCB\n");
-    info->vmm_data = (void*)Allocate_VMCB();
+    PrintDebug(core->vm_info, core, "Allocating VMCB\n");
+    core->vmm_data = (void *)Allocate_VMCB();
     
+    if (core->vmm_data == NULL) {
+       PrintError(core->vm_info, core, "Could not allocate VMCB, Exiting...\n");
+       return -1;
+    }
+
     if (vm_class == V3_PC_VM) {
-       PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);
-       Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);
+       PrintDebug(core->vm_info, core, "Initializing VMCB (addr=%p)\n", (void *)core->vmm_data);
+       Init_VMCB_BIOS((vmcb_t*)(core->vmm_data), core);
     } else {
-       PrintError("Invalid VM class\n");
+       PrintError(core->vm_info, core, "Invalid VM class\n");
        return -1;
     }
 
+    core->core_run_state = CORE_STOPPED;
+
     return 0;
 }
 
 
+int v3_deinit_svm_vmcb(struct guest_info * core) {
+    V3_FreePages(V3_PAddr(core->vmm_data), 1);
+    return 0;
+}
+
+
+#ifdef V3_CONFIG_CHECKPOINT
+int v3_svm_save_core(struct guest_info * core, void * ctx){
+
+  vmcb_saved_state_t * guest_area = GET_VMCB_SAVE_STATE_AREA(core->vmm_data); 
+
+  // Special case saves of data we need immediate access to
+  // in some cases
+  V3_CHKPT_SAVE(ctx, "CPL", core->cpl, failout);
+  V3_CHKPT_SAVE(ctx,"STAR", guest_area->star, failout); 
+  V3_CHKPT_SAVE(ctx,"CSTAR", guest_area->cstar, failout); 
+  V3_CHKPT_SAVE(ctx,"LSTAR", guest_area->lstar, failout); 
+  V3_CHKPT_SAVE(ctx,"SFMASK", guest_area->sfmask, failout); 
+  V3_CHKPT_SAVE(ctx,"KERNELGSBASE", guest_area->KernelGsBase, failout); 
+  V3_CHKPT_SAVE(ctx,"SYSENTER_CS", guest_area->sysenter_cs, failout); 
+  V3_CHKPT_SAVE(ctx,"SYSENTER_ESP", guest_area->sysenter_esp, failout); 
+  V3_CHKPT_SAVE(ctx,"SYSENTER_EIP", guest_area->sysenter_eip, failout); 
+  
+// and then we save the whole enchilada
+  if (v3_chkpt_save(ctx, "VMCB_DATA", PAGE_SIZE, core->vmm_data)) { 
+    PrintError(core->vm_info, core, "Could not save SVM vmcb\n");
+    goto failout;
+  }
+  
+  return 0;
+
+ failout:
+  PrintError(core->vm_info, core, "Failed to save SVM state for core\n");
+  return -1;
+
+}
+
+int v3_svm_load_core(struct guest_info * core, void * ctx){
+    
+
+  vmcb_saved_state_t * guest_area = GET_VMCB_SAVE_STATE_AREA(core->vmm_data); 
+
+  // Reload what we special cased, which we will overwrite in a minute
+  V3_CHKPT_LOAD(ctx, "CPL", core->cpl, failout);
+  V3_CHKPT_LOAD(ctx,"STAR", guest_area->star, failout); 
+  V3_CHKPT_LOAD(ctx,"CSTAR", guest_area->cstar, failout); 
+  V3_CHKPT_LOAD(ctx,"LSTAR", guest_area->lstar, failout); 
+  V3_CHKPT_LOAD(ctx,"SFMASK", guest_area->sfmask, failout); 
+  V3_CHKPT_LOAD(ctx,"KERNELGSBASE", guest_area->KernelGsBase, failout); 
+  V3_CHKPT_LOAD(ctx,"SYSENTER_CS", guest_area->sysenter_cs, failout); 
+  V3_CHKPT_LOAD(ctx,"SYSENTER_ESP", guest_area->sysenter_esp, failout); 
+  V3_CHKPT_LOAD(ctx,"SYSENTER_EIP", guest_area->sysenter_eip, failout); 
+  
+  // and then we load the whole enchilada
+  if (v3_chkpt_load(ctx, "VMCB_DATA", PAGE_SIZE, core->vmm_data)) { 
+    PrintError(core->vm_info, core, "Could not load SVM vmcb\n");
+    goto failout;
+  }
+  
+  return 0;
+
+ failout:
+  PrintError(core->vm_info, core, "Failed to save SVM state for core\n");
+  return -1;
+
+}
+#endif
 
 static int update_irq_exit_state(struct guest_info * info) {
     vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
@@ -291,8 +464,8 @@ static int update_irq_exit_state(struct guest_info * info) {
 
     if ((info->intr_core_state.irq_pending == 1) && (guest_ctrl->guest_ctrl.V_IRQ == 0)) {
        
-#ifdef CONFIG_DEBUG_INTERRUPTS
-       PrintDebug("INTAK cycle completed for irq %d\n", info->intr_core_state.irq_vector);
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
+       PrintDebug(info->vm_info, info, "INTAK cycle completed for irq %d\n", info->intr_core_state.irq_vector);
 #endif
 
        info->intr_core_state.irq_started = 1;
@@ -302,16 +475,16 @@ static int update_irq_exit_state(struct guest_info * info) {
     }
 
     if ((info->intr_core_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 0)) {
-#ifdef CONFIG_DEBUG_INTERRUPTS
-       PrintDebug("Interrupt %d taken by guest\n", info->intr_core_state.irq_vector);
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
+       PrintDebug(info->vm_info, info, "Interrupt %d taken by guest\n", info->intr_core_state.irq_vector);
 #endif
 
        // Interrupt was taken fully vectored
        info->intr_core_state.irq_started = 0;
 
     } else if ((info->intr_core_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 1)) {
-#ifdef CONFIG_DEBUG_INTERRUPTS
-       PrintDebug("EXIT INT INFO is set (vec=%d)\n", guest_ctrl->exit_int_info.vector);
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
+       PrintDebug(info->vm_info, info, "EXIT INT INFO is set (vec=%d)\n", guest_ctrl->exit_int_info.vector);
 #endif
     }
 
@@ -336,8 +509,8 @@ static int update_irq_entry_state(struct guest_info * info) {
        if (info->excp_state.excp_error_code_valid) {
            guest_ctrl->EVENTINJ.error_code = info->excp_state.excp_error_code;
            guest_ctrl->EVENTINJ.ev = 1;
-#ifdef CONFIG_DEBUG_INTERRUPTS
-           PrintDebug("Injecting exception %d with error code %x\n", excp, guest_ctrl->EVENTINJ.error_code);
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
+           PrintDebug(info->vm_info, info, "Injecting exception %d with error code %x\n", excp, guest_ctrl->EVENTINJ.error_code);
 #endif
        }
        
@@ -345,8 +518,8 @@ static int update_irq_entry_state(struct guest_info * info) {
        
        guest_ctrl->EVENTINJ.valid = 1;
 
-#ifdef CONFIG_DEBUG_INTERRUPTS
-       PrintDebug("<%d> Injecting Exception %d (CR2=%p) (EIP=%p)\n", 
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
+       PrintDebug(info->vm_info, info, "<%d> Injecting Exception %d (CR2=%p) (EIP=%p)\n", 
                   (int)info->num_exits, 
                   guest_ctrl->EVENTINJ.vector, 
                   (void *)(addr_t)info->ctrl_regs.cr2,
@@ -355,26 +528,36 @@ static int update_irq_entry_state(struct guest_info * info) {
 
        v3_injecting_excp(info, excp);
     } else if (info->intr_core_state.irq_started == 1) {
-#ifdef CONFIG_DEBUG_INTERRUPTS
-       PrintDebug("IRQ pending from previous injection\n");
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
+       PrintDebug(info->vm_info, info, "IRQ pending from previous injection\n");
 #endif
        guest_ctrl->guest_ctrl.V_IRQ = 1;
        guest_ctrl->guest_ctrl.V_INTR_VECTOR = info->intr_core_state.irq_vector;
+
+       // We ignore the virtual TPR on this injection
+       // TPR/PPR tests have already been done in the APIC.
        guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
-       guest_ctrl->guest_ctrl.V_INTR_PRIO = 0xf;
+       guest_ctrl->guest_ctrl.V_INTR_PRIO = info->intr_core_state.irq_vector >> 4 ;  // 0xf;
 
     } else {
        switch (v3_intr_pending(info)) {
            case V3_EXTERNAL_IRQ: {
-               uint32_t irq = v3_get_intr(info);
+               int irq = v3_get_intr(info); 
+
+               if (irq<0) {
+                 break;
+               }
 
                guest_ctrl->guest_ctrl.V_IRQ = 1;
                guest_ctrl->guest_ctrl.V_INTR_VECTOR = irq;
+
+               // We ignore the virtual TPR on this injection
+               // TPR/PPR tests have already been done in the APIC.
                guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
-               guest_ctrl->guest_ctrl.V_INTR_PRIO = 0xf;
+               guest_ctrl->guest_ctrl.V_INTR_PRIO = info->intr_core_state.irq_vector >> 4 ;  // 0xf;
 
-#ifdef CONFIG_DEBUG_INTERRUPTS
-               PrintDebug("Injecting Interrupt %d (EIP=%p)\n", 
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
+               PrintDebug(info->vm_info, info, "Injecting Interrupt %d (EIP=%p)\n", 
                           guest_ctrl->guest_ctrl.V_INTR_VECTOR, 
                           (void *)(addr_t)info->rip);
 #endif
@@ -389,6 +572,18 @@ static int update_irq_entry_state(struct guest_info * info) {
                break;
            case V3_SOFTWARE_INTR:
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_SOFT_INTR;
+
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
+               PrintDebug(info->vm_info, info, "Injecting software interrupt --  type: %d, vector: %d\n", 
+                          SVM_INJECTION_SOFT_INTR, info->intr_core_state.swintr_vector);
+#endif
+               guest_ctrl->EVENTINJ.vector = info->intr_core_state.swintr_vector;
+               guest_ctrl->EVENTINJ.valid = 1;
+            
+               /* reset swintr state */
+               info->intr_core_state.swintr_posted = 0;
+               info->intr_core_state.swintr_vector = 0;
+               
                break;
            case V3_VIRTUAL_IRQ:
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_IRQ;
@@ -404,6 +599,26 @@ static int update_irq_entry_state(struct guest_info * info) {
     return 0;
 }
 
+int 
+v3_svm_config_tsc_virtualization(struct guest_info * info) {
+    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
+
+
+    if (info->time_state.flags & VM_TIME_TRAP_RDTSC) {
+       ctrl_area->instrs.RDTSC = 1;
+       ctrl_area->svm_instrs.RDTSCP = 1;
+    } else {
+       ctrl_area->instrs.RDTSC = 0;
+       ctrl_area->svm_instrs.RDTSCP = 0;
+
+       if (info->time_state.flags & VM_TIME_TSC_PASSTHROUGH) {
+               ctrl_area->TSC_OFFSET = 0;
+       } else {
+               ctrl_area->TSC_OFFSET = v3_tsc_host_offset(&info->time_state);
+       }
+    }
+    return 0;
+}
 
 /* 
  * CAUTION and DANGER!!! 
@@ -417,9 +632,21 @@ int v3_svm_enter(struct guest_info * info) {
     vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
     vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data)); 
     addr_t exit_code = 0, exit_info1 = 0, exit_info2 = 0;
+    uint64_t guest_cycles = 0;
 
     // Conditionally yield the CPU if the timeslice has expired
-    v3_yield_cond(info);
+    v3_schedule(info);
+
+#ifdef V3_CONFIG_MEM_TRACK
+    v3_mem_track_entry(info);
+#endif 
+
+    // Update timer devices after being in the VM before doing 
+    // IRQ updates, so that any interrupts they raise get seen 
+    // immediately.
+    v3_advance_time(info, NULL);
+    v3_update_timers(info);
+
 
     // disable global interrupts for vm state transition
     v3_clgi();
@@ -431,10 +658,26 @@ int v3_svm_enter(struct guest_info * info) {
     guest_state->cr4 = info->ctrl_regs.cr4;
     guest_state->dr6 = info->dbg_regs.dr6;
     guest_state->dr7 = info->dbg_regs.dr7;
-    guest_ctrl->guest_ctrl.V_TPR = info->ctrl_regs.cr8 & 0xff;
+
+    // CR8 is now updated by read/writes and it contains the APIC TPR
+    // the V_TPR should be just the class part of that.
+    // This update is here just for completeness.  We currently
+    // are ignoring V_TPR on all injections and doing the priority logic
+    // in the APIC.
+    // guest_ctrl->guest_ctrl.V_TPR = ((info->ctrl_regs.apic_tpr) >> 4) & 0xf;
+
+    //guest_ctrl->guest_ctrl.V_TPR = info->ctrl_regs.cr8 & 0xff;
+    // 
+    
     guest_state->rflags = info->ctrl_regs.rflags;
     guest_state->efer = info->ctrl_regs.efer;
     
+    /* Synchronize MSRs */
+    guest_state->star = info->msrs.star;
+    guest_state->lstar = info->msrs.lstar;
+    guest_state->sfmask = info->msrs.sfmask;
+    guest_state->KernelGsBase = info->msrs.kern_gs_base;
+
     guest_state->cpl = info->cpl;
 
     v3_set_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments));
@@ -443,7 +686,9 @@ int v3_svm_enter(struct guest_info * info) {
     guest_state->rip = info->rip;
     guest_state->rsp = info->vm_regs.rsp;
 
-#ifdef CONFIG_SYMCALL
+    V3_FP_ENTRY_RESTORE(info);
+
+#ifdef V3_CONFIG_SYMCALL
     if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_entry_state(info);
     }
@@ -451,45 +696,71 @@ int v3_svm_enter(struct guest_info * info) {
     update_irq_entry_state(info);
 #endif
 
+#ifdef V3_CONFIG_TM_FUNC
+    v3_tm_check_intr_state(info, guest_ctrl, guest_state);
+#endif
+
 
     /* ** */
 
     /*
-      PrintDebug("SVM Entry to CS=%p  rip=%p...\n", 
+      PrintDebug(info->vm_info, info, "SVM Entry to CS=%p  rip=%p...\n", 
       (void *)(addr_t)info->segments.cs.base, 
       (void *)(addr_t)info->rip);
     */
 
-#ifdef CONFIG_SYMCALL
+#ifdef V3_CONFIG_SYMCALL
     if (info->sym_core_state.symcall_state.sym_call_active == 1) {
        if (guest_ctrl->guest_ctrl.V_IRQ == 1) {
-           V3_Print("!!! Injecting Interrupt during Sym call !!!\n");
+           V3_Print(info->vm_info, info, "!!! Injecting Interrupt during Sym call !!!\n");
        }
     }
 #endif
 
+    v3_svm_config_tsc_virtualization(info);
 
-    v3_update_timers(info);
-    v3_resume_time(info);
-    guest_ctrl->TSC_OFFSET = info->time_state.host_offset;
+    //V3_Print(info->vm_info, info, "Calling v3_svm_launch\n");
+    {  
+       uint64_t entry_tsc = 0;
+       uint64_t exit_tsc = 0;
+       
+#ifdef V3_CONFIG_PWRSTAT_TELEMETRY
+       v3_pwrstat_telemetry_enter(info);
+#endif
 
-    //V3_Print("Calling v3_svm_launch\n");
+#ifdef V3_CONFIG_PMU_TELEMETRY
+       v3_pmu_telemetry_enter(info);
+#endif
 
-    v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[info->cpu_id]);
 
-    v3_pause_time(info);
-#ifdef OPTION_TIME_MASK_OVERHEAD
-    v3_offset_time(info, -SVM_ENTRY_OVERHEAD);
+       rdtscll(entry_tsc);
+
+       v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[V3_Get_CPU()]);
+
+       rdtscll(exit_tsc);
+
+#ifdef V3_CONFIG_PMU_TELEMETRY
+       v3_pmu_telemetry_exit(info);
 #endif
 
-    //V3_Print("SVM Returned: Exit Code: %x, guest_rip=%lx\n", (uint32_t)(guest_ctrl->exit_code), (unsigned long)guest_state->rip);
+#ifdef V3_CONFIG_PWRSTAT_TELEMETRY
+       v3_pwrstat_telemetry_exit(info);
+#endif
+
+       guest_cycles = exit_tsc - entry_tsc;
+    }
+
+
+    //V3_Print(info->vm_info, info, "SVM Returned: Exit Code: %x, guest_rip=%lx\n", (uint32_t)(guest_ctrl->exit_code), (unsigned long)guest_state->rip);
 
     v3_last_exit = (uint32_t)(guest_ctrl->exit_code);
 
-    //PrintDebug("SVM Returned\n");
-    
+    v3_advance_time(info, &guest_cycles);
+
     info->num_exits++;
 
+    V3_FP_EXIT_SAVE(info);
+
     // Save Guest state from VMCB
     info->rip = guest_state->rip;
     info->vm_regs.rsp = guest_state->rsp;
@@ -503,23 +774,31 @@ int v3_svm_enter(struct guest_info * info) {
     info->ctrl_regs.cr4 = guest_state->cr4;
     info->dbg_regs.dr6 = guest_state->dr6;
     info->dbg_regs.dr7 = guest_state->dr7;
-    info->ctrl_regs.cr8 = guest_ctrl->guest_ctrl.V_TPR;
+    //
+    // We do not track this anymore
+    // V_TPR is ignored and we do the logic in the APIC
+    //info->ctrl_regs.cr8 = guest_ctrl->guest_ctrl.V_TPR;
+    //
     info->ctrl_regs.rflags = guest_state->rflags;
     info->ctrl_regs.efer = guest_state->efer;
     
+    /* Synchronize MSRs */
+    info->msrs.star =  guest_state->star;
+    info->msrs.lstar = guest_state->lstar;
+    info->msrs.sfmask = guest_state->sfmask;
+    info->msrs.kern_gs_base = guest_state->KernelGsBase;
+
     v3_get_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments));
     info->cpu_mode = v3_get_vm_cpu_mode(info);
     info->mem_mode = v3_get_vm_mem_mode(info);
     /* ** */
 
-
     // save exit info here
     exit_code = guest_ctrl->exit_code;
     exit_info1 = guest_ctrl->exit_info1;
     exit_info2 = guest_ctrl->exit_info2;
 
-
-#ifdef CONFIG_SYMCALL
+#ifdef V3_CONFIG_SYMCALL
     if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_exit_state(info);
     }
@@ -527,67 +806,119 @@ int v3_svm_enter(struct guest_info * info) {
     update_irq_exit_state(info);
 #endif
 
-
     // reenable global interrupts after vm exit
     v3_stgi();
 
  
     // Conditionally yield the CPU if the timeslice has expired
-    v3_yield_cond(info);
+    v3_schedule(info);
 
+    // This update timers is for time-dependent handlers
+    // if we're slaved to host time
+    v3_advance_time(info, NULL);
+    v3_update_timers(info);
 
 
-    if (v3_handle_svm_exit(info, exit_code, exit_info1, exit_info2) != 0) {
-       PrintError("Error in SVM exit handler\n");
-       return -1;
+    {
+       int ret = v3_handle_svm_exit(info, exit_code, exit_info1, exit_info2);
+       
+       if (ret != 0) {
+           PrintError(info->vm_info, info, "Error in SVM exit handler (ret=%d)\n", ret);
+           PrintError(info->vm_info, info, "  last Exit was %d (exit code=0x%llx)\n", v3_last_exit, (uint64_t) exit_code);
+           return -1;
+       }
+    }
+
+    if (info->timeouts.timeout_active) {
+       /* Check to see if any timeouts have expired */
+       v3_handle_timeouts(info, guest_cycles);
     }
 
+#ifdef V3_CONFIG_MEM_TRACK
+    v3_mem_track_exit(info);
+#endif 
+
 
     return 0;
 }
 
 
-int v3_start_svm_guest(struct guest_info *info) {
+int v3_start_svm_guest(struct guest_info * info) {
     //    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
     //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
 
+    PrintDebug(info->vm_info, info, "Starting SVM core %u (on logical core %u)\n", info->vcpu_id, info->pcpu_id);
 
-    PrintDebug("Starting SVM core %u\n",info->cpu_id);
-    if (info->cpu_mode==INIT) { 
-       PrintDebug("SVM core %u: I am an AP in INIT mode, waiting for that to change\n",info->cpu_id);
-       while (info->cpu_mode==INIT) {
-           v3_yield(info);
-           //PrintDebug("SVM core %u: still waiting for INIT\n",info->cpu_id);
-       }
-       PrintDebug("SVM core %u: I am out of INIT\n",info->cpu_id);
-       if (info->cpu_mode==SIPI) { 
-           PrintDebug("SVM core %u: I am waiting on a SIPI to set my starting address\n",info->cpu_id);
-           while (info->cpu_mode==SIPI) {
-               v3_yield(info);
-               //PrintDebug("SVM core %u: still waiting for SIPI\n",info->cpu_id);
-           }
-       }
-       PrintDebug("SVM core %u: I have my SIPI\n", info->cpu_id);
-    }
 
-    if (info->cpu_mode!=REAL) { 
-       PrintError("SVM core %u: I am not in REAL mode at launch!  Huh?!\n", info->cpu_id);
+#ifdef V3_CONFIG_HVM
+    if (v3_setup_hvm_hrt_core_for_boot(info)) { 
+       PrintError(info->vm_info, info, "Failed to setup HRT core...\n");
        return -1;
     }
+#endif
 
-    PrintDebug("SVM core %u: I am starting at CS=0x%x (base=0x%p, limit=0x%x),  RIP=0x%p\n", 
-              info->cpu_id, info->segments.cs.selector, (void*)(info->segments.cs.base), 
-              info->segments.cs.limit,(void*)(info->rip));
+           
 
 
+    while (1) {
 
-    PrintDebug("SVM core %u: Launching SVM VM (vmcb=%p)\n", info->cpu_id, (void *)info->vmm_data);
-    //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
-    
-    info->vm_info->run_state = VM_RUNNING;
-    v3_start_time(info);
+       if (info->core_run_state == CORE_STOPPED) {
 
-    while (1) {
+           if (info->vcpu_id == 0) {
+               info->core_run_state = CORE_RUNNING;
+           } else  { 
+               PrintDebug(info->vm_info, info, "SVM core %u (on %u): Waiting for core initialization\n", info->vcpu_id, info->pcpu_id);
+
+               V3_NO_WORK(info);
+
+               while (info->core_run_state == CORE_STOPPED) {
+                   
+                   if (info->vm_info->run_state == VM_STOPPED) {
+                       // The VM was stopped before this core was initialized. 
+                       return 0;
+                   }
+                   
+                   V3_STILL_NO_WORK(info);
+
+                   //PrintDebug(info->vm_info, info, "SVM core %u: still waiting for INIT\n", info->vcpu_id);
+               }
+
+               V3_HAVE_WORK_AGAIN(info);
+               
+               PrintDebug(info->vm_info, info, "SVM core %u(on %u) initialized\n", info->vcpu_id, info->pcpu_id);
+               
+               // We'll be paranoid about race conditions here
+               v3_wait_at_barrier(info);
+           } 
+           
+           PrintDebug(info->vm_info, info, "SVM core %u(on %u): I am starting at CS=0x%x (base=0x%p, limit=0x%x),  RIP=0x%p\n", 
+                      info->vcpu_id, info->pcpu_id, 
+                      info->segments.cs.selector, (void *)(info->segments.cs.base), 
+                      info->segments.cs.limit, (void *)(info->rip));
+           
+           
+           
+           PrintDebug(info->vm_info, info, "SVM core %u: Launching SVM VM (vmcb=%p) (on cpu %u)\n", 
+                      info->vcpu_id, (void *)info->vmm_data, info->pcpu_id);
+           //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
+           
+           v3_start_time(info);
+       }
+       
+       if (info->vm_info->run_state == VM_STOPPED) {
+           info->core_run_state = CORE_STOPPED;
+           break;
+       }
+
+#ifdef V3_CONFIG_PMU_TELEMETRY
+       v3_pmu_telemetry_start(info);
+#endif
+       
+#ifdef V3_CONFIG_PWRSTAT_TELEMETRY
+       v3_pwrstat_telemetry_start(info);
+#endif
+       
        if (v3_svm_enter(info) == -1) {
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            addr_t host_addr;
@@ -595,17 +926,17 @@ int v3_start_svm_guest(struct guest_info *info) {
            
            info->vm_info->run_state = VM_ERROR;
            
-           V3_Print("SVM core %u: SVM ERROR!!\n", info->cpu_id); 
+           V3_Print(info->vm_info, info, "SVM core %u: SVM ERROR!!\n", info->vcpu_id); 
            
            v3_print_guest_state(info);
            
-           V3_Print("SVM core %u: SVM Exit Code: %p\n", info->cpu_id, (void *)(addr_t)guest_ctrl->exit_code); 
+           V3_Print(info->vm_info, info, "SVM core %u: SVM Exit Code: %p\n", info->vcpu_id, (void *)(addr_t)guest_ctrl->exit_code); 
            
-           V3_Print("SVM core %u: exit_info1 low = 0x%.8x\n", info->cpu_id, *(uint_t*)&(guest_ctrl->exit_info1));
-           V3_Print("SVM core %u: exit_info1 high = 0x%.8x\n", info->cpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
+           V3_Print(info->vm_info, info, "SVM core %u: exit_info1 low = 0x%.8x\n", info->vcpu_id, *(uint_t*)&(guest_ctrl->exit_info1));
+           V3_Print(info->vm_info, info, "SVM core %u: exit_info1 high = 0x%.8x\n", info->vcpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
            
-           V3_Print("SVM core %u: exit_info2 low = 0x%.8x\n", info->cpu_id, *(uint_t*)&(guest_ctrl->exit_info2));
-           V3_Print("SVM core %u: exit_info2 high = 0x%.8x\n", info->cpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
+           V3_Print(info->vm_info, info, "SVM core %u: exit_info2 low = 0x%.8x\n", info->vcpu_id, *(uint_t*)&(guest_ctrl->exit_info2));
+           V3_Print(info->vm_info, info, "SVM core %u: exit_info2 high = 0x%.8x\n", info->vcpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
            
            linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
            
@@ -615,24 +946,42 @@ int v3_start_svm_guest(struct guest_info *info) {
                v3_gva_to_hva(info, linear_addr, &host_addr);
            }
            
-           V3_Print("SVM core %u: Host Address of rip = 0x%p\n", info->cpu_id, (void *)host_addr);
+           V3_Print(info->vm_info, info, "SVM core %u: Host Address of rip = 0x%p\n", info->vcpu_id, (void *)host_addr);
            
-           V3_Print("SVM core %u: Instr (15 bytes) at %p:\n", info->cpu_id, (void *)host_addr);
+           V3_Print(info->vm_info, info, "SVM core %u: Instr (15 bytes) at %p:\n", info->vcpu_id, (void *)host_addr);
            v3_dump_mem((uint8_t *)host_addr, 15);
            
            v3_print_stack(info);
+           
+           break;
+       }
+       
+       v3_wait_at_barrier(info);
+       
 
+       if (info->vm_info->run_state == VM_STOPPED) {
+           info->core_run_state = CORE_STOPPED;
            break;
        }
+
        
+
 /*
-       if ((info->num_exits % 5000) == 0) {
-           V3_Print("SVM Exit number %d\n", (uint32_t)info->num_exits);
+       if ((info->num_exits % 50000) == 0) {
+           V3_Print(info->vm_info, info, "SVM Exit number %d\n", (uint32_t)info->num_exits);
+           v3_print_guest_state(info);
        }
 */
        
     }
 
+#ifdef V3_CONFIG_PMU_TELEMETRY
+    v3_pmu_telemetry_end(info);
+#endif
+
+#ifdef V3_CONFIG_PWRSTAT_TELEMETRY
+    v3_pwrstat_telemetry_end(info);
+#endif
     // Need to take down the other cores on error... 
 
     return 0;
@@ -641,6 +990,31 @@ int v3_start_svm_guest(struct guest_info *info) {
 
 
 
+int v3_reset_svm_vm_core(struct guest_info * core, addr_t rip) {
+    // init vmcb_bios
+
+    // Write the RIP, CS, and descriptor
+    // assume the rest is already good to go
+    //
+    // vector VV -> rip at 0
+    //              CS = VV00
+    //  This means we start executing at linear address VV000
+    //
+    // So the selector needs to be VV00
+    // and the base needs to be VV000
+    //
+    core->rip = 0;
+    core->segments.cs.selector = rip << 8;
+    core->segments.cs.limit = 0xffff;
+    core->segments.cs.base = rip << 12;
+
+    return 0;
+}
+
+
+
+
+
 
 /* Checks machine SVM capability */
 /* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
@@ -650,38 +1024,38 @@ int v3_is_svm_capable() {
 
     v3_cpuid(CPUID_EXT_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
   
-    PrintDebug("CPUID_EXT_FEATURE_IDS_ecx=0x%x\n", ecx);
+    PrintDebug(VM_NONE, VCORE_NONE,  "CPUID_EXT_FEATURE_IDS_ecx=0x%x\n", ecx);
 
     if ((ecx & CPUID_EXT_FEATURE_IDS_ecx_svm_avail) == 0) {
-      V3_Print("SVM Not Available\n");
+      V3_Print(VM_NONE, VCORE_NONE,  "SVM Not Available\n");
       return 0;
     }  else {
        v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
        
-       PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);
+       PrintDebug(VM_NONE, VCORE_NONE, "SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);
        
        if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
-           V3_Print("SVM is available but is disabled.\n");
+           V3_Print(VM_NONE, VCORE_NONE, "SVM is available but is disabled.\n");
            
            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
            
-           PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);
+           PrintDebug(VM_NONE, VCORE_NONE,  "CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);
            
            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
-               V3_Print("SVM BIOS Disabled, not unlockable\n");
+               V3_Print(VM_NONE, VCORE_NONE,  "SVM BIOS Disabled, not unlockable\n");
            } else {
-               V3_Print("SVM is locked with a key\n");
+               V3_Print(VM_NONE, VCORE_NONE,  "SVM is locked with a key\n");
            }
            return 0;
 
        } else {
-           V3_Print("SVM is available and  enabled.\n");
+           V3_Print(VM_NONE, VCORE_NONE,  "SVM is available and  enabled.\n");
 
            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
-           PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_eax=0x%x\n", eax);
-           PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ebx=0x%x\n", ebx);
-           PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ecx=0x%x\n", ecx);
-           PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);
+           PrintDebug(VM_NONE, VCORE_NONE, "CPUID_SVM_REV_AND_FEATURE_IDS_eax=0x%x\n", eax);
+           PrintDebug(VM_NONE, VCORE_NONE, "CPUID_SVM_REV_AND_FEATURE_IDS_ebx=0x%x\n", ebx);
+           PrintDebug(VM_NONE, VCORE_NONE, "CPUID_SVM_REV_AND_FEATURE_IDS_ecx=0x%x\n", ecx);
+           PrintDebug(VM_NONE, VCORE_NONE, "CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);
 
            return 1;
        }
@@ -690,19 +1064,20 @@ int v3_is_svm_capable() {
 
 static int has_svm_nested_paging() {
     uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
-
+    
     v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
-
-    //PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);
-
+    
+    //PrintDebug(VM_NONE, VCORE_NONE,  "CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);
+    
     if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
-       V3_Print("SVM Nested Paging not supported\n");
+       V3_Print(VM_NONE, VCORE_NONE, "SVM Nested Paging not supported\n");
        return 0;
     } else {
-       V3_Print("SVM Nested Paging supported\n");
+       V3_Print(VM_NONE, VCORE_NONE, "SVM Nested Paging supported\n");
        return 1;
     }
-}
+ }
 
 
 void v3_init_svm_cpu(int cpu_id) {
@@ -714,17 +1089,22 @@ void v3_init_svm_cpu(int cpu_id) {
     msr.e_reg.low |= EFER_MSR_svm_enable;
     v3_set_msr(EFER_MSR, 0, msr.e_reg.low);
 
-    V3_Print("SVM Enabled\n");
+    V3_Print(VM_NONE, VCORE_NONE,  "SVM Enabled\n");
 
     // Setup the host state save area
-    host_vmcbs[cpu_id] = (addr_t)V3_AllocPages(4);
+    host_vmcbs[cpu_id] = (addr_t)V3_AllocPages(4); // need not be shadow-safe, not exposed to guest
+
+    if (!host_vmcbs[cpu_id]) {
+       PrintError(VM_NONE, VCORE_NONE,  "Failed to allocate VMCB\n");
+       return;
+    }
 
     /* 64-BIT-ISSUE */
     //  msr.e_reg.high = 0;
     //msr.e_reg.low = (uint_t)host_vmcb;
     msr.r_reg = host_vmcbs[cpu_id];
 
-    PrintDebug("Host State being saved at %p\n", (void *)host_vmcbs[cpu_id]);
+    PrintDebug(VM_NONE, VCORE_NONE,  "Host State being saved at %p\n", (void *)host_vmcbs[cpu_id]);
     v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);
 
 
@@ -737,7 +1117,27 @@ void v3_init_svm_cpu(int cpu_id) {
 
 
 
+void v3_deinit_svm_cpu(int cpu_id) {
+    reg_ex_t msr;
+    extern v3_cpu_arch_t v3_cpu_types[];
+
+    // reset SVM_VM_HSAVE_PA_MSR
+    // Does setting it to NULL disable??
+    msr.r_reg = 0;
+    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);
+
+    // Disable SVM?
+    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
+    msr.e_reg.low &= ~EFER_MSR_svm_enable;
+    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);
+
+    v3_cpu_types[cpu_id] = V3_INVALID_CPU;
+
+    V3_FreePages((void *)host_vmcbs[cpu_id], 4);
 
+    V3_Print(VM_NONE, VCORE_NONE,  "Host CPU %d host area freed, and SVM disabled\n", cpu_id);
+    return;
+}
 
 
 
@@ -818,7 +1218,7 @@ void v3_init_svm_cpu(int cpu_id) {
     end <<= 32;
     end += end_lo;
     
-    PrintDebug("VMSave Cycle Latency: %d\n", (uint32_t)(end - start));
+    PrintDebug(core->vm_info, core, "VMSave Cycle Latency: %d\n", (uint32_t)(end - start));
     
     __asm__ __volatile__ (
                          "rdtsc ; "
@@ -840,7 +1240,7 @@ void v3_init_svm_cpu(int cpu_id) {
        end += end_lo;
 
 
-       PrintDebug("VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));
+       PrintDebug(core->vm_info, core, "VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));
     }
     /* End Latency Test */
 
@@ -937,7 +1337,12 @@ void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
   
 
   ctrl_area->instrs.IOIO_PROT = 1;
-  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);
+  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3); // need not be shadow-safe, not exposed to guest
+
+  if (!ctrl_area->IOPM_BASE_PA) { 
+      PrintError(core->vm_info, core, "Cannot allocate IO bitmap\n");
+      return;
+  }
   
   {
     reg_ex_t tmp_reg;
@@ -962,12 +1367,12 @@ void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
     GetGDTR(gdt_buf);
     gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
     gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
-    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);
+    PrintDebug(core->vm_info, core, "GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);
 
     GetIDTR(idt_buf);
     idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
     idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
-    PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);
+    PrintDebug(core->vm_info, core, "IDT: base: %x, limit: %x\n",idt_base, idt_limit);
 
 
     // gdt_base -= 0x2000;
@@ -992,7 +1397,7 @@ void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
     // Enable Nested Paging
     ctrl_area->NP_ENABLE = 1;
 
-    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
+    PrintDebug(core->vm_info, core, "NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
 
         // Set the Nested Page Table pointer
     ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);
@@ -1003,8 +1408,8 @@ void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
 
     guest_state->g_pat = 0x7040600070406ULL;
 
-    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
-    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
+    PrintDebug(core->vm_info, core, "Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
+    PrintDebug(core->vm_info, core, "Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
     // Enable Paging
     //    guest_state->cr0 |= 0x80000000;
   }