Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches can be checked out the same way; substitute the branch you want for devel.


fix for intel hardware
Jack Lange [Mon, 2 May 2011 18:34:02 +0000 (13:34 -0500)]
palacios/include/palacios/vm_guest.h
palacios/include/palacios/vmcs.h
palacios/include/palacios/vmm_types.h
palacios/include/palacios/vmx.h
palacios/include/palacios/vmx_hw_info.h
palacios/src/palacios/vmcs.c
palacios/src/palacios/vmm_config.c
palacios/src/palacios/vmx.c

palacios/include/palacios/vm_guest.h
index 5d4527f..c5dddfc 100644 (file)
@@ -145,8 +145,6 @@ struct v3_vm_info {
     uint32_t mem_align;
     struct v3_mem_map mem_map;
 
-    v3_paging_size_t paging_size; // for nested paging
-
     struct v3_mem_hooks mem_hooks;
 
     struct v3_shdw_impl_state shdw_impl;

palacios/include/palacios/vmcs.h
index 35e7cc0..f8028b5 100644 (file)
@@ -275,10 +275,10 @@ struct vmx_intr_state {
     union {
        uint32_t value;
        struct {
-           uint8_t block_sti    : 1;
-           uint8_t block_mov_ss : 1;
-           uint8_t block_smi    : 1;
-           uint8_t block_nmi    : 1;
+           uint32_t block_sti    : 1;
+           uint32_t block_mov_ss : 1;
+           uint32_t block_smi    : 1;
+           uint32_t block_nmi    : 1;
            uint32_t rsvd        : 28;
        } __attribute__((packed));
     } __attribute__((packed));
@@ -295,14 +295,14 @@ struct vmx_pending_dbg_excps {
        } __attribute__((packed));
 
        struct {
-           uint8_t b0       : 1;
-           uint8_t b1       : 1;
-           uint8_t b2       : 1;
-           uint8_t b3       : 1;
-           uint8_t rsvd1    : 8;
-           uint8_t bp_set   : 1;
-           uint8_t rsvd2    : 1;
-           uint8_t bp_ss    : 1;
+           uint64_t b0       : 1;
+           uint64_t b1       : 1;
+           uint64_t b2       : 1;
+           uint64_t b3       : 1;
+           uint64_t rsvd1    : 8;
+           uint64_t bp_set   : 1;
+           uint64_t rsvd2    : 1;
+           uint64_t bp_ss    : 1;
            uint64_t rsvd3   : 49;
        } __attribute__((packed));
     } __attribute__((packed));
@@ -334,6 +334,14 @@ struct vmcs_segment {
 };
 
 
+struct vmcs_msr_entry {
+    uint32_t index;
+    uint32_t rsvd;
+    uint32_t lo;
+    uint32_t hi;
+} __attribute__((packed));
+
+
 struct vmcs_interrupt_state {
     union {
        uint32_t val;
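
The vmcs_msr_entry structure added above mirrors the 16-byte entry format of the VMX MSR load/store areas: a 32-bit MSR index, 32 reserved bits, and the MSR value split into low and high halves. As a rough illustration of how the exit-store area populated later in this commit could be consumed, here is a minimal self-contained sketch; the helper get_saved_guest_msr and the sample values are hypothetical and not part of the commit.

  #include <stdint.h>
  #include <stdio.h>

  /* Same layout as the vmcs_msr_entry added above: one 16-byte entry per MSR. */
  struct vmcs_msr_entry {
      uint32_t index;
      uint32_t rsvd;
      uint32_t lo;
      uint32_t hi;
  } __attribute__((packed));

  /* Hypothetical helper: walk an exit-store area that the CPU filled on VM exit
   * and return the guest's saved value for msr_index (0 if it is not listed). */
  static uint64_t get_saved_guest_msr(struct vmcs_msr_entry * exit_store_msrs,
                                      int count, uint32_t msr_index) {
      int i;
      for (i = 0; i < count; i++) {
          if (exit_store_msrs[i].index == msr_index) {
              return ((uint64_t)exit_store_msrs[i].hi << 32) | exit_store_msrs[i].lo;
          }
      }
      return 0;
  }

  int main(void) {
      /* Fake one-entry exit-store area holding the guest's IA32_LSTAR (0xc0000082). */
      struct vmcs_msr_entry area[1] = {
          { .index = 0xc0000082, .rsvd = 0, .lo = 0xdeadbeef, .hi = 0xffffffff }
      };

      printf("guest LSTAR = 0x%llx\n",
             (unsigned long long)get_saved_guest_msr(area, 1, 0xc0000082));
      return 0;
  }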

palacios/include/palacios/vmm_types.h
index 4ef72d4..1a17997 100644 (file)
@@ -30,8 +30,6 @@ typedef enum {SHADOW_PAGING, NESTED_PAGING} v3_paging_mode_t;
 typedef enum {VM_RUNNING, VM_STOPPED, VM_SUSPENDED, VM_ERROR, VM_EMULATING} v3_vm_operating_mode_t;
 typedef enum {CORE_RUNNING, CORE_STOPPED} v3_core_operating_mode_t;
 
-typedef enum {PAGING_4KB, PAGING_2MB} v3_paging_size_t;
-
 typedef enum {REAL, /*UNREAL,*/ PROTECTED, PROTECTED_PAE, LONG, LONG_32_COMPAT, LONG_16_COMPAT} v3_cpu_mode_t;
 typedef enum {PHYSICAL_MEM, VIRTUAL_MEM} v3_mem_mode_t;
 
palacios/include/palacios/vmx.h
index 33b8654..f008a4b 100644 (file)
@@ -209,6 +209,8 @@ struct vmx_data {
     struct vmx_entry_ctrls entry_ctrls;
 
     struct vmx_exception_bitmap excp_bmap;
+
+    void * msr_area;
 };
 
 int v3_is_vmx_capable();

palacios/include/palacios/vmx_hw_info.h
index 5f57fc0..c11dcb1 100644 (file)
@@ -53,15 +53,15 @@ struct vmx_basic_msr {
        } __attribute__((packed));
 
        struct {    uint32_t revision;
-           uint32_t regionSize   : 13;
-           uint8_t rsvd1         : 3; /* Always 0 */
-           uint8_t physWidth     : 1; /* VMCS address field widths 
+           uint64_t regionSize   : 13;
+           uint64_t rsvd1         : 3; /* Always 0 */
+           uint64_t physWidth     : 1; /* VMCS address field widths 
                                          (1=32bits, 0=natural width) */
-           uint8_t smm           : 1;
-           uint8_t memType       : 4; /* 0 = UC, 6 = WriteBack */
-           uint8_t io_str_info   : 1;
-           uint8_t def1_maybe_0  : 1; /* 1="Any VMX ctrls that default to 1 may be cleared to 0" */
-           uint32_t rsvd2        : 8; /* Always 0 */
+           uint64_t smm           : 1;
+           uint64_t memType       : 4; /* 0 = UC, 6 = WriteBack */
+           uint64_t io_str_info   : 1;
+           uint64_t def1_maybe_0  : 1; /* 1="Any VMX ctrls that default to 1 may be cleared to 0" */
+           uint64_t rsvd2        : 8; /* Always 0 */
        }  __attribute__((packed));
     }  __attribute__((packed));
 }  __attribute__((packed));
@@ -75,17 +75,17 @@ struct vmx_misc_msr {
        } __attribute__((packed));
 
        struct {
-           uint8_t tsc_multiple       : 5; /* Bit position in TSC field that drives vmx timer step */
-           uint8_t exits_store_LMA    : 1;
-           uint8_t can_halt           : 1;
-           uint8_t can_shtdown        : 1;
-           uint8_t can_wait_for_sipi  : 1;
-           uint8_t rsvd1              : 7;
-           uint16_t num_cr3_targets   : 9;
-           uint8_t max_msr_cache_size : 3; /* (512 * (max_msr_cache_size + 1)) == max msr load/store list size */
-           uint8_t SMM_ctrl_avail     : 1;
-           uint8_t rsvd2              : 3; 
-           uint32_t MSEG_rev_id;
+           uint64_t tsc_multiple       : 5; /* Bit position in TSC field that drives vmx timer step */
+           uint64_t exits_store_LMA    : 1;
+           uint64_t can_halt           : 1;
+           uint64_t can_shtdown        : 1;
+           uint64_t can_wait_for_sipi  : 1;
+           uint64_t rsvd1              : 7;
+           uint64_t num_cr3_targets    : 9;
+           uint64_t max_msr_cache_size : 3; /* (512 * (max_msr_cache_size + 1)) == max msr load/store list size */
+           uint64_t SMM_ctrl_avail     : 1;
+           uint64_t rsvd2              : 3; 
+           uint64_t MSEG_rev_id;
        }  __attribute__((packed));
     }  __attribute__((packed));
 } __attribute__((packed));
@@ -99,29 +99,29 @@ struct vmx_ept_msr {
        } __attribute__((packed));
 
        struct {
-           uint8_t exec_only_ok             : 1;
-           uint8_t rsvd1                    : 5;
-           uint8_t pg_walk_len4             : 1; /* support for a page walk of length 4 */
-           uint8_t rsvd2                    : 1;
-           uint8_t ept_uc_ok                : 1; /* EPT page tables can be uncacheable */
-           uint8_t rsvd3                    : 5;
-           uint8_t ept_wb_ok                : 1; /* EPT page tables can be writeback */
-           uint8_t rsvd4                    : 1;
-           uint8_t ept_2MB_ok               : 1; /* 2MB EPT pages supported */
-           uint8_t ept_1GB_ok               : 1; /* 1GB EPT pages supported */
-           uint8_t rsvd5                    : 2;
-           uint8_t INVEPT_avail             : 1; /* INVEPT instruction is available */
-           uint8_t rsvd6                    : 4;
-           uint8_t INVEPT_single_ctx_avail  : 1;
-           uint8_t INVEPT_all_ctx_avail     : 1;
-           uint8_t rsvd7                    : 5;
-           uint8_t INVVPID_avail            : 1;
-           uint8_t rsvd8                    : 7;
-           uint8_t INVVPID_1addr_avail      : 1;
-           uint8_t INVVPID_single_ctx_avail : 1;
-           uint8_t INVVPID_all_ctx_avail    : 1;
-           uint8_t INVVPID_single_ctx_w_glbls_avail : 1;
-           uint32_t rsvd9                   : 20;
+           uint64_t exec_only_ok             : 1;
+           uint64_t rsvd1                    : 5;
+           uint64_t pg_walk_len4             : 1; /* support for a page walk of length 4 */
+           uint64_t rsvd2                    : 1;
+           uint64_t ept_uc_ok                : 1; /* EPT page tables can be uncacheable */
+           uint64_t rsvd3                    : 5;
+           uint64_t ept_wb_ok                : 1; /* EPT page tables can be writeback */
+           uint64_t rsvd4                    : 1;
+           uint64_t ept_2MB_ok               : 1; /* 2MB EPT pages supported */
+           uint64_t ept_1GB_ok               : 1; /* 1GB EPT pages supported */
+           uint64_t rsvd5                    : 2;
+           uint64_t INVEPT_avail             : 1; /* INVEPT instruction is available */
+           uint64_t rsvd6                    : 4;
+           uint64_t INVEPT_single_ctx_avail  : 1;
+           uint64_t INVEPT_all_ctx_avail     : 1;
+           uint64_t rsvd7                    : 5;
+           uint64_t INVVPID_avail            : 1;
+           uint64_t rsvd8                    : 7;
+           uint64_t INVVPID_1addr_avail      : 1;
+           uint64_t INVVPID_single_ctx_avail : 1;
+           uint64_t INVVPID_all_ctx_avail    : 1;
+           uint64_t INVVPID_single_ctx_w_glbls_avail : 1;
+           uint64_t rsvd9                   : 20;
        }  __attribute__((packed));
     }  __attribute__((packed));
 }__attribute__((packed));
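
The changes in this header widen every bit-field in the packed MSR overlays (vmx_basic_msr, vmx_misc_msr, vmx_ept_msr) to a single uint64_t base type, so the named fields describe the same storage as the raw lo/hi halves they share a union with. Below is a minimal self-contained sketch of how such an overlay decodes a raw 64-bit MSR image; it mirrors the vmx_basic_msr shape from the diff, is not Palacios code, and the sample register values are made up. The field positions shown assume gcc on x86_64 (the toolchain Palacios targets), where packed bit-fields are allocated from the least significant bit upward.

  #include <stdint.h>
  #include <stdio.h>

  /* Packed overlay in the style of vmx_basic_msr after this commit: raw 32-bit
   * halves in one arm of the union, uniformly typed bit-fields in the other. */
  struct basic_msr {
      union {
          struct {
              uint32_t lo;
              uint32_t hi;
          } __attribute__((packed));

          struct {
              uint32_t revision;
              uint64_t regionSize   : 13;
              uint64_t rsvd1        : 3;
              uint64_t physWidth    : 1;
              uint64_t smm          : 1;
              uint64_t memType      : 4;
              uint64_t io_str_info  : 1;
              uint64_t def1_maybe_0 : 1;
              uint64_t rsvd2        : 8;
          } __attribute__((packed));
      } __attribute__((packed));
  } __attribute__((packed));

  int main(void) {
      struct basic_msr msr;

      /* Pretend these halves came from a rdmsr of IA32_VMX_BASIC. */
      msr.lo = 0x00000010;   /* VMCS revision id = 16                              */
      msr.hi = 0x00181000;   /* regionSize = 4096 (bits 0-12), memType = 6 (18-21) */

      printf("revision=%u regionSize=%llu memType=%llu\n",
             msr.revision,
             (unsigned long long)msr.regionSize,
             (unsigned long long)msr.memType);
      return 0;
  }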
@@ -156,6 +156,7 @@ struct vmx_hw_info {
 
     struct vmx_cr_field cr0;
     struct vmx_cr_field cr4;
+
 };
 
 
palacios/src/palacios/vmcs.c
index 3503326..9e5bd77 100644 (file)
@@ -326,16 +326,6 @@ int v3_update_vmcs_host_state(struct guest_info * info) {
     vmx_ret |= check_vmcs_write(VMCS_HOST_IDTR_BASE, arch_data->host_state.idtr.base);
     vmx_ret |= check_vmcs_write(VMCS_HOST_TR_BASE, arch_data->host_state.tr.base);
 
-#define FS_BASE_MSR 0xc0000100
-#define GS_BASE_MSR 0xc0000101
-
-    // FS.BASE MSR
-    v3_get_msr(FS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
-    vmx_ret |= check_vmcs_write(VMCS_HOST_FS_BASE, tmp_msr.value);    
-
-    // GS.BASE MSR
-    v3_get_msr(GS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
-    vmx_ret |= check_vmcs_write(VMCS_HOST_GS_BASE, tmp_msr.value);    
 
 
 
@@ -423,8 +413,11 @@ int v3_update_vmcs_host_state(struct guest_info * info) {
 #define SYSENTER_CS_MSR 0x00000174
 #define SYSENTER_ESP_MSR 0x00000175
 #define SYSENTER_EIP_MSR 0x00000176
+#define FS_BASE_MSR 0xc0000100
+#define GS_BASE_MSR 0xc0000101
 #define EFER_MSR 0xc0000080
 
+
     // SYSENTER CS MSR
     v3_get_msr(SYSENTER_CS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
     vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_CS, tmp_msr.lo);
@@ -437,10 +430,33 @@ int v3_update_vmcs_host_state(struct guest_info * info) {
     v3_get_msr(SYSENTER_EIP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
     vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_EIP, tmp_msr.value);
 
+
+    // FS.BASE MSR
+    v3_get_msr(FS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+    vmx_ret |= check_vmcs_write(VMCS_HOST_FS_BASE, tmp_msr.value);    
+
+    // GS.BASE MSR
+    v3_get_msr(GS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+    vmx_ret |= check_vmcs_write(VMCS_HOST_GS_BASE, tmp_msr.value);    
+
+
     // EFER
     v3_get_msr(EFER_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
     vmx_ret |= check_vmcs_write(VMCS_HOST_EFER, tmp_msr.value);
 
+    // PERF GLOBAL CONTROL
+
+    // PAT
+
+
+    // save STAR, LSTAR, FMASK, KERNEL_GS_BASE MSRs in MSR load/store area
+
+    
+
+    
+
+
+
     return vmx_ret;
 }
 
palacios/src/palacios/vmm_config.c
index 6f5da69..640144e 100644 (file)
@@ -286,7 +286,6 @@ static int determine_paging_mode(struct guest_info * info, v3_cfg_tree_t * core_
     v3_cfg_tree_t * vm_tree = info->vm_info->cfg_data->cfg;
     v3_cfg_tree_t * pg_tree = v3_cfg_subtree(vm_tree, "paging");
     char * pg_mode          = v3_cfg_val(pg_tree, "mode");
-    char * page_size        = v3_cfg_val(pg_tree, "page_size");
     
     PrintDebug("Paging mode specified as %s\n", pg_mode);
 
@@ -313,24 +312,6 @@ static int determine_paging_mode(struct guest_info * info, v3_cfg_tree_t * core_
     }
 
 
-    if (info->shdw_pg_mode == NESTED_PAGING) {
-       PrintDebug("Guest Paging Mode: NESTED_PAGING\n");
-       if (strcasecmp(page_size, "4kb") == 0) { /* TODO: this may not be an ideal place for this */
-           info->vm_info->paging_size = PAGING_4KB;
-       } else if (strcasecmp(page_size, "2mb") == 0) {
-           info->vm_info->paging_size = PAGING_2MB;
-       } else {
-           PrintError("Invalid VM paging size: '%s'\n", page_size);
-           return -1;
-       }
-       PrintDebug("VM page size=%s\n", page_size);
-    } else if (info->shdw_pg_mode == SHADOW_PAGING) {
-        PrintDebug("Guest Paging Mode: SHADOW_PAGING\n");
-    } else {
-       PrintError("Guest paging mode incorrectly set.\n");
-       return -1;
-    }
-
     if (v3_cfg_val(pg_tree, "large_pages") != NULL) {
        if (strcasecmp(v3_cfg_val(pg_tree, "large_pages"), "true") == 0) {
            info->use_large_pages = 1;

palacios/src/palacios/vmx.c
index c4b951f..be2c77c 100644 (file)
@@ -209,24 +209,30 @@ static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state)
 
 
 
+    
+
+
 
 #ifdef __V3_64BIT__
     vmx_state->exit_ctrls.host_64_on = 1;
 #endif
 
 
-
     /* Not sure how exactly to handle this... */
     v3_hook_msr(core->vm_info, EFER_MSR, 
                &v3_handle_efer_read,
                &v3_handle_efer_write, 
                core);
 
+    // Or is it this??? 
+    vmx_state->entry_ctrls.ld_efer = 1;
+    vmx_state->exit_ctrls.ld_efer = 1;
+    vmx_state->exit_ctrls.save_efer = 1;
+    /*   ***   */
 
     vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE);
 
 
-
     /* Setup paging */
     if (core->shdw_pg_mode == SHADOW_PAGING) {
         PrintDebug("Creating initial shadow page table\n");
@@ -260,8 +266,6 @@ static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state)
     } else if ((core->shdw_pg_mode == NESTED_PAGING) && 
               (v3_cpu_types[core->cpu_id] == V3_VMX_EPT_CPU)) {
 
-       // initialize 1to1 pts
-
 #define CR0_PE 0x00000001
 #define CR0_PG 0x80000000
 #define CR0_WP 0x00010000 // To ensure mem hooks work
@@ -284,18 +288,14 @@ static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state)
        /* Enable EPT */
        vmx_state->pri_proc_ctrls.sec_ctrls = 1; // Enable secondary proc controls
        vmx_state->sec_proc_ctrls.enable_ept = 1; // enable EPT paging
-       //      vmx_state->sec_proc_ctrls.unrstrct_guest = 1; // enable unrestricted guest operation
 
-       vmx_state->entry_ctrls.ld_efer = 1;
-       vmx_state->exit_ctrls.ld_efer = 1;
-       vmx_state->exit_ctrls.save_efer = 1;
+
 
        if (v3_init_ept(core, &hw_info) == -1) {
            PrintError("Error initializing EPT\n");
            return -1;
        }
 
-
     } else if ((core->shdw_pg_mode == NESTED_PAGING) && 
               (v3_cpu_types[core->cpu_id] == V3_VMX_EPT_UG_CPU)) {
        int i = 0;
@@ -369,9 +369,6 @@ static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state)
        vmx_state->sec_proc_ctrls.enable_ept = 1; // enable EPT paging
        vmx_state->sec_proc_ctrls.unrstrct_guest = 1; // enable unrestricted guest operation
 
-       vmx_state->entry_ctrls.ld_efer = 1;
-       vmx_state->exit_ctrls.ld_efer = 1;
-       vmx_state->exit_ctrls.save_efer = 1;
 
        /* Disable shadow paging stuff */
        vmx_state->pri_proc_ctrls.cr3_ld_exit = 0;
@@ -391,10 +388,69 @@ static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state)
     }
 
 
-    // Hook the VMX msrs
+    // hook vmx msrs
 
     // Setup SYSCALL/SYSENTER MSRs in load/store area
+    
+    // save STAR, LSTAR, FMASK, KERNEL_GS_BASE MSRs in MSR load/store area
+    {
+#define IA32_STAR 0xc0000081
+#define IA32_LSTAR 0xc0000082
+#define IA32_FMASK 0xc0000084
+#define IA32_KERN_GS_BASE 0xc0000102
 
+#define IA32_CSTAR 0xc0000083 // Compatibility mode STAR (ignored for now... hopefully its not that important...)
+
+       int msr_ret = 0;
+
+       struct vmcs_msr_entry * exit_store_msrs = NULL;
+       struct vmcs_msr_entry * exit_load_msrs = NULL;
+       struct vmcs_msr_entry * entry_load_msrs = NULL;;
+       int max_msrs = (hw_info.misc_info.max_msr_cache_size + 1) * 4;
+
+       V3_Print("Setting up MSR load/store areas (max_msr_count=%d)\n", max_msrs);
+
+       if (max_msrs < 4) {
+           PrintError("Max MSR cache size is too small (%d)\n", max_msrs);
+           return -1;
+       }
+
+       vmx_state->msr_area = V3_VAddr(V3_AllocPages(1));
+
+       if (vmx_state->msr_area == NULL) {
+           PrintError("could not allocate msr load/store area\n");
+           return -1;
+       }
+
+       msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_STORE_CNT, 4);
+       msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_LOAD_CNT, 4);
+       msr_ret |= check_vmcs_write(VMCS_ENTRY_MSR_LOAD_CNT, 4);
+       
+       
+       exit_store_msrs = (struct vmcs_msr_entry *)(vmx_state->msr_area);
+       exit_load_msrs = (struct vmcs_msr_entry *)(vmx_state->msr_area + (sizeof(struct vmcs_msr_entry) * 4));
+       entry_load_msrs = (struct vmcs_msr_entry *)(vmx_state->msr_area + (sizeof(struct vmcs_msr_entry) * 8));
+
+
+       exit_store_msrs[0].index = IA32_STAR;
+       exit_store_msrs[1].index = IA32_LSTAR;
+       exit_store_msrs[2].index = IA32_FMASK;
+       exit_store_msrs[3].index = IA32_KERN_GS_BASE;
+       
+       memcpy(exit_store_msrs, exit_load_msrs, sizeof(struct vmcs_msr_entry) * 4);
+       memcpy(exit_store_msrs, entry_load_msrs, sizeof(struct vmcs_msr_entry) * 4);
+
+       
+       v3_get_msr(IA32_STAR, &(exit_load_msrs[0].hi), &(exit_load_msrs[0].lo));
+       v3_get_msr(IA32_LSTAR, &(exit_load_msrs[1].hi), &(exit_load_msrs[1].lo));
+       v3_get_msr(IA32_FMASK, &(exit_load_msrs[2].hi), &(exit_load_msrs[2].lo));
+       v3_get_msr(IA32_KERN_GS_BASE, &(exit_load_msrs[3].hi), &(exit_load_msrs[3].lo));
+
+       msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_STORE_ADDR, (addr_t)V3_PAddr(exit_store_msrs));
+       msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_LOAD_ADDR, (addr_t)V3_PAddr(exit_load_msrs));
+       msr_ret |= check_vmcs_write(VMCS_ENTRY_MSR_LOAD_ADDR, (addr_t)V3_PAddr(entry_load_msrs));
+
+    }    
 
     /* Sanity check ctrl/reg fields against hw_defaults */
 
@@ -473,7 +529,10 @@ int v3_init_vmx_vmcs(struct guest_info * core, v3_vm_class_t vm_class) {
 
     if (vm_class == V3_PC_VM) {
        PrintDebug("Initializing VMCS\n");
-       init_vmcs_bios(core, vmx_state);
+       if (init_vmcs_bios(core, vmx_state) == -1) {
+           PrintError("Error initializing VMCS to BIOS state\n");
+           return -1;
+       }
     } else {
        PrintError("Invalid VM Class\n");
        return -1;
@@ -487,6 +546,7 @@ int v3_deinit_vmx_vmcs(struct guest_info * core) {
     struct vmx_data * vmx_state = core->vmm_data;
 
     V3_FreePages((void *)(vmx_state->vmcs_ptr_phys), 1);
+    V3_FreePages(vmx_state->msr_area, 1);
 
     V3_Free(vmx_state);
 
@@ -501,7 +561,7 @@ static int update_irq_exit_state(struct guest_info * info) {
 
     if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 0)) {
 #ifdef CONFIG_DEBUG_INTERRUPTS
-        PrintDebug("Calling v3_injecting_intr\n");
+        V3_Print("Calling v3_injecting_intr\n");
 #endif
         info->intr_core_state.irq_started = 0;
         v3_injecting_intr(info, info->intr_core_state.irq_vector, V3_EXTERNAL_IRQ);
@@ -533,14 +593,14 @@ static int update_irq_entry_state(struct guest_info * info) {
             int_info.error_code = 1;
 
 #ifdef CONFIG_DEBUG_INTERRUPTS
-            PrintDebug("Injecting exception %d with error code %x\n", 
+            V3_Print("Injecting exception %d with error code %x\n", 
                     int_info.vector, info->excp_state.excp_error_code);
 #endif
         }
 
         int_info.valid = 1;
 #ifdef CONFIG_DEBUG_INTERRUPTS
-        PrintDebug("Injecting exception %d (EIP=%p)\n", int_info.vector, (void *)(addr_t)info->rip);
+        V3_Print("Injecting exception %d (EIP=%p)\n", int_info.vector, (void *)(addr_t)info->rip);
 #endif
         check_vmcs_write(VMCS_ENTRY_INT_INFO, int_info.value);
 
@@ -552,7 +612,7 @@ static int update_irq_entry_state(struct guest_info * info) {
         if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 1)) {
 
 #ifdef CONFIG_DEBUG_INTERRUPTS
-            PrintDebug("IRQ pending from previous injection\n");
+            V3_Print("IRQ pending from previous injection\n");
 #endif
 
             // Copy the IDT vectoring info over to reinject the old interrupt
@@ -579,7 +639,7 @@ static int update_irq_entry_state(struct guest_info * info) {
                     ent_int.valid = 1;
 
 #ifdef CONFIG_DEBUG_INTERRUPTS
-                    PrintDebug("Injecting Interrupt %d at exit %u(EIP=%p)\n", 
+                    V3_Print("Injecting Interrupt %d at exit %u(EIP=%p)\n", 
                               info->intr_core_state.irq_vector, 
                               (uint32_t)info->num_exits, 
                               (void *)(addr_t)info->rip);
@@ -624,7 +684,7 @@ static int update_irq_entry_state(struct guest_info * info) {
         check_vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
 
 #ifdef CONFIG_DEBUG_INTERRUPTS
-        PrintDebug("Enabling Interrupt-Window exiting: %d\n", instr_len);
+        V3_Print("Enabling Interrupt-Window exiting: %d\n", instr_len);
 #endif
 
         vmx_info->pri_proc_ctrls.int_wndw_exit = 1;
@@ -723,6 +783,12 @@ int v3_vmx_enter(struct guest_info * info) {
     check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
     check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);
 
+    if (v3_update_vmcs_host_state(info)) {
+       v3_enable_ints();
+        PrintError("Could not write host state\n");
+        return -1;
+    }
+
 
     if (vmx_info->state == VMX_UNLAUNCHED) {
        vmx_info->state = VMX_LAUNCHED;
@@ -739,8 +805,10 @@ int v3_vmx_enter(struct guest_info * info) {
        uint32_t error = 0;
 
         vmcs_read(VMCS_INSTR_ERR, &error);
-        PrintError("VMENTRY Error: %d\n", error);
 
+       v3_enable_ints();
+
+        PrintError("VMENTRY Error: %d\n", error);
        return -1;
     }
 
@@ -790,7 +858,7 @@ int v3_vmx_enter(struct guest_info * info) {
         vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
 
 #ifdef CONFIG_DEBUG_INTERRUPTS
-        PrintDebug("Interrupts available again! (RIP=%llx)\n", info->rip);
+       V3_Print("Interrupts available again! (RIP=%llx)\n", info->rip);
 #endif
     }