Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches can be checked out the same way.
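For example, to see which remote branches exist before tracking one (the <branch> placeholder stands in for whatever name git reports):

  git branch -r
  git checkout --track -b <branch> origin/<branch>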


bug fixes for VMX
Jack Lange [Wed, 8 Feb 2012 19:47:22 +0000 (14:47 -0500)]
linux_module/iface-file.c
palacios/include/palacios/vmm_msr.h
palacios/src/palacios/vm_guest.c
palacios/src/palacios/vmcs.c
palacios/src/palacios/vmm_ctrl_regs.c
palacios/src/palacios/vmx.c

linux_module/iface-file.c
index 39f909b..be31f16 100644
@@ -344,6 +344,7 @@ static int guest_file_init(struct v3_guest * guest, void ** vm_data) {
 
 static int guest_file_deinit(struct v3_guest * guest, void * vm_data) {
     
+    kfree(vm_data);
     return 0;
 }
 
palacios/include/palacios/vmm_msr.h
index f8cbd4e..1869e33 100644
@@ -26,6 +26,7 @@
 #include <palacios/vmm_types.h>
 #include <palacios/vmm_list.h>
 
+#define IA32_PAT_MSR    0x277
 #define SYSENTER_CS_MSR 0x00000174
 #define SYSENTER_ESP_MSR 0x00000175
 #define SYSENTER_EIP_MSR 0x00000176
palacios/src/palacios/vm_guest.c
index 46b1369..df30d1d 100644
@@ -38,14 +38,14 @@ v3_cpu_mode_t v3_get_vm_cpu_mode(struct guest_info * info) {
     struct efer_64 * efer;
     struct cr4_32 * cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
     struct v3_segment * cs = &(info->segments.cs);
-    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
+
 
     if (info->shdw_pg_mode == SHADOW_PAGING) {
        cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
        efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
     } else if (info->shdw_pg_mode == NESTED_PAGING) {
        cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
-       efer = (struct efer_64 *)&(guest_state->efer);
+       efer = (struct efer_64 *)&(info->ctrl_regs.efer);
     } else {
        PrintError("Invalid Paging Mode...\n");
        V3_ASSERT(0);
@@ -72,14 +72,14 @@ uint_t v3_get_addr_width(struct guest_info * info) {
     struct cr4_32 * cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
     struct efer_64 * efer;
     struct v3_segment * cs = &(info->segments.cs);
-    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
+
 
     if (info->shdw_pg_mode == SHADOW_PAGING) {
        cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
        efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
     } else if (info->shdw_pg_mode == NESTED_PAGING) {
        cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
-       efer = (struct efer_64 *)&(guest_state->efer);
+       efer = (struct efer_64 *)&(info->ctrl_regs.efer);
     } else {
        PrintError("Invalid Paging Mode...\n");
        V3_ASSERT(0);
@@ -243,18 +243,17 @@ void v3_print_ctrl_regs(struct guest_info * info) {
     struct v3_ctrl_regs * regs = &(info->ctrl_regs);
     int i = 0;
     v3_reg_t * reg_ptr;
-    char * reg_names[] = {"CR0", "CR2", "CR3", "CR4", "CR8", "FLAGS", NULL};
-    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(info->vmm_data);
+    char * reg_names[] = {"CR0", "CR2", "CR3", "CR4", "CR8", "FLAGS", "EFER", NULL};
+   
 
     reg_ptr = (v3_reg_t *)regs;
 
-    V3_Print("32 bit Ctrl Regs:\n");
+    V3_Print("Ctrl Regs:\n");
 
     for (i = 0; reg_names[i] != NULL; i++) {
        V3_Print("\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));  
     }
 
-    V3_Print("\tEFER=0x%p\n", (void*)(addr_t)(guest_state->efer));
 
 }
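
These two hunks drop the SVM-specific VMCB lookup and read EFER from the core's generic control-register block instead, so v3_get_vm_cpu_mode() and v3_get_addr_width() no longer assume an SVM guest. A minimal sketch of the access pattern the patch settles on, using only types and fields that appear in the diff (guest_efer_ptr is a hypothetical helper name; the commit keeps the branches inline):

  /* Pick the right EFER copy for the current paging mode:
   * under shadow paging the guest's view lives in shdw_pg_state;
   * under nested paging the copy in ctrl_regs is used, which the
   * VMX save path below now keeps in sync via VMCS_GUEST_EFER. */
  static struct efer_64 * guest_efer_ptr(struct guest_info * info) {
      if (info->shdw_pg_mode == SHADOW_PAGING) {
          return (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
      } else if (info->shdw_pg_mode == NESTED_PAGING) {
          return (struct efer_64 *)&(info->ctrl_regs.efer);
      }
      return NULL; /* invalid paging mode */
  }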
 
palacios/src/palacios/vmcs.c
index 8fb4080..e28029e 100644
@@ -234,6 +234,7 @@ int v3_vmx_save_vmcs(struct guest_info * info) {
 
 #ifdef __V3_64BIT__
     check_vmcs_read(VMCS_GUEST_EFER, &(info->ctrl_regs.efer));
+    check_vmcs_read(VMCS_ENTRY_CTRLS, &(vmx_info->entry_ctrls.value));
 #endif
     
     error =  v3_read_vmcs_segments(&(info->segments));
@@ -263,9 +264,6 @@ int v3_vmx_restore_vmcs(struct guest_info * info) {
     check_vmcs_write(VMCS_ENTRY_CTRLS, vmx_info->entry_ctrls.value);
 #endif
 
-
-
-
     error = v3_write_vmcs_segments(&(info->segments));
 
     return error;
@@ -487,6 +485,9 @@ int v3_update_vmcs_host_state(struct guest_info * info) {
 
     // PAT
 
+    v3_get_msr(IA32_PAT_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+    vmx_ret |= check_vmcs_write(VMCS_HOST_PAT, tmp_msr.value);  
+
 
     // save STAR, LSTAR, FMASK, KERNEL_GS_BASE MSRs in MSR load/store area
     {
palacios/src/palacios/vmm_ctrl_regs.c
index 3616ae8..7bc0f0b 100644
@@ -430,7 +430,7 @@ int v3_handle_cr3_read(struct guest_info * info) {
 
 // We don't need to virtualize CR4, all we need is to detect the activation of PAE
 int v3_handle_cr4_read(struct guest_info * info) {
-    //  PrintError("CR4 Read not handled\n");
+    PrintError("CR4 Read not handled\n");
     // Do nothing...
     return 0;
 }
@@ -460,6 +460,7 @@ int v3_handle_cr4_write(struct guest_info * info) {
     
     // Check to see if we need to flush the tlb
     
+
     if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) { 
        struct cr4_32 * new_cr4 = (struct cr4_32 *)(dec_instr.src_operand.operand);
        struct cr4_32 * cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
@@ -533,16 +534,16 @@ int v3_handle_cr4_write(struct guest_info * info) {
        return -1;
     }
     
-    
-    if (flush_tlb) {
-       PrintDebug("Handling PSE/PGE/PAE -> TLBFlush (doing flush now!)\n");
-       if (v3_activate_shadow_pt(info) == -1) {
-           PrintError("Failed to activate shadow page tables when emulating TLB flush in handling cr4 write\n");
-           return -1;
+    if (info->shdw_pg_mode == SHADOW_PAGING) {
+       if (flush_tlb) {
+           PrintDebug("Handling PSE/PGE/PAE -> TLBFlush (doing flush now!)\n");
+           if (v3_activate_shadow_pt(info) == -1) {
+               PrintError("Failed to activate shadow page tables when emulating TLB flush in handling cr4 write\n");
+               return -1;
+           }
        }
     }
     
-    
     info->rip += dec_instr.instr_length;
     return 0;
 }
@@ -588,6 +589,11 @@ int v3_handle_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src
        hw_efer->lma = 1;
     }
 
+
+    PrintDebug("RIP=%p\n", (void *)core->rip);
+    PrintDebug("New EFER value HW(hi=%p), VM(hi=%p)\n", (void *)*(uint64_t *)hw_efer, (void *)vm_efer->value); 
+
+
     return 0;
 }
 
palacios/src/palacios/vmx.c
index d6dfafc..fd618e7 100644
@@ -68,6 +68,9 @@ static int inline check_vmcs_write(vmcs_field_t field, addr_t val) {
         return 1;
     }
 
+
+    
+
     return 0;
 }
 
@@ -100,7 +103,39 @@ static addr_t allocate_vmcs() {
     return (addr_t)V3_PAddr((void *)vmcs_page);
 }
 
+/*
 
+static int debug_efer_read(struct guest_info * core, uint_t msr, struct v3_msr * src, void * priv_data) {
+    struct v3_msr * efer = (struct v3_msr *)&(core->ctrl_regs.efer);
+    V3_Print("\n\nEFER READ\n");
+    
+    v3_print_guest_state(core);
+
+    src->value = efer->value;
+    return 0;
+}
+
+static int debug_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data) {
+    struct v3_msr * efer = (struct v3_msr *)&(core->ctrl_regs.efer);
+    V3_Print("\n\nEFER WRITE\n");
+    
+    v3_print_guest_state(core);
+
+    efer->value = src.value;
+
+    {
+       struct vmx_data * vmx_state = core->vmm_data;
+
+       V3_Print("Trapping page faults and GPFs\n");
+       vmx_state->excp_bmap.pf = 1;
+       vmx_state->excp_bmap.gp = 1;
+       
+        check_vmcs_write(VMCS_EXCP_BITMAP, vmx_state->excp_bmap.value);
+    }
+
+    return 0;
+}
+*/
 
 
 static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state) {
@@ -171,11 +206,7 @@ static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state)
     vmx_state->exit_ctrls.host_64_on = 1;
 #endif
 
-    // Hook all accesses to EFER register
-    v3_hook_msr(core->vm_info, EFER_MSR, 
-               &v3_handle_efer_read,
-               &v3_handle_efer_write, 
-               core);
+
 
     // Restore host's EFER register on each VM EXIT
     vmx_state->exit_ctrls.ld_efer = 1;
@@ -184,9 +215,12 @@ static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state)
     vmx_state->exit_ctrls.save_efer = 1;
     vmx_state->entry_ctrls.ld_efer  = 1;
 
-    // Cause VM_EXIT whenever CR4.VMXE or CR4.PAE bits are written
-    vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);
+    vmx_state->exit_ctrls.save_pat = 1;
+    vmx_state->exit_ctrls.ld_pat = 1;
+    vmx_state->entry_ctrls.ld_pat = 1;
 
+    /* Temporary GPF trap */
+    vmx_state->excp_bmap.gp = 1;
 
     // Setup Guests initial PAT field
     vmx_ret |= check_vmcs_write(VMCS_GUEST_PAT, 0x0007040600070406LL);
@@ -205,6 +239,10 @@ static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state)
 #define CR0_WP 0x00010000 // To ensure mem hooks work
         vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG | CR0_WP));
 
+
+       // Cause VM_EXIT whenever CR4.VMXE or CR4.PAE bits are written
+       vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);
+
         core->ctrl_regs.cr3 = core->direct_map_pt;
 
         // vmx_state->pinbased_ctrls |= NMI_EXIT;
@@ -221,6 +259,12 @@ static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state)
        // Setup VMX Assist
        v3_vmxassist_init(core, vmx_state);
 
+       // Hook all accesses to EFER register
+       v3_hook_msr(core->vm_info, EFER_MSR, 
+                   &v3_handle_efer_read,
+                   &v3_handle_efer_write, 
+                   core);
+
     } else if ((core->shdw_pg_mode == NESTED_PAGING) && 
               (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_CPU)) {
 
@@ -231,6 +275,9 @@ static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state)
 
         // vmx_state->pinbased_ctrls |= NMI_EXIT;
 
+       // Cause VM_EXIT whenever CR4.VMXE or CR4.PAE bits are written
+       vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);
+       
         /* Disable CR exits */
        vmx_state->pri_proc_ctrls.cr3_ld_exit = 0;
        vmx_state->pri_proc_ctrls.cr3_str_exit = 0;
@@ -254,6 +301,9 @@ static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state)
            return -1;
        }
 
+       // Hook all accesses to EFER register
+       v3_hook_msr(core->vm_info, EFER_MSR, NULL, NULL, NULL);
+
     } else if ((core->shdw_pg_mode == NESTED_PAGING) && 
               (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_UG_CPU)) {
        int i = 0;
@@ -336,11 +386,18 @@ static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state)
        vmx_state->pri_proc_ctrls.invlpg_exit = 0;
 
 
+       // Cause VM_EXIT whenever the CR4.VMXE bit is set
+       vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE);
+
+
        if (v3_init_ept(core, &hw_info) == -1) {
            PrintError("Error initializing EPT\n");
            return -1;
        }
 
+       // Hook all accesses to EFER register
+       //v3_hook_msr(core->vm_info, EFER_MSR, &debug_efer_read, &debug_efer_write, core);
+       v3_hook_msr(core->vm_info, EFER_MSR, NULL, NULL, NULL);
     } else {
        PrintError("Invalid Virtual paging mode\n");
        return -1;
@@ -410,6 +467,7 @@ static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state)
        msr_ret |= v3_hook_msr(core->vm_info, FS_BASE_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, GS_BASE_MSR, NULL, NULL, NULL);
 
+       msr_ret |= v3_hook_msr(core->vm_info, IA32_PAT_MSR, NULL, NULL, NULL);
 
        // Not sure what to do about this... Does not appear to be an explicit hardware cache version...
        msr_ret |= v3_hook_msr(core->vm_info, IA32_CSTAR_MSR, NULL, NULL, NULL);
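
Taken together, the vmx.c hunks also move the EFER hook out of the common setup path and make it depend on the paging mode. A hedged sketch of the resulting policy, with calls and arguments exactly as they appear in the diff (that NULL handlers mean pass-through access is an assumption about v3_hook_msr's semantics, not something the diff states):

  /* Shadow paging must virtualize EFER via the generic handlers;
   * for both nested-paging (EPT) cases the commit registers NULL
   * handlers instead, presumably letting accesses pass through. */
  if (core->shdw_pg_mode == SHADOW_PAGING) {
      v3_hook_msr(core->vm_info, EFER_MSR,
                  &v3_handle_efer_read,
                  &v3_handle_efer_write,
                  core);
  } else {
      v3_hook_msr(core->vm_info, EFER_MSR, NULL, NULL, NULL);
  }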