Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute:
  cd palacios
  git checkout --track -b devel origin/devel
The other branches can be checked out the same way; see the example below.
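
To see which branches are available before picking one, list the remote branches and then track the one you want. The release branch name below is only an example; substitute a name from the listing:

  git branch -r
  git checkout --track -b release-1.2 origin/release-1.2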


Merge branch 'devel' of ssh://newskysaw.cs.northwestern.edu/home/palacios/palacios...
Patrick G. Bridges [Thu, 9 Feb 2012 20:24:52 +0000 (13:24 -0700)]
palacios/src/palacios/vmm.c
palacios/src/palacios/vmx.c

@@@ -39,6 -39,8 +39,8 @@@
  
  
  v3_cpu_arch_t v3_cpu_types[V3_CONFIG_MAX_CPUS];
+ v3_cpu_arch_t v3_mach_type = V3_INVALID_CPU;
  struct v3_os_hooks * os_hooks = NULL;
  int v3_dbg_enable = 0;
  
@@@ -105,6 -107,9 +107,9 @@@ void Init_V3(struct v3_os_hooks * hooks
      // Set global variables. 
      os_hooks = hooks;
  
+     // Determine the global machine type
+     v3_mach_type = V3_INVALID_CPU;
      for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
        v3_cpu_types[i] = V3_INVALID_CPU;
      }
  
  
      if ((hooks) && (hooks->call_on_cpu)) {
        for (i = 0; i < num_cpus; i++) {
  
            V3_Print("Initializing VMM extensions on cpu %d\n", i);
            hooks->call_on_cpu(i, &init_cpu, (void *)(addr_t)i);
+           if (v3_mach_type == V3_INVALID_CPU) {
+               v3_mach_type = v3_cpu_types[i];
+           }
        }
      }
  
@@@ -210,7 -219,7 +219,7 @@@ static int start_core(void * p
      PrintDebug("virtual core %u (on logical core %u): in start_core (RIP=%p)\n", 
               core->vcpu_id, core->pcpu_id, (void *)(addr_t)core->rip);
  
-     switch (v3_cpu_types[0]) {
+     switch (v3_mach_type) {
  #ifdef V3_CONFIG_SVM
        case V3_SVM_CPU:
        case V3_SVM_REV3_CPU:
@@@ -441,7 -450,7 +450,7 @@@ int v3_stop_vm(struct v3_vm_info * vm) 
  
      vm->run_state = VM_STOPPED;
  
 -    // force exit all cores via a cross call/IPI
 +    // XXX force exit all cores via a cross call/IPI XXX
  
      while (1) {
        int i = 0;
@@@ -628,7 -637,7 +637,7 @@@ void v3_interrupt_cpu(struct v3_vm_inf
  
  
  int v3_vm_enter(struct guest_info * info) {
-     switch (v3_cpu_types[0]) {
+     switch (v3_mach_type) {
  #ifdef V3_CONFIG_SVM
        case V3_SVM_CPU:
        case V3_SVM_REV3_CPU:
@@@ -68,6 -68,9 +68,9 @@@ static int inline check_vmcs_write(vmcs
          return 1;
      }
  
+     
      return 0;
  }
  
@@@ -100,7 -103,39 +103,39 @@@ static addr_t allocate_vmcs() 
      return (addr_t)V3_PAddr((void *)vmcs_page);
  }
  
+ /*
  
+ static int debug_efer_read(struct guest_info * core, uint_t msr, struct v3_msr * src, void * priv_data) {
+     struct v3_msr * efer = (struct v3_msr *)&(core->ctrl_regs.efer);
+     V3_Print("\n\nEFER READ\n");
+     
+     v3_print_guest_state(core);
+     src->value = efer->value;
+     return 0;
+ }
+ static int debug_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data) {
+     struct v3_msr * efer = (struct v3_msr *)&(core->ctrl_regs.efer);
+     V3_Print("\n\nEFER WRITE\n");
+     
+     v3_print_guest_state(core);
+     efer->value = src.value;
+     {
+       struct vmx_data * vmx_state = core->vmm_data;
+       V3_Print("Trapping page faults and GPFs\n");
+       vmx_state->excp_bmap.pf = 1;
+       vmx_state->excp_bmap.gp = 1;
+       
+        check_vmcs_write(VMCS_EXCP_BITMAP, vmx_state->excp_bmap.value);
+     }
+     return 0;
+ }
+ */
  
  
  static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state) {
      vmx_state->exit_ctrls.host_64_on = 1;
  #endif
  
-     // Hook all accesses to EFER register
-     v3_hook_msr(core->vm_info, EFER_MSR, 
-               &v3_handle_efer_read,
-               &v3_handle_efer_write, 
-               core);
  
      // Restore host's EFER register on each VM EXIT
      vmx_state->exit_ctrls.ld_efer = 1;
      vmx_state->exit_ctrls.save_efer = 1;
      vmx_state->entry_ctrls.ld_efer  = 1;
  
-     // Cause VM_EXIT whenever CR4.VMXE or CR4.PAE bits are written
-     vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);
+     vmx_state->exit_ctrls.save_pat = 1;
+     vmx_state->exit_ctrls.ld_pat = 1;
+     vmx_state->entry_ctrls.ld_pat = 1;
  
+     /* Temporary GPF trap */
+     vmx_state->excp_bmap.gp = 1;
  
      // Setup Guests initial PAT field
      vmx_ret |= check_vmcs_write(VMCS_GUEST_PAT, 0x0007040600070406LL);
  #define CR0_WP 0x00010000 // To ensure mem hooks work
          vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG | CR0_WP));
  
+       // Cause VM_EXIT whenever CR4.VMXE or CR4.PAE bits are written
+       vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);
          core->ctrl_regs.cr3 = core->direct_map_pt;
  
          // vmx_state->pinbased_ctrls |= NMI_EXIT;
        // Setup VMX Assist
        v3_vmxassist_init(core, vmx_state);
  
+       // Hook all accesses to EFER register
+       v3_hook_msr(core->vm_info, EFER_MSR, 
+                   &v3_handle_efer_read,
+                   &v3_handle_efer_write, 
+                   core);
      } else if ((core->shdw_pg_mode == NESTED_PAGING) && 
               (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_CPU)) {
  
  
          // vmx_state->pinbased_ctrls |= NMI_EXIT;
  
+       // Cause VM_EXIT whenever CR4.VMXE or CR4.PAE bits are written
+       vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);
+       
          /* Disable CR exits */
        vmx_state->pri_proc_ctrls.cr3_ld_exit = 0;
        vmx_state->pri_proc_ctrls.cr3_str_exit = 0;
            return -1;
        }
  
+       // Hook all accesses to EFER register
+       v3_hook_msr(core->vm_info, EFER_MSR, NULL, NULL, NULL);
      } else if ((core->shdw_pg_mode == NESTED_PAGING) && 
               (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_UG_CPU)) {
        int i = 0;
        vmx_state->pri_proc_ctrls.invlpg_exit = 0;
  
  
+       // Cause VM_EXIT whenever the CR4.VMXE bit is set
+       vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE);
        if (v3_init_ept(core, &hw_info) == -1) {
            PrintError("Error initializing EPT\n");
            return -1;
        }
  
+       // Hook all accesses to EFER register
+       //v3_hook_msr(core->vm_info, EFER_MSR, &debug_efer_read, &debug_efer_write, core);
+       v3_hook_msr(core->vm_info, EFER_MSR, NULL, NULL, NULL);
      } else {
        PrintError("Invalid Virtual paging mode\n");
        return -1;
        msr_ret |= v3_hook_msr(core->vm_info, FS_BASE_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, GS_BASE_MSR, NULL, NULL, NULL);
  
+       msr_ret |= v3_hook_msr(core->vm_info, IA32_PAT_MSR, NULL, NULL, NULL);
  
        // Not sure what to do about this... Does not appear to be an explicit hardware cache version...
        msr_ret |= v3_hook_msr(core->vm_info, IA32_CSTAR_MSR, NULL, NULL, NULL);
@@@ -730,6 -788,9 +788,9 @@@ static int update_irq_entry_state(struc
  
  
  static struct vmx_exit_info exit_log[10];
+ static uint64_t rip_log[10];
  
  static void print_exit_log(struct guest_info * info) {
      int cnt = info->num_exits % 10;
        V3_Print("\tint_info = %p\n", (void *)(addr_t)tmp->int_info);
        V3_Print("\tint_err = %p\n", (void *)(addr_t)tmp->int_err);
        V3_Print("\tinstr_info = %p\n", (void *)(addr_t)tmp->instr_info);
+       V3_Print("\tguest_linear_addr= %p\n", (void *)(addr_t)tmp->guest_linear_addr);
+       V3_Print("\tRIP = %p\n", (void *)rip_log[cnt]);
  
        cnt--;
  
@@@ -805,7 -869,6 +869,7 @@@ v3_vmx_schedule_timeout(struct guest_in
   */
  int v3_vmx_enter(struct guest_info * info) {
      int ret = 0;
 +    sint64_t tsc_offset;
      uint32_t tsc_offset_low, tsc_offset_high;
      struct vmx_exit_info exit_info;
      struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);
      // Perform last-minute time bookkeeping prior to entering the VM
      v3_time_enter_vm(info);
  
 -    tsc_offset_high = (uint32_t)((v3_tsc_host_offset(&info->time_state) >> 32) & 0xffffffff);
 -    tsc_offset_low = (uint32_t)(v3_tsc_host_offset(&info->time_state) & 0xffffffff);
 +    tsc_offset = v3_tsc_host_offset(&info->time_state);
 +    tsc_offset_high = (uint32_t)(( tsc_offset >> 32) & 0xffffffff);
 +    tsc_offset_low = (uint32_t)(tsc_offset & 0xffffffff);
 +
      check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
      check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);
  
      //PrintDebug("VMX Exit taken, id-qual: %u-%lu\n", exit_info.exit_reason, exit_info.exit_qual);
  
      exit_log[info->num_exits % 10] = exit_info;
+     rip_log[info->num_exits % 10] = get_addr_linear(info, info->rip, &(info->segments.cs));
  
  #ifdef V3_CONFIG_SYMCALL
      if (info->sym_core_state.symcall_state.sym_call_active == 0) {
@@@ -1114,8 -1176,9 +1179,9 @@@ int v3_reset_vmx_vm_core(struct guest_i
  
  void v3_init_vmx_cpu(int cpu_id) {
      addr_t vmx_on_region = 0;
+     extern v3_cpu_arch_t v3_mach_type;
  
-     if (cpu_id == 0) {
+     if (v3_mach_type == V3_INVALID_CPU) {
        if (v3_init_vmx_hw(&hw_info) == -1) {
            PrintError("Could not initialize VMX hardware features on cpu %d\n", cpu_id);
            return;
            v3_cpu_types[cpu_id] = V3_VMX_EPT_UG_CPU;
        }
      }
+     
  }