Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way; a release-branch example is sketched below.
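
For instance, assuming the remote has a release branch named Release-1.3 (list the actual branch names with git branch -r), it can be checked out like this:

  git branch -r
  git checkout --track -b Release-1.3 origin/Release-1.3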


Basic HRT startup for HVM, plus assorted cleanup
[palacios.git] / palacios / src / palacios / vmx.c
index 3a64842..f0823b4 100644
 #include <palacios/vmx_assist.h>
 #include <palacios/vmx_hw_info.h>
 
+#ifdef V3_CONFIG_MEM_TRACK
+#include <palacios/vmm_mem_track.h>
+#endif 
+
 #ifndef V3_CONFIG_DEBUG_VMX
 #undef PrintDebug
 #define PrintDebug(fmt, args...)
@@ -97,7 +101,7 @@ static addr_t allocate_vmcs() {
 
     PrintDebug(VM_NONE, VCORE_NONE, "Allocating page\n");
 
-    temp = V3_AllocPages(1);
+    temp = V3_AllocPages(1); // need not be shadow-safe, not exposed to guest
     if (!temp) { 
        PrintError(VM_NONE, VCORE_NONE, "Cannot allocate VMCS\n");
        return -1;
@@ -270,16 +274,19 @@ static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state)
 
 
        // Cause VM_EXIT whenever CR4.VMXE or CR4.PAE bits are written
-       vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);
+       vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE );
 
-        core->ctrl_regs.cr3 = core->direct_map_pt;
+       v3_activate_passthrough_pt(core);
 
         // vmx_state->pinbased_ctrls |= NMI_EXIT;
 
         /* Add CR exits */
         vmx_state->pri_proc_ctrls.cr3_ld_exit = 1;
         vmx_state->pri_proc_ctrls.cr3_str_exit = 1;
-       
+
+       // Note that we intercept cr4.pae writes
+       // and we have cr4 read-shadowed to the shadow pager's cr4
+
        vmx_state->pri_proc_ctrls.invlpg_exit = 1;
        
        /* Add page fault exits */
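
The CR4 handling above uses the VMX guest/host mask and read shadow: each CR4 bit set in VMCS_CR4_MASK is owned by the VMM, so a guest write to it causes a VM exit, while a guest read of it returns the read-shadow value (here, the shadow pager's CR4) rather than the hardware bit. A minimal sketch of the read semantics, as an illustrative helper rather than Palacios code:

  #include <stdint.h>

  /* Illustrative helper (not from Palacios): the CR4 value a guest
   * observes under VMX CR4 masking.  Bits set in `mask` are supplied
   * by the read shadow; all other bits pass through from hardware. */
  static uint64_t guest_visible_cr4(uint64_t hw_cr4, uint64_t mask, uint64_t shadow) {
      return (hw_cr4 & ~mask) | (shadow & mask);
  }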
@@ -326,7 +333,7 @@ static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state)
 
 
 
-       if (v3_init_ept(core, &hw_info) == -1) {
+       if (v3_init_nested_paging_core(core, &hw_info) == -1) {
            PrintError(core->vm_info, core, "Error initializing EPT\n");
            return -1;
        }
@@ -424,7 +431,7 @@ static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state)
        ((struct cr0_32 *)&(core->shdw_pg_state.guest_cr0))->ne = 1;
        ((struct cr0_32 *)&(core->shdw_pg_state.guest_cr0))->cd = 0;
 
-       if (v3_init_ept(core, &hw_info) == -1) {
+       if (v3_init_nested_paging_core(core, &hw_info) == -1) {
            PrintError(core->vm_info, core, "Error initializing EPT\n");
            return -1;
        }
@@ -456,7 +463,7 @@ static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state)
            return -1;
        }
 
-       vmx_state->msr_area_paddr = (addr_t)V3_AllocPages(1);
+       vmx_state->msr_area_paddr = (addr_t)V3_AllocPages(1); // need not be shadow-safe, not exposed to guest
        
        if (vmx_state->msr_area_paddr == (addr_t)NULL) {
            PrintError(core->vm_info, core, "could not allocate msr load/store area\n");
@@ -687,7 +694,7 @@ int v3_vmx_load_core(struct guest_info * core, void * ctx){
   struct cr0_32 * shadow_cr0;
   addr_t vmcs_page_paddr;  //HPA
   
-  vmcs_page_paddr = (addr_t) V3_AllocPages(1);
+  vmcs_page_paddr = (addr_t) V3_AllocPages(1); // need not be shadow-safe, not exposed to guest
   
   if (!vmcs_page_paddr) { 
     PrintError(core->vm_info, core, "Could not allocate space for a vmcs in VMX\n");
@@ -975,6 +982,10 @@ int v3_vmx_enter(struct guest_info * info) {
     // Conditionally yield the CPU if the timeslice has expired
     v3_schedule(info);
 
+#ifdef V3_CONFIG_MEM_TRACK
+    v3_mem_track_entry(info);
+#endif 
+
     // Update timer devices late after being in the VM so that as much 
     // of the time in the VM is accounted for as possible. Also do it before
     // updating IRQ entry state so that any interrupts the timers raise get 
@@ -1028,7 +1039,8 @@ int v3_vmx_enter(struct guest_info * info) {
        
        check_vmcs_write(VMCS_PREEMPT_TIMER, preempt_window);
     }
-   
+
+    V3_FP_ENTRY_RESTORE(info);
 
     {  
        uint64_t entry_tsc = 0;
@@ -1081,6 +1093,8 @@ int v3_vmx_enter(struct guest_info * info) {
 
     info->num_exits++;
 
+    V3_FP_EXIT_SAVE(info);
+
     /* If we have the preemption time, then use it to get more accurate guest time */
     if (vmx_info->pin_ctrls.active_preempt_timer) {
        uint32_t cycles_left = 0;
@@ -1172,6 +1186,10 @@ int v3_vmx_enter(struct guest_info * info) {
        v3_handle_timeouts(info, guest_cycles);
     }
 
+#ifdef V3_CONFIG_MEM_TRACK
+    v3_mem_track_exit(info);
+#endif 
+
     return 0;
 }
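
The v3_mem_track_entry()/v3_mem_track_exit() calls now bracket each pass through v3_vmx_enter() when V3_CONFIG_MEM_TRACK is enabled. One common way to keep such instrumentation out of the hot path when it is disabled is to fold the #ifdef into a pair of macros; a sketch of that pattern, with hypothetical macro names rather than anything defined by Palacios:

  #ifdef V3_CONFIG_MEM_TRACK
  #define MEM_TRACK_ENTRY(core) v3_mem_track_entry(core)
  #define MEM_TRACK_EXIT(core)  v3_mem_track_exit(core)
  #else
  #define MEM_TRACK_ENTRY(core) do { } while (0)  /* compiles away entirely */
  #define MEM_TRACK_EXIT(core)  do { } while (0)
  #endif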
 
@@ -1180,6 +1198,13 @@ int v3_start_vmx_guest(struct guest_info * info) {
 
     PrintDebug(info->vm_info, info, "Starting VMX core %u\n", info->vcpu_id);
 
+#ifdef V3_CONFIG_HVM
+    if (v3_setup_hvm_vm_for_boot(info->vm_info)) { 
+       PrintError(info->vm_info, VCORE_NONE, "HVM setup for boot failed\n");
+       return -1;
+    }
+#endif
+    
     while (1) {
        if (info->core_run_state == CORE_STOPPED) {
            if (info->vcpu_id == 0) {
@@ -1187,6 +1212,8 @@ int v3_start_vmx_guest(struct guest_info * info) {
            } else {
                
                PrintDebug(info->vm_info, info, "VMX core %u: Waiting for core initialization\n", info->vcpu_id);
+
+                V3_NO_WORK(info);
                
                while (info->core_run_state == CORE_STOPPED) {
                    
@@ -1194,11 +1221,13 @@ int v3_start_vmx_guest(struct guest_info * info) {
                        // The VM was stopped before this core was initialized. 
                        return 0;
                    }
-                   
-                   v3_yield(info,-1);
+
+                   V3_STILL_NO_WORK(info);
                    //PrintDebug(info->vm_info, info, "VMX core %u: still waiting for INIT\n",info->vcpu_id);
                }
-               
+
+               V3_HAVE_WORK_AGAIN(info);
+
                PrintDebug(info->vm_info, info, "VMX core %u initialized\n", info->vcpu_id);
                
                // We'll be paranoid about race conditions here
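
The V3_NO_WORK()/V3_STILL_NO_WORK()/V3_HAVE_WORK_AGAIN() hints replace the bare v3_yield() in this wait loop so the host can idle a core that has nothing to do instead of letting it spin. As a sketch of what a host shim might build on these hints (assumed semantics; the real hooks are host-specific and not shown here), the wait could be backed by a condition variable:

  #include <pthread.h>
  #include <stdbool.h>

  /* Hypothetical host-side state for one waiting virtual core
   * (not Palacios code). */
  struct idle_hint {
      pthread_mutex_t lock;
      pthread_cond_t  wake;
      bool            has_work;
  };

  /* A V3_STILL_NO_WORK analogue: sleep until work arrives
   * instead of spinning on yield. */
  static void idle_until_work(struct idle_hint * h) {
      pthread_mutex_lock(&h->lock);
      while (!h->has_work) {
          pthread_cond_wait(&h->wake, &h->lock);
      }
      pthread_mutex_unlock(&h->lock);
  }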