Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way; substitute the branch name for devel.


Working on debugging time management/apic timer code.
diff --git a/palacios/src/palacios/vmm_time.c b/palacios/src/palacios/vmm_time.c
index fc2e37e..27bb2fc 100644
--- a/palacios/src/palacios/vmm_time.c
+++ b/palacios/src/palacios/vmm_time.c
@@ -104,66 +104,109 @@ int v3_offset_time( struct guest_info * info, sint64_t offset )
     return 0;
 }
 
-// Control guest time in relation to host time so that the two stay 
-// appropriately synchronized to the extent possible. 
-int v3_adjust_time(struct guest_info * info) {
+#ifdef V3_CONFIG_TIME_DILATION
+static uint64_t compute_target_host_time(struct guest_info * info, uint64_t guest_time)
+{
     struct vm_time * time_state = &(info->time_state);
-    uint64_t host_time, target_host_time;
-    uint64_t guest_time, target_guest_time, old_guest_time;
-    uint64_t guest_elapsed, host_elapsed, desired_elapsed;
-
-    /* Compute the target host time given how much time has *already*
-     * passed in the guest */
-    guest_time = v3_get_guest_time(time_state);
+    uint64_t guest_elapsed, desired_elapsed;
+    
     guest_elapsed = (guest_time - time_state->initial_time);
     desired_elapsed = (guest_elapsed * time_state->host_cpu_freq) / time_state->guest_cpu_freq;
-    target_host_time = time_state->initial_time + desired_elapsed;
+    return time_state->initial_time + desired_elapsed;
+}
+
+static uint64_t compute_target_guest_time(struct guest_info *info)
+{
+    struct vm_time * time_state = &(info->time_state);
+    uint64_t host_elapsed, desired_elapsed;
+
+    host_elapsed = v3_get_host_time(time_state) - time_state->initial_time;
+    desired_elapsed = (host_elapsed * time_state->guest_cpu_freq) / time_state->host_cpu_freq;
+
+    return time_state->initial_time + desired_elapsed;
+
+} 
+
+/* Yield time in the host to deal with a guest that wants to run slower than 
+ * the native host cycle frequency */
+static int yield_host_time(struct guest_info * info) {
+    struct vm_time * time_state = &(info->time_state);
+    uint64_t host_time, target_host_time;
+    uint64_t guest_time, old_guest_time;
 
     /* Now, let the host run while the guest is stopped to make the two
-     * sync up. */
+     * sync up. Note that this doesn't assume that guest time is stopped;
+     * the offsetting in the next step will add an offset to guest
+     * time to account for the time paused even if the guest isn't
+     * usually paused in the VMM. */
     host_time = v3_get_host_time(time_state);
-    old_guest_time = v3_get_guest_time(time_state);
+    old_guest_time = v3_compute_guest_time(time_state, host_time);
+    target_host_time = compute_target_host_time(info, old_guest_time);
 
     while (target_host_time > host_time) {
        v3_yield(info);
        host_time = v3_get_host_time(time_state);
     }
 
-    guest_time = v3_get_guest_time(time_state);
+    guest_time = v3_compute_guest_time(time_state, host_time);
 
-    // We do *not* assume the guest timer was paused in the VM. If it was
-    // this offseting is 0. If it wasn't we need this.
+    /* We do *not* assume the guest timer was paused in the VM. If it was,
+     * this offsetting is 0. If it wasn't, we need this. */
     v3_offset_time(info, (sint64_t)old_guest_time - (sint64_t)guest_time);
 
+    return 0;
+}
+
+static int skew_guest_time(struct guest_info * info) {
+    struct vm_time * time_state = &(info->time_state);
+    uint64_t target_guest_time, guest_time;
     /* Now the host may have gotten ahead of the guest because
      * yielding is a coarse grained thing. Figure out what guest time
     * we want to be at, and use the offsetting mechanism in 
      * the VMM to make the guest run forward. We limit *how* much we skew 
      * it forward to prevent the guest time making large jumps, 
      * however. */
-    host_elapsed = host_time - time_state->initial_time;
-    desired_elapsed = (host_elapsed * time_state->guest_cpu_freq) / time_state->host_cpu_freq;
-    target_guest_time = time_state->initial_time + desired_elapsed;
+    target_guest_time = compute_target_guest_time(info);
+    guest_time = v3_get_guest_time(time_state);
 
     if (guest_time < target_guest_time) {
-       uint64_t max_skew, desired_skew, skew;
+       sint64_t max_skew, desired_skew, skew;
 
        if (time_state->enter_time) {
-           max_skew = (time_state->exit_time - time_state->enter_time) / 10;
+           /* Limit forward skew to 10% of the amount the guest has
+            * run since we last could skew time */
+           max_skew = ((sint64_t)guest_time - (sint64_t)time_state->enter_time) / 10;
        } else {
            max_skew = 0;
        }
 
-       desired_skew = target_guest_time - guest_time;
+       desired_skew = (sint64_t)target_guest_time - (sint64_t)guest_time;
        skew = desired_skew > max_skew ? max_skew : desired_skew;
-/*     PrintDebug("Guest %llu cycles behind where it should be.\n",
+       PrintDebug("Guest %lld cycles behind where it should be.\n",
                   desired_skew);
-       PrintDebug("Limit on forward skew is %llu. Skewing forward %llu.\n",
-                  max_skew, skew); */
+       PrintDebug("Limit on forward skew is %lld. Skewing forward %lld.\n",
+                  max_skew, skew); 
        
        v3_offset_time(info, skew);
     }
-    
+
+    return 0;
+}
+#endif /* V3_CONFIG_TIME_DILATION */
+
+// Control guest time in relation to host time so that the two stay 
+// appropriately synchronized to the extent possible. 
+int v3_adjust_time(struct guest_info * info) {
+
+#ifdef V3_CONFIG_TIME_DILATION
+    /* First deal with yielding if we want to slow down the guest */
+    yield_host_time(info);
+
+    /* Now, if the guest is too slow (either from excess yielding above,
+     * or because the VMM is doing something that takes a long time to emulate),
+     * allow guest time to jump forward a bit. */
+    skew_guest_time(info);
+#endif
     return 0;
 }
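
The two compute_target_* helpers factored out above apply the same proportional scaling: cycles elapsed in one clock domain are converted into the other by the ratio of the CPU frequencies. A minimal standalone sketch of that scaling follows; the function and parameter names are illustrative and are not part of the Palacios tree.

    /* Illustrative only: convert cycles elapsed at one CPU frequency into
     * the equivalent number of cycles at another, as done by
     * compute_target_host_time() and compute_target_guest_time() above. */
    static uint64_t scale_elapsed_cycles(uint64_t elapsed,
                                         uint64_t from_freq,
                                         uint64_t to_freq) {
        return (elapsed * to_freq) / from_freq;
    }

    /* e.g. target_host_time = initial_time +
     *          scale_elapsed_cycles(guest_elapsed, guest_cpu_freq, host_cpu_freq); */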
 
@@ -185,15 +228,14 @@ v3_time_enter_vm( struct guest_info * info )
     struct vm_time * time_state = &(info->time_state);
     uint64_t guest_time, host_time;
 
-    guest_time = v3_get_guest_time(time_state);
     host_time = v3_get_host_time(time_state);
+    guest_time = v3_get_guest_time(time_state);
     time_state->enter_time = host_time;
-    time_state->guest_host_offset = guest_time - host_time;
-
-    // Because we just modified the offset - shouldn't matter as this should be 
-    // the last time-related call prior to entering the VMM, but worth it 
-    // just in case.
-    time_state->exit_time = host_time; 
+#ifdef V3_CONFIG_TIME_DILATION
+    time_state->guest_host_offset = (sint64_t)guest_time - (sint64_t)host_time;
+#else
+    time_state->guest_host_offset = 0;
+#endif
 
     return 0;
 }
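
With V3_CONFIG_TIME_DILATION disabled, guest_host_offset is forced to zero, so guest time simply tracks the host clock; with it enabled, guest time is the host time shifted by the signed offset. The body of v3_compute_guest_time() is not part of this diff, so the following is only an assumed sketch of that relationship.

    /* Assumption: guest time is the host time shifted by the signed
     * guest/host offset kept in struct vm_time.  This mirrors how
     * v3_time_enter_vm() computes guest_host_offset above, but it is an
     * illustration, not the actual v3_compute_guest_time() implementation. */
    static inline uint64_t guest_time_from_host(uint64_t host_time,
                                                sint64_t guest_host_offset) {
        return (uint64_t)((sint64_t)host_time + guest_host_offset);
    }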
@@ -232,7 +274,9 @@ void v3_update_timers(struct guest_info * info) {
 
     time_state->last_update = v3_get_guest_time(time_state);
     cycles = time_state->last_update - old_time;
+    V3_ASSERT(cycles >= 0);
 
+    //    V3_Print("Updating timers with %lld elapsed cycles.\n", cycles);
     list_for_each_entry(tmp_timer, &(time_state->timers), timer_link) {
        tmp_timer->ops->update_timer(info, cycles, time_state->guest_cpu_freq, tmp_timer->private_data);
     }
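
v3_update_timers() drives each registered timer through its ops->update_timer hook with the number of guest cycles elapsed since the last update. The timer structures themselves are not shown in this diff, so the sketch below only assumes the callback parameters visible at the call site; a hypothetical timer backend that simply accumulates elapsed cycles might look like this.

    /* Hypothetical timer backend: accumulate elapsed guest cycles.  The
     * callback parameters mirror the update_timer call site in
     * v3_update_timers() above; the return type and registration details
     * are assumptions. */
    struct cycle_counter {
        uint64_t total_cycles;
    };

    static void count_cycles(struct guest_info * info, uint64_t cycles,
                             uint64_t cpu_freq, void * private_data) {
        struct cycle_counter * counter = (struct cycle_counter *)private_data;
        counter->total_cycles += cycles;
    }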