v3_cpu_mode_t v3_get_host_cpu_mode();
-void v3_yield(struct guest_info * info);
-void v3_yield_cond(struct guest_info * info);
-void v3_yield_timed(struct guest_info * info, unsigned int usec);
+void v3_yield(struct guest_info * info, int usec);
+void v3_yield_cond(struct guest_info * info, int usec);
void v3_print_cond(const char * fmt, ...);
void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector);
uint64_t guest_cycles = 0;
// Conditionally yield the CPU if the timeslice has expired
- v3_yield_cond(info);
+ v3_yield_cond(info,-1);
// Update timer devices after being in the VM before doing
// IRQ updates, so that any interrupts they raise get seen
v3_stgi();
// Conditionally yield the CPU if the timeslice has expired
- v3_yield_cond(info);
+ v3_yield_cond(info,-1);
// This update timers is for time-dependent handlers
// if we're slaved to host time
return 0;
}
- v3_yield(info);
+ v3_yield(info,-1);
//PrintDebug("SVM core %u: still waiting for INIT\n", info->vcpu_id);
}
break;
}
- v3_yield(NULL);
+ v3_yield(NULL,-1);
}
V3_Print("VM stopped. Returning\n");
V3_Print("Simulation callback activated (guest_rip=%p)\n", (void *)core->rip);
while (v3_bitmap_check(timeout_map, core->vcpu_id) == 1) {
- v3_yield(NULL);
+ v3_yield(NULL,-1);
}
return 0;
break;
}
- v3_yield(NULL);
+ v3_yield(NULL,-1);
}
-void v3_yield_cond(struct guest_info * info) {
+void v3_yield_cond(struct guest_info * info, int usec) {
uint64_t cur_cycle;
cur_cycle = v3_get_host_time(&info->time_state);
// (void *)cur_cycle, (void *)info->yield_start_cycle,
// (void *)info->yield_cycle_period);
- V3_Yield();
+ if (usec < 0) {
+ V3_Yield();
+ } else {
+ V3_Yield_Timed(usec);
+ }
+
info->yield_start_cycle += info->vm_info->yield_cycle_period;
}
}
* unconditional cpu yield
* if the yielding thread is a guest context, the guest quantum is reset on resumption
* Non guest context threads should call this function with a NULL argument
- */
-void v3_yield(struct guest_info * info) {
- V3_Yield();
+ *
+ * usec < 0  => the non-timed yield is used
+ * usec >= 0 => the timed yield is used, which also usually implies
+ *              that the yield is interruptible
+ */
+void v3_yield(struct guest_info * info, int usec) {
+ if (usec < 0) {
+ V3_Yield();
+ } else {
+ V3_Yield_Timed(usec);
+ }
if (info) {
info->yield_start_cycle += info->vm_info->yield_cycle_period;
}
-/*
- * unconditional cpu yield for a period of time
- * Otherwise identical to v3_yield
- */
-void v3_yield_timed(struct guest_info *info, unsigned int usec)
-{
- V3_Yield_Timed(usec);
-
- if (info) {
- info->yield_start_cycle += info->vm_info->yield_cycle_period;
- }
-}
void v3_print_cond(const char * fmt, ...) {
break;
}
- v3_yield(local_core);
+ v3_yield(local_core,-1);
}
return 0;
// wait for cpu bit to clear
while (v3_bitmap_check(&(barrier->cpu_map), core->vcpu_id)) {
- v3_yield(core);
+ v3_yield(core,-1);
}
return 0;
uint64_t t, cycles;
/* Yield, allowing time to pass while yielded */
t = v3_get_host_time(&info->time_state);
- v3_yield_timed(info,YIELD_TIME_USEC);
+ v3_yield(info,YIELD_TIME_USEC);
cycles = v3_get_host_time(&info->time_state) - t;
v3_advance_time(info, &cycles);
info->time_state.guest_cycles = 0;
PrintDebug("Starting time for core %d at host time %llu/guest time %llu.\n",
info->vcpu_id, t, info->time_state.guest_cycles);
- v3_yield(info);
+ v3_yield(info,-1);
return 0;
}
uint64_t guest_cycles = 0;
// Conditionally yield the CPU if the timeslice has expired
- v3_yield_cond(info);
+ v3_yield_cond(info,-1);
// Update timer devices late after being in the VM so that as much
// of the time in the VM is accounted for as possible. Also do it before
v3_enable_ints();
// Conditionally yield the CPU if the timeslice has expired
- v3_yield_cond(info);
+ v3_yield_cond(info,-1);
v3_advance_time(info, NULL);
v3_update_timers(info);
return 0;
}
- v3_yield(info);
+ v3_yield(info,-1);
//PrintDebug("VMX core %u: still waiting for INIT\n",info->vcpu_id);
}