21 -- (IFACE) Connect Stream
22 -- (VMM) Stop Guest
+29 -- (VMM) Simulate Guest
30 -- (EXT) Activate Inspector
257 -- (IFACE) VGA Console Framebuf Input
258 -- (IFACE) VGA Console Framebuf Query
-10245 -- (IFACE) Connect Host Device
\ No newline at end of file
+10245 -- (IFACE) Connect Host Device
#define V3_VM_STOP 26
#define V3_VM_LOAD 27
#define V3_VM_SAVE 28
+#define V3_VM_SIMULATE 29
#define V3_VM_INSPECT 30
v3_continue_vm(guest->v3_ctx);
break;
}
+ case V3_VM_SIMULATE: {
+ printk("Simulating VM (%s) for %lu msecs\n", guest->name, arg);
+ v3_simulate_vm(guest->v3_ctx, arg);
+ break;
+ }
+
+
#ifdef V3_CONFIG_CHECKPOINT
case V3_VM_SAVE: {
struct v3_chkpt_info chkpt;
-all: v3_ctrl v3_stop v3_cons v3_mem v3_monitor v3_stream v3_user_host_dev_example v3_os_debug v3_user_keyed_stream_example v3_user_keyed_stream_file v3_core_move v3_save v3_load
+all: v3_ctrl v3_stop v3_cons v3_mem v3_monitor v3_stream v3_user_host_dev_example v3_os_debug v3_user_keyed_stream_example v3_user_keyed_stream_file v3_core_move v3_save v3_load v3_simulate
gcc -static v3_stop.c -o v3_stop
gcc -static v3_pause.c -o v3_pause
gcc -static v3_continue.c -o v3_continue
+ gcc -static v3_simulate.c -o v3_simulate
gcc -static v3_create.c -o v3_create
gcc -static v3_free.c -o v3_free
--- /dev/null
+/*
+ * V3 Control utility
+ * (c) Jack lange, 2010
+ */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <string.h>
+
+#include "v3_ctrl.h"
+
+
+/*
+ * Usage: ./v3_simulate <vm-dev> <msecs>
+ *
+ * Asks the VMM (via the V3_VM_SIMULATE ioctl, cmd 29) to simulate the
+ * given VM for the requested number of milliseconds.
+ */
+int main(int argc, char* argv[]) {
+    char * filename = NULL;
+    unsigned int msecs = 0;
+    int vm_fd = 0;
+
+
+    /* Validate argument count BEFORE touching argv[1]/argv[2] */
+    if (argc <= 2) {
+	printf("Usage: ./v3_simulate <vm-dev> <msecs>\n");
+	return -1;
+    }
+
+    filename = argv[1];
+    msecs = atoi(argv[2]);
+
+    /* %u matches unsigned int (%lu would be a format mismatch) */
+    printf("Simulating VM for %u msecs\n", msecs);
+
+    vm_fd = open(filename, O_RDONLY);
+
+    if (vm_fd == -1) {
+	printf("Error opening V3Vee VM device\n");
+	return -1;
+    }
+
+    /* V3_VM_SIMULATE ioctl (29); report failure instead of ignoring it */
+    if (ioctl(vm_fd, 29, msecs) < 0) {
+	printf("Error issuing simulate command to VM device\n");
+	close(vm_fd);
+	return -1;
+    }
+
+    /* Close the file descriptor. */
+    close(vm_fd);
+
+
+
+    return 0;
+}
+
+
+
+
#include <palacios/vmm_regs.h>
#include <palacios/vmm_extensions.h>
#include <palacios/vmm_barrier.h>
+#include <palacios/vmm_timeout.h>
#ifdef V3_CONFIG_TELEMETRY
uint_t cpl;
struct vm_core_time time_state;
+ struct v3_core_timeouts timeouts;
v3_paging_mode_t shdw_pg_mode;
struct v3_shdw_pg_state shdw_pg_state;
int v3_stop_vm(struct v3_vm_info * vm);
int v3_pause_vm(struct v3_vm_info * vm);
int v3_continue_vm(struct v3_vm_info * vm);
+int v3_simulate_vm(struct v3_vm_info * vm, unsigned int msecs);
+
int v3_save_vm(struct v3_vm_info * vm, char * store, char * url);
int v3_load_vm(struct v3_vm_info * vm, char * store, char * url);
int v3_init_barrier(struct v3_vm_info * vm_info);
int v3_deinit_barrier(struct v3_vm_info * vm_info);
+
int v3_raise_barrier(struct v3_vm_info * vm_info, struct guest_info * local_core);
int v3_lower_barrier(struct v3_vm_info * vm_info);
int v3_wait_at_barrier(struct guest_info * core);
+/* Special Barrier activation functions.
+ * DO NOT USE THESE UNLESS YOU KNOW WHAT YOU ARE DOING
+ */
+int v3_raise_barrier_nowait(struct v3_vm_info * vm_info, struct guest_info * local_core);
+int v3_wait_for_barrier(struct v3_vm_info * vm_info, struct guest_info * local_core);
+/* ** */
+
+
+
#endif
#endif
uint_t num_timers;
struct list_head timers;
- // Installed timeout handlers, and the time (in monotonic guest time) of hte
- // next timeout.
- uint64_t next_timeout;
- struct list_head timeout_hooks;
};
struct v3_timer_ops {
struct list_head timer_link;
};
-typedef void (*v3_timeout_callback_t)(struct guest_info * info, void * priv_data);
-struct v3_timeout_hook {
- void * private_data;
- v3_timeout_callback_t callback;
-
- struct list_head hook_link;
-};
+
// Basic functions for handling passage of time in palacios
void v3_init_time_core(struct guest_info * core);
int v3_start_time(struct guest_info * core);
int v3_time_enter_vm(struct guest_info * core);
-int v3_time_exit_vm(struct guest_info * core);
+int v3_time_exit_vm(struct guest_info * core, uint64_t * guest_cycles);
int v3_pause_time(struct guest_info * core);
int v3_resume_time(struct guest_info * core);
int v3_remove_timer(struct guest_info * info, struct v3_timer * timer);
void v3_update_timers(struct guest_info * info);
-// Functions for handling one-shot timeouts in Palacios. Note that only one
-// timeout is every currently outstanding (the soonest scheduled one!), and that
-// all hooks are called on any timeout. If a hook gets called before the desired
-// timeout time, that hook should reschedule its own timeout if desired.
-struct v3_timeout_hook * v3_add_timeout_hook(struct guest_info * info, v3_timeout_callback_t callback, void * priv_data);
-int v3_remove_timeout_hook(struct guest_info * info, struct v3_timeout_hook * hook);
-int v3_schedule_timeout(struct guest_info * info, ullong_t cycles);
-int v3_check_timeout(struct guest_info * info);
+
// Functions to return the different notions of time in Palacios.
static inline uint64_t v3_get_host_time(struct vm_core_time *t) {
--- /dev/null
+/*
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National
+ * Science Foundation and the Department of Energy.
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico. You can find out more at
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2012, Jack Lange <jacklange@cs.pitt.edu>
+ * Copyright (c) 2012, The V3VEE Project <http://www.v3vee.org>
+ * All rights reserved.
+ *
+ * Author: Jack Lange <jacklange@cs.pitt.edu>
+ *
+ * This is free software. You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
+
+#ifndef __VMM_TIMEOUT_H__
+#define __VMM_TIMEOUT_H__
+
+#ifdef __V3VEE__
+
+#include <palacios/vmm_types.h>
+
+struct guest_info;
+
+/*
+ * Per-core one-shot timeout state. At most one timeout may be armed
+ * on a core at a time (v3_add_core_timeout() rejects a second one).
+ */
+struct v3_core_timeouts {
+    uint8_t timeout_active;    // nonzero while a timeout is armed
+    uint64_t next_timeout; // # of cycles until next timeout
+
+
+    // Invoked once when the timeout expires; private_data is passed through
+    int (*callback)(struct guest_info * core, void * private_data);
+    void * private_data;
+};
+
+
+
+int v3_add_core_timeout(struct guest_info * core, uint64_t cycles,
+ int (*callback)(struct guest_info * core,
+ void * private_data),
+ void * private_data);
+
+
+int v3_handle_timeouts(struct guest_info * core, uint64_t guest_cycles);
+
+
+#endif
+
+#endif
typedef enum {SHADOW_PAGING, NESTED_PAGING} v3_paging_mode_t;
-typedef enum {VM_RUNNING, VM_STOPPED, VM_PAUSED, VM_ERROR} v3_vm_operating_mode_t;
+typedef enum {VM_RUNNING, VM_STOPPED, VM_PAUSED, VM_ERROR, VM_SIMULATING} v3_vm_operating_mode_t;
typedef enum {CORE_RUNNING, CORE_STOPPED} v3_core_operating_mode_t;
typedef enum {REAL, /*UNREAL,*/ PROTECTED, PROTECTED_PAE, LONG, LONG_32_COMPAT, LONG_16_COMPAT} v3_cpu_mode_t;
vmm_multitree.o \
vmm_bitmap.o \
vmm_barrier.o \
-
+ vmm_timeout.o \
obj-$(V3_CONFIG_XED) += vmm_xed.o
vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
addr_t exit_code = 0, exit_info1 = 0, exit_info2 = 0;
sint64_t tsc_offset;
+ uint64_t guest_cycles = 0;
// Conditionally yield the CPU if the timeslice has expired
v3_yield_cond(info);
// Perform any additional yielding needed for time adjustment
v3_adjust_time(info);
- // Check for timeout - since this calls generic hooks in devices
- // that may do things like pause the VM, it cannot be with interrupts
- // disabled.
- v3_check_timeout(info);
+
// disable global interrupts for vm state transition
v3_clgi();
//V3_Print("Calling v3_svm_launch\n");
+ {
+ uint64_t entry_tsc = 0;
+ uint64_t exit_tsc = 0;
+
+ rdtscll(entry_tsc);
+
+ v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[V3_Get_CPU()]);
+
+ rdtscll(exit_tsc);
+
+ guest_cycles = exit_tsc - entry_tsc;
+ }
- v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[V3_Get_CPU()]);
//V3_Print("SVM Returned: Exit Code: %x, guest_rip=%lx\n", (uint32_t)(guest_ctrl->exit_code), (unsigned long)guest_state->rip);
v3_last_exit = (uint32_t)(guest_ctrl->exit_code);
// Immediate exit from VM time bookkeeping
- v3_time_exit_vm(info);
+ v3_time_exit_vm(info, &guest_cycles);
info->num_exits++;
}
}
+ if (info->timeouts.timeout_active) {
+ /* Check to see if any timeouts have expired */
+ v3_handle_timeouts(info, guest_cycles);
+ }
+
return 0;
}
#include <palacios/vmm_lowlevel.h>
#include <palacios/vmm_sprintf.h>
#include <palacios/vmm_extensions.h>
+#include <palacios/vmm_timeout.h>
+
#ifdef V3_CONFIG_SVM
#include <palacios/svm.h>
return -1;
}
+ vm->run_state = VM_RUNNING;
+
v3_lower_barrier(vm);
+ return 0;
+}
+
+
+
+/* Per-core simulation timeout handler.
+ * Marks this core as expired in the shared timeout bitmap, then spins
+ * (yielding) until the controlling thread in v3_simulate_vm() clears
+ * the bitmap to signal that the simulation window is over.
+ */
+static int sim_callback(struct guest_info * core, void * private_data) {
+    struct v3_bitmap * timeout_map = private_data;
+
+    // Signal "this core has hit its timeout" to v3_simulate_vm()
+    v3_bitmap_set(timeout_map, core->vcpu_id);
+
+    V3_Print("Simulation callback activated (guest_rip=%p)\n", (void *)core->rip);
+
+    // Block here until the controller resets the bitmap (v3_bitmap_reset)
+    while (v3_bitmap_check(timeout_map, core->vcpu_id) == 1) {
+	v3_yield(NULL);
+    }
+
+    return 0;
+}
+
+
+
+
+/*
+ * Run a paused VM for (roughly) 'msecs' milliseconds, then return it
+ * to the paused state.
+ *
+ * Arms a one-shot cycle timeout on every core; each core's
+ * sim_callback() sets its bit in 'timeout_map' and spins until this
+ * controller clears the bitmap after raising the barrier again.
+ *
+ * Returns 0 on success, -1 if the VM is not paused or a timeout
+ * could not be registered.
+ */
+int v3_simulate_vm(struct v3_vm_info * vm, unsigned int msecs) {
+    struct v3_bitmap timeout_map;
+    int i = 0;
+    int all_blocked = 0;
+    uint64_t cycles = 0;
+    uint64_t cpu_khz = V3_CPU_KHZ();
+
+    if (vm->run_state != VM_PAUSED) {
+	PrintError("VM must be paused before simulation begins\n");
+	return -1;
+    }
+
+    /* At this point the VM is paused */
+
+    // initialize bitmap (one bit per core)
+    v3_bitmap_init(&timeout_map, vm->num_cores);
+
+    // calculate cycles from msecs...
+    // IMPORTANT: Floating point not allowed.
+    // (msecs is promoted to 64 bits, so the multiply is done in uint64_t)
+    cycles = (msecs * cpu_khz);
+
+    V3_Print("Simulating %u msecs (%llu cycles) [CPU_KHZ=%llu]\n", msecs, cycles, cpu_khz);
+
+    // arm a one-shot timeout on every core
+    for (i = 0; i < vm->num_cores; i++) {
+	if (v3_add_core_timeout(&(vm->cores[i]), cycles, sim_callback, &timeout_map) == -1) {
+	    PrintError("Could not register simulation timeout for core %d\n", i);
+	    return -1;
+	}
+    }
+
+    V3_Print("timeouts set on all cores\n");
+
+    // Run the simulation
+    // NOTE(review): run_state is set to VM_RUNNING rather than the new
+    // VM_SIMULATING state so the core run loops resume normally -- confirm
+    // whether anything should key off VM_SIMULATING instead.
+    vm->run_state = VM_RUNNING;
+    v3_lower_barrier(vm);
+
+    V3_Print("Barrier lowered: We are now Simulating!!\n");
+
+    // block until every core has reached its timeout callback
+    while (all_blocked == 0) {
+	all_blocked = 1;
+
+	for (i = 0; i < vm->num_cores; i++) {
+	    if (v3_bitmap_check(&timeout_map, i) == 0) {
+		all_blocked = 0;
+	    }
+	}
+
+	if (all_blocked == 0) {
+	    v3_yield(NULL);
+	}
+    }
+
+    V3_Print("Simulation is complete\n");
+
+    // Simulation is complete -- reset back to PAUSED state.
+    // Raise the barrier first, then release the spinning callbacks by
+    // clearing the bitmap, then wait for all cores to park at the barrier.
+    v3_raise_barrier_nowait(vm, NULL);
+    vm->run_state = VM_PAUSED;
+
+    v3_bitmap_reset(&timeout_map);
+
+    // NOTE(review): timeout_map is initialized but never deinitialized
+    // (including on the early-error return above); if v3_bitmap_init()
+    // allocates, this leaks -- confirm against the bitmap API.
+    v3_wait_for_barrier(vm, NULL);
+    return 0;
+
+}
#ifdef V3_CONFIG_CHECKPOINT
return 0;
}
-
-/* Barrier synchronization primitive
- * -- This call will block until all the guest cores are waiting at a common synchronization point
- * in a yield loop. The core will block at the sync point until the barrier is lowered.
- *
- * ARGUMENTS:
- * vm_info -- The VM for which the barrier is being activated
- * local_core -- The core whose thread this function is being called from, or NULL
- * if the calling thread is not associated with a VM's core context
- */
-
-int v3_raise_barrier(struct v3_vm_info * vm_info, struct guest_info * local_core) {
+int v3_raise_barrier_nowait(struct v3_vm_info * vm_info, struct guest_info * local_core) {
struct v3_barrier * barrier = &(vm_info->barrier);
addr_t flag;
int acquired = 0;
- int all_blocked = 0;
int local_vcpu = -1;
int i = 0;
-
-
flag = v3_lock_irqsave(barrier->lock);
if (barrier->active == 0) {
}
}
+ return 0;
+}
+
+int v3_wait_for_barrier(struct v3_vm_info * vm_info, struct guest_info * local_core) {
+ struct v3_barrier * barrier = &(vm_info->barrier);
+ int all_blocked = 0;
+ int i = 0;
+
// wait for barrier catch on all cores
while (all_blocked == 0) {
all_blocked = 1;
v3_yield(local_core);
}
-
return 0;
}
+/* Barrier synchronization primitive
+ * -- Blocks until all guest cores are parked at a common synchronization
+ * point in a yield loop; they remain there until the barrier is lowered.
+ * Implemented as raise-without-waiting followed by a wait for the cores.
+ *
+ * ARGUMENTS:
+ * vm_info -- The VM for which the barrier is being activated
+ * local_core -- The core whose thread this function is being called from, or NULL
+ * if the calling thread is not associated with a VM's core context
+ */
+
+int v3_raise_barrier(struct v3_vm_info * vm_info, struct guest_info * local_core) {
+    int ret = v3_raise_barrier_nowait(vm_info, local_core);
+
+    /* Only wait on the cores if the raise itself succeeded */
+    return (ret != 0) ? ret : v3_wait_for_barrier(vm_info, local_core);
+}
+
+
+
+
+
+
/* Lowers a barrier that has already been raised
* guest cores will automatically resume execution
* once this has been called
if (time_state->enter_time) {
/* Limit forward skew to 10% of the amount the guest has
* run since we last could skew time */
- max_skew = (sint64_t)(guest_time - time_state->enter_time) / 10.0;
+ max_skew = (sint64_t)(guest_time - time_state->enter_time) / 10;
} else {
max_skew = 0;
}
/* Called immediately upon entry in the the VMM */
int
-v3_time_exit_vm( struct guest_info * info )
+v3_time_exit_vm( struct guest_info * info, uint64_t * guest_cycles )
{
struct vm_core_time * time_state = &(info->time_state);
}
}
-/* Handle TSC timeout hooks */
-struct v3_timeout_hook *
-v3_add_timeout_hook(struct guest_info * info, v3_timeout_callback_t callback,
- void * priv_data) {
- struct v3_timeout_hook * timeout = NULL;
- timeout = (struct v3_timeout_hook *)V3_Malloc(sizeof(struct v3_timeout_hook));
- V3_ASSERT(timeout != NULL);
-
- timeout->callback = callback;
- timeout->private_data = priv_data;
-
- list_add(&(timeout->hook_link), &(info->time_state.timeout_hooks));
- return timeout;
-}
-
-int
-v3_remove_timeout_hook(struct guest_info * info, struct v3_timeout_hook * hook) {
- list_del(&(hook->hook_link));
- V3_Free(hook);
- return 0;
-}
-
-int v3_schedule_timeout(struct guest_info * info, ullong_t guest_timeout) {
- struct vm_core_time *time_state = &info->time_state;
- /* Note that virtualization architectures that support it (like newer
- * VMX systems) will turn on an active preemption timeout if
- * available to get this timeout as closely as possible. Other systems
- * only catch it in the periodic interrupt and so are less precise */
- if (guest_timeout < time_state->next_timeout) {
- time_state->next_timeout = guest_timeout;
- }
- return 0;
-}
-
-int v3_check_timeout( struct guest_info * info ) {
- struct vm_core_time *time_state = &info->time_state;
- if (time_state->next_timeout <= v3_get_guest_time(time_state)) {
- struct v3_timeout_hook * tmp_timeout;
- time_state->next_timeout = (ullong_t)-1;
- list_for_each_entry(tmp_timeout, &(time_state->timeout_hooks), hook_link) {
- tmp_timeout->callback(info, tmp_timeout->private_data);
- }
- }
- return 0;
-}
/*
* Handle full virtualization of the time stamp counter. As noted
time_state->exit_time = 0;
time_state->pause_time = 0;
- INIT_LIST_HEAD(&(time_state->timeout_hooks));
- time_state->next_timeout = (ullong_t)-1;
INIT_LIST_HEAD(&(time_state->timers));
time_state->num_timers = 0;
--- /dev/null
+/*
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National
+ * Science Foundation and the Department of Energy.
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico. You can find out more at
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2012, Jack Lange <jacklange@cs.pitt.edu>
+ * Copyright (c) 2012, The V3VEE Project <http://www.v3vee.org>
+ * All rights reserved.
+ *
+ * Author: Jack Lange <jacklange@cs.pitt.edu>
+ *
+ * This is free software. You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
+
+
+#include <palacios/vmm.h>
+#include <palacios/vm_guest.h>
+#include <palacios/vmm_timeout.h>
+
+
+
+/*
+ * Arm a one-shot timeout on 'core' that fires after 'cycles' guest
+ * cycles, invoking 'callback' with 'private_data' (see
+ * v3_handle_timeouts()).
+ *
+ * Only one timeout may be active per core at a time; returns -1 if one
+ * is already armed, 0 on success.
+ */
+int v3_add_core_timeout(struct guest_info * core, uint64_t cycles,
+			int (*callback)(struct guest_info * core,
+					void * private_data),
+			void * private_data) {
+    struct v3_core_timeouts * timeouts = &(core->timeouts);
+
+    if (timeouts->timeout_active) {
+	/* typo fixed: "whiel" -> "while" */
+	PrintError("Tried to activate a timeout while one is already active\n");
+	return -1;
+    }
+
+    timeouts->callback = callback;
+    timeouts->private_data = private_data;
+    timeouts->timeout_active = 1;
+    timeouts->next_timeout = cycles;
+
+    return 0;
+}
+
+
+
+/*
+ * Account for 'guest_cycles' of guest execution against the core's
+ * pending timeout. If the remaining cycle budget is exhausted the
+ * timeout is disarmed and its callback fired; otherwise the budget is
+ * simply decremented. Always returns 0.
+ */
+int v3_handle_timeouts(struct guest_info * core, uint64_t guest_cycles) {
+    struct v3_core_timeouts * timeouts = &(core->timeouts);
+
+    if (guest_cycles < timeouts->next_timeout) {
+	/* Not expired yet: just burn down the remaining budget */
+	timeouts->next_timeout -= guest_cycles;
+	return 0;
+    }
+
+    /* Expired: disarm before invoking the handler */
+    timeouts->next_timeout = 0;
+    timeouts->timeout_active = 0;
+
+    if (timeouts->callback) {
+	V3_Print("Calling timeout callback\n");
+	timeouts->callback(core, timeouts->private_data);
+    }
+
+    return 0;
+}
#include <palacios/vmx_msr.h>
#include <palacios/vmm_decoder.h>
#include <palacios/vmm_barrier.h>
+#include <palacios/vmm_timeout.h>
#ifdef V3_CONFIG_CHECKPOINT
#include <palacios/vmm_checkpoint.h>
static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state) {
int vmx_ret = 0;
+ /* Get Available features */
+ struct vmx_pin_ctrls avail_pin_ctrls;
+ avail_pin_ctrls.value = v3_vmx_get_ctrl_features(&(hw_info.pin_ctrls));
+ /* ** */
+
+
// disable global interrupts for vm state initialization
v3_disable_ints();
vmx_state->pin_ctrls.ext_int_exit = 1;
+ /* We enable the preemption timer by default to measure accurate guest time */
+ if (avail_pin_ctrls.active_preempt_timer) {
+ V3_Print("VMX Preemption Timer is available\n");
+ vmx_state->pin_ctrls.active_preempt_timer = 1;
+ vmx_state->exit_ctrls.save_preempt_timer = 1;
+ }
+
vmx_state->pri_proc_ctrls.hlt_exit = 1;
vmx_state->entry_ctrls.ld_pat = 1;
/* Temporary GPF trap */
- vmx_state->excp_bmap.gp = 1;
+ // vmx_state->excp_bmap.gp = 1;
// Setup Guests initial PAT field
vmx_ret |= check_vmcs_write(VMCS_GUEST_PAT, 0x0007040600070406LL);
}
-int
-v3_vmx_schedule_timeout(struct guest_info * info)
-{
- struct vmx_data * vmx_state = (struct vmx_data *)(info->vmm_data);
- sint64_t cycles;
- uint32_t timeout;
-
- /* Check if the hardware supports an active timeout */
-#define VMX_ACTIVE_PREEMPT_TIMER_PIN 0x40
- if (hw_info.pin_ctrls.req_mask & VMX_ACTIVE_PREEMPT_TIMER_PIN) {
- /* The hardware doesn't support us modifying this pin control */
- return 0;
- }
- /* Check if we have one to schedule and schedule it if we do */
- cycles = (sint64_t)info->time_state.next_timeout - (sint64_t)v3_get_guest_time(&info->time_state);
- if (info->time_state.next_timeout == (ullong_t) -1) {
- timeout = 0;
- vmx_state->pin_ctrls.active_preempt_timer = 0;
- } else if (cycles < 0) {
- /* set the timeout to 0 to force an immediate re-exit since it expired between
- * when we checked a timeout and now. IF SOMEONE CONTINAULLY SETS A SHORT TIMEOUT,
- * THIS CAN LOCK US OUT OF THE GUEST! */
- timeout = 0;
- vmx_state->pin_ctrls.active_preempt_timer = 1;
- } else {
- /* The hardware supports scheduling a timeout, and we have one to
- * schedule */
- timeout = (uint32_t)cycles >> hw_info.misc_info.tsc_multiple;
- vmx_state->pin_ctrls.active_preempt_timer = 1;
- }
-
- /* Actually program the timer based on the settings above. */
- check_vmcs_write(VMCS_PREEMPT_TIMER, timeout);
- check_vmcs_write(VMCS_PIN_CTRLS, vmx_state->pin_ctrls.value);
- return 0;
-}
/*
* CAUTION and DANGER!!!
uint32_t tsc_offset_low, tsc_offset_high;
struct vmx_exit_info exit_info;
struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);
+ uint64_t guest_cycles = 0;
// Conditionally yield the CPU if the timeslice has expired
v3_yield_cond(info);
// Perform any additional yielding needed for time adjustment
v3_adjust_time(info);
- // Check for timeout - since this calls generic hooks in devices
- // that may do things like pause the VM, it cannot be with interrupts
- // disabled.
- v3_check_timeout(info);
-
// disable global interrupts for vm state transition
v3_disable_ints();
vmcs_write(VMCS_GUEST_CR3, guest_cr3);
}
- // Update vmx active preemption timer to exit at the next timeout if
- // the hardware supports it.
- v3_vmx_schedule_timeout(info);
// Perform last-minute time bookkeeping prior to entering the VM
v3_time_enter_vm(info);
check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);
+
+
if (v3_update_vmcs_host_state(info)) {
v3_enable_ints();
PrintError("Could not write host state\n");
return -1;
}
+
+ if (vmx_info->pin_ctrls.active_preempt_timer) {
+ /* Preemption timer is active */
+ uint32_t preempt_window = 0xffffffff;
-
- if (vmx_info->state == VMX_UNLAUNCHED) {
- vmx_info->state = VMX_LAUNCHED;
- ret = v3_vmx_launch(&(info->vm_regs), info, &(info->ctrl_regs));
- } else {
- V3_ASSERT(vmx_info->state != VMX_UNLAUNCHED);
- ret = v3_vmx_resume(&(info->vm_regs), info, &(info->ctrl_regs));
+ if (info->timeouts.timeout_active) {
+ preempt_window = info->timeouts.next_timeout;
+ }
+
+ check_vmcs_write(VMCS_PREEMPT_TIMER, preempt_window);
}
-
+
+
+ {
+ uint64_t entry_tsc = 0;
+ uint64_t exit_tsc = 0;
+
+ if (vmx_info->state == VMX_UNLAUNCHED) {
+ vmx_info->state = VMX_LAUNCHED;
+ rdtscll(entry_tsc);
+ ret = v3_vmx_launch(&(info->vm_regs), info, &(info->ctrl_regs));
+ rdtscll(exit_tsc);
+ } else {
+ V3_ASSERT(vmx_info->state != VMX_UNLAUNCHED);
+ rdtscll(entry_tsc);
+ ret = v3_vmx_resume(&(info->vm_regs), info, &(info->ctrl_regs));
+ rdtscll(exit_tsc);
+ }
+
+ guest_cycles = exit_tsc - entry_tsc;
+ }
// PrintDebug("VMX Exit: ret=%d\n", ret);
}
+ info->num_exits++;
+
+ /* If we have the preemption time, then use it to get more accurate guest time */
+ if (vmx_info->pin_ctrls.active_preempt_timer) {
+ uint32_t cycles_left = 0;
+ check_vmcs_read(VMCS_PREEMPT_TIMER, &(cycles_left));
+
+ if (info->timeouts.timeout_active) {
+ guest_cycles = info->timeouts.next_timeout - cycles_left;
+ } else {
+ guest_cycles = 0xffffffff - cycles_left;
+ }
+ }
// Immediate exit from VM time bookkeeping
- v3_time_exit_vm(info);
+ v3_time_exit_vm(info, &guest_cycles);
- info->num_exits++;
/* Update guest state */
v3_vmx_save_vmcs(info);
info->cpu_mode = v3_get_vm_cpu_mode(info);
+
check_vmcs_read(VMCS_EXIT_INSTR_LEN, &(exit_info.instr_len));
check_vmcs_read(VMCS_EXIT_INSTR_INFO, &(exit_info.instr_info));
check_vmcs_read(VMCS_EXIT_REASON, &(exit_info.exit_reason));
return -1;
}
+ if (info->timeouts.timeout_active) {
+ /* Check to see if any timeouts have expired */
+ v3_handle_timeouts(info, guest_cycles);
+ }
+
return 0;
}
}
+
#ifdef V3_CONFIG_TELEMETRY
if (info->vm_info->enable_telemetry) {
v3_telemetry_start_exit(info);