#include <palacios/vmm_lowlevel.h>
#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_config.h>
+#include <palacios/vmm_time.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_direct_paging.h>
#include <palacios/vmx_io.h>
return 0;
}
+
+// Tear down per-core VMCS state: release the physical page backing the
+// VMCS region, then free the per-core vmx_data bookkeeping structure.
+// Counterpart to the VMCS init path. Always returns 0 (no failure modes).
+int v3_deinit_vmx_vmcs(struct guest_info * core) {
+ struct vmx_data * vmx_state = core->vmm_data;
+
+ // NOTE(review): frees exactly 1 page -- assumes the VMCS region was
+ // allocated as a single page by the matching init; confirm against
+ // the allocation site.
+ V3_FreePages((void *)(vmx_state->vmcs_ptr_phys), 1);
+
+ V3_Free(vmx_state);
+
+ return 0;
+}
+
+
static int update_irq_exit_state(struct guest_info * info) {
struct vmx_exit_idt_vec_info idt_vec_info;
// Conditionally yield the CPU if the timeslice has expired
v3_yield_cond(info);
- // v3_print_guest_state(info);
+ // Perform any additional yielding needed for time adjustment
+ v3_adjust_time(info);
+
+ // Update timer devices prior to entering VM.
+ v3_update_timers(info);
// disable global interrupts for vm state transition
v3_disable_ints();
vmcs_write(VMCS_GUEST_CR3, guest_cr3);
}
- v3_update_timers(info);
-
- /* If this guest is frequency-lagged behind host time, wait
- * for the appropriate host time before resuming the guest. */
- v3_adjust_time(info);
+ // Perform last-minute time bookkeeping prior to entering the VM
+ v3_time_enter_vm(info);
tsc_offset_high = (uint32_t)((v3_tsc_host_offset(&info->time_state) >> 32) & 0xffffffff);
tsc_offset_low = (uint32_t)(v3_tsc_host_offset(&info->time_state) & 0xffffffff);
return -1;
}
+ // Immediate exit from VM time bookkeeping
+ v3_time_exit_vm(info);
+
info->num_exits++;
/* Update guest state */
v3_start_time(info);
while (1) {
+
+ if (info->vm_info->run_state == VM_STOPPED) {
+ info->core_run_state = CORE_STOPPED;
+ break;
+ }
+
if (v3_vmx_enter(info) == -1) {
v3_print_vmcs();
print_exit_log(info);
return -1;
}
+
+
+ if (info->vm_info->run_state == VM_STOPPED) {
+ info->core_run_state = CORE_STOPPED;
+ break;
+ }
/*
if ((info->num_exits % 5000) == 0) {
V3_Print("VMX Exit number %d\n", (uint32_t)info->num_exits);
struct v3_msr tmp_msr;
uint64_t ret = 0;
- v3_get_msr(VMX_CR4_FIXED0_MSR,&(tmp_msr.hi),&(tmp_msr.lo));
+ v3_get_msr(VMX_CR4_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+
#ifdef __V3_64BIT__
__asm__ __volatile__ (
"movq %%cr4, %%rbx;"
}
+
+// Per-physical-CPU VMX teardown: mark the CPU's architecture entry as
+// invalid so it is no longer treated as VMX-capable, then release the
+// host VMCS page that was allocated for this CPU.
+// NOTE(review): cpu_id is used as an unchecked array index into both
+// v3_cpu_types[] and host_vmcs_ptrs[] -- caller must pass a valid id.
+void v3_deinit_vmx_cpu(int cpu_id) {
+ extern v3_cpu_arch_t v3_cpu_types[];
+ v3_cpu_types[cpu_id] = V3_INVALID_CPU;
+ // Frees 1 page; assumes the host VMCS region is one page -- TODO confirm.
+ V3_FreePages((void *)host_vmcs_ptrs[cpu_id], 1);
+}