/* Enable external-interrupt exiting, NMI exiting, and virtual NMIs */
vmx_state->pin_ctrls.nmi_exit = 1;
+ vmx_state->pin_ctrls.virt_nmi = 1;
vmx_state->pin_ctrls.ext_int_exit = 1;
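// With these pin controls set, NMIs and external interrupts that arrive
// while the guest is running cause VM exits instead of being delivered
// through the guest IDT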
+
/* We enable the preemption timer by default for accurate guest-time accounting */
if (avail_pin_ctrls.active_preempt_timer) {
V3_Print("VMX Preemption Timer is available\n");
vmx_state->exit_ctrls.save_preempt_timer = 1;
}
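// (save_preempt_timer is the "save VMX-preemption timer value" exit control:
// the remaining count is written back to the VMCS on each VM exit, letting
// the exit path recover how long the guest actually ran.)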
+ // We want the guest to trap to us when it halts
vmx_state->pri_proc_ctrls.hlt_exit = 1;
+ // CPUID already tells the guest these instructions are absent; exit if it uses them anyway
+ vmx_state->pri_proc_ctrls.monitor_exit = 1;
+ vmx_state->pri_proc_ctrls.mwait_exit = 1;
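+ // A hedged sketch of the matching CPUID side: since MONITOR/MWAIT now trap,
+ // the leaf-1 CPUID hook should also clear the MONITOR feature flag
+ // (CPUID.1:ECX bit 3) so well-behaved guests never issue them. The hook
+ // name and signature here are illustrative, not necessarily Palacios' API:
+ //
+ //   static int leaf1_hook(struct guest_info * core, uint32_t cpuid,
+ //                         uint32_t * eax, uint32_t * ebx,
+ //                         uint32_t * ecx, uint32_t * edx, void * priv) {
+ //       *ecx &= ~(1U << 3);   // hide MONITOR/MWAIT from the guest
+ //       return 0;
+ //   }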
+ // We don't need to handle PAUSE exits, although this is where we could
+ // back out of a spinning lock acquire, or yield so the lock holder can run
vmx_state->pri_proc_ctrls.pause_exit = 0;
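// If pause_exit were set instead, a minimal handler sketch (the exit-reason
// constant is assumed; v3_yield matches the call used later in this file):
//
//   case VMEXIT_PAUSE:
//       // Guest is spinning; yield so the lock holder's vCPU can run
//       v3_yield(info, -1);
//       break;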
+
vmx_state->pri_proc_ctrls.tsc_offset = 1;
#ifdef V3_CONFIG_TIME_VIRTUALIZE_TSC
vmx_state->pri_proc_ctrls.rdtsc_exit = 1;
// Set up the guest's initial PAT field
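// (0x0007040600070406 is the architected reset value: one memory type per
// byte, low to high PA0..PA7 = WB, WT, UC-, UC, WB, WT, UC-, UC)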
vmx_ret |= check_vmcs_write(VMCS_GUEST_PAT, 0x0007040600070406LL);
+ // Capture CR8 loads/stores so we can keep the APIC TPR consistent
+ vmx_state->pri_proc_ctrls.cr8_ld_exit = 1;
+ vmx_state->pri_proc_ctrls.cr8_str_exit = 1;
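+ // CR8[3:0] architecturally mirrors TPR[7:4], so the exit handler only has
+ // to shift; a hedged sketch (v3_apic_set_tpr is a hypothetical helper):
+ //
+ //   static void handle_cr8_write(struct guest_info * core, uint64_t val) {
+ //       v3_apic_set_tpr(core, (uint8_t)((val & 0xf) << 4));  // TPR[7:4] = CR8[3:0]
+ //   }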
+
+
/* Setup paging */
if (core->shdw_pg_mode == SHADOW_PAGING) {
PrintDebug("Creating initial shadow page table\n");
// Cause VM_EXIT whenever the CR4.VMXE bit is set
vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE);
#define CR0_NE 0x00000020
- vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, CR0_NE);
+#define CR0_CD 0x40000000
+ vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, CR0_NE | CR0_CD);
((struct cr0_32 *)&(core->shdw_pg_state.guest_cr0))->ne = 1;
+ ((struct cr0_32 *)&(core->shdw_pg_state.guest_cr0))->cd = 0;
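+ // Bits set in a CR mask are host-owned: guest writes to them cause exits,
+ // and guest reads see the read-shadow value rather than the hardware
+ // register, which is presumably why the shadowed CD is forced to 0 here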
if (v3_init_ept(core, &hw_info) == -1) {
PrintError("Error initializing EPT\n");
#endif
+
if (v3_update_vmcs_ctrl_fields(core)) {
* JRL: This is broken
*/
int v3_vmx_save_core(struct guest_info * core, void * ctx){
- struct vmx_data * vmx_info = (struct vmx_data *)(core->vmm_data);
-
- // note that the vmcs pointer is an HPA, but we need an HVA
- if (v3_chkpt_save(ctx, "vmcs_data", PAGE_SIZE_4KB,
- V3_VAddr((void*) (vmx_info->vmcs_ptr_phys))) ==-1) {
- PrintError("Could not save vmcs data for VMX\n");
- return -1;
- }
-
- return 0;
+ struct vmx_data * vmx_info = (struct vmx_data *)(core->vmm_data);
+
+ // note that the vmcs pointer is an HPA, but we need an HVA
+ if (v3_chkpt_save(ctx, "vmcs_data", PAGE_SIZE_4KB,
+ V3_VAddr((void*) (vmx_info->vmcs_ptr_phys)))) {
+ PrintError("Could not save vmcs data for VMX\n");
+ return -1;
+ }
+
+ return 0;
}
int v3_vmx_load_core(struct guest_info * core, void * ctx){
- struct vmx_data * vmx_info = (struct vmx_data *)(core->vmm_data);
- struct cr0_32 * shadow_cr0;
- addr_t vmcs_page_paddr; //HPA
-
- vmcs_page_paddr = (addr_t) V3_AllocPages(1);
-
- if (!vmcs_page_paddr) {
- PrintError("Could not allocate space for a vmcs in VMX\n");
- return -1;
- }
-
- if (v3_chkpt_load(ctx, "vmcs_data", PAGE_SIZE_4KB,
- V3_VAddr((void *)vmcs_page_paddr)) == -1) {
- PrintError("Could not load vmcs data for VMX\n");
- return -1;
- }
-
- vmcs_clear(vmx_info->vmcs_ptr_phys);
-
- // Probably need to delete the old one...
- V3_FreePages((void*)(vmx_info->vmcs_ptr_phys),1);
-
- vmcs_load(vmcs_page_paddr);
-
- v3_vmx_save_vmcs(core);
-
- shadow_cr0 = (struct cr0_32 *)&(core->ctrl_regs.cr0);
+ struct vmx_data * vmx_info = (struct vmx_data *)(core->vmm_data);
+ struct cr0_32 * shadow_cr0;
+ addr_t vmcs_page_paddr; //HPA
+
+ vmcs_page_paddr = (addr_t) V3_AllocPages(1);
+
+ if (!vmcs_page_paddr) {
+ PrintError("Could not allocate space for a vmcs in VMX\n");
+ return -1;
+ }
+
+ if (v3_chkpt_load(ctx, "vmcs_data", PAGE_SIZE_4KB,
+ V3_VAddr((void *)vmcs_page_paddr)) == -1) {
+ PrintError("Could not load vmcs data for VMX\n");
+ V3_FreePages((void*)vmcs_page_paddr,1);
+ return -1;
+ }
+
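+ // VMCLEAR the old region so any processor-side cached VMCS state is
+ // flushed to memory before the page is freed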
+ vmcs_clear(vmx_info->vmcs_ptr_phys);
+
+ // Free the old VMCS page; the restored copy replaces it
+ V3_FreePages((void*)(vmx_info->vmcs_ptr_phys),1);
+
+ vmcs_load(vmcs_page_paddr);
+
+ // Keep the cached VMCS pointer in sync with the newly loaded region,
+ // since the old page was just freed
+ vmx_info->vmcs_ptr_phys = vmcs_page_paddr;
+
+ v3_vmx_save_vmcs(core);
+
+ shadow_cr0 = (struct cr0_32 *)&(core->ctrl_regs.cr0);
- /* Get the CPU mode to set the guest_ia32e entry ctrl */
- if (core->shdw_pg_mode == SHADOW_PAGING) {
- if (v3_get_vm_mem_mode(core) == VIRTUAL_MEM) {
- if (v3_activate_shadow_pt(core) == -1) {
- PrintError("Failed to activate shadow page tables\n");
- return -1;
- }
- } else {
- if (v3_activate_passthrough_pt(core) == -1) {
- PrintError("Failed to activate passthrough page tables\n");
- return -1;
- }
- }
+ /* Get the CPU mode to set the guest_ia32e entry ctrl */
+
+ if (core->shdw_pg_mode == SHADOW_PAGING) {
+ if (v3_get_vm_mem_mode(core) == VIRTUAL_MEM) {
+ if (v3_activate_shadow_pt(core) == -1) {
+ PrintError("Failed to activate shadow page tables\n");
+ return -1;
+ }
+ } else {
+ if (v3_activate_passthrough_pt(core) == -1) {
+ PrintError("Failed to activate passthrough page tables\n");
+ return -1;
+ }
}
-
- return 0;
+ }
+
+ return 0;
}
#endif
uint64_t guest_cycles = 0;
// Conditionally yield the CPU if the timeslice has expired
- v3_yield_cond(info);
+ v3_yield_cond(info,-1);
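+ // -1: no usec bound on the yield (assumed semantics of the new argument)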
// Update timer devices late after being in the VM so that as much
// of the time in the VM is accounted for as possible. Also do it before
#endif
}
+
+ // Finally, check for an NMI exit and reinject the NMI on the host if so
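+ // (With NMI exiting enabled, a hardware NMI that arrives while the guest
+ // is running causes a VM exit instead of invoking the host NMI handler,
+ // so we must deliver it by hand here.)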
+ {
+ struct vmx_basic_exit_info * basic_info = (struct vmx_basic_exit_info *)&(exit_info.exit_reason);
+
+ if (basic_info->reason == VMX_EXIT_INFO_EXCEPTION_OR_NMI) {
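+ // Bits 7:0 of the exit interruption info hold the vector; vector 2 is NMI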
+ if ((uint8_t)exit_info.int_info == 2) {
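+ // Software-raise a real NMI so the host handler runs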
+ asm("int $2");
+ }
+ }
+ }
+
// reenable global interrupts after vm exit
v3_enable_ints();
// Conditionally yield the CPU if the timeslice has expired
- v3_yield_cond(info);
+ v3_yield_cond(info,-1);
v3_advance_time(info, NULL);
v3_update_timers(info);
return 0;
}
- v3_yield(info);
+ v3_yield(info,-1);
//PrintDebug("VMX core %u: still waiting for INIT\n",info->vcpu_id);
}