/* Control register exit masks */
#define CR4_VMXE 0x00002000
+#define CR4_PAE 0x00000020
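+/* Guest writes to CR4 bits covered by these masks trigger a VM exit;
+ * the masks are ORed into VMCS_CR4_MASK during VMCS setup. */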
struct vmx_exit_info * exit_info);
int v3_vmx_handle_cr3_access(struct guest_info * info,
struct vmx_exit_cr_qual * cr_qual);
-
+int v3_vmx_handle_cr4_access(struct guest_info * info,
+ struct vmx_exit_cr_qual * cr_qual);
#endif
}
+static int v3_svm_handle_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data)
+{
+ int status;
+
+ // Call arch-independent handler
+ if ((status = v3_handle_efer_write(core, msr, src, priv_data)) != 0)
+ return status;
+
+ // SVM-specific code: the generic handler above copied the guest's
+ // value into the hardware-visible EFER, which may clear SVME. VMRUN
+ // requires guest EFER.SVME to be set regardless of paging mode,
+ // so force it back on here.
+ {
+ struct efer_64 * hw_efer = (struct efer_64 *)&(core->ctrl_regs.efer);
+ hw_efer->svme = 1;
+ }
+
+ return 0;
+}
+
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
v3_hook_msr(core->vm_info, EFER_MSR,
&v3_handle_efer_read,
- &v3_handle_efer_write,
+ &v3_svm_handle_efer_write,
core);
if (core->shdw_pg_mode == SHADOW_PAGING) {
#ifdef __V3_64BIT__
check_vmcs_write(VMCS_GUEST_EFER, info->ctrl_regs.efer);
+ check_vmcs_write(VMCS_ENTRY_CTRLS, vmx_info->entry_ctrls.value);
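+ // entry_ctrls is shadowed in memory; write it back so the
+ // ld_efer/guest_ia32e changes take effect in the VMCS.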
#endif
}
-
-// TODO: this is a disaster we need to clean this up...
int v3_handle_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data) {
- //struct efer_64 * new_efer = (struct efer_64 *)&(src.value);
- struct efer_64 * shadow_efer = (struct efer_64 *)&(core->ctrl_regs.efer);
- struct v3_msr * guest_efer = &(core->shdw_pg_state.guest_efer);
+ struct v3_msr * vm_efer = &(core->shdw_pg_state.guest_efer);
+ struct efer_64 * hw_efer = (struct efer_64 *)&(core->ctrl_regs.efer);
+ struct efer_64 old_hw_efer = *hw_efer;
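+
+ // EFER is virtualized at two levels: vm_efer is the value the
+ // guest sees when it reads the MSR; hw_efer is the value actually
+ // in effect while the guest runs (SVME/LME/LMA may differ below).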
- PrintDebug("EFER Write\n");
- PrintDebug("EFER Write Values: HI=%x LO=%x\n", src.hi, src.lo);
+ PrintDebug("EFER Write HI=%x LO=%x\n", src.hi, src.lo);
- //PrintDebug("Old EFER=%p\n", (void *)*(addr_t*)(shadow_efer));
-
- // We virtualize the guests efer to hide the SVME and LMA bits
- guest_efer->value = src.value;
-
- if (core->shdw_pg_mode == SHADOW_PAGING) {
- // Enable/Disable Syscall
- shadow_efer->sce = src.value & 0x1;
- } else if (core->shdw_pg_mode == NESTED_PAGING) {
- *(uint64_t *)shadow_efer = src.value;
- shadow_efer->svme = 1;
+ // Set the EFER value the guest sees when it reads the MSR
+ vm_efer->value = src.value;
+
+ // Set EFER value seen by hardware while the guest is running
+ *(uint64_t *)hw_efer = src.value;
+
+ // Catch unsupported features
+ if ((old_hw_efer.lme == 1) && (hw_efer->lme == 0)) {
+ PrintError("Disabling long mode once it has been enabled is not supported\n");
+ return -1;
}
+
+ // Set LME and LMA bits seen by hardware
+ if (old_hw_efer.lme == 0) {
+ // Long mode was not previously enabled, so the lme bit cannot
+ // be set yet. It will be set later when the guest sets CR0.PG
+ // to enable paging.
+ hw_efer->lme = 0;
+ } else {
+ // Long mode was previously enabled. Ensure LMA bit is set.
+ // VMX does not automatically set LMA, and this should not affect SVM.
+ hw_efer->lma = 1;
+ }
+
return 0;
}
#ifdef __V3_64BIT__
+ // Ensure host runs in 64-bit mode at each VM EXIT
vmx_state->exit_ctrls.host_64_on = 1;
#endif
-
- /* Not sure how exactly to handle this... */
+ // Hook all accesses to EFER register
v3_hook_msr(core->vm_info, EFER_MSR,
&v3_handle_efer_read,
&v3_handle_efer_write,
core);
- // Or is it this???
- vmx_state->entry_ctrls.ld_efer = 1;
+ // Restore host's EFER register on each VM EXIT
vmx_state->exit_ctrls.ld_efer = 1;
+
+ // Save/restore guest's EFER register to/from VMCS on VM EXIT/ENTRY
vmx_state->exit_ctrls.save_efer = 1;
- /* *** */
+ vmx_state->entry_ctrls.ld_efer = 1;
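+
+ // Without these, most EFER bits (e.g. SCE, NXE) would persist
+ // unchanged across VM transitions, leaking guest state into the
+ // host and vice versa.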
- vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE);
+ // Cause a VM exit whenever the guest attempts to modify CR4.VMXE or CR4.PAE
+ vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);
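+ // (Reads of masked bits return the CR4 read shadow, so the VMXE
+ // bit forced back on in the CR4 write handler stays hidden from
+ // the guest.)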
/* Setup paging */
return -1;
}
+int v3_vmx_handle_cr4_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
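+ // Exit qualification access types: 0 = MOV to CR, 1 = MOV from CR.
+ // Types 2 (CLTS) and 3 (LMSW) apply only to CR0.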
+ if (cr_qual->access_type < 2) {
+
+ if (cr_qual->access_type == 0) {
+ if (v3_handle_cr4_write(info) != 0) {
+ PrintError("Could not handle CR4 write\n");
+ return -1;
+ }
+ // Hardware requires CR4.VMXE to remain set while in VMX operation,
+ // so force it back on after the guest's write.
+ info->ctrl_regs.cr4 |= CR4_VMXE;
+ } else {
+ if (v3_handle_cr4_read(info) != 0) {
+ PrintError("Could not handle CR4 read\n");
+ return -1;
+ }
+ }
+
+ return 0;
+ }
+
+ PrintError("Invalid CR4 Access type?? (type=%d)\n", cr_qual->access_type);
+ return -1;
+}
+
static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_val) {
if (info->shdw_pg_mode == SHADOW_PAGING) {
// Paging transition
if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
- struct efer_64 * guest_efer = (struct efer_64 *)&(info->ctrl_regs.efer);
+ struct efer_64 * vm_efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
+ struct efer_64 * hw_efer = (struct efer_64 *)&(info->ctrl_regs.efer);
- if (guest_efer->lme == 1) {
+ if (vm_efer->lme) {
// PrintDebug("Enabling long mode\n");
- guest_efer->lma = 1;
- guest_efer->lme = 1;
+ hw_efer->lma = 1;
+ hw_efer->lme = 1;
vmx_info->entry_ctrls.guest_ia32e = 1;
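+ // VM entry requires the "IA-32e mode guest" control to agree with
+ // the EFER.LMA value loaded on entry.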
}
return -1;
}
break;
+ case 4:
+ //PrintDebug("Handling CR4 Access\n");
+ if (v3_vmx_handle_cr4_access(info, cr_qual) == -1) {
+ PrintError("Error in CR4 access handler\n");
+ return -1;
+ }
+ break;
default:
PrintError("Unhandled CR access: %d\n", cr_qual->cr_id);
return -1;
}
- info->rip += exit_info->instr_len;
+ // The CR4 path advances RIP itself (in the generic handlers called
+ // from v3_vmx_handle_cr4_access()), so skip the increment for it.
+ // TODO: move the RIP increment into each individual CR handler.
+ if (cr_qual->cr_id != 4)
+ info->rip += exit_info->instr_len;
break;
}