struct efer_64 * efer;
struct cr4_32 * cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
struct v3_segment * cs = &(info->segments.cs);
- vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
+
if (info->shdw_pg_mode == SHADOW_PAGING) {
cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
} else if (info->shdw_pg_mode == NESTED_PAGING) {
cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
- efer = (struct efer_64 *)&(guest_state->efer);
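+ // ctrl_regs.efer now shadows the guest EFER in an architecture-neutral
+ // spot, so this generic code no longer reaches into the SVM-specific VMCB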
+ efer = (struct efer_64 *)&(info->ctrl_regs.efer);
} else {
PrintError("Invalid Paging Mode...\n");
V3_ASSERT(0);
struct cr4_32 * cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
struct efer_64 * efer;
struct v3_segment * cs = &(info->segments.cs);
- vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
+
if (info->shdw_pg_mode == SHADOW_PAGING) {
cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
} else if (info->shdw_pg_mode == NESTED_PAGING) {
cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
- efer = (struct efer_64 *)&(guest_state->efer);
+ efer = (struct efer_64 *)&(info->ctrl_regs.efer);
} else {
PrintError("Invalid Paging Mode...\n");
V3_ASSERT(0);
struct v3_ctrl_regs * regs = &(info->ctrl_regs);
int i = 0;
v3_reg_t * reg_ptr;
- char * reg_names[] = {"CR0", "CR2", "CR3", "CR4", "CR8", "FLAGS", NULL};
- vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(info->vmm_data);
+ char * reg_names[] = {"CR0", "CR2", "CR3", "CR4", "CR8", "FLAGS", "EFER", NULL};
+
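+ // NOTE: this walk assumes struct v3_ctrl_regs lays out cr0, cr2, cr3, cr4,
+ // cr8, rflags, and efer contiguously as v3_reg_t fields, in the same order
+ // as reg_names above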
reg_ptr = (v3_reg_t *)regs;
- V3_Print("32 bit Ctrl Regs:\n");
+ V3_Print("Ctrl Regs:\n");
for (i = 0; reg_names[i] != NULL; i++) {
V3_Print("\t%s=0x%p (at %p)\n", reg_names[i], (void *)(addr_t)reg_ptr[i], &(reg_ptr[i]));
}
- V3_Print("\tEFER=0x%p\n", (void*)(addr_t)(guest_state->efer));
}
// We don't need to virtualize CR4, all we need is to detect the activation of PAE
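+ // Reads are therefore left alone (the handler below is deliberately a
+ // no-op); only writes that touch PAE/PSE/PGE need emulation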
int v3_handle_cr4_read(struct guest_info * info) {
- // PrintError("CR4 Read not handled\n");
+ PrintError("CR4 Read not handled\n");
// Do nothing...
return 0;
}
// Check to see if we need to flush the tlb
+
if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
struct cr4_32 * new_cr4 = (struct cr4_32 *)(dec_instr.src_operand.operand);
struct cr4_32 * cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
return -1;
}
-
- if (flush_tlb) {
- PrintDebug("Handling PSE/PGE/PAE -> TLBFlush (doing flush now!)\n");
- if (v3_activate_shadow_pt(info) == -1) {
- PrintError("Failed to activate shadow page tables when emulating TLB flush in handling cr4 write\n");
- return -1;
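+ // With nested paging the hardware walks the guest's own page tables, so
+ // only shadow paging has to emulate the flush by rebuilding the shadow
+ // page tables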
+ if (info->shdw_pg_mode == SHADOW_PAGING) {
+ if (flush_tlb) {
+ PrintDebug("Handling PSE/PGE/PAE -> TLBFlush (doing flush now!)\n");
+ if (v3_activate_shadow_pt(info) == -1) {
+ PrintError("Failed to activate shadow page tables when emulating TLB flush in handling cr4 write\n");
+ return -1;
+ }
}
}
-
info->rip += dec_instr.instr_length;
return 0;
}
hw_efer->lma = 1;
}
+
+ PrintDebug("RIP=%p\n", (void *)core->rip);
+ PrintDebug("New EFER value HW(hi=%p), VM(hi=%p)\n", (void *)*(uint64_t *)hw_efer, (void *)vm_efer->value);
+
+
return 0;
}
return 1;
}
+
return 0;
}
return (addr_t)V3_PAddr((void *)vmcs_page);
}
+/*
+static int debug_efer_read(struct guest_info * core, uint_t msr, struct v3_msr * src, void * priv_data) {
+ struct v3_msr * efer = (struct v3_msr *)&(core->ctrl_regs.efer);
+ V3_Print("\n\nEFER READ\n");
+
+ v3_print_guest_state(core);
+
+ src->value = efer->value;
+ return 0;
+}
+
+static int debug_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data) {
+ struct v3_msr * efer = (struct v3_msr *)&(core->ctrl_regs.efer);
+ V3_Print("\n\nEFER WRITE\n");
+
+ v3_print_guest_state(core);
+
+ efer->value = src.value;
+
+ {
+ struct vmx_data * vmx_state = core->vmm_data;
+
+ V3_Print("Trapping page faults and GPFs\n");
+ vmx_state->excp_bmap.pf = 1;
+ vmx_state->excp_bmap.gp = 1;
+
+ check_vmcs_write(VMCS_EXCP_BITMAP, vmx_state->excp_bmap.value);
+ }
+
+ return 0;
+}
+*/
static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state) {
vmx_state->exit_ctrls.host_64_on = 1;
#endif
- // Hook all accesses to EFER register
- v3_hook_msr(core->vm_info, EFER_MSR,
- &v3_handle_efer_read,
- &v3_handle_efer_write,
- core);
+
// Restore host's EFER register on each VM EXIT
vmx_state->exit_ctrls.ld_efer = 1;
vmx_state->exit_ctrls.save_efer = 1;
vmx_state->entry_ctrls.ld_efer = 1;
- // Cause VM_EXIT whenever CR4.VMXE or CR4.PAE bits are written
- vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);
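+ // Swap the PAT MSR on VM entry/exit so guest and host memory-type
+ // settings stay isolated from each other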
+ vmx_state->exit_ctrls.save_pat = 1;
+ vmx_state->exit_ctrls.ld_pat = 1;
+ vmx_state->entry_ctrls.ld_pat = 1;
+ /* Temporary GPF trap */
+ vmx_state->excp_bmap.gp = 1;
// Setup the guest's initial PAT field
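+ // (0x0007040600070406 is the architectural power-on default PAT:
+ //  PA0=WB, PA1=WT, PA2=UC-, PA3=UC, mirrored for PA4-PA7)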
vmx_ret |= check_vmcs_write(VMCS_GUEST_PAT, 0x0007040600070406LL);
#define CR0_WP 0x00010000 // To ensure mem hooks work
vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG | CR0_WP));
+
+ // Cause VM_EXIT whenever CR4.VMXE or CR4.PAE bits are written
+ vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);
+
core->ctrl_regs.cr3 = core->direct_map_pt;
// vmx_state->pinbased_ctrls |= NMI_EXIT;
// Setup VMX Assist
v3_vmxassist_init(core, vmx_state);
+ // Hook all accesses to EFER register
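+ // (under shadow paging the guest's EFER has to be virtualized so that
+ //  LME/LMA transitions stay consistent with the shadow page tables)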
+ v3_hook_msr(core->vm_info, EFER_MSR,
+ &v3_handle_efer_read,
+ &v3_handle_efer_write,
+ core);
+
} else if ((core->shdw_pg_mode == NESTED_PAGING) &&
(v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_CPU)) {
// vmx_state->pinbased_ctrls |= NMI_EXIT;
+ // Cause VM_EXIT whenever CR4.VMXE or CR4.PAE bits are written
+ vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);
+
/* Disable CR exits */
vmx_state->pri_proc_ctrls.cr3_ld_exit = 0;
vmx_state->pri_proc_ctrls.cr3_str_exit = 0;
return -1;
}
+ // Hook all accesses to EFER register
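+ // (NULL handlers make the hook a passthrough; the EFER load/save exit
+ //  controls already virtualize it in hardware on this path)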
+ v3_hook_msr(core->vm_info, EFER_MSR, NULL, NULL, NULL);
+
} else if ((core->shdw_pg_mode == NESTED_PAGING) &&
(v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_UG_CPU)) {
int i = 0;
vmx_state->pri_proc_ctrls.invlpg_exit = 0;
+ // Cause VM_EXIT whenever the CR4.VMXE bit is set
+ vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE);
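+ // (unlike the paths above, CR4.PAE is not trapped here: an unrestricted
+ //  guest on EPT manages its own paging mode)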
+
+
if (v3_init_ept(core, &hw_info) == -1) {
PrintError("Error initializing EPT\n");
return -1;
}
+ // Hook all accesses to EFER register
+ //v3_hook_msr(core->vm_info, EFER_MSR, &debug_efer_read, &debug_efer_write, core);
+ v3_hook_msr(core->vm_info, EFER_MSR, NULL, NULL, NULL);
} else {
PrintError("Invalid Virtual paging mode\n");
return -1;
msr_ret |= v3_hook_msr(core->vm_info, FS_BASE_MSR, NULL, NULL, NULL);
msr_ret |= v3_hook_msr(core->vm_info, GS_BASE_MSR, NULL, NULL, NULL);
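+ // PAT can be passed through because the entry/exit PAT load/save
+ // controls configured earlier swap guest and host values in hardware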
+ msr_ret |= v3_hook_msr(core->vm_info, IA32_PAT_MSR, NULL, NULL, NULL);
// Not sure what to do about this... It does not appear to have an explicit hardware-virtualized version...
msr_ret |= v3_hook_msr(core->vm_info, IA32_CSTAR_MSR, NULL, NULL, NULL);