X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmx_ctrl_regs.c;h=8a7c3bfaed8a32fa57feb237d71a45a8c9879e3e;hb=0246f0904a4800dbe1e8e23332d49b468a58f751;hp=00fd7e9fcddddf247369200b6bf77227f24db5c3;hpb=ed8feff1d5dd6bf028cd5ba0960ec125505d7597;p=palacios.git

diff --git a/palacios/src/palacios/vmx_ctrl_regs.c b/palacios/src/palacios/vmx_ctrl_regs.c
index 00fd7e9..8a7c3bf 100644
--- a/palacios/src/palacios/vmx_ctrl_regs.c
+++ b/palacios/src/palacios/vmx_ctrl_regs.c
@@ -25,69 +25,113 @@
 #include 
 #include 
 #include 
-#include 
+#include 

-static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual cr_qual);
-static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_val);
+#ifndef V3_CONFIG_DEBUG_VMX
+#undef PrintDebug
+#define PrintDebug(fmt, args...)
+#endif
+
+static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual);
+static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_val, struct vmx_exit_info * exit_info);
 static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
 static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg);

-int v3_vmx_handle_cr0_access(struct guest_info * info) {
-    struct vmx_exit_cr_qual cr_qual;
-
-    vmcs_read(VMCS_EXIT_QUAL, &(cr_qual.value));
+int v3_vmx_handle_cr0_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual, struct vmx_exit_info * exit_info) {

-    if (cr_qual.access_type < 2) {
+    if (cr_qual->access_type < 2) {
         v3_reg_t * reg = get_reg_ptr(info, cr_qual);

-        if (cr_qual.access_type == 0) {
+        if (cr_qual->access_type == 0) {

-            if (handle_mov_to_cr0(info, reg) != 0) {
-                PrintError("Could not handle CR0 write\n");
+            if (handle_mov_to_cr0(info, reg, exit_info) != 0) {
+                PrintError(info->vm_info, info, "Could not handle CR0 write\n");
                 return -1;
             }
         } else {
             // Mov from cr
-            PrintError("Mov From CR0 not handled\n");
+            PrintError(info->vm_info, info, "Mov From CR0 not handled\n");
             return -1;
         }

         return 0;
     }

-    PrintError("Invalid CR0 Access type?? (type=%d)\n", cr_qual.access_type);
+    PrintError(info->vm_info, info, "Invalid CR0 Access type?? (type=%d)\n", cr_qual->access_type);
     return -1;
 }

-int v3_vmx_handle_cr3_access(struct guest_info * info) {
-    struct vmx_exit_cr_qual cr_qual;
-
-    vmcs_read(VMCS_EXIT_QUAL, &(cr_qual.value));
+int v3_vmx_handle_cr3_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {

-    if (cr_qual.access_type < 2) {
+    if (cr_qual->access_type < 2) {
         v3_reg_t * reg = get_reg_ptr(info, cr_qual);

-        if (cr_qual.access_type == 0) {
+        if (cr_qual->access_type == 0) {
             return handle_mov_to_cr3(info, reg);
         } else {
             return handle_mov_from_cr3(info, reg);
         }
     }

-    PrintError("Invalid CR3 Access type?? (type=%d)\n", cr_qual.access_type);
+    PrintError(info->vm_info, info, "Invalid CR3 Access type?? (type=%d)\n", cr_qual->access_type);
+    return -1;
+}
+
+int v3_vmx_handle_cr4_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
+    if (cr_qual->access_type < 2) {
+
+        if (cr_qual->access_type == 0) {
+            if (v3_handle_cr4_write(info) != 0) {
+                PrintError(info->vm_info, info, "Could not handle CR4 write\n");
+                return -1;
+            }
+            info->ctrl_regs.cr4 |= 0x2000; // no VMX allowed in guest, so mask CR4.VMXE
+        } else {
+            if (v3_handle_cr4_read(info) != 0) {
+                PrintError(info->vm_info, info, "Could not handle CR4 read\n");
+                return -1;
+            }
+        }
+
+        return 0;
+    }
+
+    PrintError(info->vm_info, info, "Invalid CR4 Access type?? (type=%d)\n", cr_qual->access_type);
+    return -1;
+}
+
+int v3_vmx_handle_cr8_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
+    if (cr_qual->access_type < 2) {
+
+        if (cr_qual->access_type == 0) {
+            if (v3_handle_cr8_write(info) != 0) {
+                PrintError(info->vm_info, info, "Could not handle CR8 write\n");
+                return -1;
+            }
+        } else {
+            if (v3_handle_cr8_read(info) != 0) {
+                PrintError(info->vm_info, info, "Could not handle CR8 read\n");
+                return -1;
+            }
+        }
+
+        return 0;
+    }
+
+    PrintError(info->vm_info, info, "Invalid CR8 Access type?? (type=%d)\n", cr_qual->access_type);
     return -1;
 }

 static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
-    int instr_len = 0;

     if (info->shdw_pg_mode == SHADOW_PAGING) {

         /*
-        PrintDebug("Old Guest CR3=%p, Old Shadow CR3=%p\n",
+        PrintDebug(info->vm_info, info, "Old Guest CR3=%p, Old Shadow CR3=%p\n",
                    (void *)info->ctrl_regs.cr3,
                    (void *)info->shdw_pg_state.guest_cr3);
         */

+
         if (info->cpu_mode == LONG) {
             info->shdw_pg_state.guest_cr3 = (uint64_t)*cr3_reg;
         } else {
@@ -97,29 +141,27 @@ static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {

         if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
             if (v3_activate_shadow_pt(info) == -1) {
-                PrintError("Failed to activate 32 bit shadow page table\n");
+                PrintError(info->vm_info, info, "Failed to activate 32 bit shadow page table\n");
                 return -1;
             }
         }

         /*
-        PrintDebug("New guest CR3=%p, New shadow CR3=%p\n",
+        PrintDebug(info->vm_info, info, "New guest CR3=%p, New shadow CR3=%p\n",
                    (void *)info->ctrl_regs.cr3,
                    (void *)info->shdw_pg_state.guest_cr3);
         */
     } else if (info->shdw_pg_mode == NESTED_PAGING) {
-        PrintError("Nested paging not available in VMX right now!\n");
+        PrintError(info->vm_info, info, "Nested paging not available in VMX right now!\n");
         return -1;
     }

-    vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
-    info->rip += instr_len;

     return 0;
 }

 static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
-    int instr_len = 0;
+

     if (info->shdw_pg_mode == SHADOW_PAGING) {

@@ -132,110 +174,143 @@ static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
         }

     } else {
-        PrintError("Unhandled paging mode\n");
+        PrintError(info->vm_info, info, "Unhandled paging mode\n");
         return -1;
     }

-    vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
-    info->rip += instr_len;
-
     return 0;
 }

-static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_cr0) {
+static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_cr0, struct vmx_exit_info * exit_info) {
     struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
     struct cr0_32 * shdw_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
     struct cr0_32 * new_shdw_cr0 = (struct cr0_32 *)new_cr0;
     struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;
     uint_t paging_transition = 0;
-    int instr_len = 0;
+    extern v3_cpu_arch_t v3_mach_type;
+

-    /*
-    PrintDebug("Old shadow CR0: 0x%x, New shadow CR0: 0x%x\n",
+    PrintDebug(info->vm_info, info, "Mov to CR0\n");
+    PrintDebug(info->vm_info, info, "Old shadow CR0: 0x%x, New shadow CR0: 0x%x\n",
                (uint32_t)info->shdw_pg_state.guest_cr0,
                (uint32_t)*new_cr0);
-    */

-    if (new_shdw_cr0->pe != shdw_cr0->pe) {
+    if ((new_shdw_cr0->pe != shdw_cr0->pe) && (vmx_info->assist_state != VMXASSIST_DISABLED)) {
         /*
-        PrintDebug("Guest CR0: 0x%x\n", *(uint32_t *)guest_cr0);
-        PrintDebug("Old shadow CR0: 0x%x\n", *(uint32_t *)shdw_cr0);
-        PrintDebug("New shadow CR0: 0x%x\n", *(uint32_t *)new_shdw_cr0);
+        PrintDebug(info->vm_info, info, "Guest CR0: 0x%x\n", *(uint32_t *)guest_cr0);
+        PrintDebug(info->vm_info, info, "Old shadow CR0: 0x%x\n", *(uint32_t *)shdw_cr0);
+        PrintDebug(info->vm_info, info, "New shadow CR0: 0x%x\n", *(uint32_t *)new_shdw_cr0);
         */
+
         if (v3_vmxassist_ctx_switch(info) != 0) {
-            PrintError("Unable to execute VMXASSIST context switch!\n");
+            PrintError(info->vm_info, info, "Unable to execute VMXASSIST context switch!\n");
             return -1;
         }
-
-        v3_load_vmcs_guest_state(info);
-
-        if (vmx_info->state == VMXASSIST_ENABLED) {
-            PrintDebug("Loading VMXASSIST at RIP: %p\n", (void *)info->rip);
+
+        if (vmx_info->assist_state == VMXASSIST_ON) {
+            PrintDebug(info->vm_info, info, "Loading VMXASSIST at RIP: %p\n", (void *)(addr_t)info->rip);
         } else {
-            PrintDebug("Leaving VMXASSIST and entering protected mode at RIP: %p\n",
-                       (void *)info->rip);
+            PrintDebug(info->vm_info, info, "Leaving VMXASSIST and entering protected mode at RIP: %p\n",
+                       (void *)(addr_t)info->rip);
         }

-        // vmx assist sets the new cr values itself
-        return 0;
-    }
-
-    if (new_shdw_cr0->pg != shdw_cr0->pg) {
-        paging_transition = 1;
-    }
-
-    // The shadow always reflects the new value
-    *shdw_cr0 = *new_shdw_cr0;
-
-    // We don't care about most of the flags, so lets go for it
-    // and set them to the guest values
-    *guest_cr0 = *shdw_cr0;
-
-    // Except PG, PE, and NE, which are always set
-    guest_cr0->pe = 1;
-    guest_cr0->pg = 1;
-    guest_cr0->ne = 1;
-
-    if (paging_transition) {
-        // Paging transition
-
-        if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
-            struct efer_64 * guest_efer = (struct efer_64 *)&(info->ctrl_regs.efer);
-
-            if (guest_efer->lme == 1) {
-                // PrintDebug("Enabling long mode\n");
+        // PE switches modify the RIP directly, so we clear the instr_len field to avoid catastrophe
+        exit_info->instr_len = 0;

-                guest_efer->lma = 1;
-                guest_efer->lme = 1;
+        // v3_vmx_restore_vmcs(info);
+        // v3_print_vmcs(info);

-                vmx_info->entry_ctrls.guest_ia32e = 1;
-            }
-
-            // PrintDebug("Activating Shadow Page tables\n");
-
-            if (v3_activate_shadow_pt(info) == -1) {
-                PrintError("Failed to activate shadow page tables\n");
-                return -1;
-            }
+    } else {

-        } else if (v3_activate_passthrough_pt(info) == -1) {
-            PrintError("Failed to activate passthrough page tables\n");
-            return -1;
-        }
+        if (new_shdw_cr0->pg != shdw_cr0->pg) {
+            paging_transition = 1;
+        }
+
+
+        // Except PG, PE, and NE, which are always set
+        if ((info->shdw_pg_mode == SHADOW_PAGING) ||
+            (v3_mach_type != V3_VMX_EPT_UG_CPU)) {
+
+            // The shadow always reflects the new value
+            *shdw_cr0 = *new_shdw_cr0;
+
+
+            // We don't care about most of the flags, so lets go for it
+            // and set them to the guest values
+            *guest_cr0 = *shdw_cr0;
+
+            guest_cr0->pe = 1;
+            guest_cr0->pg = 1;
+        } else {
+            // Unrestricted guest
+            // *(uint32_t *)shdw_cr0 = (0x00000020 & *(uint32_t *)new_shdw_cr0);
+
+            *guest_cr0 = *new_shdw_cr0;
+
+            guest_cr0->cd = 0;
+        }
+
+        guest_cr0->ne = 1;
+        guest_cr0->et = 1;
+
+
+        if (paging_transition) {
+            // Paging transition
+
+            if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
+                struct efer_64 * vm_efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
+                struct efer_64 * hw_efer = (struct efer_64 *)&(info->ctrl_regs.efer);
+
+                if (vmx_info->assist_state != VMXASSIST_DISABLED) {
+                    if (vm_efer->lme) {
+                        PrintDebug(info->vm_info, info, "Enabling long mode\n");
+
+                        hw_efer->lma = 1;
+                        hw_efer->lme = 1;
+
+                        vmx_info->entry_ctrls.guest_ia32e = 1;
+                    }
+                } else {
+                    if (hw_efer->lme) {
+                        PrintDebug(info->vm_info, info, "Enabling long mode\n");
+
+                        hw_efer->lma = 1;
+
+                        vmx_info->entry_ctrls.guest_ia32e = 1;
+                    }
+                }
+
+                // PrintDebug(info->vm_info, info, "Activating Shadow Page tables\n");
+
+                if (info->shdw_pg_mode == SHADOW_PAGING) {
+                    if (v3_activate_shadow_pt(info) == -1) {
+                        PrintError(info->vm_info, info, "Failed to activate shadow page tables\n");
+                        return -1;
+                    }
+                }
+
+            } else {
+
+                if (info->shdw_pg_mode == SHADOW_PAGING) {
+                    if (v3_activate_passthrough_pt(info) == -1) {
+                        PrintError(info->vm_info, info, "Failed to activate passthrough page tables\n");
+                        return -1;
+                    }
+                } else {
+                    // This is hideous... Let's hope that the 1to1 page table has not been nuked...
+                    info->ctrl_regs.cr3 = VMXASSIST_1to1_PT;
+                }
+            }
+        }
     }
-
-    // PE loads its own RIP, otherwise we need to skip ahead an instruction
-    vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
-    info->rip += instr_len;
-
     return 0;
 }

-static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual cr_qual) {
+static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
     v3_reg_t * reg = NULL;

-    switch (cr_qual.gpr) {
+    switch (cr_qual->gpr) {
     case 0:
         reg = &(info->vm_regs.rax);
         break;
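
Note on get_reg_ptr(): the gpr field of the exit qualification selects which general-purpose register the MOV to/from CR used, following the architectural encoding in Intel SDM Vol. 3, Table 27-3 (0 = RAX, 1 = RCX, 2 = RDX, 3 = RBX, 4 = RSP, 5 = RBP, 6 = RSI, 7 = RDI, 8-15 = R8-R15). The excerpt above stops at case 0; the sketch below shows the full mapping in compressed form. It is illustrative rather than a copy of the file's remaining cases, and it assumes the usual Palacios struct v3_gprs field names (rax ... r15).

    /* Illustrative sketch of the full GPR decode (Intel SDM encoding).
     * Field names of info->vm_regs are assumed to follow struct v3_gprs. */
    static v3_reg_t * get_reg_ptr(struct guest_info * info,
                                  struct vmx_exit_cr_qual * cr_qual) {
        switch (cr_qual->gpr) {
            case  0: return &(info->vm_regs.rax);
            case  1: return &(info->vm_regs.rcx);
            case  2: return &(info->vm_regs.rdx);
            case  3: return &(info->vm_regs.rbx);
            case  4: return &(info->vm_regs.rsp);
            case  5: return &(info->vm_regs.rbp);
            case  6: return &(info->vm_regs.rsi);
            case  7: return &(info->vm_regs.rdi);
            case  8: return &(info->vm_regs.r8);
            case  9: return &(info->vm_regs.r9);
            case 10: return &(info->vm_regs.r10);
            case 11: return &(info->vm_regs.r11);
            case 12: return &(info->vm_regs.r12);
            case 13: return &(info->vm_regs.r13);
            case 14: return &(info->vm_regs.r14);
            case 15: return &(info->vm_regs.r15);
            default: return NULL; /* unreachable: gpr is a 4-bit field */
        }
    }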
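
The access_type and gpr fields used throughout the handlers come from the VM-exit qualification for control-register accesses. Below is a minimal sketch of that layout per Intel SDM Vol. 3, Table 27-3; the field names are assumptions, and Palacios's real struct vmx_exit_cr_qual definition may differ.

    /* Sketch of the CR-access exit qualification (Intel SDM Table 27-3).
     * Field names are illustrative, not necessarily Palacios's. */
    struct vmx_exit_cr_qual {
        union {
            uint64_t value;
            struct {
                uint64_t cr_id       : 4;  /* CR number: 0, 3, 4, or 8          */
                uint64_t access_type : 2;  /* 0 = MOV to CR, 1 = MOV from CR,
                                              2 = CLTS, 3 = LMSW                */
                uint64_t lmsw_op     : 1;  /* LMSW operand: 0 = reg, 1 = mem    */
                uint64_t rsvd1       : 1;
                uint64_t gpr         : 4;  /* GPR operand, 0 = RAX .. 15 = R15  */
                uint64_t rsvd2       : 4;
                uint64_t lmsw_src    : 16; /* LMSW source data                  */
                uint64_t rsvd3       : 32;
            } __attribute__((packed));
        } __attribute__((packed));
    } __attribute__((packed));

The access_type < 2 guards in the handlers therefore accept only MOV to/from CR and reject CLTS and LMSW, which is what the "Invalid ... Access type??" error paths report.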
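
The signature change (handlers now take the decoded qualification, and the CR0 path also takes the exit info) implies the caller reads VMCS_EXIT_QUAL once and dispatches on the CR number, rather than each handler re-reading it as the removed vmcs_read() calls did. A hedged sketch of such caller-side dispatch, assuming an exit_qual field in struct vmx_exit_info and a hypothetical handle_cr_access() wrapper:

    /* Hypothetical dispatch on a CR-access exit; the wrapper name and the
     * exit_qual field are assumptions about the surrounding exit handler. */
    static int handle_cr_access(struct guest_info * info,
                                struct vmx_exit_info * exit_info) {
        struct vmx_exit_cr_qual * cr_qual =
            (struct vmx_exit_cr_qual *)&(exit_info->exit_qual);

        switch (cr_qual->cr_id) {
            case 0: return v3_vmx_handle_cr0_access(info, cr_qual, exit_info);
            case 3: return v3_vmx_handle_cr3_access(info, cr_qual);
            case 4: return v3_vmx_handle_cr4_access(info, cr_qual);
            case 8: return v3_vmx_handle_cr8_access(info, cr_qual);
            default:
                PrintError(info->vm_info, info, "Unhandled CR access (CR%d)\n",
                           (int)cr_qual->cr_id);
                return -1;
        }
    }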
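
On the info->ctrl_regs.cr4 |= 0x2000 line in the CR4 write path: bit 13 of CR4 is VMXE, and hardware requires CR4.VMXE to remain set while the CPU is in VMX operation. After the generic v3_handle_cr4_write() updates the guest's view, the host forces the bit back on in the CR4 value the hardware actually loads for the guest; the guest-visible (shadowed) CR4 keeps it clear, so the guest can neither observe nor enable VMX, which is what the "no VMX allowed in guest" comment means. A named constant makes the intent clearer; CR4_VMXE and the helper below are illustrative, not something the diff introduces.

    /* CR4 bit 13 = VMXE. Clearing it in the live CR4 while in VMX
     * operation faults, so the host pins it on after guest writes. */
    #define CR4_VMXE (1ULL << 13)   /* == 0x2000, the constant used above */

    /* Illustrative helper (not from the diff): pin VMXE in the CR4 the
     * hardware loads; the shadowed, guest-visible CR4 leaves it clear. */
    static inline void vmx_pin_cr4_vmxe(struct guest_info * info) {
        info->ctrl_regs.cr4 |= CR4_VMXE;
    }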