X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm_ctrl_regs.c;h=add91ff5ea4842adb3a3ad538877aa00ad32d5a0;hb=c0ecfba627c1d6c3f46d59bd4e5e6f883a494dc4;hp=d897fa0166637464ad60250e73033ee9a51091c6;hpb=87bd3921fe6a17138bdba3a41b501a8d620cb48a;p=palacios.git diff --git a/palacios/src/palacios/vmm_ctrl_regs.c b/palacios/src/palacios/vmm_ctrl_regs.c index d897fa0..add91ff 100644 --- a/palacios/src/palacios/vmm_ctrl_regs.c +++ b/palacios/src/palacios/vmm_ctrl_regs.c @@ -23,9 +23,10 @@ #include #include #include +#include +#include - -#ifndef DEBUG_CTRL_REGS +#ifndef V3_CONFIG_DEBUG_CTRL_REGS #undef PrintDebug #define PrintDebug(fmt, args...) #endif @@ -39,42 +40,47 @@ static int handle_mov_to_cr0(struct guest_info * info, struct x86_instr * dec_in // First Attempt = 494 lines // current = 106 lines int v3_handle_cr0_write(struct guest_info * info) { - uchar_t instr[15]; - int ret; - struct x86_instr dec_instr; - - if (info->mem_mode == PHYSICAL_MEM) { - ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); - } else { - ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); - } - - if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) { - PrintError("Could not decode instruction\n"); - return -1; - } - - - if (dec_instr.op_type == V3_OP_LMSW) { - if (handle_lmsw(info, &dec_instr) == -1) { - return -1; + uchar_t instr[15]; + int ret; + struct x86_instr dec_instr; + + if (info->mem_mode == PHYSICAL_MEM) { + ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); + } else { + ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); } - } else if (dec_instr.op_type == V3_OP_MOV2CR) { - if (handle_mov_to_cr0(info, &dec_instr) == -1) { - return -1; + + if (ret!=15) { + PrintError(info->vm_info, info, "Could not read instruction\n"); + return -1; } - } else if (dec_instr.op_type == V3_OP_CLTS) { - if (handle_clts(info, &dec_instr) == -1) { - return -1; + + if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) { + PrintError(info->vm_info, info, "Could not decode instruction\n"); + return -1; } - } else { - PrintError("Unhandled opcode in handle_cr0_write\n"); - return -1; - } - info->rip += dec_instr.instr_length; - - return 0; + + if (dec_instr.op_type == V3_OP_LMSW) { + if (handle_lmsw(info, &dec_instr) == -1) { + return -1; + } + } else if (dec_instr.op_type == V3_OP_MOV2CR) { + if (handle_mov_to_cr0(info, &dec_instr) == -1) { + return -1; + } + } else if (dec_instr.op_type == V3_OP_CLTS) { + if (handle_clts(info, &dec_instr) == -1) { + return -1; + } + } else { + PrintError(info->vm_info, info, "Unhandled opcode in handle_cr0_write\n"); + return -1; + } + + info->rip += dec_instr.instr_length; + + return 0; } @@ -84,124 +90,132 @@ int v3_handle_cr0_write(struct guest_info * info) { // The hardware does a format check to make sure the high bits are zero // Because of this we can ignore the high 32 bits here static int handle_mov_to_cr0(struct guest_info * info, struct x86_instr * dec_instr) { - // 32 bit registers - struct cr0_32 * shadow_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0); - struct cr0_32 * new_cr0 = (struct cr0_32 *)(dec_instr->src_operand.operand); - struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0); - uint_t paging_transition = 0; - - PrintDebug("MOV2CR0 (MODE=%s)\n", v3_cpu_mode_to_str(info->cpu_mode)); - - PrintDebug("OperandVal 
= %x, length=%d\n", *(uint_t *)new_cr0, dec_instr->src_operand.size); - - PrintDebug("Old CR0=%x\n", *(uint_t *)shadow_cr0); - PrintDebug("Old Guest CR0=%x\n", *(uint_t *)guest_cr0); - - - // We detect if this is a paging transition - if (guest_cr0->pg != new_cr0->pg) { - paging_transition = 1; - } - - // Guest always sees the value they wrote - *guest_cr0 = *new_cr0; - - // This value must always be set to 1 - guest_cr0->et = 1; - - // Set the shadow register to catch non-virtualized flags - *shadow_cr0 = *guest_cr0; - - // Paging is always enabled - shadow_cr0->pg = 1; - - // Was there a paging transition - // Meaning we need to change the page tables - if (paging_transition) { - if (v3_get_mem_mode(info) == VIRTUAL_MEM) { - - struct efer_64 * guest_efer = (struct efer_64 *)&(info->guest_efer); - struct efer_64 * shadow_efer = (struct efer_64 *)&(info->ctrl_regs.efer); - - // Check long mode LME to set LME - if (guest_efer->lme == 1) { - PrintDebug("Enabing Long Mode\n"); - guest_efer->lma = 1; - - shadow_efer->lma = 1; - shadow_efer->lme = 1; - - PrintDebug("New EFER %p\n", (void *)*(addr_t *)(shadow_efer)); - } + // 32 bit registers + struct cr0_32 * shadow_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0); + struct cr0_32 * new_cr0 = (struct cr0_32 *)(dec_instr->src_operand.operand); + struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0); + uint_t paging_transition = 0; + + PrintDebug(info->vm_info, info, "MOV2CR0 (MODE=%s)\n", v3_cpu_mode_to_str(info->cpu_mode)); + + PrintDebug(info->vm_info, info, "OperandVal = %x, length=%d\n", *(uint_t *)new_cr0, dec_instr->src_operand.size); + + PrintDebug(info->vm_info, info, "Old CR0=%x\n", *(uint_t *)shadow_cr0); + PrintDebug(info->vm_info, info, "Old Guest CR0=%x\n", *(uint_t *)guest_cr0); + + + // We detect if this is a paging transition + if (guest_cr0->pg != new_cr0->pg) { + paging_transition = 1; + } + + // Guest always sees the value they wrote + *guest_cr0 = *new_cr0; + + // This value must always be set to 1 + guest_cr0->et = 1; + + // Set the shadow register to catch non-virtualized flags + *shadow_cr0 = *guest_cr0; + + // Paging is always enabled + shadow_cr0->pg = 1; - PrintDebug("Activating Shadow Page Tables\n"); - - if (v3_activate_shadow_pt(info) == -1) { - PrintError("Failed to activate shadow page tables\n"); - return -1; - } - } else { - - if (v3_activate_passthrough_pt(info) == -1) { - PrintError("Failed to activate passthrough page tables\n"); - return -1; - } + if (guest_cr0->pg == 0) { + // If paging is not enabled by the guest, then we always enable write-protect to catch memory hooks + shadow_cr0->wp = 1; } - } - - - PrintDebug("New Guest CR0=%x\n",*(uint_t *)guest_cr0); - PrintDebug("New CR0=%x\n", *(uint_t *)shadow_cr0); - - return 0; + + // Was there a paging transition + // Meaning we need to change the page tables + if (paging_transition) { + if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) { + + struct efer_64 * guest_efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer); + struct efer_64 * shadow_efer = (struct efer_64 *)&(info->ctrl_regs.efer); + + // Check long mode LME to set LME + if (guest_efer->lme == 1) { + PrintDebug(info->vm_info, info, "Enabing Long Mode\n"); + guest_efer->lma = 1; + + shadow_efer->lma = 1; + shadow_efer->lme = 1; + + PrintDebug(info->vm_info, info, "New EFER %p\n", (void *)*(addr_t *)(shadow_efer)); + } + + PrintDebug(info->vm_info, info, "Activating Shadow Page Tables\n"); + + if (v3_activate_shadow_pt(info) == -1) { + PrintError(info->vm_info, info, "Failed to 
activate shadow page tables\n"); + return -1; + } + } else { + + shadow_cr0->wp = 1; + + if (v3_activate_passthrough_pt(info) == -1) { + PrintError(info->vm_info, info, "Failed to activate passthrough page tables\n"); + return -1; + } + } + } + + + PrintDebug(info->vm_info, info, "New Guest CR0=%x\n",*(uint_t *)guest_cr0); + PrintDebug(info->vm_info, info, "New CR0=%x\n", *(uint_t *)shadow_cr0); + + return 0; } static int handle_clts(struct guest_info * info, struct x86_instr * dec_instr) { - // CLTS - struct cr0_32 * real_cr0 = (struct cr0_32*)&(info->ctrl_regs.cr0); - - real_cr0->ts = 0; - - if (info->shdw_pg_mode == SHADOW_PAGING) { - struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0); - guest_cr0->ts = 0; - } - return 0; + // CLTS + struct cr0_32 * real_cr0 = (struct cr0_32*)&(info->ctrl_regs.cr0); + + real_cr0->ts = 0; + + if (info->shdw_pg_mode == SHADOW_PAGING) { + struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0); + guest_cr0->ts = 0; + } + return 0; } static int handle_lmsw(struct guest_info * info, struct x86_instr * dec_instr) { - struct cr0_real * real_cr0 = (struct cr0_real*)&(info->ctrl_regs.cr0); - struct cr0_real * new_cr0 = (struct cr0_real *)(dec_instr->src_operand.operand); - uchar_t new_cr0_val; - - PrintDebug("LMSW\n"); - - new_cr0_val = (*(char*)(new_cr0)) & 0x0f; - - PrintDebug("OperandVal = %x\n", new_cr0_val); - - // We can just copy the new value through - // we don't need to virtualize the lower 4 bits - PrintDebug("Old CR0=%x\n", *(uint_t *)real_cr0); - *(uchar_t*)real_cr0 &= 0xf0; - *(uchar_t*)real_cr0 |= new_cr0_val; - PrintDebug("New CR0=%x\n", *(uint_t *)real_cr0); - - - // If Shadow paging is enabled we push the changes to the virtualized copy of cr0 - if (info->shdw_pg_mode == SHADOW_PAGING) { - struct cr0_real * guest_cr0 = (struct cr0_real*)&(info->shdw_pg_state.guest_cr0); - - PrintDebug("Old Guest CR0=%x\n", *(uint_t *)guest_cr0); - *(uchar_t*)guest_cr0 &= 0xf0; - *(uchar_t*)guest_cr0 |= new_cr0_val; - PrintDebug("New Guest CR0=%x\n", *(uint_t *)guest_cr0); - } - return 0; + struct cr0_real * real_cr0 = (struct cr0_real *)&(info->ctrl_regs.cr0); + // XED is a mess, and basically reverses the operand order for an LMSW + struct cr0_real * new_cr0 = (struct cr0_real *)(dec_instr->dst_operand.operand); + uchar_t new_cr0_val; + + PrintDebug(info->vm_info, info, "LMSW\n"); + + new_cr0_val = (*(char*)(new_cr0)) & 0x0f; + + PrintDebug(info->vm_info, info, "OperandVal = %x\n", new_cr0_val); + + // We can just copy the new value through + // we don't need to virtualize the lower 4 bits + PrintDebug(info->vm_info, info, "Old CR0=%x\n", *(uint_t *)real_cr0); + *(uchar_t*)real_cr0 &= 0xf0; + *(uchar_t*)real_cr0 |= new_cr0_val; + PrintDebug(info->vm_info, info, "New CR0=%x\n", *(uint_t *)real_cr0); + + + // If Shadow paging is enabled we push the changes to the virtualized copy of cr0 + if (info->shdw_pg_mode == SHADOW_PAGING) { + struct cr0_real * guest_cr0 = (struct cr0_real*)&(info->shdw_pg_state.guest_cr0); + + PrintDebug(info->vm_info, info, "Old Guest CR0=%x\n", *(uint_t *)guest_cr0); + *(uchar_t*)guest_cr0 &= 0xf0; + *(uchar_t*)guest_cr0 |= new_cr0_val; + PrintDebug(info->vm_info, info, "New Guest CR0=%x\n", *(uint_t *)guest_cr0); + } + return 0; } @@ -211,57 +225,76 @@ static int handle_lmsw(struct guest_info * info, struct x86_instr * dec_instr) { // First attempt = 253 lines // current = 51 lines int v3_handle_cr0_read(struct guest_info * info) { - uchar_t instr[15]; - int ret; - struct x86_instr 
dec_instr; - - if (info->mem_mode == PHYSICAL_MEM) { - ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); - } else { - ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); - } - - - if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) { - PrintError("Could not decode instruction\n"); - return -1; - } - - if (dec_instr.op_type == V3_OP_MOVCR2) { - struct cr0_32 * dst_reg = (struct cr0_32 *)(dec_instr.dst_operand.operand); - struct cr0_32 * shadow_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0); - - PrintDebug("MOVCR2 (mode=%s)\n", v3_cpu_mode_to_str(info->cpu_mode)); - - if (info->shdw_pg_mode == SHADOW_PAGING) { - struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0); - *dst_reg = *guest_cr0; - } else { - *dst_reg = *shadow_cr0; + uchar_t instr[15]; + int ret; + struct x86_instr dec_instr; + + if (info->mem_mode == PHYSICAL_MEM) { + ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); + } else { + ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); } - PrintDebug("Shadow CR0: %x\n", *(uint_t*)shadow_cr0); - PrintDebug("returned CR0: %x\n", *(uint_t*)dst_reg); - } else if (dec_instr.op_type == V3_OP_SMSW) { - struct cr0_real * shadow_cr0 = (struct cr0_real *)&(info->ctrl_regs.cr0); - struct cr0_real * dst_reg = (struct cr0_real *)(dec_instr.dst_operand.operand); - char cr0_val = *(char*)shadow_cr0 & 0x0f; + if (ret!=15) { + PrintError(info->vm_info, info, "Could not read instruction\n"); + return -1; + } - PrintDebug("SMSW\n"); - - // The lower 4 bits of the guest/shadow CR0 are mapped through - // We can treat nested and shadow paging the same here - *(char *)dst_reg &= 0xf0; - *(char *)dst_reg |= cr0_val; + if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) { + PrintError(info->vm_info, info, "Could not decode instruction\n"); + return -1; + } - } else { - PrintError("Unhandled opcode in handle_cr0_read\n"); - return -1; - } + if (dec_instr.op_type == V3_OP_MOVCR2) { + PrintDebug(info->vm_info, info, "MOVCR2 (mode=%s)\n", v3_cpu_mode_to_str(info->cpu_mode)); + + if ((v3_get_vm_cpu_mode(info) == LONG) || + (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) { + struct cr0_64 * dst_reg = (struct cr0_64 *)(dec_instr.dst_operand.operand); + + if (info->shdw_pg_mode == SHADOW_PAGING) { + struct cr0_64 * guest_cr0 = (struct cr0_64 *)&(info->shdw_pg_state.guest_cr0); + *dst_reg = *guest_cr0; + } else { + struct cr0_64 * shadow_cr0 = (struct cr0_64 *)&(info->ctrl_regs.cr0); + *dst_reg = *shadow_cr0; + } + + PrintDebug(info->vm_info, info, "returned CR0: %p\n", (void *)*(addr_t *)dst_reg); + } else { + struct cr0_32 * dst_reg = (struct cr0_32 *)(dec_instr.dst_operand.operand); + + if (info->shdw_pg_mode == SHADOW_PAGING) { + struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0); + *dst_reg = *guest_cr0; + } else { + struct cr0_32 * shadow_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0); + *dst_reg = *shadow_cr0; + } + + PrintDebug(info->vm_info, info, "returned CR0: %x\n", *(uint_t*)dst_reg); + } - info->rip += dec_instr.instr_length; + } else if (dec_instr.op_type == V3_OP_SMSW) { + struct cr0_real * shadow_cr0 = (struct cr0_real *)&(info->ctrl_regs.cr0); + struct cr0_real * dst_reg = (struct cr0_real *)(dec_instr.dst_operand.operand); + char cr0_val = *(char*)shadow_cr0 & 0x0f; + + PrintDebug(info->vm_info, info, "SMSW\n"); + + // The lower 4 bits of the guest/shadow 
CR0 are mapped through + // We can treat nested and shadow paging the same here + *(char *)dst_reg &= 0xf0; + *(char *)dst_reg |= cr0_val; + + } else { + PrintError(info->vm_info, info, "Unhandled opcode in handle_cr0_read\n"); + return -1; + } + + info->rip += dec_instr.instr_length; - return 0; + return 0; } @@ -270,75 +303,81 @@ int v3_handle_cr0_read(struct guest_info * info) { // First Attempt = 256 lines // current = 65 lines int v3_handle_cr3_write(struct guest_info * info) { - int ret; - uchar_t instr[15]; - struct x86_instr dec_instr; - - if (info->mem_mode == PHYSICAL_MEM) { - ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); - } else { - ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); - } - - if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) { - PrintError("Could not decode instruction\n"); - return -1; - } - - if (dec_instr.op_type == V3_OP_MOV2CR) { - PrintDebug("MOV2CR3 (cpu_mode=%s)\n", v3_cpu_mode_to_str(info->cpu_mode)); - - if (info->shdw_pg_mode == SHADOW_PAGING) { - PrintDebug("Old Shadow CR3=%p; Old Guest CR3=%p\n", - (void *)(addr_t)(info->ctrl_regs.cr3), - (void*)(addr_t)(info->shdw_pg_state.guest_cr3)); - + int ret; + uchar_t instr[15]; + struct x86_instr dec_instr; - // We update the guest CR3 - if (info->cpu_mode == LONG) { - struct cr3_64 * new_cr3 = (struct cr3_64 *)(dec_instr.src_operand.operand); - struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->shdw_pg_state.guest_cr3); - *guest_cr3 = *new_cr3; - } else { - struct cr3_32 * new_cr3 = (struct cr3_32 *)(dec_instr.src_operand.operand); - struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3); - *guest_cr3 = *new_cr3; - } - - // If Paging is enabled in the guest then we need to change the shadow page tables - if (info->mem_mode == VIRTUAL_MEM) { - if (v3_activate_shadow_pt(info) == -1) { - PrintError("Failed to activate 32 bit shadow page table\n"); - return -1; + if (info->mem_mode == PHYSICAL_MEM) { + ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); + } else { + ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); + } + + if (ret!=15) { + PrintError(info->vm_info, info, "Could not read instruction\n"); + return -1; + } + + if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) { + PrintError(info->vm_info, info, "Could not decode instruction\n"); + return -1; + } + + if (dec_instr.op_type == V3_OP_MOV2CR) { + PrintDebug(info->vm_info, info, "MOV2CR3 (cpu_mode=%s)\n", v3_cpu_mode_to_str(info->cpu_mode)); + + if (info->shdw_pg_mode == SHADOW_PAGING) { + PrintDebug(info->vm_info, info, "Old Shadow CR3=%p; Old Guest CR3=%p\n", + (void *)(addr_t)(info->ctrl_regs.cr3), + (void*)(addr_t)(info->shdw_pg_state.guest_cr3)); + + + // We update the guest CR3 + if (info->cpu_mode == LONG) { + struct cr3_64 * new_cr3 = (struct cr3_64 *)(dec_instr.src_operand.operand); + struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->shdw_pg_state.guest_cr3); + *guest_cr3 = *new_cr3; + } else { + struct cr3_32 * new_cr3 = (struct cr3_32 *)(dec_instr.src_operand.operand); + struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3); + *guest_cr3 = *new_cr3; + } + + + // If Paging is enabled in the guest then we need to change the shadow page tables + if (info->mem_mode == VIRTUAL_MEM) { + if (v3_activate_shadow_pt(info) == -1) { + PrintError(info->vm_info, info, "Failed to activate 32 bit 
shadow page table\n"); + return -1; + } + } + + PrintDebug(info->vm_info, info, "New Shadow CR3=%p; New Guest CR3=%p\n", + (void *)(addr_t)(info->ctrl_regs.cr3), + (void*)(addr_t)(info->shdw_pg_state.guest_cr3)); + + } else if (info->shdw_pg_mode == NESTED_PAGING) { + + // This is just a passthrough operation which we probably don't need here + if (info->cpu_mode == LONG) { + struct cr3_64 * new_cr3 = (struct cr3_64 *)(dec_instr.src_operand.operand); + struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->ctrl_regs.cr3); + *guest_cr3 = *new_cr3; + } else { + struct cr3_32 * new_cr3 = (struct cr3_32 *)(dec_instr.src_operand.operand); + struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->ctrl_regs.cr3); + *guest_cr3 = *new_cr3; + } + } - } - - PrintDebug("New Shadow CR3=%p; New Guest CR3=%p\n", - (void *)(addr_t)(info->ctrl_regs.cr3), - (void*)(addr_t)(info->shdw_pg_state.guest_cr3)); - - } else if (info->shdw_pg_mode == NESTED_PAGING) { - - // This is just a passthrough operation which we probably don't need here - if (info->cpu_mode == LONG) { - struct cr3_64 * new_cr3 = (struct cr3_64 *)(dec_instr.src_operand.operand); - struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->ctrl_regs.cr3); - *guest_cr3 = *new_cr3; - } else { - struct cr3_32 * new_cr3 = (struct cr3_32 *)(dec_instr.src_operand.operand); - struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->ctrl_regs.cr3); - *guest_cr3 = *new_cr3; - } - + } else { + PrintError(info->vm_info, info, "Unhandled opcode in handle_cr3_write\n"); + return -1; } - } else { - PrintError("Unhandled opcode in handle_cr3_write\n"); - return -1; - } - - info->rip += dec_instr.instr_length; - - return 0; + + info->rip += dec_instr.instr_length; + + return 0; } @@ -346,221 +385,449 @@ int v3_handle_cr3_write(struct guest_info * info) { // first attempt = 156 lines // current = 36 lines int v3_handle_cr3_read(struct guest_info * info) { - uchar_t instr[15]; - int ret; - struct x86_instr dec_instr; + uchar_t instr[15]; + int ret; + struct x86_instr dec_instr; + + if (info->mem_mode == PHYSICAL_MEM) { + ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); + } else { + ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); + } + + if (ret!=15) { + PrintError(info->vm_info, info, "Could not read instruction\n"); + return -1; + } + + if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) { + PrintError(info->vm_info, info, "Could not decode instruction\n"); + return -1; + } + + if (dec_instr.op_type == V3_OP_MOVCR2) { + PrintDebug(info->vm_info, info, "MOVCR32 (mode=%s)\n", v3_cpu_mode_to_str(info->cpu_mode)); + + if (info->shdw_pg_mode == SHADOW_PAGING) { + + if ((v3_get_vm_cpu_mode(info) == LONG) || + (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) { + struct cr3_64 * dst_reg = (struct cr3_64 *)(dec_instr.dst_operand.operand); + struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->shdw_pg_state.guest_cr3); + *dst_reg = *guest_cr3; + } else { + struct cr3_32 * dst_reg = (struct cr3_32 *)(dec_instr.dst_operand.operand); + struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3); + *dst_reg = *guest_cr3; + } + + } else if (info->shdw_pg_mode == NESTED_PAGING) { + + // This is just a passthrough operation which we probably don't need here + if ((v3_get_vm_cpu_mode(info) == LONG) || + (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) { + struct cr3_64 * dst_reg = (struct cr3_64 *)(dec_instr.dst_operand.operand); + struct cr3_64 * guest_cr3 = (struct cr3_64 
*)&(info->ctrl_regs.cr3); + *dst_reg = *guest_cr3; + } else { + struct cr3_32 * dst_reg = (struct cr3_32 *)(dec_instr.dst_operand.operand); + struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->ctrl_regs.cr3); + *dst_reg = *guest_cr3; + } + } + + } else { + PrintError(info->vm_info, info, "Unhandled opcode in handle_cr3_read\n"); + return -1; + } + + info->rip += dec_instr.instr_length; + + return 0; +} - if (info->mem_mode == PHYSICAL_MEM) { - ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); - } else { - ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); - } - if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) { - PrintError("Could not decode instruction\n"); - return -1; - } +//return guest cr4 - shadow PAE is always on +int v3_handle_cr4_read(struct guest_info * info) { + uchar_t instr[15]; + int ret; + struct x86_instr dec_instr; + + if (info->mem_mode == PHYSICAL_MEM) { + ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); + } else { + ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); + } + + if (ret!=15) { + PrintError(info->vm_info, info, "Could not read instruction\n"); + return -1; + } + + if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) { + PrintError(info->vm_info, info, "Could not decode instruction\n"); + return -1; + } + if (dec_instr.op_type != V3_OP_MOVCR2) { + PrintError(info->vm_info, info, "Invalid opcode in read CR4\n"); + return -1; + } + + if (info->shdw_pg_mode == SHADOW_PAGING) { + + if ((v3_get_vm_cpu_mode(info) == LONG) || + (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) { + struct cr4_64 * dst_reg = (struct cr4_64 *)(dec_instr.dst_operand.operand); + struct cr4_64 * guest_cr4 = (struct cr4_64 *)&(info->ctrl_regs.cr4); + *dst_reg = *guest_cr4; + } + else { + struct cr4_32 * dst_reg = (struct cr4_32 *)(dec_instr.dst_operand.operand); + struct cr4_32 * guest_cr4 = (struct cr4_32 *)&(info->shdw_pg_state.guest_cr4); + *dst_reg = *guest_cr4; + } + + } else if (info->shdw_pg_mode == NESTED_PAGING) { + + + if ((v3_get_vm_cpu_mode(info) == LONG) || + (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) { + struct cr4_64 * dst_reg = (struct cr4_64 *)(dec_instr.dst_operand.operand); + struct cr4_64 * guest_cr4 = (struct cr4_64 *)&(info->ctrl_regs.cr4); + *dst_reg = *guest_cr4; + } else { + struct cr4_32 * dst_reg = (struct cr4_32 *)(dec_instr.dst_operand.operand); + struct cr4_32 * guest_cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4); + *dst_reg = *guest_cr4; + } + } + + info->rip += dec_instr.instr_length; + return 0; +} - if (dec_instr.op_type == V3_OP_MOVCR2) { - PrintDebug("MOVCR32 (mode=%s)\n", v3_cpu_mode_to_str(info->cpu_mode)); - if (info->shdw_pg_mode == SHADOW_PAGING) { +int v3_handle_cr4_write(struct guest_info * info) { + uchar_t instr[15]; + int ret; + int flush_tlb=0; + struct x86_instr dec_instr; + v3_cpu_mode_t cpu_mode = v3_get_vm_cpu_mode(info); + + if (info->mem_mode == PHYSICAL_MEM) { + ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); + } else { + ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); + } + + if (ret!=15) { + PrintError(info->vm_info, info, "Could not read instruction\n"); + return -1; + } + - if ((v3_get_cpu_mode(info) == LONG) || - (v3_get_cpu_mode(info) == LONG_32_COMPAT)) { - struct cr3_64 * dst_reg = (struct cr3_64 
*)(dec_instr.dst_operand.operand); - struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->shdw_pg_state.guest_cr3); - *dst_reg = *guest_cr3; - } else { - struct cr3_32 * dst_reg = (struct cr3_32 *)(dec_instr.dst_operand.operand); - struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3); - *dst_reg = *guest_cr3; - } + if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) { + PrintError(info->vm_info, info, "Could not decode instruction\n"); + return -1; + } + + if (dec_instr.op_type != V3_OP_MOV2CR) { + PrintError(info->vm_info, info, "Invalid opcode in write to CR4\n"); + return -1; + } + + // Check to see if we need to flush the tlb + - } else if (info->shdw_pg_mode == NESTED_PAGING) { + if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) { + struct cr4_32 * new_cr4 = (struct cr4_32 *)(dec_instr.src_operand.operand); + struct cr4_32 * cr4 = (struct cr4_32 *)&(info->shdw_pg_state.guest_cr4); + + // if pse, pge, or pae have changed while PG (in any mode) is on + // the side effect is a TLB flush, which means we need to + // toss the current shadow page tables too + // + // + // TODO - PAE FLAG needs to be special cased + if ((cr4->pse != new_cr4->pse) || + (cr4->pge != new_cr4->pge) || + (cr4->pae != new_cr4->pae)) { + PrintDebug(info->vm_info, info, "Handling PSE/PGE/PAE -> TLBFlush case, flag set\n"); + flush_tlb = 1; + + } + } + - // This is just a passthrough operation which we probably don't need here - if ((v3_get_cpu_mode(info) == LONG) || - (v3_get_cpu_mode(info) == LONG_32_COMPAT)) { - struct cr3_64 * dst_reg = (struct cr3_64 *)(dec_instr.dst_operand.operand); - struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->ctrl_regs.cr3); - *dst_reg = *guest_cr3; - } else { - struct cr3_32 * dst_reg = (struct cr3_32 *)(dec_instr.dst_operand.operand); - struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->ctrl_regs.cr3); - *dst_reg = *guest_cr3; - } + if ((cpu_mode == PROTECTED) || (cpu_mode == PROTECTED_PAE)) { + struct cr4_32 * new_cr4 = (struct cr4_32 *)(dec_instr.src_operand.operand); + struct cr4_32 * shadow_cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4); + struct cr4_32 * guest_cr4 = (struct cr4_32 *)&(info->shdw_pg_state.guest_cr4); + PrintDebug(info->vm_info, info, "OperandVal = %x, length = %d\n", *(uint_t *)new_cr4, dec_instr.src_operand.size); + PrintDebug(info->vm_info, info, "Old guest CR4=%x\n", *(uint_t *)guest_cr4); + + if ((info->shdw_pg_mode == SHADOW_PAGING)) { + if (v3_get_vm_mem_mode(info) == PHYSICAL_MEM) { + + if ((guest_cr4->pae == 0) && (new_cr4->pae == 1)) { + PrintDebug(info->vm_info, info, "Creating PAE passthrough tables\n"); + + // create 32 bit PAE direct map page table + if (v3_reset_passthrough_pts(info) == -1) { + PrintError(info->vm_info, info, "Could not create 32 bit PAE passthrough pages tables\n"); + return -1; + } + + // reset cr3 to new page tables + info->ctrl_regs.cr3 = *(addr_t*)&(info->direct_map_pt); + + } else if ((guest_cr4->pae == 1) && (new_cr4->pae == 0)) { + // Create passthrough standard 32bit pagetables + PrintError(info->vm_info, info, "Switching From PAE to Protected mode not supported\n"); + return -1; + } + } + } + + *guest_cr4 = *new_cr4; + *shadow_cr4 = *guest_cr4; + shadow_cr4->pae = 1; // always on for the shadow pager + PrintDebug(info->vm_info, info, "New guest CR4=%x and shadow CR4=%x\n", *(uint_t *)guest_cr4,*(uint_t*)shadow_cr4); + + } else if ((cpu_mode == LONG) || (cpu_mode == LONG_32_COMPAT)) { + struct cr4_64 * new_cr4 = (struct cr4_64 *)(dec_instr.src_operand.operand); + struct cr4_64 * cr4 = (struct 
cr4_64 *)&(info->ctrl_regs.cr4); + + PrintDebug(info->vm_info, info, "Old CR4=%p\n", (void *)*(addr_t *)cr4); + PrintDebug(info->vm_info, info, "New CR4=%p\n", (void *)*(addr_t *)new_cr4); + + if (new_cr4->pae == 0) { + // cannot turn off PAE in long mode GPF the guest + PrintError(info->vm_info, info, "Cannot disable PAE in long mode, should send GPF\n"); + return -1; + } + + *cr4 = *new_cr4; + + } else { + PrintError(info->vm_info, info, "CR4 write not supported in CPU_MODE: %s\n", v3_cpu_mode_to_str(cpu_mode)); + return -1; } + + if (info->shdw_pg_mode == SHADOW_PAGING) { + if (flush_tlb) { + PrintDebug(info->vm_info, info, "Handling PSE/PGE/PAE -> TLBFlush (doing flush now!)\n"); + if (v3_activate_shadow_pt(info) == -1) { + PrintError(info->vm_info, info, "Failed to activate shadow page tables when emulating TLB flush in handling cr4 write\n"); + return -1; + } + } + } + + info->rip += dec_instr.instr_length; + return 0; +} - } else { - PrintError("Unhandled opcode in handle_cr3_read\n"); - return -1; - } - info->rip += dec_instr.instr_length; +/* + The CR8 and APIC TPR interaction are kind of crazy. - return 0; -} + CR8 mandates that the priority class is in bits 3:0 + The interaction of CR8 and an actual APIC is somewhat implementation dependent, but + a basic current APIC has the priority class at 7:4 and the *subclass* at 3:0 -// We don't need to virtualize CR4, all we need is to detect the activation of PAE -int v3_handle_cr4_read(struct guest_info * info) { - // PrintError("CR4 Read not handled\n"); - // Do nothing... - return 0; -} + The APIC TPR (both fields) can be written as the APIC register + A write to CR8 sets the priority class field, and should zero the subclass + A read from CR8 gets just the priority class field -int v3_handle_cr4_write(struct guest_info * info) { - uchar_t instr[15]; - int ret; - struct x86_instr dec_instr; - v3_vm_cpu_mode_t cpu_mode = v3_get_cpu_mode(info); + In the apic_tpr storage location, we have: - if (info->mem_mode == PHYSICAL_MEM) { - ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); - } else { - ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); - } + zeros [class] [subclass] - if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) { - PrintError("Could not decode instruction\n"); - return -1; - } + Because of this, an APIC implementation should use apic_tpr to store its TPR + In fact, it *should* do this, otherwise its TPR may get out of sync with the architected TPR - if (dec_instr.op_type != V3_OP_MOV2CR) { - PrintError("Invalid opcode in write to CR4\n"); - return -1; - } + On a CR8 read, we return just - if ((cpu_mode == PROTECTED) || (cpu_mode == PROTECTED_PAE)) { - struct cr4_32 * new_cr4 = (struct cr4_32 *)(dec_instr.src_operand.operand); - struct cr4_32 * cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4); - - PrintDebug("OperandVal = %x, length = %d\n", *(uint_t *)new_cr4, dec_instr.src_operand.size); - PrintDebug("Old CR4=%x\n", *(uint_t *)cr4); + zeros 0000 [class] + + On a CR8 write, we set the register to - if ((info->shdw_pg_mode == SHADOW_PAGING) && - (v3_get_mem_mode(info) == PHYSICAL_MEM)) { + zeros [class] 0000 - if ((cr4->pae == 0) && (new_cr4->pae == 1)) { - PrintDebug("Creating PAE passthrough tables\n"); +*/ - // Delete the old 32 bit direct map page tables - delete_page_tables_32((pde32_t *)V3_VAddr((void *)(info->direct_map_pt))); +int v3_handle_cr8_write(struct guest_info * info) { + int ret; + uchar_t instr[15]; + 
struct x86_instr dec_instr; + + if (info->mem_mode == PHYSICAL_MEM) { + ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); + } else { + ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); + } + + if (ret!=15) { + PrintError(info->vm_info, info, "Could not read instruction\n"); + return -1; + } + + if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) { + PrintError(info->vm_info, info, "Could not decode instruction\n"); + return -1; + } + + if (dec_instr.op_type == V3_OP_MOV2CR) { + PrintDebug(info->vm_info, info, "MOV2CR8 (cpu_mode=%s)\n", v3_cpu_mode_to_str(info->cpu_mode)); + + if ((info->cpu_mode == LONG) || + (info->cpu_mode == LONG_32_COMPAT)) { + uint64_t *val = (uint64_t *)(dec_instr.src_operand.operand); - // create 32 bit PAE direct map page table - info->direct_map_pt = (addr_t)V3_PAddr(create_passthrough_pts_32PAE(info)); + info->ctrl_regs.apic_tpr = (*val & 0xf) << 4; - // reset cr3 to new page tables - info->ctrl_regs.cr3 = *(addr_t*)&(info->direct_map_pt); + V3_Print(info->vm_info, info, "Write of CR8 sets apic_tpr to 0x%llx\n",info->ctrl_regs.apic_tpr); - } else if ((cr4->pae == 1) && (new_cr4->pae == 0)) { - // Create passthrough standard 32bit pagetables + } else { + // probably should raise exception here + } + } else { + PrintError(info->vm_info, info, "Unhandled opcode in handle_cr8_write\n"); return -1; - } } + + info->rip += dec_instr.instr_length; + + return 0; +} - *cr4 = *new_cr4; - PrintDebug("New CR4=%x\n", *(uint_t *)cr4); - - } else if ((cpu_mode == LONG) || (cpu_mode == LONG_32_COMPAT)) { - struct cr4_64 * new_cr4 = (struct cr4_64 *)(dec_instr.src_operand.operand); - struct cr4_64 * cr4 = (struct cr4_64 *)&(info->ctrl_regs.cr4); - PrintDebug("Old CR4=%p\n", (void *)*(addr_t *)cr4); - PrintDebug("New CR4=%p\n", (void *)*(addr_t *)new_cr4); - if (new_cr4->pae == 0) { - // cannot turn off PAE in long mode GPF the guest - PrintError("Cannot disable PAE in long mode, sending GPF\n"); - return -1; +int v3_handle_cr8_read(struct guest_info * info) { + uchar_t instr[15]; + int ret; + struct x86_instr dec_instr; + + if (info->mem_mode == PHYSICAL_MEM) { + ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); + } else { + ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); } + + if (ret!=15) { + PrintError(info->vm_info, info, "Could not read instruction\n"); + return -1; + } + + if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) { + PrintError(info->vm_info, info, "Could not decode instruction\n"); + return -1; + } + + if (dec_instr.op_type == V3_OP_MOVCR2) { + PrintDebug(info->vm_info, info, "MOVCR82 (mode=%s)\n", v3_cpu_mode_to_str(info->cpu_mode)); + + if ((info->cpu_mode == LONG) || + (info->cpu_mode == LONG_32_COMPAT)) { + uint64_t *dst_reg = (uint64_t *)(dec_instr.dst_operand.operand); - *cr4 = *new_cr4; + *dst_reg = (info->ctrl_regs.apic_tpr >> 4) & 0xf; - } else { - PrintError("CR4 write not supported in CPU_MODE: %s\n", v3_cpu_mode_to_str(cpu_mode)); - return -1; - } + V3_Print(info->vm_info, info, "Read of CR8 (apic_tpr) returns 0x%llx\n",*dst_reg); - info->rip += dec_instr.instr_length; - return 0; + } else { + // probably should raise exception + } + + } else { + PrintError(info->vm_info, info, "Unhandled opcode in handle_cr8_read\n"); + return -1; + } + + info->rip += dec_instr.instr_length; + + return 0; } -int v3_handle_efer_read(uint_t msr, struct v3_msr * dst, void 
* priv_data) { - struct guest_info * info = (struct guest_info *)(priv_data); - PrintDebug("EFER Read HI=%x LO=%x\n", info->guest_efer.hi, info->guest_efer.lo); +int v3_handle_efer_read(struct guest_info * core, uint_t msr, struct v3_msr * dst, void * priv_data) { + PrintDebug(core->vm_info, core, "EFER Read HI=%x LO=%x\n", core->shdw_pg_state.guest_efer.hi, core->shdw_pg_state.guest_efer.lo); + + dst->value = core->shdw_pg_state.guest_efer.value; + + return 0; +} - dst->value = info->guest_efer.value; - info->rip += 2; // WRMSR/RDMSR are two byte operands - return 0; -} +int v3_handle_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data) { + struct v3_msr * vm_efer = &(core->shdw_pg_state.guest_efer); + struct efer_64 * hw_efer = (struct efer_64 *)&(core->ctrl_regs.efer); + struct efer_64 old_hw_efer = *((struct efer_64 *)&core->ctrl_regs.efer); + + PrintDebug(core->vm_info, core, "EFER Write HI=%x LO=%x\n", src.hi, src.lo); + + // Set EFER value seen by guest if it reads EFER + vm_efer->value = src.value; + + // Set EFER value seen by hardware while the guest is running + *(uint64_t *)hw_efer = src.value; + + // We have gotten here either because we are using + // shadow paging, or we are using nested paging on SVM + // In the latter case, we don't need to do anything + // like the following + if (core->shdw_pg_mode == SHADOW_PAGING) { + // Catch unsupported features + if ((old_hw_efer.lme == 1) && (hw_efer->lme == 0)) { + PrintError(core->vm_info, core, "Disabling long mode once it has been enabled is not supported\n"); + return -1; + } + + // Set LME and LMA bits seen by hardware + if (old_hw_efer.lme == 0) { + // Long mode was not previously enabled, so the lme bit cannot + // be set yet. It will be set later when the guest sets CR0.PG + // to enable paging. + hw_efer->lme = 0; + } else { + // Long mode was previously enabled. Ensure LMA bit is set. + // VMX does not automatically set LMA, and this should not affect SVM. + hw_efer->lma = 1; + } + } + + + PrintDebug(core->vm_info, core, "RIP=%p\n", (void *)core->rip); + PrintDebug(core->vm_info, core, "New EFER value HW(hi=%p), VM(hi=%p)\n", (void *)*(uint64_t *)hw_efer, (void *)vm_efer->value); -int v3_handle_efer_write(uint_t msr, struct v3_msr src, void * priv_data) { - struct guest_info * info = (struct guest_info *)(priv_data); - //struct efer_64 * new_efer = (struct efer_64 *)&(src.value); - // struct efer_64 * shadow_efer = (struct efer_64 *)&(info->ctrl_regs.efer); - struct v3_msr * guest_efer = &(info->guest_efer); + return 0; +} - PrintDebug("EFER Write\n"); - PrintDebug("EFER Write Values: HI=%x LO=%x\n", src.hi, src.lo); - //PrintDebug("Old EFER=%p\n", (void *)*(addr_t*)(shadow_efer)); +int v3_handle_vm_cr_read(struct guest_info * core, uint_t msr, struct v3_msr * dst, void * priv_data) { + /* tell the guest that the BIOS disabled SVM, that way it doesn't get + * confused by the fact that CPUID reports SVM as available but it still + * cannot be used + */ + dst->value = SVM_VM_CR_MSR_lock | SVM_VM_CR_MSR_svmdis; + PrintDebug(core->vm_info, core, "VM_CR Read HI=%x LO=%x\n", dst->hi, dst->lo); + return 0; +} - // We virtualize the guests efer to hide the SVME and LMA bits - guest_efer->value = src.value; +int v3_handle_vm_cr_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data) { + PrintDebug(core->vm_info, core, "VM_CR Write\n"); + PrintDebug(core->vm_info, core, "VM_CR Write Values: HI=%x LO=%x\n", src.hi, src.lo); - - // We have to handle long mode writes.... 
- - /* - if ((info->shdw_pg_mode == SHADOW_PAGING) && - (v3_get_mem_mode(info) == PHYSICAL_MEM)) { - - if ((shadow_efer->lme == 0) && (new_efer->lme == 1)) { - PrintDebug("Transition to longmode\n"); - PrintDebug("Creating Passthrough 64 bit page tables\n"); - - // Delete the old 32 bit direct map page tables - - PrintDebug("Deleting old PAE Page tables\n"); - PrintError("JRL BUG?: Will the old page tables always be in PAE format??\n"); - delete_page_tables_32PAE((pdpe32pae_t *)V3_VAddr((void *)(info->direct_map_pt))); - - // create 64 bit direct map page table - info->direct_map_pt = (addr_t)V3_PAddr(create_passthrough_pts_64(info)); - - // reset cr3 to new page tables - info->ctrl_regs.cr3 = *(addr_t*)&(info->direct_map_pt); - - // We mark the Long Mode active because we have paging enabled - // We do this in new_efer because we copy the msr in full below - // new_efer->lma = 1; - - } else if ((shadow_efer->lme == 1) && (new_efer->lme == 0)) { - // transition out of long mode - //((struct efer_64 *)&(info->guest_efer.value))->lme = 0; - //((struct efer_64 *)&(info->guest_efer.value))->lma = 0; - - return -1; - } - - // accept all changes to the efer, but make sure that the SVME bit is set... (SVM specific) - *shadow_efer = *new_efer; - shadow_efer->svme = 1; - - - - PrintDebug("New EFER=%p\n", (void *)*(addr_t *)(shadow_efer)); - } else { - PrintError("Write to EFER in NESTED_PAGING or VIRTUAL_MEM mode not supported\n"); - // Should probably just check for a long mode transition, and bomb out if it is - return -1; - } - */ - info->rip += 2; // WRMSR/RDMSR are two byte operands - - return 0; + /* writes to LOCK and SVMDIS are silently ignored (according to the spec), + * other writes indicate the guest wants to use some feature we haven't + * implemented + */ + if (src.value & ~(SVM_VM_CR_MSR_lock | SVM_VM_CR_MSR_svmdis)) { + PrintDebug(core->vm_info, core, "VM_CR write sets unsupported bits: HI=%x LO=%x\n", src.hi, src.lo); + return -1; + } + + return 0; }
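
A minimal standalone sketch (the mask constants and helper name are illustrative, not taken from the patch) of the CR0 virtualization rule that handle_mov_to_cr0 applies under shadow paging: the guest keeps the value it wrote with ET forced to 1, while the shadow copy handed to hardware always keeps PG set and forces WP whenever guest paging is off, so that writes to hooked memory still fault into the VMM.

    #include <stdint.h>

    #define CR0_ET (1u << 4)    /* extension type, always reported as set */
    #define CR0_WP (1u << 16)   /* supervisor write protect */
    #define CR0_PG (1u << 31)   /* paging enable */

    /* Returns the CR0 value to load into hardware for a guest-written CR0. */
    static inline uint32_t shadow_cr0_from_guest(uint32_t guest_written) {
        uint32_t guest_cr0  = guest_written | CR0_ET;  /* guest sees its own value, ET forced on */
        uint32_t shadow_cr0 = guest_cr0 | CR0_PG;      /* shadow paging keeps real paging enabled */

        if (!(guest_cr0 & CR0_PG)) {
            shadow_cr0 |= CR0_WP;                      /* catch writes to hooked memory */
        }
        return shadow_cr0;
    }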
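
A minimal standalone sketch (helper names are illustrative) of the CR8/apic_tpr mapping described in the comment block above and implemented by the CR8 handlers: the stored apic_tpr keeps the priority class in bits 7:4 and the subclass in bits 3:0, a CR8 read returns only the class, and a CR8 write sets the class and zeroes the subclass.

    #include <stdint.h>

    /* Stored apic_tpr layout: zeros | priority class (bits 7:4) | subclass (bits 3:0) */

    /* CR8 read: expose only the priority class in bits 3:0 of the result. */
    static inline uint64_t cr8_from_apic_tpr(uint64_t apic_tpr) {
        return (apic_tpr >> 4) & 0xf;
    }

    /* CR8 write: set the class field and clear the subclass. */
    static inline uint64_t apic_tpr_from_cr8(uint64_t cr8) {
        return (cr8 & 0xf) << 4;
    }

For example, a guest write of CR8 = 0x9 stores 0x90 in apic_tpr, and a subsequent CR8 read returns 0x9 regardless of any subclass bits a later APIC TPR write may set.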