From: Jack Lange
Date: Tue, 28 Oct 2008 23:54:50 +0000 (-0500)
Subject: added more 64 bit support,
X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?p=palacios.git;a=commitdiff_plain;h=b81af0a653e471ba2aa240c5135bd113467587ca

added more 64 bit support,
moved around the control register handlers to segregate the different CPU modes

NOTE: They currently work, but they are wrong...
---

diff --git a/palacios/include/palacios/vm_guest.h b/palacios/include/palacios/vm_guest.h
index ea2252d..10a95aa 100644
--- a/palacios/include/palacios/vm_guest.h
+++ b/palacios/include/palacios/vm_guest.h
@@ -35,7 +35,6 @@
 
 
 
-
 struct v3_gprs {
   v3_reg_t rdi;
   v3_reg_t rsi;
@@ -148,6 +147,9 @@ struct guest_info {
   v3_vm_operating_mode_t run_state;
   void * vmm_data;
+
+  struct v3_msr guest_efer;
+
   /* TEMP */
   //ullong_t exit_tsc;
 
diff --git a/palacios/include/palacios/vmm_decoder.h b/palacios/include/palacios/vmm_decoder.h
index df10150..cfe5ff7 100644
--- a/palacios/include/palacios/vmm_decoder.h
+++ b/palacios/include/palacios/vmm_decoder.h
@@ -263,6 +263,7 @@ static inline addr_t get_addr_linear(struct guest_info * info, addr_t addr, stru
 
   case PROTECTED:
   case PROTECTED_PAE:
+  case LONG_32_COMPAT:
     return addr + seg->base;
     break;
 
@@ -270,10 +271,10 @@ static inline addr_t get_addr_linear(struct guest_info * info, addr_t addr, stru
     // In long mode the segment bases are disregarded (forced to 0), unless using 
     // FS or GS, then the base addresses are added
     return addr + seg->base;
 
-  case LONG_32_COMPAT:
+  case LONG_16_COMPAT:
   default:
-    PrintError("Unsupported Address Mode\n");
+    PrintError("Unsupported CPU Mode: %d\n", info->cpu_mode);
     return -1;
   }
 }
 
diff --git a/palacios/include/palacios/vmm_paging.h b/palacios/include/palacios/vmm_paging.h
index e400554..d59b7f9 100644
--- a/palacios/include/palacios/vmm_paging.h
+++ b/palacios/include/palacios/vmm_paging.h
@@ -163,9 +163,9 @@ the host state in the vmcs before entering the guest. 
-#define CR3_TO_PDE32(cr3) ((pde32_t *)V3_VAddr((void *)(((ulong_t)cr3) & 0xfffff000)))
+#define CR3_TO_PDE32(cr3) ((pde32_t *)V3_VAddr((void *)(addr_t)(((ulong_t)cr3) & 0xfffff000)))
 
 #define CR3_TO_PDPTRE(cr3) (V3_VAddr((void *)(((ulong_t)cr3) & 0xffffffe0)))
 
-#define CR3_TO_PML4E64(cr3) ((pml4e64_t *)V3_VAddr((void *)(((ullong_t)cr3) & 0x000ffffffffff000LL)))
+#define CR3_TO_PML4E64(cr3) ((pml4e64_t *)V3_VAddr((void *)(addr_t)(((ullong_t)cr3) & 0x000ffffffffff000LL)))
 
diff --git a/palacios/src/palacios/svm.c b/palacios/src/palacios/svm.c
index ee2c4fd..f60129f 100644
--- a/palacios/src/palacios/svm.c
+++ b/palacios/src/palacios/svm.c
@@ -90,6 +90,7 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
    */
   guest_state->efer |= EFER_MSR_svm_enable;
+  vm_info->guest_efer.value = 0x0LL;
 
   v3_hook_msr(vm_info, EFER_MSR, NULL,
              /*&v3_handle_efer_read,*/
@@ -147,6 +148,7 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
 
   guest_state->cr0 = 0x60000010;
 
+
   guest_state->cs.selector = 0xf000;
   guest_state->cs.limit=0xffff;
   guest_state->cs.base = 0x0000000f0000LL;
 
diff --git a/palacios/src/palacios/vmm_ctrl_regs.c b/palacios/src/palacios/vmm_ctrl_regs.c
index b3d3356..afdd374 100644
--- a/palacios/src/palacios/vmm_ctrl_regs.c
+++ b/palacios/src/palacios/vmm_ctrl_regs.c
@@ -39,6 +39,22 @@
 #endif
 
 
+static int handle_lmsw(struct guest_info * info, struct x86_instr * dec_instr);
+static int handle_clts(struct guest_info * info, struct x86_instr * dec_instr);
+
+static int handle_mov_to_cr0(struct guest_info * info, struct x86_instr * dec_instr);
+static int handle_mov_to_cr0_32(struct guest_info * info, struct x86_instr * dec_instr);
+static int handle_mov_to_cr0_32pae(struct guest_info * info, struct x86_instr * dec_instr);
+static int handle_mov_to_cr0_64(struct guest_info * info, struct x86_instr * dec_instr);
+static int handle_mov_to_cr0_64compat(struct guest_info * info, struct x86_instr * dec_instr);
+
+static int handle_mov_to_cr3_32(struct guest_info * info, struct x86_instr * dec_instr);
+static int handle_mov_to_cr3_32pae(struct guest_info * info, struct x86_instr * dec_instr);
+static int handle_mov_to_cr3_64(struct guest_info * info, struct x86_instr * dec_instr);
+static int handle_mov_to_cr3_64compat(struct guest_info * info, struct x86_instr * dec_instr);
+
+
+
 // First Attempt = 494 lines
 // current = 106 lines
 int v3_handle_cr0_write(struct guest_info * info) {
@@ -65,87 +81,24 @@ int v3_handle_cr0_write(struct guest_info * info) {
     return -1;
   }
 
-
   if (v3_opcode_cmp(V3_OPCODE_LMSW, (const uchar_t *)(dec_instr.opcode)) == 0) {
-    struct cr0_real *real_cr0 = (struct cr0_real*)&(info->ctrl_regs.cr0);
-    struct cr0_real *new_cr0 = (struct cr0_real *)(dec_instr.src_operand.operand);
-    uchar_t new_cr0_val;
-
-    PrintDebug("LMSW\n");
-
-    new_cr0_val = (*(char*)(new_cr0)) & 0x0f;
-
-    PrintDebug("OperandVal = %x\n", new_cr0_val);
-
-    PrintDebug("Old CR0=%x\n", *(uint_t *)real_cr0);
-    *(uchar_t*)real_cr0 &= 0xf0;
-    *(uchar_t*)real_cr0 |= new_cr0_val;
-    PrintDebug("New CR0=%x\n", *(uint_t *)real_cr0);
-
-    if (info->shdw_pg_mode == SHADOW_PAGING) {
-      struct cr0_real * shadow_cr0 = (struct cr0_real*)&(info->shdw_pg_state.guest_cr0);
-
-      PrintDebug(" Old Shadow CR0=%x\n", *(uint_t *)shadow_cr0);
-      *(uchar_t*)shadow_cr0 &= 0xf0;
-      *(uchar_t*)shadow_cr0 |= new_cr0_val;
-      PrintDebug("New Shadow CR0=%x\n", *(uint_t *)shadow_cr0);
+    if (handle_lmsw(info, &dec_instr) == -1) {
+      return -1;
     }
-  } else if (v3_opcode_cmp(V3_OPCODE_MOV2CR, (const uchar_t *)(dec_instr.opcode)) == 0) {
-    PrintDebug("MOV2CR0\n");
-    if (info->cpu_mode == LONG) {
-      // 64 bit registers
-      // Set efer.lma = 1
+  } else if (v3_opcode_cmp(V3_OPCODE_MOV2CR, (const uchar_t *)(dec_instr.opcode)) == 0) {
 
-      PrintError("Long mode currently not handled\n");
+    if (handle_mov_to_cr0(info, &dec_instr) == -1) {
       return -1;
-    } else {
-      // 32 bit registers
-      struct cr0_32 *real_cr0 = (struct cr0_32*)&(info->ctrl_regs.cr0);
-      struct cr0_32 *new_cr0= (struct cr0_32 *)(dec_instr.src_operand.operand);
-
-      PrintDebug("OperandVal = %x, length=%d\n", *(uint_t *)new_cr0, dec_instr.src_operand.size);
-
-
-      PrintDebug("Old CR0=%x\n", *(uint_t *)real_cr0);
-      *real_cr0 = *new_cr0;
-
-
-      if (info->shdw_pg_mode == SHADOW_PAGING) {
-        struct cr0_32 * shadow_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
-
-        PrintDebug("Old Shadow CR0=%x\n", *(uint_t *)shadow_cr0);
-
-        real_cr0->et = 1;
-
-        *shadow_cr0 = *new_cr0;
-        shadow_cr0->et = 1;
-
-        if (v3_get_mem_mode(info) == VIRTUAL_MEM) {
-          struct cr3_32 * shadow_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.shadow_cr3);
-          PrintDebug("Setting up Shadow Page Table\n");
-          info->ctrl_regs.cr3 = *(addr_t*)shadow_cr3;
-        } else {
-          info->ctrl_regs.cr3 = *(addr_t*)&(info->direct_map_pt);
-          real_cr0->pg = 1;
-        }
-
-        PrintDebug("New Shadow CR0=%x\n",*(uint_t *)shadow_cr0);
-      }
-      PrintDebug("New CR0=%x\n", *(uint_t *)real_cr0);
     }
   } else if (v3_opcode_cmp(V3_OPCODE_CLTS, (const uchar_t *)(dec_instr.opcode)) == 0) {
-    // CLTS
-    struct cr0_32 *real_cr0 = (struct cr0_32*)&(info->ctrl_regs.cr0);
-
-    real_cr0->ts = 0;
-    if (info->shdw_pg_mode == SHADOW_PAGING) {
-      struct cr0_32 * shadow_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
-      shadow_cr0->ts = 0;
+    if (handle_clts(info, &dec_instr) == -1) {
+      return -1;
     }
+
   } else {
     PrintError("Unhandled opcode in handle_cr0_write\n");
     return -1;
@@ -157,6 +110,137 @@ int v3_handle_cr0_write(struct guest_info * info) {
 }
+
+
+static int handle_mov_to_cr0(struct guest_info * info, struct x86_instr * dec_instr) {
+  PrintDebug("MOV2CR0\n");
+
+  switch (info->cpu_mode) {
+  case REAL:
+  case PROTECTED:
+    return handle_mov_to_cr0_32(info, dec_instr);
+  case PROTECTED_PAE:
+    return handle_mov_to_cr0_32pae(info, dec_instr);
+  case LONG:
+    return handle_mov_to_cr0_64(info, dec_instr);
+  case LONG_32_COMPAT:
+    return handle_mov_to_cr0_64compat(info, dec_instr);
+  default:
+    PrintError("Invalid CPU Operating Mode: %d\n", info->cpu_mode);
+    return -1;
+
+  }
+}
+
+static int handle_mov_to_cr0_32pae(struct guest_info * info, struct x86_instr * dec_instr) {
+  PrintError("32 bit PAE mov to CR0 not implemented\n");
+  return -1;
+}
+
+static int handle_mov_to_cr0_64(struct guest_info * info, struct x86_instr * dec_instr) {
+  PrintError("64 bit mov to CR0 not implemented\n");
+  return -1;
+}
+
+static int handle_mov_to_cr0_64compat(struct guest_info * info, struct x86_instr * dec_instr) {
+  PrintError("64 bit compatibility mode move to CR0 not implemented\n");
+  return -1;
+}
+
+
+static int handle_mov_to_cr0_32(struct guest_info * info, struct x86_instr * dec_instr) {
+
+  // 32 bit registers
+  struct cr0_32 *real_cr0 = (struct cr0_32*)&(info->ctrl_regs.cr0);
+  struct cr0_32 *new_cr0= (struct cr0_32 *)(dec_instr->src_operand.operand);
+
+  PrintDebug("OperandVal = %x, length=%d\n", *(uint_t *)new_cr0, dec_instr->src_operand.size);
+
+
+  PrintDebug("Old CR0=%x\n", *(uint_t *)real_cr0);
+  *real_cr0 = *new_cr0;
+
+
+  if (info->shdw_pg_mode == SHADOW_PAGING) {
+    struct cr0_32 * shadow_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
PrintDebug("Old Shadow CR0=%x\n", *(uint_t *)shadow_cr0); + + real_cr0->et = 1; + + *shadow_cr0 = *new_cr0; + shadow_cr0->et = 1; + + if (v3_get_mem_mode(info) == VIRTUAL_MEM) { + struct cr3_32 * shadow_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.shadow_cr3); + PrintDebug("Setting up Shadow Page Table\n"); + info->ctrl_regs.cr3 = *(addr_t*)shadow_cr3; + } else { + info->ctrl_regs.cr3 = *(addr_t*)&(info->direct_map_pt); + real_cr0->pg = 1; + } + + PrintDebug("New Shadow CR0=%x\n",*(uint_t *)shadow_cr0); + } + PrintDebug("New CR0=%x\n", *(uint_t *)real_cr0); + + return 0; +} + + + + +static int handle_clts(struct guest_info * info, struct x86_instr * dec_instr) { + // CLTS + struct cr0_32 *real_cr0 = (struct cr0_32*)&(info->ctrl_regs.cr0); + + real_cr0->ts = 0; + + if (info->shdw_pg_mode == SHADOW_PAGING) { + struct cr0_32 * shadow_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0); + shadow_cr0->ts = 0; + } + return 0; +} + + +static int handle_lmsw(struct guest_info * info, struct x86_instr * dec_instr) { + struct cr0_real *real_cr0 = (struct cr0_real*)&(info->ctrl_regs.cr0); + struct cr0_real *new_cr0 = (struct cr0_real *)(dec_instr->src_operand.operand); + uchar_t new_cr0_val; + + PrintDebug("LMSW\n"); + + new_cr0_val = (*(char*)(new_cr0)) & 0x0f; + + PrintDebug("OperandVal = %x\n", new_cr0_val); + + PrintDebug("Old CR0=%x\n", *(uint_t *)real_cr0); + *(uchar_t*)real_cr0 &= 0xf0; + *(uchar_t*)real_cr0 |= new_cr0_val; + PrintDebug("New CR0=%x\n", *(uint_t *)real_cr0); + + + if (info->shdw_pg_mode == SHADOW_PAGING) { + struct cr0_real * shadow_cr0 = (struct cr0_real*)&(info->shdw_pg_state.guest_cr0); + + PrintDebug(" Old Shadow CR0=%x\n", *(uint_t *)shadow_cr0); + *(uchar_t*)shadow_cr0 &= 0xf0; + *(uchar_t*)shadow_cr0 |= new_cr0_val; + PrintDebug("New Shadow CR0=%x\n", *(uint_t *)shadow_cr0); + } + return 0; +} + + + + + + + + + + // First attempt = 253 lines // current = 51 lines int v3_handle_cr0_read(struct guest_info * info) { @@ -222,6 +306,7 @@ int v3_handle_cr0_read(struct guest_info * info) { + // First Attempt = 256 lines // current = 65 lines int v3_handle_cr3_write(struct guest_info * info) { @@ -235,84 +320,138 @@ int v3_handle_cr3_write(struct guest_info * info) { ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr); } - /* The IFetch will already have faulted in the necessary bytes for the full instruction - if (ret != 15) { - // I think we should inject a GPF into the guest - PrintError("Could not read instruction (ret=%d)\n", ret); - return -1; - } - */ - if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) { PrintError("Could not decode instruction\n"); return -1; } if (v3_opcode_cmp(V3_OPCODE_MOV2CR, (const uchar_t *)(dec_instr.opcode)) == 0) { - PrintDebug("MOV2CR3\n"); - PrintDebug("CR3 at 0x%p\n", &(info->ctrl_regs.cr3)); + if (info->mem_mode == PHYSICAL_MEM) { + // All we do is update the guest CR3 - if (info->shdw_pg_mode == SHADOW_PAGING) { - struct cr3_32 * new_cr3 = (struct cr3_32 *)(dec_instr.src_operand.operand); - struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3); - struct cr3_32 * shadow_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.shadow_cr3); - int cached = 0; - - - PrintDebug("Old Shadow CR3=%x; Old Guest CR3=%x\n", - *(uint_t*)shadow_cr3, *(uint_t*)guest_cr3); - + if (info->cpu_mode == LONG) { + struct cr3_64 * new_cr3 = (struct cr3_64 *)(dec_instr.src_operand.operand); + struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->shdw_pg_state.guest_cr3); + *guest_cr3 = *new_cr3; + 
+      } else {
+        struct cr3_32 * new_cr3 = (struct cr3_32 *)(dec_instr.src_operand.operand);
+        struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3);
+        *guest_cr3 = *new_cr3;
+      }
+    } else {
-      cached = v3_cache_page_tables32(info, (addr_t)V3_PAddr((void *)(addr_t)CR3_TO_PDE32((void *)*(addr_t *)new_cr3)));
-
-      if (cached == -1) {
-        PrintError("CR3 Cache failed\n");
-        return -1;
-      } else if (cached == 0) {
-        addr_t shadow_pt;
-
-        if(info->mem_mode == VIRTUAL_MEM) {
-          PrintDebug("New CR3 is different - flushing shadow page table %p\n", shadow_cr3 );
-          delete_page_tables_32((pde32_t *)CR3_TO_PDE32(*(uint_t*)shadow_cr3));
+      switch (info->cpu_mode) {
+      case PROTECTED:
+        if (handle_mov_to_cr3_32(info, &dec_instr) == -1) {
+          return -1;
+        }
+        break;
+      case PROTECTED_PAE:
+        if (handle_mov_to_cr3_32pae(info, &dec_instr) == -1) {
+          return -1;
+        }
+        break;
+      case LONG:
+        if (handle_mov_to_cr3_64(info, &dec_instr) == -1) {
+          return -1;
+        }
+        break;
+      case LONG_32_COMPAT:
+        if (handle_mov_to_cr3_64compat(info, &dec_instr) == -1) {
+          return -1;
        }
+        break;
-
-        shadow_pt = v3_create_new_shadow_pt();
-
-        shadow_cr3->pdt_base_addr = (addr_t)V3_PAddr((void *)(addr_t)PD32_BASE_ADDR(shadow_pt));
-        PrintDebug( "Created new shadow page table %p\n", (void *)(addr_t)shadow_cr3->pdt_base_addr );
-        //PrintDebugPageTables( (pde32_t *)CR3_TO_PDE32(*(uint_t*)shadow_cr3) );
+      default:
+        PrintError("Unhandled CPU mode: %d\n", info->cpu_mode);
+        return -1;
+      }
+    }
+  } else {
+    PrintError("Unhandled opcode in handle_cr3_write\n");
+    return -1;
+  }
+  info->rip += dec_instr.instr_length;
-      } else {
-        PrintDebug("Reusing cached shadow Page table\n");
+  return 0;
+}
+
+
+
+
+
+
+
+static int handle_mov_to_cr3_32(struct guest_info * info, struct x86_instr * dec_instr) {
+  PrintDebug("CR3 at 0x%p\n", &(info->ctrl_regs.cr3));
+
+  if (info->shdw_pg_mode == SHADOW_PAGING) {
+    struct cr3_32 * new_cr3 = (struct cr3_32 *)(dec_instr->src_operand.operand);
+    struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3);
+    struct cr3_32 * shadow_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.shadow_cr3);
+    int cached = 0;
+
+
+    PrintDebug("Old Shadow CR3=%x; Old Guest CR3=%x\n",
+               *(uint_t*)shadow_cr3, *(uint_t*)guest_cr3);
+
+
+
+    cached = v3_cache_page_tables32(info, (addr_t)V3_PAddr((void *)(addr_t)CR3_TO_PDE32((void *)*(addr_t *)new_cr3)));
+
+    if (cached == -1) {
+      PrintError("CR3 Cache failed\n");
+      return -1;
+    } else if (cached == 0) {
+      addr_t shadow_pt;
+
+      if(info->mem_mode == VIRTUAL_MEM) {
+        PrintDebug("New CR3 is different - flushing shadow page table %p\n", shadow_cr3 );
+        delete_page_tables_32((pde32_t *)CR3_TO_PDE32(*(uint_t*)shadow_cr3));
      }
+      shadow_pt = v3_create_new_shadow_pt();
-      shadow_cr3->pwt = new_cr3->pwt;
-      shadow_cr3->pcd = new_cr3->pcd;
+      shadow_cr3->pdt_base_addr = (addr_t)V3_PAddr((void *)(addr_t)PD32_BASE_ADDR(shadow_pt));
+      PrintDebug( "Created new shadow page table %p\n", (void *)(addr_t)shadow_cr3->pdt_base_addr );
+      //PrintDebugPageTables( (pde32_t *)CR3_TO_PDE32(*(uint_t*)shadow_cr3) );
-      // What the hell...
-      *guest_cr3 = *new_cr3;
-      PrintDebug("New Shadow CR3=%x; New Guest CR3=%x\n",
-                 *(uint_t*)shadow_cr3, *(uint_t*)guest_cr3);
-
-      if (info->mem_mode == VIRTUAL_MEM) {
-        // If we aren't in paged mode then we have to preserve the identity mapped CR3
-        info->ctrl_regs.cr3 = *(addr_t*)shadow_cr3;
-      }
+    } else {
+      PrintDebug("Reusing cached shadow Page table\n");
+    }
+
+
+    shadow_cr3->pwt = new_cr3->pwt;
+    shadow_cr3->pcd = new_cr3->pcd;
+
+    // What the hell...
+    *guest_cr3 = *new_cr3;
+
+    PrintDebug("New Shadow CR3=%x; New Guest CR3=%x\n",
+               *(uint_t*)shadow_cr3, *(uint_t*)guest_cr3);
+
+    if (info->mem_mode == VIRTUAL_MEM) {
+      // If we aren't in paged mode then we have to preserve the identity mapped CR3
+      info->ctrl_regs.cr3 = *(addr_t*)shadow_cr3;
+    }
-  } else {
-    PrintError("Unhandled opcode in handle_cr3_write\n");
-    return -1;
   }
+  return 0;
+}
-
-  info->rip += dec_instr.instr_length;
-  return 0;
+
+static int handle_mov_to_cr3_32pae(struct guest_info * info, struct x86_instr * dec_instr) {
+  PrintError("32 Bit PAE mode Mov to CR3 not implemented\n");
+  return -1;
+}
+
+static int handle_mov_to_cr3_64(struct guest_info * info, struct x86_instr * dec_instr) {
+  PrintError("Long mode Mov to CR3 not implemented\n");
+  return -1;
+}
+
+static int handle_mov_to_cr3_64compat(struct guest_info * info, struct x86_instr * dec_instr) {
+  PrintError("Long compatibility mode move to CR3 not implemented\n");
+  return -1;
 }
 
@@ -469,11 +608,16 @@ int v3_handle_efer_write(uint_t msr, struct v3_msr src, void * priv_data) {
 
     info->ctrl_regs.cr3 = *(addr_t*)&(info->direct_map_pt);
 
-    // Does this mean we will have to fully virtualize a shadow EFER??
+    // Does this mean we will have to fully virtualize a shadow EFER?? (yes it does)
+    ((struct efer_64 *)&(info->guest_efer.value))->lme = 1;
+    new_efer->lma = 1;
 
   } else if ((old_efer->lme == 1) && (new_efer->lme == 0)) {
     // transition out of long mode
+    //((struct efer_64 *)&(info->guest_efer.value))->lme = 0;
+    //((struct efer_64 *)&(info->guest_efer.value))->lma = 0;
+    return -1;
   }
 
diff --git a/palacios/src/palacios/vmm_shadow_paging.c b/palacios/src/palacios/vmm_shadow_paging.c
index faa48e1..c12b485 100644
--- a/palacios/src/palacios/vmm_shadow_paging.c
+++ b/palacios/src/palacios/vmm_shadow_paging.c
@@ -82,6 +82,9 @@ int v3_init_shadow_page_state(struct guest_info * info) {
   state->guest_cr3 = 0;
   state->shadow_cr3 = 0;
+  state->guest_cr0 = 0;
+
+
   state->cr3_cache = create_hashtable(0, &cr3_hash_fn, &cr3_equals);
 
@@ -144,6 +147,16 @@ int cache_page_tables32(struct guest_info * info, addr_t pde) {
 }
 */
 
+
+int v3_cache_page_tables(struct guest_info * info, addr_t cr3) {
+  switch(v3_get_cpu_mode(info)) {
+  case PROTECTED:
+    return v3_cache_page_tables32(info, (addr_t)V3_PAddr((void *)CR3_TO_PDE32(cr3)));
+  default:
+    return -1;
+  }
+}
+
 int v3_cache_page_tables32(struct guest_info * info, addr_t pde) {
   struct shadow_page_state * state = &(info->shdw_pg_state);
   addr_t pde_host_addr;
 
diff --git a/palacios/src/palacios/vmm_xed.c b/palacios/src/palacios/vmm_xed.c
index cc25dc5..e6fa06c 100644
--- a/palacios/src/palacios/vmm_xed.c
+++ b/palacios/src/palacios/vmm_xed.c
@@ -106,6 +106,7 @@ static int set_decoder_mode(struct guest_info * info, xed_state_t * state) {
     break;
   case PROTECTED:
   case PROTECTED_PAE:
+  case LONG_32_COMPAT:
     if (state->mmode != XED_MACHINE_MODE_LEGACY_32) {
       xed_state_init(state,
                      XED_MACHINE_MODE_LEGACY_32,
@@ -119,6 +120,7 @@ static int set_decoder_mode(struct guest_info * info, xed_state_t * state) {
     }
     break;
   default:
+    PrintError("Unsupported CPU mode: %d\n", info->cpu_mode);
    return -1;
  }
  return 0;
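
The new guest_efer field added in this commit suggests how the still commented-out EFER read hook in svm.c could eventually be filled in. The following is only a sketch of that idea, not part of the commit: it assumes the read hook mirrors v3_handle_efer_write's signature but takes a struct v3_msr * destination, and that the priv_data argument registered through v3_hook_msr is the guest_info pointer. Neither assumption is confirmed by this diff.

/* Hypothetical sketch only -- the hook signature and the meaning of priv_data
 * are assumptions.  The point is simply that a fully virtualized EFER would be
 * read back from info->guest_efer rather than from the hardware-visible copy,
 * so the guest never sees EFER_MSR_svm_enable or the real LME/LMA state. */
static int v3_handle_efer_read(uint_t msr, struct v3_msr * dst, void * priv_data) {
  struct guest_info * info = (struct guest_info *)priv_data;

  // report the shadowed (guest-visible) EFER value
  dst->value = info->guest_efer.value;

  return 0;
}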