From 66919433c010eaa6f4b8c64625b737fe54fae79b Mon Sep 17 00:00:00 2001
From: Alexander Sadovskiy
Date: Wed, 16 Nov 2011 21:28:48 +0400
Subject: [PATCH 26/32] Improved vpid and invept support + cosmetic changes

fixes in vmx.c, vmx_lowlevel.h:
+ added invvpid instruction support
+ enabled VPID and INVEPT for the UG case
+ moved __invvpid and __invept to a more logical place - vmx_lowlevel.h -
  and renamed them to vmx_invvpid and vmx_invept
  (a usage sketch follows the patch)

XXX: we still do not check whether the current CPU supports VPID, INVEPT,
etc. (a capability-check sketch also follows the patch)

fixes in vmx_ctrl_regs.c:
+ many cosmetic changes, such as replacing tabs and other indents with
  4 space chars
+ improved the comments and the logic of the very unclear function
  handle_mov_to_cr0
---
 palacios/include/palacios/vmx_lowlevel.h |  30 +++-
 palacios/src/palacios/vmx.c              |  28 +--
 palacios/src/palacios/vmx_ctrl_regs.c    | 397 ++++++++++++++----------------
 3 files changed, 225 insertions(+), 230 deletions(-)

diff --git a/palacios/include/palacios/vmx_lowlevel.h b/palacios/include/palacios/vmx_lowlevel.h
index fd63406..9e079d7 100644
--- a/palacios/include/palacios/vmx_lowlevel.h
+++ b/palacios/include/palacios/vmx_lowlevel.h
@@ -46,7 +46,13 @@
 #define VMWRITE_OPCODE ".byte 0x0f,0x79;"
 #define VMXOFF_OPCODE ".byte 0x0f,0x01,0xc4;"
 #define VMXON_OPCODE ".byte 0xf3,0x0f,0xc7;" /* reg=/6 */
-
+#define INVEPT_OPCODE ".byte 0x66, 0x0f, 0x38, 0x80, 0x08"
+#define INVVPID_OPCODE ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
+#define VMX_EPT_EXTENT_INDIVIDUAL_ADDR 0
+#define VMX_EPT_EXTENT_CONTEXT 1
+#define VMX_EPT_EXTENT_GLOBAL 2
+#define VMX_VPID_EXTENT_SINGLE_CONTEXT 1
+#define VMX_VPID_EXTENT_ALL_CONTEXT 2
 
 /* Mod/rm definitions for intel registers/memory */
 #define EAX_ECX_MODRM ".byte 0xc1;"
@@ -55,9 +61,31 @@
 // %eax with /7 reg
 #define EAX_07_MODRM ".byte 0x38;"
 
+static inline void vmx_invept(int ext, uint64_t eptp, uint64_t gpa)
+{
+    struct {
+        uint64_t eptp, gpa;
+    } operand = {eptp, gpa};
 
+    asm volatile (INVEPT_OPCODE
+                  /* CF==1 or ZF==1 --> VMfail; stop hard with ud2 */
+                  "; ja 1f ; ud2 ; 1:\n"
+                  : : "a" (&operand), "c" (ext) : "cc", "memory");
+}
 
+static inline void vmx_invvpid(int ext, uint16_t vpid, uint64_t gva)
+{
+    struct {
+        uint64_t vpid : 16;
+        uint64_t rsvd : 48;
+        uint64_t gva;
+    } operand = { vpid, 0, gva };
+
+    asm volatile (INVVPID_OPCODE
+                  /* CF==1 or ZF==1 --> VMfail; stop hard with ud2 */
+                  "; ja 1f ; ud2 ; 1:"
+                  : : "a"(&operand), "c"(ext) : "cc", "memory");
+}
 
 static inline int vmcs_clear(addr_t vmcs_ptr) {
     uint64_t vmcs_ptr_64 __attribute__ ((aligned(8))) = (uint64_t)vmcs_ptr;
diff --git a/palacios/src/palacios/vmx.c b/palacios/src/palacios/vmx.c
index 256c75b..836a460 100644
--- a/palacios/src/palacios/vmx.c
+++ b/palacios/src/palacios/vmx.c
@@ -94,23 +94,6 @@ static addr_t allocate_vmcs() {
     return (addr_t)V3_PAddr((void *)vmcs_page);
 }
 
-#define VMX_EPT_EXTENT_INDIVIDUAL_ADDR 0
-#define VMX_EPT_EXTENT_CONTEXT 1
-#define VMX_EPT_EXTENT_GLOBAL 2
-#define ASM_VMX_INVEPT ".byte 0x66, 0x0f, 0x38, 0x80, 0x08"
-static inline void __invept(int ext, uint64_t eptp, uint64_t gpa)
-{
-    struct {
-        uint64_t eptp, gpa;
-    } operand = {eptp, gpa};
-
-    asm volatile (".byte 0x66, 0x0f, 0x38, 0x80, 0x08"
-                  /* CF==1 or ZF==1 --> rc = -1 */
-                  "; ja 1f ; ud2 ; 1:\n"
-                  : : "a" (&operand), "c" (ext) : "cc", "memory");
-}
-
-
 static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state) {
     int vmx_ret = 0;
@@ -305,10 +288,8 @@ static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state)
         vmx_state->sec_proc_ctrls.enable_vpid = 1;
         vmcs_write(VMCS_VPID, core->vcpu_id + 1);
-
-        __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
-
-
+        vmx_invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, core->vcpu_id + 1, 0); // XXX: is vcpu_id + 1 really the right VPID?
+        vmx_invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
 
         if (v3_init_ept(core, &hw_info) == -1) {
             PrintError("Error initializing EPT\n");
@@ -398,6 +379,11 @@ static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state)
         vmx_state->sec_proc_ctrls.enable_ept = 1; // enable EPT paging
         vmx_state->sec_proc_ctrls.unrstrct_guest = 1; // enable unrestricted guest operation
 
+        vmx_state->sec_proc_ctrls.enable_vpid = 1;
+        vmcs_write(VMCS_VPID, core->vcpu_id + 1);
+        vmx_invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, core->vcpu_id + 1, 0); // XXX: is vcpu_id + 1 really the right VPID?
+        vmx_invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
+
         /* Disable shadow paging stuff */
         vmx_state->pri_proc_ctrls.cr3_ld_exit = 0;
diff --git a/palacios/src/palacios/vmx_ctrl_regs.c b/palacios/src/palacios/vmx_ctrl_regs.c
index ed4de70..419c258 100644
--- a/palacios/src/palacios/vmx_ctrl_regs.c
+++ b/palacios/src/palacios/vmx_ctrl_regs.c
@@ -1,4 +1,3 @@
-
 /*
  * This file is part of the Palacios Virtual Machine Monitor developed
  * by the V3VEE Project with funding from the United States National
@@ -43,7 +42,7 @@ int v3_vmx_handle_cr0_access(struct guest_info * info, struct vmx_exit_cr_qual *
 
     if (cr_qual->access_type < 2) {
         v3_reg_t * reg = get_reg_ptr(info, cr_qual);
-        
+
         if (cr_qual->access_type == 0) {
 
             if (handle_mov_to_cr0(info, reg, exit_info) != 0) {
@@ -52,8 +51,8 @@ int v3_vmx_handle_cr0_access(struct guest_info * info, struct vmx_exit_cr_qual *
             }
         } else {
             // Mov from cr
-    PrintError("Mov From CR0 not handled\n");
-    return -1;
+            PrintError("Mov From CR0 not handled\n");
+            return -1;
         }
 
         return 0;
@@ -79,46 +78,42 @@ int v3_vmx_handle_cr3_access(struct guest_info * info, struct vmx_exit_cr_qual *
     return -1;
 }
 
-
 static int ept_handle_cr4_write(struct guest_info * info, v3_reg_t * reg);
 int v3_vmx_handle_cr4_access(struct guest_info * info, struct vmx_exit_info * exit_info) {
     struct vmx_exit_cr_qual * cr_qual = (struct vmx_exit_cr_qual *)&(exit_info->exit_qual);
 
     if (cr_qual->access_type < 2) {
-    if(info->shdw_pg_mode == SHADOW_PAGING) {
-        if (cr_qual->access_type == 0) {
-            if (v3_handle_cr4_write(info) != 0) {
-                PrintError("Could not handle CR4 write\n");
-                return -1;
-            }
-
-            // need to update shadow CR4 since it is not updated inside v3_handle_cr4_write
-            struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;
-            vmx_info->guest_cr4 = info->ctrl_regs.cr4 &= ~CR4_VMXE;
-
-            info->ctrl_regs.cr4 |= CR4_VMXE; // no VMX allowed in guest, so mask CR4.VMXE
-        } else {
-            if (v3_handle_cr4_read(info) != 0) {
-                PrintError("Could not handle CR4 read\n");
-                return -1;
-            }
-        }
-    } else {
-        v3_reg_t * reg = get_reg_ptr(info, cr_qual);
-        if(cr_qual->access_type == 0) {
-            if (ept_handle_cr4_write(info, reg) != 0) {
-                PrintError("Could not handle CR4 write\n");
-                return -1;
-            }
-            info->rip += exit_info->instr_len;
-        } else {
-            PrintError("Impossible exit due to read of CR4 in VMX!\n");
-            return -1;
-        }
-    }
-
-    return 0;
+        if(info->shdw_pg_mode == SHADOW_PAGING) {
+            if (cr_qual->access_type == 0) {
+                if (v3_handle_cr4_write(info) != 0) {
+                    PrintError("Could not handle CR4 write\n");
+                    return -1;
+                }
+                // need to update shadow CR4 since it is not updated inside v3_handle_cr4_write
+                struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;
+                vmx_info->guest_cr4 = info->ctrl_regs.cr4 &= ~CR4_VMXE;
 
+                info->ctrl_regs.cr4 |= CR4_VMXE; // no VMX allowed in guest, so mask CR4.VMXE
+            } else {
+                if (v3_handle_cr4_read(info) != 0) {
+                    PrintError("Could not handle CR4 read\n");
+                    return -1;
+                }
+            }
+        } else {
+            v3_reg_t * reg = get_reg_ptr(info, cr_qual);
+            if(cr_qual->access_type == 0) {
+                if (ept_handle_cr4_write(info, reg) != 0) {
+                    PrintError("Could not handle CR4 write\n");
+                    return -1;
+                }
+                info->rip += exit_info->instr_len;
+            } else {
+                PrintError("Impossible exit due to read of CR4 in VMX!\n");
+                return -1;
+            }
+        }
+        return 0;
     }
 
     PrintError("Invalid CR4 Access type?? (type=%d)\n", cr_qual->access_type);
     return -1;
 }
 
@@ -138,7 +133,7 @@ static int ept_handle_cr4_write(struct guest_info * info, v3_reg_t * reg) {
     // update shadow anyway
     *guest_cr4 = *new_cr4;
     if((info->mem_mode == VIRTUAL_MEM) || (v3_cpu_types[info->pcpu_id] == V3_VMX_EPT_UG_CPU)) {
-        // we have paging enabled, so only care about VMXE
+        // we have paging enabled (or don't care about paging due to UG), so only care about VMXE
        *hw_cr4 = *new_cr4;
        *(uint32_t*)hw_cr4 |= CR4_VMXE;
        PrintDebug("CR4 update: new value %x\n", *(uint32_t*)hw_cr4);
@@ -153,11 +148,11 @@ static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
 
     if (info->shdw_pg_mode == SHADOW_PAGING) {
 
-    /*
-      PrintDebug("Old Guest CR3=%p, Old Shadow CR3=%p\n",
-      (void *)info->ctrl_regs.cr3,
-      (void *)info->shdw_pg_state.guest_cr3);
-    */
+        /*
+        PrintDebug("Old Guest CR3=%p, Old Shadow CR3=%p\n",
+                   (void *)info->ctrl_regs.cr3,
+                   (void *)info->shdw_pg_state.guest_cr3);
+        */
 
         if (info->cpu_mode == LONG) {
             info->shdw_pg_state.guest_cr3 = (uint64_t)*cr3_reg;
@@ -165,18 +160,17 @@ static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
             info->shdw_pg_state.guest_cr3 = (uint32_t)*cr3_reg;
         }
 
-
         if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
             if (v3_activate_shadow_pt(info) == -1) {
                 PrintError("Failed to activate 32 bit shadow page table\n");
                 return -1;
             }
         }
-    /*
+        /*
         PrintDebug("New guest CR3=%p, New shadow CR3=%p\n",
-      (void *)info->ctrl_regs.cr3,
-      (void *)info->shdw_pg_state.guest_cr3);
-    */
+                   (void *)info->ctrl_regs.cr3,
+                   (void *)info->shdw_pg_state.guest_cr3);
+        */
     } else if (info->shdw_pg_mode == NESTED_PAGING) {
         if(info->mem_mode == VIRTUAL_MEM) {
             PrintError("Mov to CR3 with paging enabled in EPT mode is impossible!\n");
@@ -186,30 +180,22 @@ static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
         PrintDebug("Guest CR3 value %x cached until paging is enabled.\n", (uint32_t)*cr3_reg);
     }
 
-
-
     return 0;
 }
 
 static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
-
-
     if (info->shdw_pg_mode == SHADOW_PAGING) {
-
         if ((v3_get_vm_cpu_mode(info) == LONG) ||
-            (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {
-
+            (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {
             *cr3_reg = (uint64_t)info->shdw_pg_state.guest_cr3;
         } else {
             *cr3_reg = (uint32_t)info->shdw_pg_state.guest_cr3;
         }
-
     } else {
         *cr3_reg = (uint32_t)info->shdw_pg_state.guest_cr3;
         PrintDebug("Guest reads cached value in EPT mode.\n");
     }
 
-
     return 0;
 }
 
@@ -220,125 +206,122 @@ static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_cr0, struc
     struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;
     uint_t paging_transition = 0;
 
-    /*
-      PrintDebug("Old shadow CR0: 0x%x, New shadow CR0: 0x%x\n",
-      (uint32_t)info->shdw_pg_state.guest_cr0, (uint32_t)*new_cr0);
-    */
-
-    // we don't need vmxassist in case of UG mode
+    PrintDebug("VCPU[%d]: Old shadow CR0: 0x%x, New shadow CR0: 0x%x, Guest CR0: 0x%x\n",
+               info->vcpu_id, *(uint32_t *)shdw_cr0, (uint32_t)*new_cr0, *(uint32_t *)guest_cr0);
+
+    /* First of all we check whether CR0.PE (protection enable) was changed.
+     * If we are not in UG mode, we have to call VMXAssist;
+     * in UG mode we do not have to react to this change at all.
+     * XXX: But most OSes modify both CR0.PE and CR0.PG with a single MOV
+     * (e.g.: EAX = CR0; EAX |= 0x80000001; CR0 = EAX;), so a paging transition (w/o UG)
+     * is also handled here in most cases, and not in the "else" branch below.
+     */
     if ((new_shdw_cr0->pe != shdw_cr0->pe) && (v3_cpu_types[info->pcpu_id] != V3_VMX_EPT_UG_CPU)) {
-        /*
-          PrintDebug("Guest CR0: 0x%x\n", *(uint32_t *)guest_cr0);
-          PrintDebug("Old shadow CR0: 0x%x\n", *(uint32_t *)shdw_cr0);
-          PrintDebug("New shadow CR0: 0x%x\n", *(uint32_t *)new_shdw_cr0);
-        */
-
         if (v3_vmxassist_ctx_switch(info) != 0) {
             PrintError("Unable to execute VMXASSIST context switch!\n");
             return -1;
         }
-
         if (vmx_info->assist_state == VMXASSIST_ENABLED) {
             PrintDebug("Loading VMXASSIST at RIP: %p\n", (void *)(addr_t)info->rip);
         } else {
             PrintDebug("Leaving VMXASSIST and entering protected mode at RIP: %p\n",
-           (void *)(addr_t)info->rip);
+                       (void *)(addr_t)info->rip);
         }
 
-        // PE switches modify the RIP directly, so we clear the instr_len field to avoid catastrophe
-        exit_info->instr_len = 0;
-
-        // v3_vmx_restore_vmcs(info);
-        // v3_print_vmcs(info);
+        // PE switches modify the RIP directly, so we clear the instr_len field to avoid catastrophe
+        exit_info->instr_len = 0;
+
+        // Do we still need these two commented lines below
+        // or is it time to remove them?
+        // v3_vmx_restore_vmcs(info);
+        // v3_print_vmcs(info);
     } else {
-
         if (new_shdw_cr0->pg != shdw_cr0->pg) {
             paging_transition = 1;
         }
 
         // The shadow always reflects the new value
         *shdw_cr0 = *new_shdw_cr0;
 
-        // We don't care about most of the flags, so lets go for it
-        // and set them to the guest values
-        *guest_cr0 = *shdw_cr0;
-
-        if(v3_cpu_types[info->pcpu_id] == V3_VMX_EPT_UG_CPU) {
-            // Except NE, must be always set in vmx operation
-            guest_cr0->ne = 1;
-        } else {
-            // Except PG, PE, and NE, which are always set
-            guest_cr0->pe = 1;
-            guest_cr0->pg = 1;
-            guest_cr0->ne = 1;
-        }
+        // We don't care about most of the flags, so let's go for it
+        // and set them to the guest values, except..
+        *guest_cr0 = *shdw_cr0;
+        // maybe we should also force CR0.WP here?
+        if(v3_cpu_types[info->pcpu_id] == V3_VMX_EPT_UG_CPU) {
+            // ..except NE, which must always be set in VMX operation
+            guest_cr0->ne = 1;
+        } else {
+            // ..except PG, PE and NE, which must always be set in VMX operation w/o UG
+            guest_cr0->pe = 1;
+            guest_cr0->pg = 1;
+            guest_cr0->ne = 1;
+        }
 
-        if ((paging_transition)) {
-            // Paging transition
-            // PrintDebug("Paging transition\n");
-
-            if ((v3_get_vm_mem_mode(info) == VIRTUAL_MEM) || (v3_cpu_types[info->pcpu_id] == V3_VMX_EPT_UG_CPU)) {
-                struct efer_64 * vm_efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
-                struct efer_64 * hw_efer = (struct efer_64 *)&(info->ctrl_regs.efer);
-
-                if (vm_efer->lme) {
-                    // PrintDebug("Enabling long mode\n");
-                    vm_efer->lma = 1;
-                    hw_efer->lma = 1;
-                    hw_efer->lme = 1;
-
-                    vmx_info->entry_ctrls.guest_ia32e = 1;
-                }
-
-                // PrintDebug("Activating Shadow Page tables\n");
-
-                if (info->shdw_pg_mode == SHADOW_PAGING) {
-                    if (v3_activate_shadow_pt(info) == -1) {
-                        PrintError("Failed to activate shadow page tables\n");
-                        return -1;
-                    }
-                } else {
-                    if(v3_cpu_types[info->pcpu_id] != V3_VMX_EPT_UG_CPU) {
-                        // EPT mode, need to update cached CR3, CR4
-                        info->ctrl_regs.cr3 = info->shdw_pg_state.guest_cr3;
-                        // Disable CR3 exits as guest enabled paging
-                        vmx_info->pri_proc_ctrls.cr3_ld_exit = 0;
-                        vmx_info->pri_proc_ctrls.cr3_str_exit = 0;
-                        if(v3_update_vmcs_ctrl_fields(info)) return -1;
-                        // write guest CR4
-                        struct cr4_32 *guest_cr4 = (struct cr4_32 *)&vmx_info->guest_cr4;
-                        struct cr4_32 *hw_cr4 = (struct cr4_32 *)&info->ctrl_regs.cr4;
-                        *hw_cr4 = *guest_cr4;
-                        *(uint32_t*)hw_cr4 |= CR4_VMXE;
-                        PrintDebug("Guest enables paging. Restored cached CR3 (%x), CR4 (%x)."
-                                   " Guest EFER %x, guest HW EFER %x\n", (uint32_t)info->ctrl_regs.cr3, *(uint32_t*)guest_cr4,
-                                   *(uint32_t*)vm_efer, *(uint32_t*)hw_efer);
-                    } else {
-                        PrintDebug("Guest (unrestricted) enables paging. Leaving CR3, CR4 and EFER as set by guest\n");
-                    }
-
-                }
-
-            } else {
-
-                if (info->shdw_pg_mode == SHADOW_PAGING) {
-                    if (v3_activate_passthrough_pt(info) == -1) {
-                        PrintError("Failed to activate passthrough page tables\n");
-                        return -1;
-                    }
-                } else {
-                    // we need to restore VMXASSIST. XXX
-#define GUEST_CR0 0x80010031
-#define GUEST_CR4 0x00002010
-                    *(uint32_t*)guest_cr0 = GUEST_CR0;
-                    info->ctrl_regs.cr4 = GUEST_CR4;
-                    // This is hideous... Let's hope that the 1to1 page table has not been nuked...
-                    info->ctrl_regs.cr3 = VMXASSIST_1to1_PT;
-                    PrintError("Guest disables paging. Is everything OK?\n");
-                }
-            }
-        }
+        if (paging_transition) {
+            // PrintDebug("Paging transition\n");
+            if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
+                struct efer_64 * vm_efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
+                struct efer_64 * hw_efer = (struct efer_64 *)&(info->ctrl_regs.efer);
+
+                if (vm_efer->lme) {
+                    PrintDebug("Guest enables long mode\n");
+
+                    vm_efer->lma = 1;
+                    hw_efer->lma = 1;
+                    hw_efer->lme = 1;
+                    /* this flag has a strange name in the Intel manuals, but its meaning is clear
+                     * if you look at the Bochs or KVM source code: set it before entering
+                     * long mode and clear it after exiting long mode.
+                     * XXX: we don't handle the exit from long mode at all yet
+                     */
+                    vmx_info->entry_ctrls.guest_ia32e = 1;
+                }
+
+                // PrintDebug("Activating Shadow Page tables\n");
+
+                if (info->shdw_pg_mode == SHADOW_PAGING) {
+                    if (v3_activate_shadow_pt(info) == -1) {
+                        PrintError("Failed to activate shadow page tables\n");
+                        return -1;
+                    }
+                } else if(v3_cpu_types[info->pcpu_id] != V3_VMX_EPT_UG_CPU) {
+                    // EPT mode, need to update cached CR3, CR4
+                    info->ctrl_regs.cr3 = info->shdw_pg_state.guest_cr3;
+                    // Disable CR3 exits as guest enabled paging
+                    vmx_info->pri_proc_ctrls.cr3_ld_exit = 0;
+                    vmx_info->pri_proc_ctrls.cr3_str_exit = 0;
+                    if(v3_update_vmcs_ctrl_fields(info)) return -1;
+                    // write guest CR4
+                    struct cr4_32 *guest_cr4 = (struct cr4_32 *)&vmx_info->guest_cr4;
+                    struct cr4_32 *hw_cr4 = (struct cr4_32 *)&info->ctrl_regs.cr4;
+                    *hw_cr4 = *guest_cr4;
+                    *(uint32_t*)hw_cr4 |= CR4_VMXE;
+                    PrintDebug("Guest enables paging. Restored cached CR3 (%x), CR4 (%x)."
+                               " Guest EFER %x, guest HW EFER %x\n",
+                               (uint32_t)info->ctrl_regs.cr3, *(uint32_t*)guest_cr4,
+                               *(uint32_t*)vm_efer, *(uint32_t*)hw_efer);
+                } else {
+                    PrintDebug("Guest (unrestricted) enables paging. Leaving CR3, CR4 and EFER as set by guest\n");
+                }
+            } else {
+                if (info->shdw_pg_mode == SHADOW_PAGING) {
+                    if (v3_activate_passthrough_pt(info) == -1) {
+                        PrintError("Failed to activate passthrough page tables\n");
+                        return -1;
+                    }
+                } else {
+                    if(v3_cpu_types[info->pcpu_id] != V3_VMX_EPT_UG_CPU) {
+                        // we need to restore VMXASSIST. XXX
+                        *(uint32_t*)guest_cr0 = 0x80010031; // GUEST_CR0
+                        info->ctrl_regs.cr4 = 0x00002010; // GUEST_CR4
+                        // This is hideous... Let's hope that the 1to1 page table has not been nuked...
+                        info->ctrl_regs.cr3 = VMXASSIST_1to1_PT;
+                    }
+                    PrintError("Guest disables paging. Is everything OK?\n");
+                    return 0;
+                }
+            }
+        }
     }
 
     return 0;
 }
 
@@ -348,57 +331,55 @@ static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual
     v3_reg_t * reg = NULL;
 
     switch (cr_qual->gpr) {
-    case 0:
-        reg = &(info->vm_regs.rax);
-        break;
-    case 1:
-        reg = &(info->vm_regs.rcx);
-        break;
-    case 2:
-        reg = &(info->vm_regs.rdx);
-        break;
-    case 3:
-        reg = &(info->vm_regs.rbx);
-        break;
-    case 4:
-        reg = &(info->vm_regs.rsp);
-        break;
-    case 5:
-        reg = &(info->vm_regs.rbp);
-        break;
-    case 6:
-        reg = &(info->vm_regs.rsi);
-        break;
-    case 7:
-        reg = &(info->vm_regs.rdi);
-        break;
-    case 8:
-        reg = &(info->vm_regs.r8);
-        break;
-    case 9:
-        reg = &(info->vm_regs.r9);
-        break;
-    case 10:
-        reg = &(info->vm_regs.r10);
-        break;
-    case 11:
-        reg = &(info->vm_regs.r11);
-        break;
-    case 12:
-        reg = &(info->vm_regs.r11);
-        break;
-    case 13:
-        reg = &(info->vm_regs.r13);
-        break;
-    case 14:
-        reg = &(info->vm_regs.r14);
-        break;
-    case 15:
-        reg = &(info->vm_regs.r15);
-        break;
+        case 0:
+            reg = &(info->vm_regs.rax);
+            break;
+        case 1:
+            reg = &(info->vm_regs.rcx);
+            break;
+        case 2:
+            reg = &(info->vm_regs.rdx);
+            break;
+        case 3:
+            reg = &(info->vm_regs.rbx);
+            break;
+        case 4:
+            reg = &(info->vm_regs.rsp);
+            break;
+        case 5:
+            reg = &(info->vm_regs.rbp);
+            break;
+        case 6:
+            reg = &(info->vm_regs.rsi);
+            break;
+        case 7:
+            reg = &(info->vm_regs.rdi);
+            break;
+        case 8:
+            reg = &(info->vm_regs.r8);
+            break;
+        case 9:
+            reg = &(info->vm_regs.r9);
+            break;
+        case 10:
+            reg = &(info->vm_regs.r10);
+            break;
+        case 11:
+            reg = &(info->vm_regs.r11);
+            break;
+        case 12:
+            reg = &(info->vm_regs.r12);
+            break;
+        case 13:
+            reg = &(info->vm_regs.r13);
+            break;
+        case 14:
+            reg = &(info->vm_regs.r14);
+            break;
+        case 15:
+            reg = &(info->vm_regs.r15);
+            break;
     }
 
     return reg;
 }
-
-
-- 
1.7.5.4
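
Illustrative sketch (not part of the patch): how the new helpers are meant to
be used together when a core is brought up, mirroring the two init_vmcs_bios()
hunks above. The function name setup_vpid_for_core() is hypothetical; struct
guest_info, struct vmx_data, vmcs_write(), VMCS_VPID and the vmx_inv* helpers
are the ones the patch itself uses.

    /* Hypothetical helper mirroring the init_vmcs_bios() hunks above;
     * assumes the Palacios types used in the patch. */
    static void setup_vpid_for_core(struct guest_info * core, struct vmx_data * vmx_state)
    {
        // VPID 0 is reserved for VMX root operation (the VMM itself),
        // so every vcpu gets a non-zero id -- hence the +1
        vmx_state->sec_proc_ctrls.enable_vpid = 1;
        vmcs_write(VMCS_VPID, core->vcpu_id + 1);

        // Drop any stale linear-address translations a previous user
        // of this VPID may have left in the TLB...
        vmx_invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, core->vcpu_id + 1, 0);

        // ...and flush all combined/guest-physical mappings derived
        // from any EPT root, since the EPT tables are new as well
        vmx_invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
    }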
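
On the XXX in the commit message: a sketch, under stated assumptions, of the
missing capability check. The MSR indices and bit positions are from the Intel
SDM (IA32_VMX_PROCBASED_CTLS2 = 0x48B, IA32_VMX_EPT_VPID_CAP = 0x48C);
vmx_check_vpid_invept_support() and the local rdmsr() helper are hypothetical,
not existing Palacios functions. A real implementation should first confirm
that secondary controls exist at all (bit 63 of IA32_VMX_PROCBASED_CTLS),
since IA32_VMX_EPT_VPID_CAP is only defined when EPT or VPID can be enabled.

    #include <stdint.h>

    #define IA32_VMX_PROCBASED_CTLS2 0x48B /* allowed settings of secondary controls */
    #define IA32_VMX_EPT_VPID_CAP    0x48C /* INVEPT/INVVPID capability bits */

    static inline uint64_t rdmsr(uint32_t msr)
    {
        uint32_t lo, hi;
        asm volatile ("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
        return ((uint64_t)hi << 32) | lo;
    }

    /* Returns 0 if VPID, INVVPID and INVEPT (in the extents the patch
     * uses) are available, -1 otherwise. */
    static int vmx_check_vpid_invept_support(void)
    {
        uint64_t ctls2 = rdmsr(IA32_VMX_PROCBASED_CTLS2);
        uint64_t cap   = rdmsr(IA32_VMX_EPT_VPID_CAP);

        // "enable VPID" is bit 5 of the secondary proc-based controls;
        // the allowed-1 settings live in the high dword of the MSR
        if (!(ctls2 & (1ULL << (32 + 5)))) {
            return -1;
        }

        // bit 20: INVEPT supported, bit 26: all-context (global) INVEPT
        if (!(cap & (1ULL << 20)) || !(cap & (1ULL << 26))) {
            return -1;
        }

        // bit 32: INVVPID supported, bit 41: single-context INVVPID
        if (!(cap & (1ULL << 32)) || !(cap & (1ULL << 41))) {
            return -1;
        }

        return 0;
    }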