X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmcs.c;h=154bed82cfb52b2587ff8ba09fb37196b0b69252;hb=1fa090b14d99831af81c9d62aa47243ac1f0b9a5;hp=9156e236888ed59555935143706e39118929aa8b;hpb=5bf6d0c260240e314876a2fca8e3fd56bd6a1029;p=palacios.git

diff --git a/palacios/src/palacios/vmcs.c b/palacios/src/palacios/vmcs.c
index 9156e23..154bed8 100644
--- a/palacios/src/palacios/vmcs.c
+++ b/palacios/src/palacios/vmcs.c
@@ -227,7 +227,14 @@ int v3_vmx_save_vmcs(struct guest_info * info) {
     check_vmcs_read(VMCS_GUEST_RFLAGS, &(info->ctrl_regs.rflags));
 
     if (((struct vmx_data *)info->vmm_data)->ia32e_avail) {
+#ifdef __V3_64BIT__
         check_vmcs_read(VMCS_GUEST_EFER, &(info->ctrl_regs.efer));
+#else
+        uint32_t hi, lo;
+        check_vmcs_read(VMCS_GUEST_EFER, &hi);
+        check_vmcs_read(VMCS_GUEST_EFER_HIGH, &lo);
+        info->ctrl_regs.efer = ((uint64_t) hi << 32) | lo;
+#endif
     }
 
     error = v3_read_vmcs_segments(&(info->segments));
@@ -270,24 +277,45 @@ int v3_update_vmcs_host_state(struct guest_info * info) {
     struct vmx_data * arch_data = (struct vmx_data *)(info->vmm_data);
     struct v3_msr tmp_msr;
 
+#ifdef __V3_64BIT__
     __asm__ __volatile__ ( "movq %%cr0, %0; "
                            : "=q"(tmp)
                            :
     );
+#else
+    __asm__ __volatile__ ( "movl %%cr0, %0; "
+                           : "=q"(tmp)
+                           :
+    );
+#endif
     vmx_ret |= check_vmcs_write(VMCS_HOST_CR0, tmp);
 
+#ifdef __V3_64BIT__
     __asm__ __volatile__ ( "movq %%cr3, %0; "
                            : "=q"(tmp)
                            :
     );
+#else
+    __asm__ __volatile__ ( "movl %%cr3, %0; "
+                           : "=q"(tmp)
+                           :
+    );
+#endif
     vmx_ret |= check_vmcs_write(VMCS_HOST_CR3, tmp);
 
+#ifdef __V3_64BIT__
     __asm__ __volatile__ ( "movq %%cr4, %0; "
                            : "=q"(tmp)
                            :
     );
+#else
+    __asm__ __volatile__ ( "movl %%cr4, %0; "
+                           : "=q"(tmp)
+                           :
+    );
+#endif
     vmx_ret |= check_vmcs_write(VMCS_HOST_CR4, tmp);
 
@@ -309,40 +337,82 @@ int v3_update_vmcs_host_state(struct guest_info * info) {
+#ifdef __V3_64BIT__
     __asm__ __volatile__ ( "movq %%cs, %0; "
                            : "=q"(tmp)
                            :
     );
+#else
+    __asm__ __volatile__ ( "movl %%cs, %0; "
+                           : "=q"(tmp)
+                           :
+    );
+#endif
     vmx_ret |= check_vmcs_write(VMCS_HOST_CS_SELECTOR, tmp);
 
+#ifdef __V3_64BIT__
     __asm__ __volatile__ ( "movq %%ss, %0; "
                            : "=q"(tmp)
                            :
     );
+#else
+    __asm__ __volatile__ ( "movl %%ss, %0; "
+                           : "=q"(tmp)
+                           :
+    );
+#endif
     vmx_ret |= check_vmcs_write(VMCS_HOST_SS_SELECTOR, tmp);
 
+#ifdef __V3_64BIT__
     __asm__ __volatile__ ( "movq %%ds, %0; "
                            : "=q"(tmp)
                            :
     );
+#else
+    __asm__ __volatile__ ( "movl %%ds, %0; "
+                           : "=q"(tmp)
+                           :
+    );
+#endif
     vmx_ret |= check_vmcs_write(VMCS_HOST_DS_SELECTOR, tmp);
 
+#ifdef __V3_64BIT__
     __asm__ __volatile__ ( "movq %%es, %0; "
                            : "=q"(tmp)
                            :
     );
+#else
+    __asm__ __volatile__ ( "movl %%es, %0; "
+                           : "=q"(tmp)
+                           :
+    );
+#endif
     vmx_ret |= check_vmcs_write(VMCS_HOST_ES_SELECTOR, tmp);
 
+#ifdef __V3_64BIT__
     __asm__ __volatile__ ( "movq %%fs, %0; "
                            : "=q"(tmp)
                            :
     );
+#else
+    __asm__ __volatile__ ( "movl %%fs, %0; "
+                           : "=q"(tmp)
+                           :
+    );
+#endif
     vmx_ret |= check_vmcs_write(VMCS_HOST_FS_SELECTOR, tmp);
 
+#ifdef __V3_64BIT__
     __asm__ __volatile__ ( "movq %%gs, %0; "
                            : "=q"(tmp)
                            :
     );
+#else
+    __asm__ __volatile__ ( "movl %%gs, %0; "
+                           : "=q"(tmp)
+                           :
+    );
+#endif
     vmx_ret |= check_vmcs_write(VMCS_HOST_GS_SELECTOR, tmp);
 
     vmx_ret |= check_vmcs_write(VMCS_HOST_TR_SELECTOR, arch_data->host_state.tr.selector);
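
Note on the 32-bit EFER read in the first hunk: on a 32-bit host, VMX exposes each 64-bit ("full width") VMCS field as two 32-bit halves, where the base encoding holds bits 31:0 and the companion _HIGH encoding (base | 1) holds bits 63:32. The sketch below shows the conventional way the halves are recombined. It is illustrative only and not Palacios code: vmcs_read32_stub() is a made-up stand-in for a VMREAD wrapper, and the 0x2806/0x2807 values are the architectural GUEST_IA32_EFER encodings from the Intel SDM; whether Palacios' VMCS_GUEST_EFER macro uses those exact values is an assumption.

#include <stdint.h>
#include <stdio.h>

#define VMCS_GUEST_EFER       0x2806   /* GUEST_IA32_EFER, bits 31:0  */
#define VMCS_GUEST_EFER_HIGH  0x2807   /* GUEST_IA32_EFER, bits 63:32 */

/* Hypothetical stand-in for a VMREAD of one 32-bit half of a VMCS field.
 * It fakes an EFER value of 0x0000000000000d01 (SCE | LME | LMA | NXE)
 * so the example can run anywhere, outside VMX root mode. */
static uint32_t vmcs_read32_stub(uint32_t encoding) {
    return (encoding & 1) ? 0x00000000u : 0x00000d01u;
}

/* Recombine the two halves of a full-width VMCS field on a 32-bit host:
 * the base encoding supplies the low word, the _HIGH encoding the high word. */
static uint64_t vmcs_read64_split(uint32_t lo_enc, uint32_t hi_enc) {
    uint32_t lo = vmcs_read32_stub(lo_enc);   /* bits 31:0  */
    uint32_t hi = vmcs_read32_stub(hi_enc);   /* bits 63:32 */
    return ((uint64_t)hi << 32) | lo;
}

int main(void) {
    printf("guest EFER = 0x%016llx\n",
           (unsigned long long)vmcs_read64_split(VMCS_GUEST_EFER,
                                                 VMCS_GUEST_EFER_HIGH));
    return 0;
}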
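The host-state hunks wrap each register read in #ifdef __V3_64BIT__ so that the mov carries an explicit movq or movl suffix for the host word size. A minimal sketch of the same idea, assuming only GCC-style inline assembly: leaving the suffix off and letting an unsigned-long operand pick the width lets one body serve both builds. These helpers are illustrative, not Palacios code (read_cr0()/read_host_cs() and host_word_t are invented names), and the control-register reads execute only at CPL 0, i.e. inside the kernel or the VMM itself.

#include <stdint.h>

/* Native word: 32 bits on a 32-bit build, 64 bits on a 64-bit build. */
typedef unsigned long host_word_t;

/* Read host CR0.  The unsuffixed "mov" takes its width from the register
 * operand, so the same source assembles as movl on i386 and movq on x86_64.
 * Requires CPL 0; only meaningful in kernel/VMM context. */
static inline host_word_t read_cr0(void) {
    host_word_t val;
    __asm__ __volatile__ ("mov %%cr0, %0" : "=r"(val));
    return val;
}

static inline host_word_t read_cr3(void) {
    host_word_t val;
    __asm__ __volatile__ ("mov %%cr3, %0" : "=r"(val));
    return val;
}

static inline host_word_t read_cr4(void) {
    host_word_t val;
    __asm__ __volatile__ ("mov %%cr4, %0" : "=r"(val));
    return val;
}

/* Segment selectors are 16 bits regardless of host word size, so one
 * pattern covers the CS/SS/DS/ES/FS/GS selector writes into the VMCS. */
static inline uint16_t read_host_cs(void) {
    uint16_t sel;
    __asm__ __volatile__ ("mov %%cs, %0" : "=r"(sel));
    return sel;
}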