struct vmx_data * arch_data = (struct vmx_data *)(info->vmm_data);
struct v3_msr tmp_msr;
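+    /* Load the host's control registers and segment selectors into the
+     * VMCS host-state area, using the width-appropriate mov for each
+     * build: movq on 64-bit hosts, movl on 32-bit. */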
+#ifdef __V3_64BIT__
__asm__ __volatile__ ( "movq %%cr0, %0; "
: "=q"(tmp)
:
);
+#else
+ __asm__ __volatile__ ( "movl %%cr0, %0; "
+ : "=q"(tmp)
+ :
+ );
+#endif
vmx_ret |= check_vmcs_write(VMCS_HOST_CR0, tmp);
+#ifdef __V3_64BIT__
__asm__ __volatile__ ( "movq %%cr3, %0; "
: "=q"(tmp)
:
);
+#else
+ __asm__ __volatile__ ( "movl %%cr3, %0; "
+ : "=q"(tmp)
+ :
+ );
+#endif
vmx_ret |= check_vmcs_write(VMCS_HOST_CR3, tmp);
+#ifdef __V3_64BIT__
__asm__ __volatile__ ( "movq %%cr4, %0; "
: "=q"(tmp)
:
);
+#else
+ __asm__ __volatile__ ( "movl %%cr4, %0; "
+ : "=q"(tmp)
+ :
+ );
+#endif
vmx_ret |= check_vmcs_write(VMCS_HOST_CR4, tmp);
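+    /* Host segment selectors follow. The mov from a segment register
+     * zero-extends, and the VMCS selector fields are 16 bits wide, so
+     * only the low word of tmp is significant. */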
+#ifdef __V3_64BIT__
__asm__ __volatile__ ( "movq %%cs, %0; "
: "=q"(tmp)
:
);
+#else
+ __asm__ __volatile__ ( "movl %%cs, %0; "
+ : "=q"(tmp)
+ :
+ );
+#endif
vmx_ret |= check_vmcs_write(VMCS_HOST_CS_SELECTOR, tmp);
+#ifdef __V3_64BIT__
__asm__ __volatile__ ( "movq %%ss, %0; "
: "=q"(tmp)
:
);
+#else
+ __asm__ __volatile__ ( "movl %%ss, %0; "
+ : "=q"(tmp)
+ :
+ );
+#endif
vmx_ret |= check_vmcs_write(VMCS_HOST_SS_SELECTOR, tmp);
+#ifdef __V3_64BIT__
__asm__ __volatile__ ( "movq %%ds, %0; "
: "=q"(tmp)
:
);
+#else
+ __asm__ __volatile__ ( "movl %%ds, %0; "
+ : "=q"(tmp)
+ :
+ );
+#endif
vmx_ret |= check_vmcs_write(VMCS_HOST_DS_SELECTOR, tmp);
+#ifdef __V3_64BIT__
__asm__ __volatile__ ( "movq %%es, %0; "
: "=q"(tmp)
:
);
+#else
+ __asm__ __volatile__ ( "movl %%es, %0; "
+ : "=q"(tmp)
+ :
+ );
+#endif
vmx_ret |= check_vmcs_write(VMCS_HOST_ES_SELECTOR, tmp);
+#ifdef __V3_64BIT__
__asm__ __volatile__ ( "movq %%fs, %0; "
: "=q"(tmp)
:
);
+#else
+ __asm__ __volatile__ ( "movl %%fs, %0; "
+ : "=q"(tmp)
+ :
+ );
+#endif
vmx_ret |= check_vmcs_write(VMCS_HOST_FS_SELECTOR, tmp);
+#ifdef __V3_64BIT__
__asm__ __volatile__ ( "movq %%gs, %0; "
: "=q"(tmp)
:
);
+#else
+ __asm__ __volatile__ ( "movl %%gs, %0; "
+ : "=q"(tmp)
+ :
+ );
+#endif
vmx_ret |= check_vmcs_write(VMCS_HOST_GS_SELECTOR, tmp);
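+    /* Unlike the other selectors, TR is taken from the host state cached
+     * in arch_data rather than read directly. */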
vmx_ret |= check_vmcs_write(VMCS_HOST_TR_SELECTOR, arch_data->host_state.tr.selector);
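+    /* Build the VM-entry interruption-information field to inject the event. */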
int_info.valid = 1;
#ifdef CONFIG_DEBUG_INTERRUPTS
- PrintDebug("Injecting exception %d (EIP=%p)\n", int_info.vector, (void *)info->rip);
+ PrintDebug("Injecting exception %d (EIP=%p)\n", int_info.vector, (void *)(addr_t)info->rip);
#endif
check_vmcs_write(VMCS_ENTRY_INT_INFO, int_info.value);
PrintDebug("Injecting Interrupt %d at exit %u(EIP=%p)\n",
info->intr_state.irq_vector,
(uint32_t)info->num_exits,
- (void *)info->rip);
+ (void *)(addr_t)info->rip);
#endif
check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
);
#elif defined(__V3_32BIT__)
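+    /* Set CR4.VMXE (bit 13, 0x2000). The write-back below happens only if
+     * the result satisfies every bit fixed to 1 by the CR4 fixed-0
+     * capability MSR read into tmp_msr. */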
__asm__ __volatile__ (
- "movq %%cr4, %%ecx;"
- "orq $0x00002000, %%ecx;"
- "movq %%ecx, %0;"
+ "movl %%cr4, %%ecx;"
+ "orl $0x00002000, %%ecx;"
+ "movl %%ecx, %0;"
: "=m"(ret)
:
: "%ecx"
if ((~ret & tmp_msr.value) == 0) {
__asm__ __volatile__ (
- "movq %0, %%cr4;"
+ "movl %0, %%cr4;"
:
: "q"(ret)
);
}
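+    /* VMX operation requires CR0.NE (bit 5, 0x20) to be set. */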
__asm__ __volatile__ (
- "movq %%cr0, %%ecx; "
- "orq $0x00000020,%%ecx; "
- "movq %%ecx, %%cr0;"
+ "movl %%cr0, %%ecx; "
+			  "orl $0x00000020, %%ecx; "
+ "movl %%ecx, %%cr0;"
:
:
: "%ecx"
}
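+    /* VMXASSIST is the guest-resident helper Palacios uses to emulate
+     * real mode on hardware without unrestricted-guest support. */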
if (vmx_info->state == VMXASSIST_ENABLED) {
- PrintDebug("Loading VMXASSIST at RIP: %p\n", (void *)info->rip);
+ PrintDebug("Loading VMXASSIST at RIP: %p\n", (void *)(addr_t)info->rip);
} else {
PrintDebug("Leaving VMXASSIST and entering protected mode at RIP: %p\n",
- (void *)info->rip);
+ (void *)(addr_t)info->rip);
}
    // PE switches modify the RIP directly, so we clear instr_len to keep the next VM entry from advancing past the new RIP