+static struct vmcs_data* vmxon_ptr;
//
//
+static int update_vmcs_host_state(struct guest_info * info) {
+ addr_t tmp;
+
+ struct {
+ uint16 limit;
+ addr_t base;
+ } __attribute__((packed)) tmp_seg;
+
+
+ struct v3_msr tmp_msr;
+
+ __asm__ __volatile__ ( "movq %%cr0, %1; "
+ : "=q"(tmp)
+ :
+ );
+ vmcs_write(HOST_CR0, tmp);
+
+
+ __asm__ __volatile__ ( "movq %%cr3, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmcs_write(HOST_CR3, tmp);
+
+
+ __asm__ __volatile__ ( "movq %%cr4, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmcs_write(HOST_CR4, tmp);
+
+
+
+
+ __asm__ __volatile__ ("sgdt (%0); "
+ :
+ :"q"(&tmp_seg)
+ : "memory"
+ );
+ vmcs_write(HOST_GDTR_BASE, tmp_seg.base);
+
+
+ __asm__ __volatile__ ("sidt (%0); "
+ :
+ :"q"(&tmp_seg)
+ : "memory"
+ );
+ vmcs_write(HOST_IDTR_BASE, tmp_seg.base);
+
+
+ __asm__ __volatile__ ("str (%0); "
+ :
+ :"q"(&tmp_seg)
+ : "memory"
+ );
+ vmcs_write(HOST_TR_BASE, tmp_seg.base);
+
+
+#define FS_BASE_MSR 0xc0000100
+#define GS_BASE_MSR 0xc0000101
+
+ // FS.BASE MSR
+ v3_get_msr(FS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmcs_write(HOST_FS_BASE, tmp_msr.value);
+
+ // GS.BASE MSR
+ v3_get_msr(GS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmcs_write(HOST_GS_BASE, tmp_msr.value);
+
+
+
+ __asm__ __volatile__ ( "movq %%cs, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmcs_write(VMCS_HOST_CS_SELECTOR, tmp);
+
+ __asm__ __volatile__ ( "movq %%ss, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmcs_write(VMCS_HOST_SS_SELECTOR, tmp);
+
+ __asm__ __volatile__ ( "movq %%ds, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmcs_write(VMCS_HOST_DS_SELECTOR, tmp);
+
+ __asm__ __volatile__ ( "movq %%fs, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmcs_write(VMCS_HOST_FS_SELECTOR, tmp);
+
+ __asm__ __volatile__ ( "movq %%gs, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmcs_write(VMCS_HOST_GS_SELECTOR, tmp);
+
+ __asm__ __volatile__ ( "movq %%tr, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmcs_write(VMCS_HOST_TR_SELECTOR, tmp);
+
+
+#define SYSENTER_CS_MSR 0x00000174
+#define SYSENTER_ESP_MSR 0x00000175
+#define SYSENTER_EIP_MSR 0x00000176
+
+ // SYSENTER CS MSR
+ v3_get_msr(SYSENTER_CS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmcs_write(HOST_IA32_SYSENTER_CS, tmp_msr.value);
+
+ // SYSENTER_ESP MSR
+ v3_get_msr(SYSENTER_ESP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmcs_write(HOST_IA32_SYSENTER_ESP, tmp_msr.value);
+
+
+ // SYSENTER_EIP MSR
+ v3_get_msr(SYSENTER_EIP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmcs_write(HOST_IA32_SYSENTER_EIP, tmp_msr.value);
+
+
+ // RIP
+ // RSP
+
+ return 0;
+
+}
+
+
+
// For the 32 bit reserved bit fields
// MB1s are in the low 32 bits, MBZs are in the high 32 bits of the MSR
-static void init_vmcs_bios(vmcs_t * vmcs, struct guest_info * vm_info) {
+static void init_vmcs_bios(struct guest_info * vm_info)
+{
+
}
struct vmx_data* data;
PrintDebug("Allocating vmx_data\n");
- data = (struct vmx_data*)V3_Malloc(sizeof(vmx_data));
+ data = (struct vmx_data*)V3_Malloc(sizeof(struct vmx_data));
PrintDebug("Allocating VMCS\n");
data->vmcs = allocate_vmcs();
info->vmm_data = (void*)data;
- PrintDebug("Initializing VMCS (addr=%p)\n", (void *)info->vmm_data);
- init_vmcs_bios((vmcs_t *)(info->vmm_data), info);
+ PrintDebug("Initializing VMCS (addr=%p)\n", info->vmm_data);
+ init_vmcs_bios((struct vmx_data*)(info->vmm_data), info);
v3_post_config_guest(info, config_ptr);
-static int start_svm_guest(struct guest_info *info) {
+static int start_vmx_guest(struct guest_info *info) {
+ struct vmx_data* vmx_data = (struct vmx_data*)info->vmm_data;
+ int vmx_ret;
+
+ // Have to do a whole lot of flag setting here
+ vmx_ret = vmcs_clear(vmx_data->vmcs);
+ if(vmx_ret != VMX_SUCCESS) {
+ PrintDebug("VMCS Clear failed\n");
+ return -1;
+ }
+ vmx_ret = vmcs_load(vmx_data->vmcs);
+ if(vmx_ret != VMX_SUCCESS) {
+ PrintDebug("Executing VMPTRLD\n");
+ return -1;
+ }
+
+ // Setup guest state
return -1;
}
}
} else {
- PrintDebug("VMX not supported on this cpu\n");
- return 0;
+ PrintDebug("VMX not supported on this cpu\n");
+ return 0;
}
return 1;
void v3_init_vmx(struct v3_ctrl_ops * vm_ops) {
v3_msr_t basic_msr;
- // Setup the host state save area
- void * host_state = V3_AllocPages(1);
-
- v3_get_msr(VMX_BASIC_MSR, &(basic_msr.hi), &(basic_msr.lo));
-
- *(uint32_t *)host_state = ((struct vmx_basic_msr *)basic_msr.value)->revision;
- PrintDebug("VMX revision: 0x%p\n", host_state);
-
__asm__ __volatile__ (
"movl %%cr4, %%ebx; "
"orl %%ebx, 0x00002000; "
"movl %%ebx, %%cr0"
);
+ // Setup VMXON Region
+ vmxon_ptr = allocate_vmcs();
+ PrintDebug("VMX revision: 0x%p\n", (void*)vmxon_ptr);
- if (v3_enable_vmx(host_state) == 0) {
- PrintDebug("VMX Enabled\n");
+ if (v3_enable_vmx(vmxon_ptr) == 0) {
+ PrintDebug("VMX Enabled\n");
} else {
- PrintError("VMX initialization failure\n");
- return;
+ PrintError("VMX initialization failure\n");
+ return;
}
if (has_vmx_nested_paging() == 1) {
- v3_cpu_type = V3_VMX_EPT_CPU;
+ v3_cpu_type = V3_VMX_EPT_CPU;
} else {
- v3_cpu_type = V3_VMX_CPU;
+ v3_cpu_type = V3_VMX_CPU;
}
// Setup the VMX specific vmm operations