#include <palacios/vmcs.h>
#include <palacios/vmm.h>
#include <palacios/vmx_lowlevel.h>
+#include <palacios/vmm_lowlevel.h>
+#include <palacios/vmm_config.h>
//
//
//
-
+#if 0
#include <palacios/vmm_util.h>
#include <palacios/vmm_string.h>
-
extern uint_t VMCS_LAUNCH();
extern uint_t Init_VMCS_HostState();
extern uint_t Init_VMCS_GuestState();
//
//
+#endif
+
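+// Snapshot the host's control registers, descriptor-table bases, segment
+// selectors and SYSENTER MSRs into the host-state area of the current VMCS,
+// so that a VM exit returns the processor to a usable host context.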
+static int update_vmcs_host_state(struct guest_info * info) {
+ addr_t tmp;
+
+ struct {
+ uint16_t limit;
+ addr_t base;
+ } __attribute__((packed)) tmp_seg;
+
+
+ struct v3_msr tmp_msr;
+
+ __asm__ __volatile__ ( "movq %%cr0, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmcs_write(HOST_CR0, tmp);
+
+
+ __asm__ __volatile__ ( "movq %%cr3, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmcs_write(HOST_CR3, tmp);
+
+
+ __asm__ __volatile__ ( "movq %%cr4, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmcs_write(HOST_CR4, tmp);
+
+
+
+
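+    // SGDT/SIDT store a pseudo-descriptor (16-bit limit followed by the
+    // linear base address); only the base is needed for the VMCS host state.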
+ __asm__ __volatile__ ("sgdt (%0); "
+ :
+ :"q"(&tmp_seg)
+ : "memory"
+ );
+ vmcs_write(HOST_GDTR_BASE, tmp_seg.base);
+
+
+ __asm__ __volatile__ ("sidt (%0); "
+ :
+ :"q"(&tmp_seg)
+ : "memory"
+ );
+ vmcs_write(HOST_IDTR_BASE, tmp_seg.base);
+
+    /* How do we handle this? STR only stores the 16-bit TR selector, not the
+       base, so HOST_TR_BASE would have to be read out of the GDT entry instead.
+ __asm__ __volatile__ ("str (%0); "
+ :
+ :"q"(&tmp_seg)
+ : "memory"
+ );
+ vmcs_write(HOST_TR_BASE, tmp_seg.base);
+ */
+
+#define FS_BASE_MSR 0xc0000100
+#define GS_BASE_MSR 0xc0000101
+
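+    // In 64-bit mode the FS and GS base addresses live in MSRs rather than
+    // in the visible segment descriptors, so read them via RDMSR.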
+ // FS.BASE MSR
+ v3_get_msr(FS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmcs_write(HOST_FS_BASE, tmp_msr.value);
+
+ // GS.BASE MSR
+ v3_get_msr(GS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmcs_write(HOST_GS_BASE, tmp_msr.value);
+
+
+
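+    // Host segment selectors: copy each selector register directly into the
+    // corresponding 16-bit VMCS host-state field.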
+ __asm__ __volatile__ ( "movq %%cs, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmcs_write(VMCS_HOST_CS_SELECTOR, tmp);
+
+ __asm__ __volatile__ ( "movq %%ss, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmcs_write(VMCS_HOST_SS_SELECTOR, tmp);
+
+ __asm__ __volatile__ ( "movq %%ds, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmcs_write(VMCS_HOST_DS_SELECTOR, tmp);
+
+ __asm__ __volatile__ ( "movq %%fs, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmcs_write(VMCS_HOST_FS_SELECTOR, tmp);
+
+ __asm__ __volatile__ ( "movq %%gs, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmcs_write(VMCS_HOST_GS_SELECTOR, tmp);
+
+ __asm__ __volatile__ ( "str %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmcs_write(VMCS_HOST_TR_SELECTOR, tmp);
+
+
+#define SYSENTER_CS_MSR 0x00000174
+#define SYSENTER_ESP_MSR 0x00000175
+#define SYSENTER_EIP_MSR 0x00000176
+
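+    // The SYSENTER CS/ESP/EIP MSRs are also part of the VMCS host state and
+    // are reloaded by the processor on every VM exit.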
+ // SYSENTER CS MSR
+ v3_get_msr(SYSENTER_CS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmcs_write(HOST_IA32_SYSENTER_CS, tmp_msr.value);
+
+ // SYSENTER_ESP MSR
+ v3_get_msr(SYSENTER_ESP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmcs_write(HOST_IA32_SYSENTER_ESP, tmp_msr.value);
+
+
+ // SYSENTER_EIP MSR
+ v3_get_msr(SYSENTER_EIP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmcs_write(HOST_IA32_SYSENTER_EIP, tmp_msr.value);
+
+
+    // TODO: HOST_RIP and HOST_RSP are not set here yet; they must be
+    // written before the first VMLAUNCH so VM exits return to valid code.
+
+ return 0;
+
+}
+
+
+
+
+
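+// VMXON region handed to the hardware when VMX operation is enabled in
+// v3_init_vmx(); like a VMCS, it begins with the VMX revision identifier.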
+static struct vmcs_data* vmxon_ptr;
+
+#if 0
// For the 32 bit reserved bit fields
// MB1s are in the low 32 bits, MBZs are in the high 32 bits of the MSR
static uint32_t sanitize_bits1(uint32_t msr_num, uint32_t val) {
v3_get_msr(msr_num, &mask_msr.hi, &mask_msr.lo);
- PrintDebug("MSR %x = %x : %x \n", msr_num, msr.hi, msr.lo);
+ PrintDebug("MSR %x = %x : %x \n", msr_num, mask_msr.hi, mask_msr.lo);
val &= mask_msr.lo;
val &= mask_msr.hi;
}
+
static addr_t sanitize_bits2(uint32_t msr_num0, uint32_t msr_num1, addr_t val) {
v3_msr_t msr0, msr1;
addr_t msr0_val, msr1_val;
msr0_val = msr0.value;
msr1_val = msr1.value;
- PrintDebug("MSR %x = %p, %x = %p \n", msr_num0, msr0_val, msr_num1, msr1_val);
+ PrintDebug("MSR %x = %p, %x = %p \n", msr_num0, (void*)msr0_val, msr_num1, (void*)msr1_val);
val &= msr0_val;
val &= msr1_val;
return val;
}
+static int setup_base_host_state() {
+
+
+
+ // vmwrite(HOST_IDTR_BASE,
+
+
+}
+
+#endif
-static vmcs_data* allocate_vmcs() {
+static struct vmcs_data* allocate_vmcs() {
reg_ex_t msr;
- vmcs_data* vmcs_page = (vmcs_data*)V3_VAddr(V3_AllocPages(1));
+ struct vmcs_data* vmcs_page = (struct vmcs_data*)V3_VAddr(V3_AllocPages(1));
memset(vmcs_page, 0, 4096);
v3_get_msr(VMX_BASIC_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
- vmcs_page->revision = ((struct vmx_basic_msr)msr).revision;
+ vmcs_page->revision = ((struct vmx_basic_msr*)&msr)->revision;
return vmcs_page;
}
-static void init_vmcs_bios(vmcs_t * vmcs, struct guest_info * vm_info) {
+static void init_vmcs_bios(struct guest_info * vm_info) {
+
}
struct vmx_data* data;
PrintDebug("Allocating vmx_data\n");
- data = (struct vmx_data*)V3_Malloc(sizeof(vmx_data));
+ data = (struct vmx_data*)V3_Malloc(sizeof(struct vmx_data));
PrintDebug("Allocating VMCS\n");
data->vmcs = allocate_vmcs();
info->vmm_data = (void*)data;
- PrintDebug("Initializing VMCS (addr=%p)\n", (void *)info->vmm_data);
- init_vmcs_bios((vmcs_t *)(info->vmm_data), info);
+ PrintDebug("Initializing VMCS (addr=%p)\n", info->vmm_data);
+ init_vmcs_bios(info);
v3_post_config_guest(info, config_ptr);
-static int start_svm_guest(struct guest_info *info) {
+static int start_vmx_guest(struct guest_info *info) {
+ struct vmx_data* vmx_data = (struct vmx_data*)info->vmm_data;
+ int vmx_ret;
+
+ // Have to do a whole lot of flag setting here
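+    // VMCLEAR initializes the VMCS launch state; VMPTRLD then makes it the
+    // current VMCS on this core before any host/guest fields are written.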
+ vmx_ret = vmcs_clear(vmx_data->vmcs);
+ if(vmx_ret != VMX_SUCCESS) {
+ PrintDebug("VMCS Clear failed\n");
+ return -1;
+ }
+ vmx_ret = vmcs_load(vmx_data->vmcs);
+ if(vmx_ret != VMX_SUCCESS) {
+        PrintDebug("VMCS Load (VMPTRLD) failed\n");
+ return -1;
+ }
+
+
+ update_vmcs_host_state(info);
+
+ // Setup guest state
return -1;
}
int v3_is_vmx_capable() {
- uint_t ret;
v3_msr_t feature_msr;
addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
- v3_cpuid(CPUID_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
+ v3_cpuid(0x1, &eax, &ebx, &ecx, &edx);
+
+ PrintDebug("ECX: %p\n", (void*)ecx);
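+    // The VMX feature flag in CPUID.1:ECX indicates hardware VT-x support.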
if (ecx & CPUID_1_ECX_VTXFLAG) {
- v3_get_msr(IA32_FEATURE_CONTROL_MSR, &(feature_msr.hi), &(feature_msr.lo));
+ v3_get_msr(VMX_FEATURE_CONTROL_MSR, &(feature_msr.hi), &(feature_msr.lo));
- PrintTrace("MSRREGlow: 0x%.8x\n", feature_msr.lo);
+ PrintTrace("MSRREGlow: 0x%.8x\n", feature_msr.lo);
- if ((feature_msr.lo & FEATURE_CONTROL_VALID) != FEATURE_CONTROL_VALID) {
- PrintDebug("VMX is locked -- enable in the BIOS\n");
- return 0;
- }
+ if ((feature_msr.lo & FEATURE_CONTROL_VALID) != FEATURE_CONTROL_VALID) {
+ PrintDebug("VMX is locked -- enable in the BIOS\n");
+ return 0;
+ }
} else {
- PrintDebug("VMX not supported on this cpu\n");
- return 0;
+ PrintDebug("VMX not supported on this cpu\n");
+ return 0;
}
return 1;
};
-static int setup_base_host_state() {
- uint8_t gdt[10];
-
-
-
- // vmwrite(HOST_IDTR_BASE,
-
-
-}
+void v3_init_vmx(struct v3_ctrl_ops * vmm_ops) {
+ extern v3_cpu_arch_t v3_cpu_type;
-void v3_init_vmx(struct v3_ctrl_ops * vm_ops) {
- v3_msr_t basic_msr;
-
- // Setup the host state save area
- void * host_state = V3_AllocPages(1);
-
- v3_get_msr(VMX_BASIC_MSR, &(basic_msr.hi), &(basic_msr.lo));
- *(uint32_t *)host_state = ((struct vmx_basic_msr *)basic_msr.value)->revision;
-
- PrintDebug("VMX revision: 0x%p\n", host_state);
-
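+    // Set CR4.VMXE (bit 13), which must be enabled before executing VMXON.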
__asm__ __volatile__ (
- "movl %%cr4, %%ebx; "
- "orl %%ebx, 0x00002000; "
- "movl %%ebx, %%cr4"
+ "movq %%cr4, %%rbx; "
+ "orq $0x00002000,%%rbx; "
+ "movq %%rbx, %%cr4;"
+ :
+ :
+ : "%rbx"
);
// Should check and return Error here....
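+    // Set CR0.NE (bit 5); VMX operation requires native FPU error reporting.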
__asm__ __volatile__ (
- "movl %%cr0, %%ebx; "
- "orl %%ebx, 0x00000020; "
- "movl %%ebx, %%cr0"
+ "movq %%cr0, %%rbx; "
+ "orq $0x00000020,%%rbx; "
+ "movq %%rbx, %%cr0;"
+ :
+ :
+ : "%rbx"
);
+ // Setup VMXON Region
+ vmxon_ptr = allocate_vmcs();
+ PrintDebug("VMX revision: 0x%p\n", (void*)vmxon_ptr);
- if (v3_enable_vmx(host_state) == 0) {
- PrintDebug("VMX Enabled\n");
+ if (v3_enable_vmx(vmxon_ptr) == 0) {
+ PrintDebug("VMX Enabled\n");
} else {
- PrintError("VMX initialization failure\n");
- return;
+ PrintError("VMX initialization failure\n");
+ return;
}
if (has_vmx_nested_paging() == 1) {
- v3_cpu_type = V3_VMX_EPT_CPU;
+ v3_cpu_type = V3_VMX_EPT_CPU;
} else {
- v3_cpu_type = V3_VMX_CPU;
+ v3_cpu_type = V3_VMX_CPU;
}
// Setup the VMX specific vmm operations