// we assume symmetric cores, so if core 0 has nested paging they all do
if ((v3_mach_type == V3_SVM_REV3_CPU) ||
(v3_mach_type == V3_VMX_EPT_CPU) ||
- (v3_mach_type == V3_VMX_EPT_UG_CPU)) {
+ (v3_mach_type == V3_VMX_EPT_UG_CPU)) {
+
+ V3_Print("Setting paging mode to NESTED\n");
info->shdw_pg_mode = NESTED_PAGING;
} else {
PrintError("Nested paging not supported on this hardware. Defaulting to shadow paging\n");
info->shdw_pg_mode = SHADOW_PAGING;
}
} else if ((strcasecmp(pg_mode, "shadow") == 0)) {
+ V3_Print("Setting paging mode to SHADOW\n");
info->shdw_pg_mode = SHADOW_PAGING;
} else {
PrintError("Invalid paging mode (%s) specified in configuration. Defaulting to shadow paging\n", pg_mode);
info->shdw_pg_mode = SHADOW_PAGING;
}
} else {
- PrintDebug("No paging type specified in configuration. Defaulting to shadow paging\n");
+ V3_Print("No paging type specified in configuration. Defaulting to shadow paging\n");
info->shdw_pg_mode = SHADOW_PAGING;
}
return -1;
}
- v3_init_core(info);
+ if (v3_init_core(info) == -1) {
+ PrintError("Error Initializing Core\n");
+ return -1;
+ }
if (info->vm_info->vm_class == V3_PC_VM) {
if (pre_config_pc_core(info, core_cfg) == -1) {
static int post_config_vm(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
- vm->run_state = VM_STOPPED;
+
// Configure the memory map for the guest
if (setup_memory_map(vm, cfg) == -1) {
}
+ vm->run_state = VM_STOPPED;
+
return 0;
}
static int post_config_core(struct guest_info * info, v3_cfg_tree_t * cfg) {
- info->core_run_state = CORE_STOPPED;
if (v3_init_core_extensions(info) == -1) {
PrintError("Error intializing extension core states\n");
static struct v3_vm_info * allocate_guest(int num_cores) {
int guest_state_size = sizeof(struct v3_vm_info) + (sizeof(struct guest_info) * num_cores);
struct v3_vm_info * vm = V3_Malloc(guest_state_size);
+ int i = 0;
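+    // V3_Malloc() can fail; bail out before memset()ing a NULL pointer
+    if (vm == NULL) {
+        PrintError("Could not allocate guest state\n");
+        return NULL;
+    }
+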
memset(vm, 0, guest_state_size);
vm->num_cores = num_cores;
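+
+    /* Cores (and the VM itself) start out INVALID; they are moved to the
+     * STOPPED state only once initialization completes (see
+     * __init_vmx_vmcs() and post_config_vm()) */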
+ for (i = 0; i < num_cores; i++) {
+ vm->cores[i].core_run_state = CORE_INVALID;
+ }
+
+ vm->run_state = VM_INVALID;
+
return vm;
}
/* These fields contain the hardware feature sets supported by the local CPU */
static struct vmx_hw_info hw_info;
-extern v3_cpu_arch_t v3_cpu_types[];
+extern v3_cpu_arch_t v3_mach_type;
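+// the per-CPU type array is replaced by the single v3_mach_type, matching
+// the symmetric-core assumption noted earlier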
static addr_t host_vmcs_ptrs[V3_CONFIG_MAX_CPUS] = { [0 ... V3_CONFIG_MAX_CPUS - 1] = 0};
return (addr_t)V3_PAddr((void *)vmcs_page);
}
-/*
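+// disabled with #if 0 rather than a block comment so the handlers can keep
+// their own /* */ comments and be toggled back on easily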
+#if 0
static int debug_efer_read(struct guest_info * core, uint_t msr, struct v3_msr * src, void * priv_data) {
struct v3_msr * efer = (struct v3_msr *)&(core->ctrl_regs.efer);
- V3_Print("\n\nEFER READ\n");
+ V3_Print("\n\nEFER READ (val = %p)\n", (void *)efer->value);
v3_print_guest_state(core);
+ v3_print_vmcs();
+
src->value = efer->value;
return 0;
static int debug_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data) {
struct v3_msr * efer = (struct v3_msr *)&(core->ctrl_regs.efer);
- V3_Print("\n\nEFER WRITE\n");
+ V3_Print("\n\nEFER WRITE (old_val = %p) (new_val = %p)\n", (void *)efer->value, (void *)src.value);
v3_print_guest_state(core);
+ v3_print_vmcs();
efer->value = src.value;
- {
- struct vmx_data * vmx_state = core->vmm_data;
-
- V3_Print("Trapping page faults and GPFs\n");
- vmx_state->excp_bmap.pf = 1;
- vmx_state->excp_bmap.gp = 1;
-
- check_vmcs_write(VMCS_EXCP_BITMAP, vmx_state->excp_bmap.value);
- }
-
return 0;
}
-*/
+#endif
static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state) {
vmx_state->sec_proc_ctrls.value = hw_info.sec_proc_ctrls.def_val;
/* Print Control MSRs */
- PrintDebug("CR0 MSR: %p\n", (void *)(addr_t)hw_info.cr0.value);
- PrintDebug("CR4 MSR: %p\n", (void *)(addr_t)hw_info.cr4.value);
+ V3_Print("CR0 MSR: req_val=%p, req_mask=%p\n", (void *)(addr_t)hw_info.cr0.req_val, (void *)(addr_t)hw_info.cr0.req_mask);
+ V3_Print("CR4 MSR: req_val=%p, req_mask=%p\n", (void *)(addr_t)hw_info.cr4.req_val, (void *)(addr_t)hw_info.cr4.req_mask);
vmx_state->entry_ctrls.ld_pat = 1;
/* Temporary GPF trap */
- // vmx_state->excp_bmap.gp = 1;
+ // vmx_state->excp_bmap.gp = 1;
// Setup Guests initial PAT field
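+    // 0x0007040600070406 is the architectural power-on value of IA32_PAT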
vmx_ret |= check_vmcs_write(VMCS_GUEST_PAT, 0x0007040600070406LL);
#define CR0_PE 0x00000001
#define CR0_PG 0x80000000
#define CR0_WP 0x00010000 // To ensure mem hooks work
- vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG | CR0_WP));
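+// CR0.NE is fixed to 1 while in VMX operation (per IA32_VMX_CR0_FIXED0),
+// so NE joins the CR0 guest/host mask and guest reads of it return the
+// read shadow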
+#define CR0_NE 0x00000020
+ vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG | CR0_WP | CR0_NE));
// Cause VM_EXIT whenever CR4.VMXE or CR4.PAE bits are written
core);
} else if ((core->shdw_pg_mode == NESTED_PAGING) &&
- (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_CPU)) {
+ (v3_mach_type == V3_VMX_EPT_CPU)) {
#define CR0_PE 0x00000001
#define CR0_PG 0x80000000
#define CR0_WP 0x00010000 // To ensure mem hooks work
- vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG | CR0_WP));
+#define CR0_NE 0x00000020
+ vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG | CR0_WP | CR0_NE));
// vmx_state->pinbased_ctrls |= NMI_EXIT;
v3_hook_msr(core->vm_info, EFER_MSR, NULL, NULL, NULL);
} else if ((core->shdw_pg_mode == NESTED_PAGING) &&
- (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_UG_CPU)) {
+ (v3_mach_type == V3_VMX_EPT_UG_CPU)) {
int i = 0;
// For now we will assume that unrestricted guest mode is assured w/ EPT
core->rip = 0xfff0;
core->vm_regs.rdx = 0x00000f00;
core->ctrl_regs.rflags = 0x00000002; // The reserved bit is always 1
- core->ctrl_regs.cr0 = 0x00000030;
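+// 0x60010030 = CD | NW | WP | NE | ET: the x86 reset value (0x60000010)
+// plus WP (for memory hooks) and NE (required in VMX operation)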
+ core->ctrl_regs.cr0 = 0x60010030;
core->ctrl_regs.cr4 = 0x00002010; // Enable VMX and PSE flag
// Cause VM_EXIT whenever the CR4.VMXE bit is set
vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE);
-
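+// With unrestricted guest the guest controls PE and PG itself, so only
+// NE (which VMX forces to 1) remains host-owned in the CR0 mask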
+#define CR0_NE 0x00000020
+ vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, CR0_NE);
+ //((struct cr0_32 *)&(core->shdw_pg_state.guest_cr0))->ne = 1;
if (v3_init_ept(core, &hw_info) == -1) {
PrintError("Error initializing EPT\n");
}
// Hook all accesses to EFER register
- //v3_hook_msr(core->vm_info, EFER_MSR, &debug_efer_read, &debug_efer_write, core);
+ // v3_hook_msr(core->vm_info, EFER_MSR, &debug_efer_read, &debug_efer_write, core);
v3_hook_msr(core->vm_info, EFER_MSR, NULL, NULL, NULL);
} else {
- PrintError("Invalid Virtual paging mode\n");
+ PrintError("Invalid Virtual paging mode (pg_mode=%d) (mach_type=%d)\n", core->shdw_pg_mode, v3_mach_type);
return -1;
}
return 0;
}
-int v3_init_vmx_vmcs(struct guest_info * core, v3_vm_class_t vm_class) {
+
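+/* VMCS accesses (VMCLEAR/VMWRITE) must execute on a physical CPU that is
+ * in VMX operation.  V3_Call_On_CPU() cannot return a value, so
+ * __init_vmx_vmcs() signals success by advancing the core from
+ * CORE_INVALID to CORE_STOPPED; v3_init_vmx_vmcs() checks for that. */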
+static void __init_vmx_vmcs(void * arg) {
+ struct guest_info * core = arg;
struct vmx_data * vmx_state = NULL;
int vmx_ret = 0;
if (vmx_ret != VMX_SUCCESS) {
PrintError("VMCLEAR failed\n");
- return -1;
+ return;
}
- if (vm_class == V3_PC_VM) {
+ if (core->vm_info->vm_class == V3_PC_VM) {
PrintDebug("Initializing VMCS\n");
if (init_vmcs_bios(core, vmx_state) == -1) {
PrintError("Error initializing VMCS to BIOS state\n");
- return -1;
+ return;
}
} else {
PrintError("Invalid VM Class\n");
- return -1;
+ return;
}
PrintDebug("Serializing VMCS: %p\n", (void *)vmx_state->vmcs_ptr_phys);
vmx_ret = vmcs_clear(vmx_state->vmcs_ptr_phys);
+ core->core_run_state = CORE_STOPPED;
+ return;
+}
+
+
+
+int v3_init_vmx_vmcs(struct guest_info * core, v3_vm_class_t vm_class) {
+ extern v3_cpu_arch_t v3_cpu_types[];
+
+ if (v3_cpu_types[V3_Get_CPU()] == V3_INVALID_CPU) {
+ int i = 0;
+
+ for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
+ if (v3_cpu_types[i] != V3_INVALID_CPU) {
+ break;
+ }
+ }
+
+ if (i == V3_CONFIG_MAX_CPUS) {
+ PrintError("Could not find VALID CPU for VMX guest initialization\n");
+ return -1;
+ }
+
+ V3_Call_On_CPU(i, __init_vmx_vmcs, core);
+
+ } else {
+ __init_vmx_vmcs(core);
+ }
+
+ if (core->core_run_state != CORE_STOPPED) {
+ PrintError("Error initializing VMX Core\n");
+ return -1;
+ }
+
return 0;
}
// init vmcs bios
if ((core->shdw_pg_mode == NESTED_PAGING) &&
- (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_UG_CPU)) {
+ (v3_mach_type == V3_VMX_EPT_UG_CPU)) {
// easy
core->rip = 0;
core->segments.cs.selector = rip << 8;
void v3_init_vmx_cpu(int cpu_id) {
addr_t vmx_on_region = 0;
extern v3_cpu_arch_t v3_mach_type;
+ extern v3_cpu_arch_t v3_cpu_types[];
if (v3_mach_type == V3_INVALID_CPU) {
if (v3_init_vmx_hw(&hw_info) == -1) {
struct cr0_32 * new_shdw_cr0 = (struct cr0_32 *)new_cr0;
struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;
uint_t paging_transition = 0;
+ extern v3_cpu_arch_t v3_mach_type;
- /*
- PrintDebug("Old shadow CR0: 0x%x, New shadow CR0: 0x%x\n",
- (uint32_t)info->shdw_pg_state.guest_cr0, (uint32_t)*new_cr0);
- */
- if (new_shdw_cr0->pe != shdw_cr0->pe) {
+ V3_Print("Mov to CR0\n");
+ V3_Print("Old shadow CR0: 0x%x, New shadow CR0: 0x%x\n",
+ (uint32_t)info->shdw_pg_state.guest_cr0, (uint32_t)*new_cr0);
+
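+    // PE-transition emulation via VMXASSIST is skipped when it is
+    // disabled (presumably under unrestricted guest, where real mode
+    // runs natively)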
+ if ((new_shdw_cr0->pe != shdw_cr0->pe) && (vmx_info->assist_state != VMXASSIST_DISABLED)) {
/*
PrintDebug("Guest CR0: 0x%x\n", *(uint32_t *)guest_cr0);
PrintDebug("Old shadow CR0: 0x%x\n", *(uint32_t *)shdw_cr0);
return -1;
}
- if (vmx_info->assist_state == VMXASSIST_ENABLED) {
+ if (vmx_info->assist_state == VMXASSIST_ON) {
PrintDebug("Loading VMXASSIST at RIP: %p\n", (void *)(addr_t)info->rip);
} else {
PrintDebug("Leaving VMXASSIST and entering protected mode at RIP: %p\n",
if (new_shdw_cr0->pg != shdw_cr0->pg) {
paging_transition = 1;
}
-
- // The shadow always reflects the new value
- *shdw_cr0 = *new_shdw_cr0;
-
- // We don't care about most of the flags, so lets go for it
- // and set them to the guest values
- *guest_cr0 = *shdw_cr0;
+
-    // Except PG, PE, and NE, which are always set
+    // PE, PG, and NE are forced on under shadow paging or non-UG EPT;
+    // under unrestricted guest only NE is forced
- guest_cr0->pe = 1;
- guest_cr0->pg = 1;
- guest_cr0->ne = 1;
+ if ((info->shdw_pg_mode == SHADOW_PAGING) ||
+ (v3_mach_type != V3_VMX_EPT_UG_CPU)) {
+
+ // The shadow always reflects the new value
+ *shdw_cr0 = *new_shdw_cr0;
+
+
+	// We don't care about most of the flags, so let's go for it
+ // and set them to the guest values
+ *guest_cr0 = *shdw_cr0;
+
+ guest_cr0->pe = 1;
+ guest_cr0->pg = 1;
+ guest_cr0->ne = 1;
+ } else {
+ // Unrestricted guest
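+	// only NE is host-owned here (see the CR0 mask setup), so the
+	// shadow just tracks the guest's intended NE bit; everything else
+	// passes straight through to the real guest CR0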
+ *(uint32_t *)shdw_cr0 = (0x00000020 & *(uint32_t *)new_shdw_cr0);
+
+ *guest_cr0 = *new_shdw_cr0;
+ guest_cr0->ne = 1;
+ }
+
- if ((paging_transition)) {
+ if (paging_transition) {
// Paging transition
if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {