#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_lowlevel.h>
+#include <palacios/vmm_debug.h>
vmx_ret |= check_vmcs_write(VMCS_EXIT_CTRLS, arch_data->exit_ctrls.value);
vmx_ret |= check_vmcs_write(VMCS_ENTRY_CTRLS, arch_data->entry_ctrls.value);
+ vmx_ret |= check_vmcs_write(VMCS_EXCP_BITMAP, arch_data->excp_bmap.value);
+
+ if (info->shdw_pg_mode == NESTED_PAGING) {
+ vmx_ret |= check_vmcs_write(VMCS_EPT_PTR, info->direct_map_pt);
+ }
return vmx_ret;
}
check_vmcs_read(VMCS_GUEST_DR7, &(info->dbg_regs.dr7));
check_vmcs_read(VMCS_GUEST_RFLAGS, &(info->ctrl_regs.rflags));
- if (((struct vmx_data *)info->vmm_data)->ia32e_avail) {
+
#ifdef __V3_64BIT__
- check_vmcs_read(VMCS_GUEST_EFER, &(info->ctrl_regs.efer));
-#else
- uint32_t hi, lo;
- check_vmcs_read(VMCS_GUEST_EFER, &hi);
- check_vmcs_read(VMCS_GUEST_EFER_HIGH, &lo);
- info->ctrl_regs.efer = ((uint64_t) hi << 32) | lo;
+ check_vmcs_read(VMCS_GUEST_EFER, &(info->ctrl_regs.efer));
+ check_vmcs_read(VMCS_ENTRY_CTRLS, &(vmx_info->entry_ctrls.value));
#endif
- }
-
+
error = v3_read_vmcs_segments(&(info->segments));
+    /* Save guest syscall MSRs out of the VMCS MSR save area (wherever that is...) */
+
+ info->msrs.star = vmx_info->msr_area->guest_star.hi;
+ info->msrs.star <<= 32;
+ info->msrs.star |= vmx_info->msr_area->guest_star.lo;
+
+ info->msrs.lstar = vmx_info->msr_area->guest_lstar.hi;
+ info->msrs.lstar <<= 32;
+ info->msrs.lstar |= vmx_info->msr_area->guest_lstar.lo;
+
+ info->msrs.sfmask = vmx_info->msr_area->guest_fmask.hi;
+ info->msrs.sfmask <<= 32;
+ info->msrs.sfmask |= vmx_info->msr_area->guest_fmask.lo;
+
+ info->msrs.kern_gs_base = vmx_info->msr_area->guest_kern_gs.hi;
+ info->msrs.kern_gs_base <<= 32;
+ info->msrs.kern_gs_base |= vmx_info->msr_area->guest_kern_gs.lo;
+
+
return error;
}
check_vmcs_write(VMCS_GUEST_RFLAGS, info->ctrl_regs.rflags);
- if (((struct vmx_data *)info->vmm_data)->ia32e_avail) {
- check_vmcs_write(VMCS_GUEST_EFER, info->ctrl_regs.efer);
- }
+#ifdef __V3_64BIT__
+ check_vmcs_write(VMCS_GUEST_EFER, info->ctrl_regs.efer);
+ check_vmcs_write(VMCS_ENTRY_CTRLS, vmx_info->entry_ctrls.value);
+#endif
error = v3_write_vmcs_segments(&(info->segments));
+    /* Restore guest syscall MSRs into the VMCS MSR save area (wherever that is...) */
+
+ vmx_info->msr_area->guest_star.hi = (info->msrs.star >> 32);
+ vmx_info->msr_area->guest_star.lo = (info->msrs.star & 0xffffffff);
+
+ vmx_info->msr_area->guest_lstar.hi = (info->msrs.lstar >> 32);
+ vmx_info->msr_area->guest_lstar.lo = (info->msrs.lstar & 0xffffffff);
+
+ vmx_info->msr_area->guest_fmask.hi = (info->msrs.sfmask >> 32);
+ vmx_info->msr_area->guest_fmask.lo = (info->msrs.sfmask & 0xffffffff);
+
+ vmx_info->msr_area->guest_kern_gs.hi = (info->msrs.kern_gs_base >> 32);
+ vmx_info->msr_area->guest_kern_gs.lo = (info->msrs.kern_gs_base & 0xffffffff);
+
return error;
}
int v3_update_vmcs_host_state(struct guest_info * info) {
int vmx_ret = 0;
addr_t tmp;
- struct vmx_data * arch_data = (struct vmx_data *)(info->vmm_data);
struct v3_msr tmp_msr;
+ addr_t gdtr_base;
+ struct {
+ uint16_t selector;
+ addr_t base;
+ } __attribute__((packed)) tmp_seg;
#ifdef __V3_64BIT__
__asm__ __volatile__ ( "movq %%cr0, %0; "
vmx_ret |= check_vmcs_write(VMCS_HOST_CR4, tmp);
+ __asm__ __volatile__(
+ "sgdt (%0);"
+ :
+ : "q"(&tmp_seg)
+ : "memory"
+ );
+ gdtr_base = tmp_seg.base;
+ vmx_ret |= check_vmcs_write(VMCS_HOST_GDTR_BASE, tmp_seg.base);
+
+ __asm__ __volatile__(
+ "sidt (%0);"
+ :
+ : "q"(&tmp_seg)
+ : "memory"
+ );
+ vmx_ret |= check_vmcs_write(VMCS_HOST_IDTR_BASE, tmp_seg.base);
+
+ __asm__ __volatile__(
+ "str (%0);"
+ :
+ : "q"(&tmp_seg)
+ : "memory"
+ );
+ vmx_ret |= check_vmcs_write(VMCS_HOST_TR_SELECTOR, tmp_seg.selector);
+
+    /* The GDT index is bits 3-15 of the TR selector; each GDT entry is 8 bytes. */
+ {
+ struct tss_descriptor * desc = NULL;
+ desc = (struct tss_descriptor *)(gdtr_base + (8 * (tmp_seg.selector >> 3)));
+
+ tmp_seg.base = ((desc->base1) |
+ (desc->base2 << 16) |
+ (desc->base3 << 24) |
+#ifdef __V3_64BIT__
+ ((uint64_t)desc->base4 << 32)
+#else
+ (0)
+#endif
+ );
- vmx_ret |= check_vmcs_write(VMCS_HOST_GDTR_BASE, arch_data->host_state.gdtr.base);
- vmx_ret |= check_vmcs_write(VMCS_HOST_IDTR_BASE, arch_data->host_state.idtr.base);
- vmx_ret |= check_vmcs_write(VMCS_HOST_TR_BASE, arch_data->host_state.tr.base);
-
-#define FS_BASE_MSR 0xc0000100
-#define GS_BASE_MSR 0xc0000101
-
- // FS.BASE MSR
- v3_get_msr(FS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- vmx_ret |= check_vmcs_write(VMCS_HOST_FS_BASE, tmp_msr.value);
-
- // GS.BASE MSR
- v3_get_msr(GS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- vmx_ret |= check_vmcs_write(VMCS_HOST_GS_BASE, tmp_msr.value);
-
+ vmx_ret |= check_vmcs_write(VMCS_HOST_TR_BASE, tmp_seg.base);
+ }
#ifdef __V3_64BIT__
#endif
vmx_ret |= check_vmcs_write(VMCS_HOST_GS_SELECTOR, tmp);
- vmx_ret |= check_vmcs_write(VMCS_HOST_TR_SELECTOR, arch_data->host_state.tr.selector);
-
#define SYSENTER_CS_MSR 0x00000174
#define SYSENTER_ESP_MSR 0x00000175
#define SYSENTER_EIP_MSR 0x00000176
+#define FS_BASE_MSR 0xc0000100
+#define GS_BASE_MSR 0xc0000101
+#define EFER_MSR 0xc0000080
+
// SYSENTER CS MSR
v3_get_msr(SYSENTER_CS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
v3_get_msr(SYSENTER_EIP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_EIP, tmp_msr.value);
+
+ // FS.BASE MSR
+ v3_get_msr(FS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmx_ret |= check_vmcs_write(VMCS_HOST_FS_BASE, tmp_msr.value);
+
+ // GS.BASE MSR
+ v3_get_msr(GS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmx_ret |= check_vmcs_write(VMCS_HOST_GS_BASE, tmp_msr.value);
+
+
+ // EFER
+ v3_get_msr(EFER_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmx_ret |= check_vmcs_write(VMCS_HOST_EFER, tmp_msr.value);
+
+ // PERF GLOBAL CONTROL
+
+ // PAT
+
+ v3_get_msr(IA32_PAT_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmx_ret |= check_vmcs_write(VMCS_HOST_PAT, tmp_msr.value);
+
+
+    // Save the host's STAR, LSTAR, FMASK, and KERNEL_GS_BASE MSRs into the VMCS MSR load/store area
+ {
+ struct vmx_data * vmx_state = (struct vmx_data *)info->vmm_data;
+ struct vmcs_msr_save_area * msr_entries = vmx_state->msr_area;
+
+
+ v3_get_msr(IA32_STAR_MSR, &(msr_entries->host_star.hi), &(msr_entries->host_star.lo));
+ v3_get_msr(IA32_LSTAR_MSR, &(msr_entries->host_lstar.hi), &(msr_entries->host_lstar.lo));
+ v3_get_msr(IA32_FMASK_MSR, &(msr_entries->host_fmask.hi), &(msr_entries->host_fmask.lo));
+ v3_get_msr(IA32_KERN_GS_BASE_MSR, &(msr_entries->host_kern_gs.hi), &(msr_entries->host_kern_gs.lo));
+ }
+
+
+
+
+
return vmx_ret;
}
print_vmcs_field(VMCS_GUEST_CR4);
print_vmcs_field(VMCS_GUEST_DR7);
+ // if save IA32_EFER
+ print_vmcs_field(VMCS_GUEST_EFER);
+#ifdef __V3_32BIT__
+ print_vmcs_field(VMCS_GUEST_EFER_HIGH);
+#endif
+
PrintDebug("\n");
print_vmcs_field(VMCS_GUEST_SYSENTER_ESP);
print_vmcs_field(VMCS_GUEST_SYSENTER_EIP);
+
+ // if save IA32_PAT
+ print_vmcs_field(VMCS_GUEST_PAT);
+#ifdef __V3_32BIT__
+ print_vmcs_field(VMCS_GUEST_PAT_HIGH);
+#endif
+
+ //if load IA32_PERF_GLOBAL_CTRL
print_vmcs_field(VMCS_GUEST_PERF_GLOBAL_CTRL);
#ifdef __V3_32BIT__
print_vmcs_field(VMCS_GUEST_PERF_GLOBAL_CTRL_HIGH);
print_vmcs_field(VMCS_GUEST_SMBASE);
+
+
PrintDebug("GUEST_NON_REGISTER_STATE\n");
print_vmcs_field(VMCS_GUEST_ACTIVITY_STATE);
print_vmcs_field(VMCS_GUEST_INT_STATE);
print_vmcs_field(VMCS_GUEST_PENDING_DBG_EXCP);
+ // if VMX preempt timer
+ print_vmcs_field(VMCS_PREEMPT_TIMER);
+
}
static void print_host_state()
print_vmcs_field(VMCS_HOST_CR3);
print_vmcs_field(VMCS_HOST_CR4);
+
+
+ // if load IA32_EFER
+ print_vmcs_field(VMCS_HOST_EFER);
+#ifdef __V3_32BIT__
+ print_vmcs_field(VMCS_HOST_EFER_HIGH);
+#endif
+
+
PrintDebug("\n");
print_vmcs_field(VMCS_HOST_CS_SELECTOR);
print_vmcs_field(VMCS_HOST_SS_SELECTOR);
print_vmcs_field(VMCS_HOST_SYSENTER_ESP);
print_vmcs_field(VMCS_HOST_SYSENTER_EIP);
+
+ // if load IA32_PAT
+ print_vmcs_field(VMCS_HOST_PAT);
+#ifdef __V3_32BIT__
+ print_vmcs_field(VMCS_HOST_PAT_HIGH);
+#endif
+
+ // if load IA32_PERF_GLOBAL_CTRL
print_vmcs_field(VMCS_HOST_PERF_GLOBAL_CTRL);
#ifdef __V3_32BIT__
print_vmcs_field(VMCS_HOST_PERF_GLOBAL_CTRL_HIGH);
PrintDebug("VMCS_EXEC_CTRL_FIELDS\n");
print_vmcs_field(VMCS_PIN_CTRLS);
print_vmcs_field(VMCS_PROC_CTRLS);
+
+ // if activate secondary controls
print_vmcs_field(VMCS_SEC_PROC_CTRLS);
print_vmcs_field(VMCS_EXCP_BITMAP);
print_vmcs_field(VMCS_CR3_TGT_VAL_2);
print_vmcs_field(VMCS_CR3_TGT_VAL_3);
+    // TODO: read the supported CR3-target count (IA32_VMX_MISC) and print the remaining slots if any
+
+
PrintDebug("\n");
+ // if virtualize apic accesses
print_vmcs_field(VMCS_APIC_ACCESS_ADDR);
#ifdef __V3_32BIT__
print_vmcs_field(VMCS_APIC_ACCESS_ADDR_HIGH);
#endif
+ // if use tpr shadow
print_vmcs_field(VMCS_VAPIC_ADDR);
#ifdef __V3_32BIT__
print_vmcs_field(VMCS_VAPIC_ADDR_HIGH);
#endif
+ // if use tpr shadow
print_vmcs_field(VMCS_TPR_THRESHOLD);
+
+ // if use MSR bitmaps
print_vmcs_field(VMCS_MSR_BITMAP);
#ifdef __V3_32BIT__
print_vmcs_field(VMCS_MSR_BITMAP_HIGH);
#ifdef __V3_32BIT__
print_vmcs_field(VMCS_EXEC_PTR_HIGH);
#endif
+
+
+}
+
+static void print_ept_state() {
+ V3_Print("VMCS EPT INFO\n");
+
+ // if enable vpid
+ print_vmcs_field(VMCS_VPID);
+
+ print_vmcs_field(VMCS_EPT_PTR);
+#ifdef __V3_32BIT__
+ print_vmcs_field(VMCS_EPT_PTR_HIGH);
+#endif
+
+ print_vmcs_field(VMCS_GUEST_PHYS_ADDR);
+#ifdef __V3_32BIT__
+ print_vmcs_field(VMCS_GUEST_PHYS_ADDR_HIGH);
+#endif
+
+
+
+ print_vmcs_field(VMCS_GUEST_PDPTE0);
+#ifdef __V3_32BIT__
+ print_vmcs_field(VMCS_GUEST_PDPTE0_HIGH);
+#endif
+
+ print_vmcs_field(VMCS_GUEST_PDPTE1);
+#ifdef __V3_32BIT__
+ print_vmcs_field(VMCS_GUEST_PDPTE1_HIGH);
+#endif
+
+ print_vmcs_field(VMCS_GUEST_PDPTE2);
+#ifdef __V3_32BIT__
+ print_vmcs_field(VMCS_GUEST_PDPTE2_HIGH);
+#endif
+
+ print_vmcs_field(VMCS_GUEST_PDPTE3);
+#ifdef __V3_32BIT__
+ print_vmcs_field(VMCS_GUEST_PDPTE3_HIGH);
+#endif
+
+
+
}
print_vmcs_field(VMCS_EXIT_MSR_LOAD_ADDR_HIGH);
#endif
+
+ // if pause loop exiting
+ print_vmcs_field(VMCS_PLE_GAP);
+ print_vmcs_field(VMCS_PLE_WINDOW);
+
}
print_guest_state();
print_host_state();
+ print_ept_state();
+
print_exec_ctrls();
print_exit_ctrls();
print_entry_ctrls();