#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_lowlevel.h>
+#include <palacios/vmm_debug.h>
ret = vmcs_write(field, val);
if (ret != VMX_SUCCESS) {
- PrintError("VMWRITE error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
+ PrintError(VM_NONE, VCORE_NONE, "VMWRITE error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
return 1;
}
ret = vmcs_read(field, val);
if (ret != VMX_SUCCESS) {
- PrintError("VMREAD error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
+ PrintError(VM_NONE, VCORE_NONE, "VMREAD error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
}
return ret;
vmx_ret |= check_vmcs_write(VMCS_ENTRY_CTRLS, arch_data->entry_ctrls.value);
vmx_ret |= check_vmcs_write(VMCS_EXCP_BITMAP, arch_data->excp_bmap.value);
+ if (info->shdw_pg_mode == NESTED_PAGING) {
+ vmx_ret |= check_vmcs_write(VMCS_EPT_PTR, info->direct_map_pt);
+ }
+
return vmx_ret;
}
check_vmcs_read(VMCS_CR0_READ_SHDW, &(info->shdw_pg_state.guest_cr0));
check_vmcs_read(VMCS_GUEST_CR3, &(info->ctrl_regs.cr3));
check_vmcs_read(VMCS_GUEST_CR4, &(info->ctrl_regs.cr4));
- check_vmcs_read(VMCS_CR4_READ_SHDW, &(vmx_info->guest_cr4));
+ check_vmcs_read(VMCS_CR4_READ_SHDW, &(info->shdw_pg_state.guest_cr4));
check_vmcs_read(VMCS_GUEST_DR7, &(info->dbg_regs.dr7));
check_vmcs_read(VMCS_GUEST_RFLAGS, &(info->ctrl_regs.rflags));
- if (((struct vmx_data *)info->vmm_data)->ia32e_avail) {
+
#ifdef __V3_64BIT__
- check_vmcs_read(VMCS_GUEST_EFER, &(info->ctrl_regs.efer));
-#else
- uint32_t hi, lo;
- check_vmcs_read(VMCS_GUEST_EFER, &hi);
- check_vmcs_read(VMCS_GUEST_EFER_HIGH, &lo);
- info->ctrl_regs.efer = ((uint64_t) hi << 32) | lo;
+ check_vmcs_read(VMCS_GUEST_EFER, &(info->ctrl_regs.efer));
+ check_vmcs_read(VMCS_ENTRY_CTRLS, &(vmx_info->entry_ctrls.value));
#endif
- }
-
+
error = v3_read_vmcs_segments(&(info->segments));
+ /* Save MSRs from MSR SAVE Area (wherever that is...) */
+
+ info->msrs.star = vmx_info->msr_area->guest_star.hi;
+ info->msrs.star <<= 32;
+ info->msrs.star |= vmx_info->msr_area->guest_star.lo;
+
+ info->msrs.lstar = vmx_info->msr_area->guest_lstar.hi;
+ info->msrs.lstar <<= 32;
+ info->msrs.lstar |= vmx_info->msr_area->guest_lstar.lo;
+
+ info->msrs.sfmask = vmx_info->msr_area->guest_fmask.hi;
+ info->msrs.sfmask <<= 32;
+ info->msrs.sfmask |= vmx_info->msr_area->guest_fmask.lo;
+
+ info->msrs.kern_gs_base = vmx_info->msr_area->guest_kern_gs.hi;
+ info->msrs.kern_gs_base <<= 32;
+ info->msrs.kern_gs_base |= vmx_info->msr_area->guest_kern_gs.lo;
+
+
return error;
}
check_vmcs_write(VMCS_GUEST_RFLAGS, info->ctrl_regs.rflags);
- if (((struct vmx_data *)info->vmm_data)->ia32e_avail) {
- check_vmcs_write(VMCS_GUEST_EFER, info->ctrl_regs.efer);
- }
+#ifdef __V3_64BIT__
+ check_vmcs_write(VMCS_GUEST_EFER, info->ctrl_regs.efer);
+ check_vmcs_write(VMCS_ENTRY_CTRLS, vmx_info->entry_ctrls.value);
+#endif
error = v3_write_vmcs_segments(&(info->segments));
+ /* Restore MSRs from MSR SAVE Area (wherever that is...) */
+
+ vmx_info->msr_area->guest_star.hi = (info->msrs.star >> 32);
+ vmx_info->msr_area->guest_star.lo = (info->msrs.star & 0xffffffff);
+
+ vmx_info->msr_area->guest_lstar.hi = (info->msrs.lstar >> 32);
+ vmx_info->msr_area->guest_lstar.lo = (info->msrs.lstar & 0xffffffff);
+
+ vmx_info->msr_area->guest_fmask.hi = (info->msrs.sfmask >> 32);
+ vmx_info->msr_area->guest_fmask.lo = (info->msrs.sfmask & 0xffffffff);
+
+ vmx_info->msr_area->guest_kern_gs.hi = (info->msrs.kern_gs_base >> 32);
+ vmx_info->msr_area->guest_kern_gs.lo = (info->msrs.kern_gs_base & 0xffffffff);
+
return error;
}
int v3_update_vmcs_host_state(struct guest_info * info) {
int vmx_ret = 0;
addr_t tmp;
- struct vmx_data * arch_data = (struct vmx_data *)(info->vmm_data);
struct v3_msr tmp_msr;
+ addr_t gdtr_base;
+ struct {
+ uint16_t selector;
+ addr_t base;
+ } __attribute__((packed)) tmp_seg;
#ifdef __V3_64BIT__
__asm__ __volatile__ ( "movq %%cr0, %0; "
vmx_ret |= check_vmcs_write(VMCS_HOST_CR4, tmp);
+ __asm__ __volatile__(
+ "sgdt (%0);"
+ :
+ : "q"(&tmp_seg)
+ : "memory"
+ );
+ gdtr_base = tmp_seg.base;
+ vmx_ret |= check_vmcs_write(VMCS_HOST_GDTR_BASE, tmp_seg.base);
+
+ __asm__ __volatile__(
+ "sidt (%0);"
+ :
+ : "q"(&tmp_seg)
+ : "memory"
+ );
+ vmx_ret |= check_vmcs_write(VMCS_HOST_IDTR_BASE, tmp_seg.base);
+
+ __asm__ __volatile__(
+ "str (%0);"
+ :
+ : "q"(&tmp_seg)
+ : "memory"
+ );
+ vmx_ret |= check_vmcs_write(VMCS_HOST_TR_SELECTOR, tmp_seg.selector);
+
+ /* The GDTR *index* is bits 3-15 of the selector. */
+ {
+ struct tss_descriptor * desc = NULL;
+ desc = (struct tss_descriptor *)(gdtr_base + (8 * (tmp_seg.selector >> 3)));
+
+ tmp_seg.base = ((desc->base1) |
+ (desc->base2 << 16) |
+ (desc->base3 << 24) |
+#ifdef __V3_64BIT__
+ ((uint64_t)desc->base4 << 32)
+#else
+ (0)
+#endif
+ );
- vmx_ret |= check_vmcs_write(VMCS_HOST_GDTR_BASE, arch_data->host_state.gdtr.base);
- vmx_ret |= check_vmcs_write(VMCS_HOST_IDTR_BASE, arch_data->host_state.idtr.base);
- vmx_ret |= check_vmcs_write(VMCS_HOST_TR_BASE, arch_data->host_state.tr.base);
-
-#define FS_BASE_MSR 0xc0000100
-#define GS_BASE_MSR 0xc0000101
-
- // FS.BASE MSR
- v3_get_msr(FS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- vmx_ret |= check_vmcs_write(VMCS_HOST_FS_BASE, tmp_msr.value);
-
- // GS.BASE MSR
- v3_get_msr(GS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- vmx_ret |= check_vmcs_write(VMCS_HOST_GS_BASE, tmp_msr.value);
-
+ vmx_ret |= check_vmcs_write(VMCS_HOST_TR_BASE, tmp_seg.base);
+ }
#ifdef __V3_64BIT__
#endif
vmx_ret |= check_vmcs_write(VMCS_HOST_GS_SELECTOR, tmp);
- vmx_ret |= check_vmcs_write(VMCS_HOST_TR_SELECTOR, arch_data->host_state.tr.selector);
-
#define SYSENTER_CS_MSR 0x00000174
#define SYSENTER_ESP_MSR 0x00000175
#define SYSENTER_EIP_MSR 0x00000176
+#define FS_BASE_MSR 0xc0000100
+#define GS_BASE_MSR 0xc0000101
+#define EFER_MSR 0xc0000080
+
// SYSENTER CS MSR
v3_get_msr(SYSENTER_CS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
v3_get_msr(SYSENTER_EIP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_EIP, tmp_msr.value);
+
+ // FS.BASE MSR
+ v3_get_msr(FS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmx_ret |= check_vmcs_write(VMCS_HOST_FS_BASE, tmp_msr.value);
+
+ // GS.BASE MSR
+ v3_get_msr(GS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmx_ret |= check_vmcs_write(VMCS_HOST_GS_BASE, tmp_msr.value);
+
+
+ // EFER
+ v3_get_msr(EFER_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmx_ret |= check_vmcs_write(VMCS_HOST_EFER, tmp_msr.value);
+
+ // PERF GLOBAL CONTROL
+
+ // PAT
+
+ v3_get_msr(IA32_PAT_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmx_ret |= check_vmcs_write(VMCS_HOST_PAT, tmp_msr.value);
+
+
+ // save STAR, LSTAR, FMASK, KERNEL_GS_BASE MSRs in MSR load/store area
+ {
+ struct vmx_data * vmx_state = (struct vmx_data *)info->vmm_data;
+ struct vmcs_msr_save_area * msr_entries = vmx_state->msr_area;
+
+
+ v3_get_msr(IA32_STAR_MSR, &(msr_entries->host_star.hi), &(msr_entries->host_star.lo));
+ v3_get_msr(IA32_LSTAR_MSR, &(msr_entries->host_lstar.hi), &(msr_entries->host_lstar.lo));
+ v3_get_msr(IA32_FMASK_MSR, &(msr_entries->host_fmask.hi), &(msr_entries->host_fmask.lo));
+ v3_get_msr(IA32_KERN_GS_BASE_MSR, &(msr_entries->host_kern_gs.hi), &(msr_entries->host_kern_gs.lo));
+ }
+
+
+
+
+
return vmx_ret;
}
addr_t val;
if (vmcs_read(vmcs_index, &val) != VMX_SUCCESS) {
- PrintError("VMCS_READ error for %s\n", v3_vmcs_field_to_str(vmcs_index));
+ PrintError(VM_NONE, VCORE_NONE, "VMCS_READ error for %s\n", v3_vmcs_field_to_str(vmcs_index));
return;
};
if (len == 2) {
- PrintDebug("\t%s: 0x%.4x\n", v3_vmcs_field_to_str(vmcs_index), (uint16_t)val);
+ PrintDebug(VM_NONE, VCORE_NONE, "\t%s: 0x%.4x\n", v3_vmcs_field_to_str(vmcs_index), (uint16_t)val);
} else if (len == 4) {
- PrintDebug("\t%s: 0x%.8x\n", v3_vmcs_field_to_str(vmcs_index), (uint32_t)val);
+ PrintDebug(VM_NONE, VCORE_NONE, "\t%s: 0x%.8x\n", v3_vmcs_field_to_str(vmcs_index), (uint32_t)val);
} else if (len == 8) {
- PrintDebug("\t%s: 0x%p\n", v3_vmcs_field_to_str(vmcs_index), (void *)(addr_t)val);
+ PrintDebug(VM_NONE, VCORE_NONE, "\t%s: 0x%p\n", v3_vmcs_field_to_str(vmcs_index), (void *)(addr_t)val);
}
}
v3_print_segments(&segs);
- PrintDebug(" ==> CS\n");
+ PrintDebug(VM_NONE, VCORE_NONE, " ==> CS\n");
print_vmcs_field(VMCS_GUEST_CS_SELECTOR);
print_vmcs_field(VMCS_GUEST_CS_BASE);
print_vmcs_field(VMCS_GUEST_CS_LIMIT);
print_vmcs_field(VMCS_GUEST_CS_ACCESS);
- PrintDebug(" ==> SS\n");
+ PrintDebug(VM_NONE, VCORE_NONE, " ==> SS\n");
print_vmcs_field(VMCS_GUEST_SS_SELECTOR);
print_vmcs_field(VMCS_GUEST_SS_BASE);
print_vmcs_field(VMCS_GUEST_SS_LIMIT);
print_vmcs_field(VMCS_GUEST_SS_ACCESS);
- PrintDebug(" ==> DS\n");
+ PrintDebug(VM_NONE, VCORE_NONE, " ==> DS\n");
print_vmcs_field(VMCS_GUEST_DS_SELECTOR);
print_vmcs_field(VMCS_GUEST_DS_BASE);
print_vmcs_field(VMCS_GUEST_DS_LIMIT);
print_vmcs_field(VMCS_GUEST_DS_ACCESS);
- PrintDebug(" ==> ES\n");
+ PrintDebug(VM_NONE, VCORE_NONE, " ==> ES\n");
print_vmcs_field(VMCS_GUEST_ES_SELECTOR);
print_vmcs_field(VMCS_GUEST_ES_BASE);
print_vmcs_field(VMCS_GUEST_ES_LIMIT);
print_vmcs_field(VMCS_GUEST_ES_ACCESS);
- PrintDebug(" ==> FS\n");
+ PrintDebug(VM_NONE, VCORE_NONE, " ==> FS\n");
print_vmcs_field(VMCS_GUEST_FS_SELECTOR);
print_vmcs_field(VMCS_GUEST_FS_BASE);
print_vmcs_field(VMCS_GUEST_FS_LIMIT);
print_vmcs_field(VMCS_GUEST_FS_ACCESS);
- PrintDebug(" ==> GS\n");
+ PrintDebug(VM_NONE, VCORE_NONE, " ==> GS\n");
print_vmcs_field(VMCS_GUEST_GS_SELECTOR);
print_vmcs_field(VMCS_GUEST_GS_BASE);
print_vmcs_field(VMCS_GUEST_GS_LIMIT);
print_vmcs_field(VMCS_GUEST_GS_ACCESS);
- PrintDebug(" ==> LDTR\n");
+ PrintDebug(VM_NONE, VCORE_NONE, " ==> LDTR\n");
print_vmcs_field(VMCS_GUEST_LDTR_SELECTOR);
print_vmcs_field(VMCS_GUEST_LDTR_BASE);
print_vmcs_field(VMCS_GUEST_LDTR_LIMIT);
print_vmcs_field(VMCS_GUEST_LDTR_ACCESS);
- PrintDebug(" ==> TR\n");
+ PrintDebug(VM_NONE, VCORE_NONE, " ==> TR\n");
print_vmcs_field(VMCS_GUEST_TR_SELECTOR);
print_vmcs_field(VMCS_GUEST_TR_BASE);
print_vmcs_field(VMCS_GUEST_TR_LIMIT);
print_vmcs_field(VMCS_GUEST_TR_ACCESS);
- PrintDebug(" ==> GDTR\n");
+ PrintDebug(VM_NONE, VCORE_NONE, " ==> GDTR\n");
print_vmcs_field(VMCS_GUEST_GDTR_BASE);
print_vmcs_field(VMCS_GUEST_GDTR_LIMIT);
- PrintDebug(" ==> IDTR\n");
+ PrintDebug(VM_NONE, VCORE_NONE, " ==> IDTR\n");
print_vmcs_field(VMCS_GUEST_IDTR_BASE);
print_vmcs_field(VMCS_GUEST_IDTR_LIMIT);
static void print_guest_state()
{
- PrintDebug("VMCS_GUEST_STATE\n");
+ PrintDebug(VM_NONE, VCORE_NONE, "VMCS_GUEST_STATE\n");
print_vmcs_field(VMCS_GUEST_RIP);
print_vmcs_field(VMCS_GUEST_RSP);
print_vmcs_field(VMCS_GUEST_RFLAGS);
#endif
- PrintDebug("\n");
+ PrintDebug(VM_NONE, VCORE_NONE, "\n");
print_vmcs_segments();
- PrintDebug("\n");
+ PrintDebug(VM_NONE, VCORE_NONE, "\n");
print_vmcs_field(VMCS_GUEST_DBG_CTL);
#ifdef __V3_32BIT__
- PrintDebug("GUEST_NON_REGISTER_STATE\n");
+ PrintDebug(VM_NONE, VCORE_NONE, "GUEST_NON_REGISTER_STATE\n");
print_vmcs_field(VMCS_GUEST_ACTIVITY_STATE);
print_vmcs_field(VMCS_GUEST_INT_STATE);
static void print_host_state()
{
- PrintDebug("VMCS_HOST_STATE\n");
+ PrintDebug(VM_NONE, VCORE_NONE, "VMCS_HOST_STATE\n");
print_vmcs_field(VMCS_HOST_RIP);
print_vmcs_field(VMCS_HOST_RSP);
#endif
- PrintDebug("\n");
+ PrintDebug(VM_NONE, VCORE_NONE, "\n");
print_vmcs_field(VMCS_HOST_CS_SELECTOR);
print_vmcs_field(VMCS_HOST_SS_SELECTOR);
print_vmcs_field(VMCS_HOST_DS_SELECTOR);
print_vmcs_field(VMCS_HOST_GS_SELECTOR);
print_vmcs_field(VMCS_HOST_TR_SELECTOR);
- PrintDebug("\n");
+ PrintDebug(VM_NONE, VCORE_NONE, "\n");
print_vmcs_field(VMCS_HOST_FS_BASE);
print_vmcs_field(VMCS_HOST_GS_BASE);
print_vmcs_field(VMCS_HOST_TR_BASE);
print_vmcs_field(VMCS_HOST_GDTR_BASE);
print_vmcs_field(VMCS_HOST_IDTR_BASE);
- PrintDebug("\n");
+ PrintDebug(VM_NONE, VCORE_NONE, "\n");
print_vmcs_field(VMCS_HOST_SYSENTER_CS);
print_vmcs_field(VMCS_HOST_SYSENTER_ESP);
print_vmcs_field(VMCS_HOST_SYSENTER_EIP);
static void print_exec_ctrls() {
- PrintDebug("VMCS_EXEC_CTRL_FIELDS\n");
+ PrintDebug(VM_NONE, VCORE_NONE, "VMCS_EXEC_CTRL_FIELDS\n");
print_vmcs_field(VMCS_PIN_CTRLS);
print_vmcs_field(VMCS_PROC_CTRLS);
print_vmcs_field(VMCS_TSC_OFFSET_HIGH);
#endif
- PrintDebug("\n");
+ PrintDebug(VM_NONE, VCORE_NONE, "\n");
print_vmcs_field(VMCS_CR0_MASK);
print_vmcs_field(VMCS_CR0_READ_SHDW);
// Check max number of CR3 targets... may continue...
- PrintDebug("\n");
+ PrintDebug(VM_NONE, VCORE_NONE, "\n");
// if virtualize apic accesses
print_vmcs_field(VMCS_APIC_ACCESS_ADDR);
}
static void print_ept_state() {
- V3_Print("VMCS EPT INFO\n");
+ V3_Print(VM_NONE, VCORE_NONE, "VMCS EPT INFO\n");
// if enable vpid
print_vmcs_field(VMCS_VPID);
static void print_exit_ctrls() {
- PrintDebug("VMCS_EXIT_CTRLS\n");
+ PrintDebug(VM_NONE, VCORE_NONE, "VMCS_EXIT_CTRLS\n");
print_vmcs_field(VMCS_EXIT_CTRLS);
static void print_entry_ctrls() {
- PrintDebug("VMCS_ENTRY_CTRLS\n");
+ PrintDebug(VM_NONE, VCORE_NONE, "VMCS_ENTRY_CTRLS\n");
print_vmcs_field(VMCS_ENTRY_CTRLS);
static void print_exit_info() {
- PrintDebug("VMCS_EXIT_INFO\n");
+ PrintDebug(VM_NONE, VCORE_NONE, "VMCS_EXIT_INFO\n");
print_vmcs_field(VMCS_EXIT_REASON);
print_vmcs_field(VMCS_EXIT_QUAL);
case 3:
return sizeof(addr_t);
default:
- PrintError("Invalid VMCS field: 0x%x\n", field);
+ PrintError(VM_NONE, VCORE_NONE, "Invalid VMCS field: 0x%x\n", field);
return -1;
}
}