#include <palacios/vmm_direct_paging.h>
#include <palacios/vmm_ctrl_regs.h>
-#ifndef CONFIG_DEBUG_VMX
+#ifndef V3_CONFIG_DEBUG_VMX
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
return -1;
}
+/* Handle a VM exit caused by a guest access to CR4.
+ *
+ * cr_qual is the decoded VMX exit qualification for the CR access.
+ * access_type 0 = MOV to CR4 (write), 1 = MOV from CR4 (read);
+ * values >= 2 (CLTS/LMSW, which only apply to CR0) are rejected.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+int v3_vmx_handle_cr4_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
+    if (cr_qual->access_type < 2) {
+
+        if (cr_qual->access_type == 0) {
+            /* Guest wrote CR4: let the generic emulator update the shadow state. */
+            if (v3_handle_cr4_write(info) != 0) {
+                PrintError("Could not handle CR4 write\n");
+                return -1;
+            }
+            /* Bit 13 (0x2000) is CR4.VMXE. Hardware requires VMXE set in the
+             * real CR4 while in VMX operation, so force it on here regardless
+             * of what the guest wrote. The guest-visible (shadow) value is
+             * handled by v3_handle_cr4_write above.
+             */
+            info->ctrl_regs.cr4 |= 0x2000; // no VMX allowed in guest, so mask CR4.VMXE
+        } else {
+            /* Guest read CR4: emulate the read from the shadow state. */
+            if (v3_handle_cr4_read(info) != 0) {
+                PrintError("Could not handle CR4 read\n");
+                return -1;
+            }
+        }
+
+        return 0;
+    }
+
+    PrintError("Invalid CR4 Access type?? (type=%d)\n", cr_qual->access_type);
+    return -1;
+}
+
+/* Handle a VM exit caused by a guest access to CR8 (the 64-bit Task
+ * Priority Register alias).
+ *
+ * cr_qual is the decoded VMX exit qualification for the CR access.
+ * access_type 0 = MOV to CR8 (write), 1 = MOV from CR8 (read);
+ * values >= 2 (CLTS/LMSW, which only apply to CR0) are rejected.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+int v3_vmx_handle_cr8_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
+    if (cr_qual->access_type < 2) {
+
+        if (cr_qual->access_type == 0) {
+            /* Guest wrote CR8: dispatch to the generic CR8 write emulator. */
+            if (v3_handle_cr8_write(info) != 0) {
+                PrintError("Could not handle CR8 write\n");
+                return -1;
+            }
+        } else {
+            /* Guest read CR8: dispatch to the generic CR8 read emulator. */
+            if (v3_handle_cr8_read(info) != 0) {
+                PrintError("Could not handle CR8 read\n");
+                return -1;
+            }
+        }
+
+        return 0;
+    }
+
+    PrintError("Invalid CR8 Access type?? (type=%d)\n", cr_qual->access_type);
+    return -1;
+}
+
static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
if (info->shdw_pg_mode == SHADOW_PAGING) {
struct cr0_32 * new_shdw_cr0 = (struct cr0_32 *)new_cr0;
struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;
uint_t paging_transition = 0;
+ extern v3_cpu_arch_t v3_mach_type;
+
- /*
- PrintDebug("Old shadow CR0: 0x%x, New shadow CR0: 0x%x\n",
- (uint32_t)info->shdw_pg_state.guest_cr0, (uint32_t)*new_cr0);
- */
+ PrintDebug("Mov to CR0\n");
+ PrintDebug("Old shadow CR0: 0x%x, New shadow CR0: 0x%x\n",
+ (uint32_t)info->shdw_pg_state.guest_cr0, (uint32_t)*new_cr0);
- if (new_shdw_cr0->pe != shdw_cr0->pe) {
+ if ((new_shdw_cr0->pe != shdw_cr0->pe) && (vmx_info->assist_state != VMXASSIST_DISABLED)) {
/*
PrintDebug("Guest CR0: 0x%x\n", *(uint32_t *)guest_cr0);
PrintDebug("Old shadow CR0: 0x%x\n", *(uint32_t *)shdw_cr0);
return -1;
}
- if (vmx_info->assist_state == VMXASSIST_ENABLED) {
+ if (vmx_info->assist_state == VMXASSIST_ON) {
PrintDebug("Loading VMXASSIST at RIP: %p\n", (void *)(addr_t)info->rip);
} else {
PrintDebug("Leaving VMXASSIST and entering protected mode at RIP: %p\n",
if (new_shdw_cr0->pg != shdw_cr0->pg) {
paging_transition = 1;
}
-
- // The shadow always reflects the new value
- *shdw_cr0 = *new_shdw_cr0;
-
- // We don't care about most of the flags, so lets go for it
- // and set them to the guest values
- *guest_cr0 = *shdw_cr0;
+
// Except PG, PE, and NE, which are always set
- guest_cr0->pe = 1;
- guest_cr0->pg = 1;
+ if ((info->shdw_pg_mode == SHADOW_PAGING) ||
+ (v3_mach_type != V3_VMX_EPT_UG_CPU)) {
+
+ // The shadow always reflects the new value
+ *shdw_cr0 = *new_shdw_cr0;
+
+
+ // We don't care about most of the flags, so lets go for it
+ // and set them to the guest values
+ *guest_cr0 = *shdw_cr0;
+
+ guest_cr0->pe = 1;
+ guest_cr0->pg = 1;
+ } else {
+ // Unrestricted guest
+ // *(uint32_t *)shdw_cr0 = (0x00000020 & *(uint32_t *)new_shdw_cr0);
+
+ *guest_cr0 = *new_shdw_cr0;
+
+ guest_cr0->cd = 0;
+ }
+
guest_cr0->ne = 1;
+ guest_cr0->et = 1;
+
if (paging_transition) {
// Paging transition
if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
- struct efer_64 * guest_efer = (struct efer_64 *)&(info->ctrl_regs.efer);
+ struct efer_64 * vm_efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
+ struct efer_64 * hw_efer = (struct efer_64 *)&(info->ctrl_regs.efer);
- if (guest_efer->lme == 1) {
- // PrintDebug("Enabling long mode\n");
-
- guest_efer->lma = 1;
- guest_efer->lme = 1;
-
- vmx_info->entry_ctrls.guest_ia32e = 1;
- }
+ if (vmx_info->assist_state != VMXASSIST_DISABLED) {
+ if (vm_efer->lme) {
+ PrintDebug("Enabling long mode\n");
+
+ hw_efer->lma = 1;
+ hw_efer->lme = 1;
+
+ vmx_info->entry_ctrls.guest_ia32e = 1;
+ }
+ } else {
+ if (hw_efer->lme) {
+ PrintDebug("Enabling long mode\n");
+
+ hw_efer->lma = 1;
+
+ vmx_info->entry_ctrls.guest_ia32e = 1;
+ }
+ }
// PrintDebug("Activating Shadow Page tables\n");
- if (v3_activate_shadow_pt(info) == -1) {
- PrintError("Failed to activate shadow page tables\n");
- return -1;
+ if (info->shdw_pg_mode == SHADOW_PAGING) {
+ if (v3_activate_shadow_pt(info) == -1) {
+ PrintError("Failed to activate shadow page tables\n");
+ return -1;
+ }
}
- } else if (v3_activate_passthrough_pt(info) == -1) {
- PrintError("Failed to activate passthrough page tables\n");
- return -1;
+ } else {
+
+ if (info->shdw_pg_mode == SHADOW_PAGING) {
+ if (v3_activate_passthrough_pt(info) == -1) {
+ PrintError("Failed to activate passthrough page tables\n");
+ return -1;
+ }
+ } else {
+ // This is hideous... Let's hope that the 1to1 page table has not been nuked...
+ info->ctrl_regs.cr3 = VMXASSIST_1to1_PT;
+ }
}
}
}