// We don't need to virtualize CR4, all we need is to detect the activation of PAE
//
// Handle a guest read of CR4.
//
// No emulation is performed for CR4 reads; the handler only reports the
// (unhandled) access and returns success so the guest continues.
//
// @param info  per-core guest state (project type; not inspected here)
// @return 0 always — the read is deliberately left unhandled.
int v3_handle_cr4_read(struct guest_info * info) {
    // NOTE(review): the original text carried unresolved unified-diff markers
    // here ('-' removing the commented-out call, '+' enabling it). Resolved by
    // applying the '+' side of the hunk — confirm against upstream history.
    PrintError("CR4 Read not handled\n");
    // Do nothing...
    return 0;
}
// NOTE(review): this span is the TAIL of a CR4-write handler whose signature
// is not visible in this chunk, and the text still carries unified-diff
// markers ('+'/'-' line prefixes), so it is not compilable C as-is.
// All original lines are preserved byte-for-byte; only comments are added.
// Check to see if we need to flush the tlb
+
// Guest is running with paging enabled (VIRTUAL_MEM): look at the CR4 value
// being written (src operand of the decoded instruction) vs. the current CR4.
if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
struct cr4_32 * new_cr4 = (struct cr4_32 *)(dec_instr.src_operand.operand);
struct cr4_32 * cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
// NOTE(review): new_cr4/cr4 are declared but never used before this
// unconditional 'return -1;' — the PSE/PGE/PAE comparison logic that
// presumably sets flush_tlb appears to have been lost from this hunk.
// TODO confirm against the original source before applying.
return -1;
}
-
- if (flush_tlb) {
- PrintDebug("Handling PSE/PGE/PAE -> TLBFlush (doing flush now!)\n");
- if (v3_activate_shadow_pt(info) == -1) {
- PrintError("Failed to activate shadow page tables when emulating TLB flush in handling cr4 write\n");
- return -1;
// The '+' side of the hunk wraps the TLB-flush emulation in a shadow-paging
// guard: only rebuild shadow page tables when shdw_pg_mode == SHADOW_PAGING.
+ if (info->shdw_pg_mode == SHADOW_PAGING) {
+ if (flush_tlb) {
+ PrintDebug("Handling PSE/PGE/PAE -> TLBFlush (doing flush now!)\n");
+ if (v3_activate_shadow_pt(info) == -1) {
+ PrintError("Failed to activate shadow page tables when emulating TLB flush in handling cr4 write\n");
+ return -1;
+ }
}
}
-
// Advance the guest RIP past the emulated mov-to-CR4 instruction.
info->rip += dec_instr.instr_length;
return 0;
}
// NOTE(review): this span is the TAIL of an EFER-write handler whose
// signature is not visible in this chunk, and the text still carries
// unified-diff markers ('+'/'-' line prefixes), so it is not compilable C
// as-is. All original lines are preserved byte-for-byte; only comments added.
// Set EFER value seen by hardware while the guest is running
*(uint64_t *)hw_efer = src.value;
- // Catch unsupported features
- if ((old_hw_efer.lme == 1) && (hw_efer->lme == 0)) {
// The '+' side of the hunk gates the LME/LMA fixups on shadow paging;
// with nested paging (per the comment below) the hardware EFER needs no
// massaging — TODO confirm this matches the SVM/VMX nested-paging paths.
+ // We have gotten here either because we are using
+ // shadow paging, or we are using nested paging on SVM
+ // In the latter case, we don't need to do anything
+ // like the following
+ if (core->shdw_pg_mode == SHADOW_PAGING) {
+ // Catch unsupported features
+ if ((old_hw_efer.lme == 1) && (hw_efer->lme == 0)) {
PrintError("Disabling long mode once it has been enabled is not supported\n");
return -1;
- }
-
- // Set LME and LMA bits seen by hardware
- if (old_hw_efer.lme == 0) {
+ }
+
+ // Set LME and LMA bits seen by hardware
+ if (old_hw_efer.lme == 0) {
// Long mode was not previously enabled, so the lme bit cannot
// be set yet. It will be set later when the guest sets CR0.PG
// to enable paging.
hw_efer->lme = 0;
- } else {
+ } else {
// Long mode was previously enabled. Ensure LMA bit is set.
// VMX does not automatically set LMA, and this should not affect SVM.
hw_efer->lma = 1;
+ }
}
+
+
// Debug trace of the guest RIP and the resulting hardware/guest EFER values.
+ PrintDebug("RIP=%p\n", (void *)core->rip);
+ PrintDebug("New EFER value HW(hi=%p), VM(hi=%p)\n", (void *)*(uint64_t *)hw_efer, (void *)vm_efer->value);
+
return 0;
}