int v3_handle_cr4_write(struct guest_info * info) {
uchar_t instr[15];
int ret;
+ int flush_tlb=0;
struct x86_instr dec_instr;
v3_vm_cpu_mode_t cpu_mode = v3_get_cpu_mode(info);
return -1;
}
+ // Check to see if we need to flush the tlb
+
+ if (v3_get_mem_mode(info) == VIRTUAL_MEM) {
+ struct cr4_32 * new_cr4 = (struct cr4_32 *)(dec_instr.src_operand.operand);
+ struct cr4_32 * cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
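+ // dec_instr.src_operand.operand points at the value the guest is writing,
+ // so both the old and the new CR4 can be viewed through the 32-bit CR4 bitfield layout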
+
+ // If PSE, PGE, or PAE changes while paging (PG) is enabled in any mode,
+ // the hardware side effect is a TLB flush, which means we also need to
+ // toss the current shadow page tables.
+ //
+ // TODO - the PAE flag needs to be special-cased
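+ // (the PAE toggle changes the page-table format itself, from 2-level 32-bit
+ // to 3-level PAE, so just flushing/rebuilding same-format shadow tables is not enough)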
+ if ((cr4->pse != new_cr4->pse) ||
+ (cr4->pge != new_cr4->pge) ||
+ (cr4->pae != new_cr4->pae)) {
+ PrintDebug("Handling PSE/PGE/PAE -> TLBFlush case, flag set\n");
+ flush_tlb=1;
+
+ }
+ }
+
+
if ((cpu_mode == PROTECTED) || (cpu_mode == PROTECTED_PAE)) {
struct cr4_32 * new_cr4 = (struct cr4_32 *)(dec_instr.src_operand.operand);
struct cr4_32 * cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
PrintDebug("OperandVal = %x, length = %d\n", *(uint_t *)new_cr4, dec_instr.src_operand.size);
PrintDebug("Old CR4=%x\n", *(uint_t *)cr4);
- if ((info->shdw_pg_mode == SHADOW_PAGING) &&
- (v3_get_mem_mode(info) == PHYSICAL_MEM)) {
-
- if ((cr4->pae == 0) && (new_cr4->pae == 1)) {
- PrintDebug("Creating PAE passthrough tables\n");
-
- // Delete the old 32 bit direct map page tables
- delete_page_tables_32((pde32_t *)V3_VAddr((void *)(info->direct_map_pt)));
-
- // create 32 bit PAE direct map page table
- info->direct_map_pt = (addr_t)V3_PAddr(create_passthrough_pts_32PAE(info));
-
- // reset cr3 to new page tables
- info->ctrl_regs.cr3 = *(addr_t*)&(info->direct_map_pt);
-
- } else if ((cr4->pae == 1) && (new_cr4->pae == 0)) {
- // Create passthrough standard 32bit pagetables
- return -1;
+ if (info->shdw_pg_mode == SHADOW_PAGING) {
+ if (v3_get_mem_mode(info) == PHYSICAL_MEM) {
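+ // Note: the direct-map passthrough tables are only active while the guest
+ // is still running un-paged (physical mem mode), so they only need to be
+ // rebuilt for a PAE switch in that case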
+
+ if ((cr4->pae == 0) && (new_cr4->pae == 1)) {
+ PrintDebug("Creating PAE passthrough tables\n");
+
+ // Delete the old 32 bit direct map page tables
+ delete_page_tables_32((pde32_t *)V3_VAddr((void *)(info->direct_map_pt)));
+
+ // create 32 bit PAE direct map page table
+ info->direct_map_pt = (addr_t)V3_PAddr(create_passthrough_pts_32PAE(info));
+
+ // reset cr3 to new page tables
+ info->ctrl_regs.cr3 = *(addr_t*)&(info->direct_map_pt);
+
+ } else if ((cr4->pae == 1) && (new_cr4->pae == 0)) {
+ // Create passthrough standard 32bit pagetables
+ return -1;
+ }
}
}
-
+
*cr4 = *new_cr4;
PrintDebug("New CR4=%x\n", *(uint_t *)cr4);
-
+
} else if ((cpu_mode == LONG) || (cpu_mode == LONG_32_COMPAT)) {
struct cr4_64 * new_cr4 = (struct cr4_64 *)(dec_instr.src_operand.operand);
struct cr4_64 * cr4 = (struct cr4_64 *)&(info->ctrl_regs.cr4);
PrintError("CR4 write not supported in CPU_MODE: %s\n", v3_cpu_mode_to_str(cpu_mode));
return -1;
}
+
+
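+ // Emulate the guest-visible TLB flush by re-activating the shadow page
+ // tables, which throws away the now-stale shadow mappings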
+ if (flush_tlb) {
+ PrintDebug("Handling PSE/PGE/PAE -> TLBFlush (doing flush now!)\n");
+ if (v3_activate_shadow_pt(info) == -1) {
+ PrintError("Failed to activate shadow page tables while emulating the TLB flush for a CR4 write\n");
+ return -1;
+ }
+ }
+
info->rip += dec_instr.instr_length;
return 0;
}
+
+// TODO: this is a disaster; we need to clean this up...
int v3_handle_efer_write(uint_t msr, struct v3_msr src, void * priv_data) {
struct guest_info * info = (struct guest_info *)(priv_data);
//struct efer_64 * new_efer = (struct efer_64 *)&(src.value);
- // struct efer_64 * shadow_efer = (struct efer_64 *)&(info->ctrl_regs.efer);
+ struct efer_64 * shadow_efer = (struct efer_64 *)&(info->ctrl_regs.efer);
struct v3_msr * guest_efer = &(info->guest_efer);
PrintDebug("EFER Write\n");
// We virtualize the guest's EFER to hide the SVME and LMA bits
guest_efer->value = src.value;
-
+
+ // Enable/Disable SYSCALL/SYSRET (SCE is bit 0 of EFER)
+ shadow_efer->sce = src.value & 0x1;
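+ // Only SCE is propagated to the shadow EFER here; SVME and LMA stay under
+ // VMM control since the guest's view of EFER is virtualized (see above)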
+
+
// We have to handle long mode writes....
/*