#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_direct_paging.h>
/* Compile out debug output unless the ctrl-regs debug option is enabled
 * in the build configuration (Kconfig-style CONFIG_ prefix). */
#ifndef CONFIG_DEBUG_CTRL_REGS
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
struct x86_instr dec_instr;
if (info->mem_mode == PHYSICAL_MEM) {
- ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
} else {
- ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
}
if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) {
// Was there a paging transition
// Meaning we need to change the page tables
if (paging_transition) {
- if (v3_get_mem_mode(info) == VIRTUAL_MEM) {
+ if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
- struct efer_64 * guest_efer = (struct efer_64 *)&(info->guest_efer);
+ struct efer_64 * guest_efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
struct efer_64 * shadow_efer = (struct efer_64 *)&(info->ctrl_regs.efer);
// Check long mode LME to set LME
PrintError("Failed to activate shadow page tables\n");
return -1;
}
- } else {
+ } else {
+
+ shadow_cr0->wp = 1;
if (v3_activate_passthrough_pt(info) == -1) {
PrintError("Failed to activate passthrough page tables\n");
struct x86_instr dec_instr;
if (info->mem_mode == PHYSICAL_MEM) {
- ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
} else {
- ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
}
if (dec_instr.op_type == V3_OP_MOVCR2) {
PrintDebug("MOVCR2 (mode=%s)\n", v3_cpu_mode_to_str(info->cpu_mode));
- if ((v3_get_cpu_mode(info) == LONG) ||
- (v3_get_cpu_mode(info) == LONG_32_COMPAT)) {
+ if ((v3_get_vm_cpu_mode(info) == LONG) ||
+ (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {
struct cr0_64 * dst_reg = (struct cr0_64 *)(dec_instr.dst_operand.operand);
if (info->shdw_pg_mode == SHADOW_PAGING) {
struct x86_instr dec_instr;
if (info->mem_mode == PHYSICAL_MEM) {
- ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
} else {
- ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
}
if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) {
struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3);
*guest_cr3 = *new_cr3;
}
-
+
+
// If Paging is enabled in the guest then we need to change the shadow page tables
if (info->mem_mode == VIRTUAL_MEM) {
if (v3_activate_shadow_pt(info) == -1) {
PrintError("Failed to activate 32 bit shadow page table\n");
return -1;
}
- }
+ }
PrintDebug("New Shadow CR3=%p; New Guest CR3=%p\n",
(void *)(addr_t)(info->ctrl_regs.cr3),
struct x86_instr dec_instr;
if (info->mem_mode == PHYSICAL_MEM) {
- ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
} else {
- ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
}
if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) {
if (info->shdw_pg_mode == SHADOW_PAGING) {
- if ((v3_get_cpu_mode(info) == LONG) ||
- (v3_get_cpu_mode(info) == LONG_32_COMPAT)) {
+ if ((v3_get_vm_cpu_mode(info) == LONG) ||
+ (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {
struct cr3_64 * dst_reg = (struct cr3_64 *)(dec_instr.dst_operand.operand);
struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->shdw_pg_state.guest_cr3);
*dst_reg = *guest_cr3;
} else if (info->shdw_pg_mode == NESTED_PAGING) {
// This is just a passthrough operation which we probably don't need here
- if ((v3_get_cpu_mode(info) == LONG) ||
- (v3_get_cpu_mode(info) == LONG_32_COMPAT)) {
+ if ((v3_get_vm_cpu_mode(info) == LONG) ||
+ (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {
struct cr3_64 * dst_reg = (struct cr3_64 *)(dec_instr.dst_operand.operand);
struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->ctrl_regs.cr3);
*dst_reg = *guest_cr3;
int ret;
int flush_tlb=0;
struct x86_instr dec_instr;
- v3_vm_cpu_mode_t cpu_mode = v3_get_cpu_mode(info);
+ v3_cpu_mode_t cpu_mode = v3_get_vm_cpu_mode(info);
if (info->mem_mode == PHYSICAL_MEM) {
- ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
} else {
- ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
}
if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) {
// Check to see if we need to flush the tlb
- if (v3_get_mem_mode(info) == VIRTUAL_MEM) {
+ if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
struct cr4_32 * new_cr4 = (struct cr4_32 *)(dec_instr.src_operand.operand);
struct cr4_32 * cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
(cr4->pge != new_cr4->pge) ||
(cr4->pae != new_cr4->pae)) {
PrintDebug("Handling PSE/PGE/PAE -> TLBFlush case, flag set\n");
- flush_tlb=1;
+ flush_tlb = 1;
}
}
PrintDebug("Old CR4=%x\n", *(uint_t *)cr4);
if ((info->shdw_pg_mode == SHADOW_PAGING)) {
- if (v3_get_mem_mode(info) == PHYSICAL_MEM) {
+ if (v3_get_vm_mem_mode(info) == PHYSICAL_MEM) {
if ((cr4->pae == 0) && (new_cr4->pae == 1)) {
PrintDebug("Creating PAE passthrough tables\n");
} else if ((cr4->pae == 1) && (new_cr4->pae == 0)) {
// Create passthrough standard 32bit pagetables
+ PrintError("Switching From PAE to Protected mode not supported\n");
return -1;
}
}
if (new_cr4->pae == 0) {
// cannot turn off PAE in long mode GPF the guest
- PrintError("Cannot disable PAE in long mode, sending GPF\n");
+ PrintError("Cannot disable PAE in long mode, should send GPF\n");
return -1;
}
}
-int v3_handle_efer_read(uint_t msr, struct v3_msr * dst, void * priv_data) {
- struct guest_info * info = (struct guest_info *)(priv_data);
- PrintDebug("EFER Read HI=%x LO=%x\n", info->guest_efer.hi, info->guest_efer.lo);
+int v3_handle_efer_read(struct guest_info * core, uint_t msr, struct v3_msr * dst, void * priv_data) {
+ PrintDebug("EFER Read HI=%x LO=%x\n", core->shdw_pg_state.guest_efer.hi, core->shdw_pg_state.guest_efer.lo);
- dst->value = info->guest_efer.value;
+ dst->value = core->shdw_pg_state.guest_efer.value;
- info->rip += 2; // WRMSR/RDMSR are two byte operands
return 0;
}
// TODO: this is a disaster we need to clean this up...
-int v3_handle_efer_write(uint_t msr, struct v3_msr src, void * priv_data) {
- struct guest_info * info = (struct guest_info *)(priv_data);
+int v3_handle_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data) {
//struct efer_64 * new_efer = (struct efer_64 *)&(src.value);
- struct efer_64 * shadow_efer = (struct efer_64 *)&(info->ctrl_regs.efer);
- struct v3_msr * guest_efer = &(info->guest_efer);
+ struct efer_64 * shadow_efer = (struct efer_64 *)&(core->ctrl_regs.efer);
+ struct v3_msr * guest_efer = &(core->shdw_pg_state.guest_efer);
PrintDebug("EFER Write\n");
PrintDebug("EFER Write Values: HI=%x LO=%x\n", src.hi, src.lo);
// Enable/Disable Syscall
shadow_efer->sce = src.value & 0x1;
- info->rip += 2; // WRMSR/RDMSR are two byte operands
-
return 0;
}