uint_t v3_get_addr_width(struct guest_info * info);
-v3_cpu_mode_t v3_get_cpu_mode(struct guest_info * info);
-v3_mem_mode_t v3_get_mem_mode(struct guest_info * info);
+v3_cpu_mode_t v3_get_vm_cpu_mode(struct guest_info * info);
+v3_mem_mode_t v3_get_vm_mem_mode(struct guest_info * info);
const uchar_t * v3_cpu_mode_to_str(v3_cpu_mode_t mode);
typedef enum v3_cpu_arch {V3_INVALID_CPU, V3_SVM_CPU, V3_SVM_REV3_CPU, V3_VMX_CPU} v3_cpu_arch_t;
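+// Unlike the v3_get_vm_* queries above, this reports the mode of the physical host CPU.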
+v3_cpu_mode_t v3_get_host_cpu_mode(void);
+
+
#endif //!__V3VEE__
info->ctrl_regs.efer = guest_state->efer;
get_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments));
- info->cpu_mode = v3_get_cpu_mode(info);
- info->mem_mode = v3_get_mem_mode(info);
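+ // Cache the guest's current modes so exit handlers can check info->cpu_mode / info->mem_mode directly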
+ info->cpu_mode = v3_get_vm_cpu_mode(info);
+ info->mem_mode = v3_get_vm_mem_mode(info);
exit_code = guest_ctrl->exit_code;
// Was there a paging transition?
// If so, we need to update the page tables
if (paging_transition) {
- if (v3_get_mem_mode(info) == VIRTUAL_MEM) {
+ if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
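+ // Paging just came on; the guest EFER and the shadow EFER (what the hardware actually sees) presumably need to be re-synced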
struct efer_64 * guest_efer = (struct efer_64 *)&(info->guest_efer);
struct efer_64 * shadow_efer = (struct efer_64 *)&(info->ctrl_regs.efer);
if (dec_instr.op_type == V3_OP_MOVCR2) {
PrintDebug("MOVCR2 (mode=%s)\n", v3_cpu_mode_to_str(info->cpu_mode));
- if ((v3_get_cpu_mode(info) == LONG) ||
- (v3_get_cpu_mode(info) == LONG_32_COMPAT)) {
+ if ((v3_get_vm_cpu_mode(info) == LONG) ||
+ (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {
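+ // In long or compat mode the control registers are 64 bits wide, hence the 64-bit view below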
struct cr0_64 * dst_reg = (struct cr0_64 *)(dec_instr.dst_operand.operand);
if (info->shdw_pg_mode == SHADOW_PAGING) {
if (info->shdw_pg_mode == SHADOW_PAGING) {
- if ((v3_get_cpu_mode(info) == LONG) ||
- (v3_get_cpu_mode(info) == LONG_32_COMPAT)) {
+ if ((v3_get_vm_cpu_mode(info) == LONG) ||
+ (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {
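+ // Under shadow paging the hardware CR3 points at the shadow tables, so return the guest's own CR3 instead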
struct cr3_64 * dst_reg = (struct cr3_64 *)(dec_instr.dst_operand.operand);
struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->shdw_pg_state.guest_cr3);
*dst_reg = *guest_cr3;
} else if (info->shdw_pg_mode == NESTED_PAGING) {
// This is just a passthrough operation, which we probably don't need here
- if ((v3_get_cpu_mode(info) == LONG) ||
- (v3_get_cpu_mode(info) == LONG_32_COMPAT)) {
+ if ((v3_get_vm_cpu_mode(info) == LONG) ||
+ (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {
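+ // With nested paging the guest owns CR3 directly, so just hand back the value from the control registers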
struct cr3_64 * dst_reg = (struct cr3_64 *)(dec_instr.dst_operand.operand);
struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->ctrl_regs.cr3);
*dst_reg = *guest_cr3;
int ret;
int flush_tlb = 0;
struct x86_instr dec_instr;
- v3_cpu_mode_t cpu_mode = v3_get_cpu_mode(info);
+ v3_cpu_mode_t cpu_mode = v3_get_vm_cpu_mode(info);
if (info->mem_mode == PHYSICAL_MEM) {
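+ // Fetch up to 15 bytes at RIP: 15 is the architectural maximum x86 instruction length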
ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
// Check to see if we need to flush the TLB
- if (v3_get_mem_mode(info) == VIRTUAL_MEM) {
+ if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
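+ // A CR4 write while the guest is paging (e.g. toggling PAE or PGE) can invalidate cached translations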
struct cr4_32 * new_cr4 = (struct cr4_32 *)(dec_instr.src_operand.operand);
struct cr4_32 * cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
PrintDebug("Old CR4=%x\n", *(uint_t *)cr4);
if (info->shdw_pg_mode == SHADOW_PAGING) {
- if (v3_get_mem_mode(info) == PHYSICAL_MEM) {
+ if (v3_get_vm_mem_mode(info) == PHYSICAL_MEM) {
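+ // Enabling PAE changes the page table entry format, so the passthrough tables must be rebuilt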
if ((cr4->pae == 0) && (new_cr4->pae == 1)) {
PrintDebug("Creating PAE passthrough tables\n");
}
int v3_reset_passthrough_pts(struct guest_info * info) {
- v3_cpu_mode_t mode = v3_get_cpu_mode(info);
+ v3_cpu_mode_t mode = v3_get_vm_cpu_mode(info);
// Delete the old direct map page tables
switch(mode) {
int v3_handle_passthrough_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
- v3_cpu_mode_t mode = v3_get_cpu_mode(info);
+ v3_cpu_mode_t mode = v3_get_vm_cpu_mode(info);
switch(mode) {
case REAL:
}
int v3_invalidate_passthrough_addr(struct guest_info * info, addr_t inv_addr) {
- v3_cpu_mode_t mode = v3_get_cpu_mode(info);
+ v3_cpu_mode_t mode = v3_get_vm_cpu_mode(info);
switch(mode) {
case REAL:
// Flush the virtual page tables
// There are 3 cases: shadow, shadow passthrough, and nested
if (info->shdw_pg_mode == SHADOW_PAGING) {
- v3_mem_mode_t mem_mode = v3_get_mem_mode(info);
+ v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);
if (mem_mode == PHYSICAL_MEM) {
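+ // The guest isn't paging, so the shadow tables are a direct map; invalidate those entries page by page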
addr_t cur_addr;
// Flush the virtual page tables
// There are 3 cases: shadow, shadow passthrough, and nested
if (info->shdw_pg_mode == SHADOW_PAGING) {
- v3_mem_mode_t mem_mode = v3_get_mem_mode(info);
+ v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);
if (mem_mode == PHYSICAL_MEM) {
addr_t cur_addr;
// Creates new shadow page tables
// and updates the shadow CR3 register to point to the new PTs
int v3_activate_shadow_pt(struct guest_info * info) {
- switch (v3_get_cpu_mode(info)) {
+ switch (v3_get_vm_cpu_mode(info)) {
case PROTECTED:
return activate_shadow_pt_32(info);
case LONG_16_COMPAT:
return activate_shadow_pt_64(info);
default:
- PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_cpu_mode(info)));
+ PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(info)));
return -1;
}
int v3_handle_shadow_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
- if (v3_get_mem_mode(info) == PHYSICAL_MEM) {
+ if (v3_get_vm_mem_mode(info) == PHYSICAL_MEM) {
// If paging is not turned on, we need to handle the special cases
return v3_handle_passthrough_pagefault(info, fault_addr, error_code);
- } else if (v3_get_mem_mode(info) == VIRTUAL_MEM) {
+ } else if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
- switch (v3_get_cpu_mode(info)) {
+ switch (v3_get_vm_cpu_mode(info)) {
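+ // Each guest CPU mode implies a different page table format, so dispatch to the matching handler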
case PROTECTED:
return handle_shadow_pagefault_32(info, fault_addr, error_code);
break;
return handle_shadow_pagefault_64(info, fault_addr, error_code);
break;
default:
- PrintError("Unhandled CPU Mode: %s\n", v3_cpu_mode_to_str(v3_get_cpu_mode(info)));
+ PrintError("Unhandled CPU Mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(info)));
return -1;
}
} else {
int ret = 0;
addr_t vaddr = 0;
- if (v3_get_mem_mode(info) != VIRTUAL_MEM) {
+ if (v3_get_vm_mem_mode(info) != VIRTUAL_MEM) {
// Paging must be turned on for INVLPG to make sense...
// this should probably be handled by injecting a fault into the guest
PrintError("ERROR: INVLPG called in non-paged mode\n");
return -1;
}
- if (v3_get_mem_mode(info) == PHYSICAL_MEM) {
+ if (v3_get_vm_mem_mode(info) == PHYSICAL_MEM) {
ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
} else {
ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
info->rip += dec_instr.instr_length;
- switch (v3_get_cpu_mode(info)) {
+ switch (v3_get_vm_cpu_mode(info)) {
case PROTECTED:
return handle_shadow_invlpg_32(info, vaddr);
case PROTECTED_PAE:
case LONG_16_COMPAT:
return handle_shadow_invlpg_64(info, vaddr);
default:
- PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_cpu_mode(info)));
+ PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(info)));
return -1;
}
}
// This is a horrendous hack...
// XED really screwed the pooch in calculating the displacement
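+ // Presumably XED sign-extends the displacement, so outside of long mode it has to be masked back to its encoded size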
- if (v3_get_cpu_mode(info) == LONG) {
+ if (v3_get_vm_cpu_mode(info) == LONG) {
displacement = mem_op.displacement;
} else {
displacement = MASK(mem_op.displacement, mem_op.displacement_size);