-# Makefile for GeekOS kernel, userspace, and tools
+# Top-level Makefile for the V3VEE Project
#
# Northwestern University
# (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
# (c) 2008, Lei Xia <xiaxlei@gmail.com>
# (c) 2008, The V3VEE Project <http://www.v3vee.org>
#
-# Based on GeekOS Makefile:
-# Copyright (c) 2004,2005 David H. Hovemeyer <daveho@cs.umd.edu>
-# $Revision: 1.71 $
-
-
-# This is free software. You are permitted to use,
-# redistribute, and modify it as specified in the file "COPYING".
-
-# Required software to build GeekOS:
+# Required software to build V3VEE:
# - GNU Make (http://www.gnu.org/software/make)
-# - gcc 2.95.2 generating code for target (i386/ELF) and host platforms
# - nasm (http://nasm.sourceforge.net)
# - Perl5, AWK (any version), egrep
#
-# Cygwin (http://cygwin.com) may be used to build GeekOS.
-# Make sure that gcc, binutils, nasm, and perl are installed.
-# NOTES:
-# - This makefile has been written carefully to work correctly
-# with the -j (parallel make) option. I regularly use "make -j 2"
-# to speed the build process on 2 processor systems.
-# ----------------------------------------------------------------------
-# Configuration -
-# Various options specifying how GeekOS should be built,
-# what source files to build, which user programs to build,
-# etc. This is generally the only section of the makefile
-# that will need to be modified.
-# ----------------------------------------------------------------------
+
PROJECT_ROOT := ..
PALACIOS_BUILD_DIR := $(PROJECT_ROOT)/palacios/build
GEEKOS_BUILD_DIR := $(PROJECT_ROOT)/geekos/build
endif
+ifeq ($(PROFILE_VMM),1)
+ GEEKOS_FLAGS:= $(GEEKOS_FLAGS) PROFILE_VMM=1
+endif
+
+
# ----------------------------------------------------------------------
# Targets -
# Specifies files to be built
cp $(PALACIOS_BUILD_DIR)/libv3vee.a $(GEEKOS_BUILD_DIR)/palacios/
cp $(PALACIOS_BUILD_DIR)/../lib/xed/libxed.a $(GEEKOS_BUILD_DIR)/palacios/
cp $(PALACIOS_BUILD_DIR)/vm_kernel $(GEEKOS_BUILD_DIR)/palacios/
- (cd $(GEEKOS_BUILD_DIR) && make)
+ (cd $(GEEKOS_BUILD_DIR) && make $(GEEKOS_FLAGS))
geekos-full: palacios-full32
cp $(PALACIOS_BUILD_DIR)/libv3vee.a $(GEEKOS_BUILD_DIR)/palacios/
cp $(PALACIOS_BUILD_DIR)/../lib/xed/libxed.a $(GEEKOS_BUILD_DIR)/palacios/
cp $(PALACIOS_BUILD_DIR)/vm_kernel $(GEEKOS_BUILD_DIR)/palacios/
- (cd $(GEEKOS_BUILD_DIR) && make clean && make)
+ (cd $(GEEKOS_BUILD_DIR) && make clean && make $(GEEKOS_FLAGS))
world: geekos-full palacios-full64
-
#
#uIP, ON -- used, OFF -- not used
#
#endif
+ifeq ($(PROFILE_VMM),1)
+ EXTRA_C_OPTS:= -DPROFILE_VMM
+endif
# ----------------------------------------------------------------------
vm_config.rombios_size = rombios->length;
region_start += rombios->length;
-
+
+#ifdef PROFILE_VMM
+ vm_config.enable_profiling = 1;
+#else
+ vm_config.enable_profiling = 0;
+#endif
+
vm_config.vgabios = region_start;
vm_config.vgabios_size = vgabios->length;
}
#
ifeq ($(LEAN_AND_MEAN),1)
-DEBUG=0
-DEBUG_SECTIONS=
+DEBUG := 0
+DEBUG_SECTIONS :=
else
-DEBUG=1
-DEBUG_SECTIONS=
+DEBUG := 1
+DEBUG_SECTIONS :=
endif
palacios/vmm_msr.o \
palacios/svm_msr.o \
palacios/vmm_socket.o \
+ palacios/vmm_xed.o \
+ palacios/vmm_rbtree.o \
+ palacios/vmm_profiler.o \
# vmx.c vmcs_gen.c vmcs.c
xed/v3-xed-compat.o \
xed/v3-udiv-compat.o \
-$(XED_OBJS) :: EXTRA_CFLAGS =
-
+$(XED_OBJS) :: EXTRA_CFLAGS = \
+ $(JRLDEBUG) \
DEVICES_OBJS := \
devices/generic.o \
devices/cdrom.o \
devices/bochs_debug.o \
-$(DEVICES_OBJS) :: EXTRA_CFLAGS =
-
-#
-# DECODER is the decoder that will be used
-# currently we only support xed
-#
-DECODER=XED
-
-ifeq ($(DECODER),XED)
-VMM_OBJS += palacios/vmm_xed.o
-else
-# This is an error
-endif
+$(DEVICES_OBJS) :: EXTRA_CFLAGS = \
+ $(JRLDEBUG) \
$(EXTRA_C_OPTS) \
$(VMM_FLAGS) \
-I$(PROJECT_ROOT)/include \
- -fPIC \
-Werror \
+ -fPIC \
-Wp,-MD,$(@D)/.$(@F).d \
-Wp,-MT,$@ \
+#-fPIC \
#-fvisibility=hidden
int v3_handle_svm_exit(struct guest_info * info);
+const char * vmexit_code_to_str(uint_t exit_code);
+
#endif // ! __V3VEE__
#include <palacios/vmm_emulator.h>
#include <palacios/vmm_host_events.h>
#include <palacios/vmm_msr.h>
+#include <palacios/vmm_profiler.h>
uint_t long_mode : 1;
uint_t db : 1;
uint_t granularity : 1;
-};
+} __attribute__((packed));
struct v3_segments {
struct v3_segment gdtr;
struct v3_segment idtr;
struct v3_segment tr;
-};
+} ;
struct shadow_page_state;
-struct shadow_map;
-struct vmm_io_map;
struct emulation_state;
struct v3_intr_state;
-
+struct v3_profiler;
uint_t cpl;
addr_t mem_size; // Probably in bytes for now....
- struct shadow_map mem_map;
+ v3_shdw_map_t mem_map;
+
struct vm_time time_state;
// This structure is how we get interrupts for the guest
struct v3_intr_state intr_state;
- struct vmm_io_map io_map;
+ v3_io_map_t io_map;
struct v3_msr_map msr_map;
// device_map
struct v3_dbg_regs dbg_regs;
struct v3_segments segments;
- struct emulation_state emulator;
-
v3_vm_operating_mode_t run_state;
void * vmm_data;
+ uint_t enable_profiler;
+ struct v3_profiler profiler;
+
+ void * decoder_state;
+
struct v3_msr guest_efer;
/* TEMP */
//offset 0x0cb
uchar_t cpl; // if the guest is real-mode then the CPL is forced to 0
- // if the guest is virtual-mode then the CPL is forced to 3
+ // if the guest is virtual-mode then the CPL is forced to 3
uint_t rsvd2;
/* utility definitions */
-#ifdef VMM_DEBUG
+
#define PrintDebug(fmt, args...) \
do { \
extern struct v3_os_hooks * os_hooks; \
(os_hooks)->print_debug((fmt), ##args); \
} \
} while (0)
+
+#if 1
#else
#define PrintDebug(fmt,args ...)
#endif
// so we can specify maximum physical address size
// (We're screwed if we want to do 32 bit host/64 bit guest)
+
+ int enable_profiling;
+
int use_ramdisk;
void * ramdisk;
int ramdisk_size;
-
/*
* This file is part of the Palacios Virtual Machine Monitor developed
* by the V3VEE Project with funding from the United States National
#include <palacios/vmm.h>
+typedef enum { V3_INVALID_OP,
+ V3_OP_MOVCR2, V3_OP_MOV2CR, V3_OP_SMSW, V3_OP_LMSW, V3_OP_CLTS,
+ V3_OP_INVLPG,
+ V3_OP_ADC, V3_OP_ADD, V3_OP_AND, V3_OP_OR, V3_OP_XOR, V3_OP_SUB,
+ V3_OP_INC, V3_OP_DEC, V3_OP_NEG, V3_OP_MOV, V3_OP_NOT, V3_OP_XCHG,
+ V3_OP_SETB, V3_OP_SETBE, V3_OP_SETL, V3_OP_SETLE, V3_OP_SETNB,
+ V3_OP_SETNBE, V3_OP_SETNL, V3_OP_SETNLE, V3_OP_SETNO, V3_OP_SETNP,
+ V3_OP_SETNS, V3_OP_SETNZ, V3_OP_SETO, V3_OP_SETP, V3_OP_SETS,
+ V3_OP_SETZ, V3_OP_MOVS} v3_op_type_t;
+
+
typedef enum {INVALID_OPERAND, REG_OPERAND, MEM_OPERAND, IMM_OPERAND} v3_operand_type_t;
struct x86_operand {
uint_t fs_override : 1; // 0x64
uint_t gs_override : 1; // 0x65
uint_t br_not_taken : 1; // 0x2E
- uint_t br_takend : 1; // 0x3E
+ uint_t br_taken : 1; // 0x3E
uint_t op_size : 1; // 0x66
uint_t addr_size : 1; // 0x67
};
struct x86_instr {
struct x86_prefixes prefixes;
uint_t instr_length;
- addr_t opcode; // a pointer to the V3_OPCODE_[*] arrays defined below
+ v3_op_type_t op_type;
uint_t num_operands;
struct x86_operand dst_operand;
struct x86_operand src_operand;
struct x86_operand third_operand;
+ addr_t str_op_length;
+ addr_t is_str_op;
void * decoder_data;
};
/* Removes a rep prefix in place */
void v3_strip_rep_prefix(uchar_t * instr, int length);
-
+void v3_get_prefixes(uchar_t * instr, struct x86_prefixes * prefixes);
/*
base_addr = gprs->rax;
break;
case 1:
- base_addr = gprs->rcx;
+ base_addr = gprs->rcx;
break;
case 2:
base_addr = gprs->rdx;
#ifdef __V3VEE__
-#include <palacios/vmm_list.h>
#include <palacios/vmm_shadow_paging.h>
#include <palacios/vmm_paging.h>
-
-struct emulated_page {
- addr_t page_addr;
- addr_t va;
- pte32_t pte;
- struct list_head page_list;
-};
-
-struct saved_page {
- addr_t va;
- pte32_t pte;
- struct list_head page_list;
-};
-
-
-struct write_region {
- void * write_data;
-
- uint_t length;
- int (*write)(addr_t write_addr, void * src, uint_t length, void * priv_data);
- addr_t write_addr;
- void * private_data;
-
- struct list_head write_list;
-};
-
-
-struct emulation_state {
- uint_t num_emulated_pages;
- struct list_head emulated_pages;
-
- uint_t num_saved_pages;
- struct list_head saved_pages;
-
- uint_t num_write_regions;
- struct list_head write_regions;
-
- uint_t running : 1;
- uint_t instr_length;
-
- uint_t tf_enabled : 1;
-};
-
-
-int v3_init_emulator(struct guest_info * info);
-
-
-int v3_emulation_exit_handler(struct guest_info * info);
-
-int v3_emulate_memory_write(struct guest_info * info, addr_t fault_gva,
- int (*write)(addr_t write_addr, void * src, uint_t length, void * priv_data),
- addr_t write_addr, void * private_data);
-int v3_emulate_memory_read(struct guest_info * info, addr_t fault_gva,
- int (*read)(addr_t read_addr, void * dst, uint_t length, void * priv_data),
- addr_t read_addr, void * private_data);
+int v3_emulate_write_op(struct guest_info * info, addr_t fault_gva, addr_t write_gpa, addr_t * dst_addr);
#endif // !__V3VEE__
#define DEFINE_HASHTABLE_INSERT(fnname, keytype, valuetype) \
- int fnname (struct hashtable * htable, keytype key, valuetype value) { \
- return hashtable_insert(htable, (addr_t)key, (addr_t)value); \
+ static int fnname (struct hashtable * htable, keytype key, valuetype value) { \
+ return hashtable_insert(htable, (addr_t)key, (addr_t)value); \
}
#define DEFINE_HASHTABLE_SEARCH(fnname, keytype, valuetype) \
- valuetype * fnname (struct hashtable * htable, keytype key) { \
- return (valuetype *) (hashtable_search(htable, (addr_t)key)); \
+ static valuetype * fnname (struct hashtable * htable, keytype key) { \
+ return (valuetype *) (hashtable_search(htable, (addr_t)key)); \
}
#define DEFINE_HASHTABLE_REMOVE(fnname, keytype, valuetype, free_key) \
- valuetype * fnname (struct hashtable * htable, keytype key) { \
- return (valuetype *) (hashtable_remove(htable, (addr_t)key, free_key)); \
+ static valuetype * fnname (struct hashtable * htable, keytype key) { \
+ return (valuetype *) (hashtable_remove(htable, (addr_t)key, free_key)); \
}
uint_t hashtable_count(struct hashtable * htable);
+// Specialty functions for a counting hashtable
+int hashtable_inc(struct hashtable * htable, addr_t key, addr_t value);
+int hashtable_dec(struct hashtable * htable, addr_t key, addr_t value);
+
+
/* ************ */
/* ITERATOR API */
/* ************ */
--- /dev/null
+/*
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National
+ * Science Foundation and the Department of Energy.
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico. You can find out more at
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
+ * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
+ * All rights reserved.
+ *
+ * Author: Jack Lange <jarusl@cs.northwestern.edu>
+ *
+ * This is free software. You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
+
+#include <palacios/vmm_types.h>
+
+
+
+#define MAKE_1OP_8FLAGS_INST(iname) static inline void iname##8(addr_t * dst, addr_t * flags) { \
+ uchar_t tmp_dst = *dst; \
+ \
+ /* Some of the flags values are not copied out in a pushf, we save them here */ \
+ addr_t flags_rsvd = *flags & ~0xfffe7fff; \
+ \
+ asm volatile ( \
+ "pushf; " \
+ "push %2; " \
+ "popf; " \
+ #iname"b %0; " \
+ "pushf; " \
+ "pop %1; " \
+ "popf; " \
+ : "=q"(tmp_dst),"=q"(*flags) \
+ : "q"(*flags), "0"(tmp_dst) \
+ ); \
+ *dst = tmp_dst; \
+ *flags |= flags_rsvd; \
+ \
+ }
+
+#define MAKE_1OP_16FLAGS_INST(iname) static inline void iname##16(addr_t * dst, addr_t * flags) { \
+ ushort_t tmp_dst = *dst; \
+ \
+ /* Some of the flags values are not copied out in a pushf, we save them here */ \
+ addr_t flags_rsvd = *flags & ~0xfffe7fff; \
+ \
+ asm volatile ( \
+ "pushf; " \
+ "push %2; " \
+ "popf; " \
+ #iname"w %0; " \
+ "pushf; " \
+ "pop %1; " \
+ "popf; " \
+ : "=q"(tmp_dst),"=q"(*flags) \
+ : "q"(*flags), "0"(tmp_dst) \
+ ); \
+ *dst = tmp_dst; \
+ *flags |= flags_rsvd; \
+ \
+ }
+
+#define MAKE_1OP_32FLAGS_INST(iname) static inline void iname##32(addr_t * dst, addr_t * flags) { \
+ uint_t tmp_dst = *dst; \
+ \
+ /* Some of the flags values are not copied out in a pushf, we save them here */ \
+ addr_t flags_rsvd = *flags & ~0xfffe7fff; \
+ \
+ asm volatile ( \
+ "pushf; " \
+ "push %2; " \
+ "popf; " \
+ #iname"l %0; " \
+ "pushf; " \
+ "pop %1; " \
+ "popf; " \
+ : "=q"(tmp_dst),"=q"(*flags) \
+ : "q"(*flags), "0"(tmp_dst) \
+ ); \
+ *dst = tmp_dst; \
+ *flags |= flags_rsvd; \
+ \
+ }
+
+#define MAKE_1OP_64FLAGS_INST(iname) static inline void iname##64(addr_t * dst, addr_t * flags) { \
+ ullong_t tmp_dst = *dst; \
+ \
+ /* Some of the flags values are not copied out in a pushf, we save them here */ \
+ addr_t flags_rsvd = *flags & ~0xfffe7fff; \
+ \
+ asm volatile ( \
+ "pushfq; " \
+ "push %2; " \
+ "popfq; " \
+ #iname"q %0; " \
+ "pushfq; " \
+ "pop %1; " \
+ "popfq; " \
+ : "=q"(tmp_dst),"=q"(*flags) \
+ : "q"(*flags), "0"(tmp_dst) \
+ ); \
+ *dst = tmp_dst; \
+ *flags |= flags_rsvd; \
+ \
+ }
+
+
+
+#define MAKE_1OP_8_INST(iname) static inline void iname##8(addr_t * dst) { \
+ uchar_t tmp_dst = *dst; \
+ \
+ asm volatile ( \
+ #iname"b %0; " \
+ : "=q"(tmp_dst) \
+ : "0"(tmp_dst) \
+ ); \
+ *dst = tmp_dst; \
+ }
+
+#define MAKE_1OP_16_INST(iname) static inline void iname##16(addr_t * dst) { \
+ ushort_t tmp_dst = *dst; \
+ \
+ asm volatile ( \
+ #iname"w %0; " \
+ : "=q"(tmp_dst) \
+ : "0"(tmp_dst) \
+ ); \
+ *dst = tmp_dst; \
+ }
+
+#define MAKE_1OP_32_INST(iname) static inline void iname##32(addr_t * dst) { \
+ uint_t tmp_dst = *dst; \
+ \
+ asm volatile ( \
+ #iname"l %0; " \
+ : "=q"(tmp_dst) \
+ : "0"(tmp_dst) \
+ ); \
+ *dst = tmp_dst; \
+ }
+
+#define MAKE_1OP_64_INST(iname) static inline void iname##64(addr_t * dst) { \
+ ullong_t tmp_dst = *dst; \
+ \
+ asm volatile ( \
+ #iname"q %0; " \
+ : "=q"(tmp_dst) \
+ : "0"(tmp_dst) \
+ ); \
+ *dst = tmp_dst; \
+ }
+
+
+#define MAKE_2OP_64FLAGS_INST(iname) static inline void iname##64(addr_t * dst, addr_t * src, addr_t * flags) { \
+ uint64_t tmp_dst = *dst, tmp_src = *src; \
+ addr_t tmp_flags = *flags; \
+ \
+ /* Some of the flags values are not copied out in a pushf, we save them here */ \
+ addr_t flags_rsvd = *flags & ~0xfffe7fff; \
+ \
+ asm volatile ( \
+ "pushfq\r\n" \
+ "push %3\r\n" \
+ "popfq\r\n" \
+ #iname"q %2, %0\r\n" \
+ "pushfq\r\n" \
+ "pop %1\r\n" \
+ "popfq\r\n" \
+ : "=q"(tmp_dst),"=q"(tmp_flags) \
+ : "q"(tmp_src),"q"(tmp_flags), "0"(tmp_dst) \
+ ); \
+ \
+ *dst = tmp_dst; \
+ *flags = tmp_flags; \
+ *flags |= flags_rsvd; \
+ \
+ }
+
+
+
+
+#define MAKE_2OP_32FLAGS_INST(iname) static inline void iname##32(addr_t * dst, addr_t * src, addr_t * flags) { \
+ uint32_t tmp_dst = *dst, tmp_src = *src; \
+ \
+ /* Some of the flags values are not copied out in a pushf, we save them here */ \
+ addr_t flags_rsvd = *flags & ~0xfffe7fff; \
+ \
+ asm volatile ( \
+ "pushf; " \
+ "push %3; " \
+ "popf; " \
+ #iname"l %2, %0; " \
+ "pushf; " \
+ "pop %1; " \
+ "popf; " \
+ : "=q"(tmp_dst),"=q"(*flags) \
+ : "q"(tmp_src),"q"(*flags), "0"(tmp_dst) \
+ ); \
+ *dst = tmp_dst; \
+ *flags |= flags_rsvd; \
+ \
+ }
+
+
+#define MAKE_2OP_16FLAGS_INST(iname) static inline void iname##16(addr_t * dst, addr_t * src, addr_t * flags) { \
+ ushort_t tmp_dst = *dst, tmp_src = *src; \
+ \
+ /* Some of the flags values are not copied out in a pushf, we save them here */ \
+ addr_t flags_rsvd = *flags & ~0xfffe7fff; \
+ \
+ asm volatile ( \
+ "pushf; " \
+ "push %3; " \
+ "popf; " \
+ #iname"w %2, %0; " \
+ "pushf; " \
+ "pop %1; " \
+ "popf; " \
+ : "=q"(tmp_dst),"=q"(*flags) \
+ : "q"(tmp_src),"q"(*flags), "0"(tmp_dst) \
+ ); \
+ *dst = tmp_dst; \
+ *flags |= flags_rsvd; \
+ \
+ }
+
+#define MAKE_2OP_8FLAGS_INST(iname) static inline void iname##8(addr_t * dst, addr_t * src, addr_t * flags) { \
+ uchar_t tmp_dst = *dst, tmp_src = *src; \
+ \
+ /* Some of the flags values are not copied out in a pushf, we save them here */ \
+ addr_t flags_rsvd = *flags & ~0xfffe7fff; \
+ \
+ asm volatile ( \
+ "pushf; " \
+ "push %3; " \
+ "popf; " \
+ #iname"b %2, %0; " \
+ "pushf; " \
+ "pop %1; " \
+ "popf; " \
+ : "=q"(tmp_dst),"=q"(*flags) \
+ : "q"(tmp_src),"q"(*flags), "0"(tmp_dst) \
+ ); \
+ *dst = tmp_dst; \
+ *flags |= flags_rsvd; \
+ \
+ }
+
+
+
+
+#define MAKE_2OP_32STR_INST(iname) static inline void iname##32(addr_t * dst, \
+ addr_t * src, \
+ addr_t * ecx, addr_t * flags) { \
+ /* Some of the flags values are not copied out in a pushf, we save them here */ \
+ addr_t flags_rsvd = *flags & ~0xfffe7fff; \
+ \
+ asm volatile ( \
+ "pushf; " \
+ "push %4; " \
+ "popf; " \
+ "rep; " \
+ #iname"l; " \
+ "pushf; " \
+ "pop %0; " \
+ "popf; " \
+ : "=q"(*flags) \
+ : "D"(*dst),"S"(*src),"c"(*ecx),"q"(*flags) \
+ ); \
+ \
+ /* : "=D"(*dst),"=S"(*src),"=c"(*ecx),"=q"(*flags)*/ \
+ *flags |= flags_rsvd; \
+ }
+
+#define MAKE_2OP_16STR_INST(iname) static inline void iname##16(addr_t * dst, \
+ addr_t * src, \
+ addr_t * ecx, addr_t * flags) { \
+ /* Some of the flags values are not copied out in a pushf, we save them here */ \
+ addr_t flags_rsvd = *flags & ~0xfffe7fff; \
+ \
+ asm volatile ( \
+ "pushf; " \
+ "push %4; " \
+ "popf; " \
+ "rep; " \
+ #iname"w; " \
+ "pushf; " \
+ "pop %0; " \
+ "popf; " \
+ : "=q"(*flags) \
+ : "D"(*dst),"S"(*src),"c"(*ecx),"q"(*flags) \
+ ); \
+ *flags |= flags_rsvd; \
+ }
+
+
+
+#define MAKE_2OP_8STR_INST(iname) static inline void iname##8(addr_t * dst, \
+ addr_t * src, \
+ addr_t * ecx, addr_t * flags) { \
+ /* Some of the flags values are not copied out in a pushf, we save them here */ \
+ addr_t flags_rsvd = *flags & ~0xfffe7fff; \
+ \
+ asm volatile ( \
+ "pushf; " \
+ "push %4; " \
+ "popf; " \
+ "rep; " \
+ #iname"b; " \
+ "pushf; " \
+ "pop %0; " \
+ "popf; " \
+ : "=q"(*flags) \
+ : "D"(*dst),"S"(*src),"c"(*ecx),"q"(*flags) \
+ ); \
+ *flags |= flags_rsvd; \
+ }
+
+
+
+
+#define MAKE_2OP_32_INST(iname) static inline void iname##32(addr_t * dst, addr_t * src) { \
+ uint32_t tmp_dst = *dst, tmp_src = *src; \
+ \
+ asm volatile ( \
+ #iname"l %1, %0; " \
+ : "=q"(tmp_dst) \
+ : "q"(tmp_src), "0"(tmp_dst) \
+ ); \
+ *dst = tmp_dst; \
+ }
+
+#define MAKE_2OP_16_INST(iname) static inline void iname##16(addr_t * dst, addr_t * src) { \
+ ushort_t tmp_dst = *dst, tmp_src = *src; \
+ \
+ asm volatile ( \
+ #iname"w %1, %0; " \
+ : "=q"(tmp_dst) \
+ : "q"(tmp_src), "0"(tmp_dst) \
+ ); \
+ *dst = tmp_dst; \
+ }
+
+#define MAKE_2OP_8_INST(iname) static inline void iname##8(addr_t * dst, addr_t * src) { \
+ uchar_t tmp_dst = *dst, tmp_src = *src; \
+ \
+ asm volatile ( \
+ #iname"b %1, %0; " \
+ : "=q"(tmp_dst) \
+ : "q"(tmp_src), "0"(tmp_dst) \
+ ); \
+ *dst = tmp_dst; \
+ }
+
+
+
+
+
+
+
+MAKE_2OP_8FLAGS_INST(adc);
+MAKE_2OP_8FLAGS_INST(add);
+MAKE_2OP_8FLAGS_INST(and);
+MAKE_2OP_8FLAGS_INST(or);
+MAKE_2OP_8FLAGS_INST(xor);
+MAKE_2OP_8FLAGS_INST(sub);
+
+
+MAKE_1OP_8FLAGS_INST(inc);
+MAKE_1OP_8FLAGS_INST(dec);
+MAKE_1OP_8FLAGS_INST(neg);
+MAKE_1OP_8FLAGS_INST(setb);
+MAKE_1OP_8FLAGS_INST(setbe);
+MAKE_1OP_8FLAGS_INST(setl);
+MAKE_1OP_8FLAGS_INST(setle);
+MAKE_1OP_8FLAGS_INST(setnb);
+MAKE_1OP_8FLAGS_INST(setnbe);
+MAKE_1OP_8FLAGS_INST(setnl);
+MAKE_1OP_8FLAGS_INST(setnle);
+MAKE_1OP_8FLAGS_INST(setno);
+MAKE_1OP_8FLAGS_INST(setnp);
+MAKE_1OP_8FLAGS_INST(setns);
+MAKE_1OP_8FLAGS_INST(setnz);
+MAKE_1OP_8FLAGS_INST(seto);
+MAKE_1OP_8FLAGS_INST(setp);
+MAKE_1OP_8FLAGS_INST(sets);
+MAKE_1OP_8FLAGS_INST(setz);
+
+
+MAKE_1OP_8_INST(not);
+
+MAKE_2OP_8_INST(mov);
+MAKE_2OP_8_INST(xchg);
+
+
+
+MAKE_2OP_16FLAGS_INST(adc);
+MAKE_2OP_16FLAGS_INST(add);
+MAKE_2OP_16FLAGS_INST(and);
+MAKE_2OP_16FLAGS_INST(or);
+MAKE_2OP_16FLAGS_INST(xor);
+MAKE_2OP_16FLAGS_INST(sub);
+
+
+MAKE_1OP_16FLAGS_INST(inc);
+MAKE_1OP_16FLAGS_INST(dec);
+MAKE_1OP_16FLAGS_INST(neg);
+
+MAKE_1OP_16_INST(not);
+
+MAKE_2OP_16_INST(mov);
+MAKE_2OP_16_INST(xchg);
+
+
+
+
+
+MAKE_2OP_32FLAGS_INST(adc);
+MAKE_2OP_32FLAGS_INST(add);
+MAKE_2OP_32FLAGS_INST(and);
+MAKE_2OP_32FLAGS_INST(or);
+MAKE_2OP_32FLAGS_INST(xor);
+MAKE_2OP_32FLAGS_INST(sub);
+
+
+MAKE_1OP_32FLAGS_INST(inc);
+MAKE_1OP_32FLAGS_INST(dec);
+MAKE_1OP_32FLAGS_INST(neg);
+
+MAKE_1OP_32_INST(not);
+
+MAKE_2OP_32_INST(mov);
+MAKE_2OP_32_INST(xchg);
+
+MAKE_2OP_8STR_INST(movs);
+MAKE_2OP_16STR_INST(movs);
+MAKE_2OP_32STR_INST(movs);
#include <palacios/vmm_types.h>
#include <palacios/vmm_util.h>
+#include <palacios/vmm_rbtree.h>
+
+typedef struct rb_root v3_io_map_t;
struct guest_info;
-int v3_unhook_io_port(struct guest_info * info, uint_t port);
+void v3_init_io_map(struct guest_info * info);
+
+
/* External API */
int (*write)(ushort_t port, void * src, uint_t length, void * priv_data),
void * priv_data);
+int v3_unhook_io_port(struct guest_info * info, uint_t port);
-
-struct vmm_io_hook;
-
-struct vmm_io_map {
- uint_t num_ports;
- struct vmm_io_hook * head;
-
-};
-
-
-void v3_init_vmm_io_map(struct guest_info * info);
-
-// FOREACH_IO_HOOK(vmm_io_map_t * io_map, vmm_io_hook_t * io_hook)
-#define FOREACH_IO_HOOK(io_map, io_hook) for (io_hook = (io_map).head; io_hook != NULL; io_hook = (io_hook)->next)
-
-
-struct vmm_io_hook {
+struct v3_io_hook {
ushort_t port;
// Reads data into the IO port (IN, INS)
int (*write)(ushort_t port, void * src, uint_t length, void * priv_data);
void * priv_data;
-
- struct vmm_io_hook * next;
- struct vmm_io_hook * prev;
+
+ struct rb_node tree_node;
};
-struct vmm_io_hook * v3_get_io_hook(struct vmm_io_map * io_map, uint_t port);
+struct v3_io_hook * v3_get_io_hook(struct guest_info * info, uint_t port);
-void v3_print_io_map(struct vmm_io_map * io_map);
+void v3_print_io_map(struct guest_info * info);
container_of(ptr, type, member)
/**
+ * list_tail_entry - get the struct for the tail entry
+ * @head: the list_head head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_tail_entry(head, type, member) ({ \
+ type * tail = NULL; \
+ if ((head)->prev != (head)) { \
+ tail = list_entry((head)->prev, type, member); \
+ } \
+ tail; \
+})
+
+/**
* list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop counter.
* @head: the head for your list.
#include <palacios/vmm_types.h>
#include <palacios/vmm_paging.h>
+#include <palacios/vmm_rbtree.h>
-struct guest_info;
-
-
-/*
- Guest Shadow Host
- Virtual Physical Virtual Physical Virtual Physical
- OK OK
- OK NOK
- NOK OK
- NOK NOK
+struct guest_info;
-*/
-// These are the types of physical memory address regions
-// from the perspective of the guest
-typedef enum guest_region_type {
- GUEST_REGION_NOTHING,
- GUEST_REGION_PHYSICAL_MEMORY,
- GUEST_REGION_MEMORY_MAPPED_DEVICE} guest_region_type_t;
// These are the types of physical memory address regions
// from the perspective of the HOST
-typedef enum host_region_type {
- HOST_REGION_INVALID, // This region is INVALID (this is a return type, to denote errors)
- HOST_REGION_HOOK, // This region is mapped as not present (always generate page faults)
- HOST_REGION_PHYSICAL_MEMORY, // Region is a section of host memory
- HOST_REGION_MEMORY_MAPPED_DEVICE, // Region is allocated for DMA
- HOST_REGION_UNALLOCATED, // Region is mapped on demand
- HOST_REGION_REMOTE, // Region is located on a remote machine
- HOST_REGION_SWAPPED, // Region is swapped
-} host_region_type_t;
+typedef enum shdw_region_type {
+ SHDW_REGION_INVALID, // This region is INVALID (this is a return type to denote errors)
+ SHDW_REGION_WRITE_HOOK, // This region is mapped as read-only (page faults on write)
+ SHDW_REGION_FULL_HOOK, // This region is mapped as not present (always generate page faults)
+ SHDW_REGION_ALLOCATED, // Region is a section of host memory
+} v3_shdw_region_type_t;
+typedef struct rb_root v3_shdw_map_t;
-#define shadow_mem_type_t host_region_type_t
-struct shadow_region {
- guest_region_type_t guest_type;
+struct v3_shadow_region {
addr_t guest_start;
addr_t guest_end;
- host_region_type_t host_type;
- addr_t host_addr; // This either points to a host address mapping,
- // or a structure holding the map info
-
- struct shadow_region *next, *prev;
-};
+ v3_shdw_region_type_t host_type;
+
+ addr_t host_addr; // Points to the host address backing this region
+ // Called when data is read from a memory page
+ int (*read_hook)(addr_t guest_addr, void * dst, uint_t length, void * priv_data);
+ // Called when data is written to a memory page
+ int (*write_hook)(addr_t guest_addr, void * src, uint_t length, void * priv_data);
-struct shadow_map {
- uint_t num_regions;
+ void * priv_data;
- struct shadow_region * head;
+ struct rb_node tree_node;
};
-void init_shadow_region(struct shadow_region * entry,
- addr_t guest_addr_start,
- addr_t guest_addr_end,
- guest_region_type_t guest_region_type,
- host_region_type_t host_region_type);
-/*
-void init_shadow_region_physical(struct shadow_region * entry,
- addr_t guest_addr_start,
- addr_t guest_addr_end,
- guest_region_type_t guest_region_type,
- addr_t host_addr_start,
- host_region_type_t host_region_type);
-*/
+void v3_init_shadow_map(struct guest_info * info);
+void v3_delete_shadow_map(struct guest_info * info);
-int add_shadow_region_passthrough(struct guest_info * guest_info,
- addr_t guest_addr_start,
- addr_t guest_addr_end,
- addr_t host_addr);
-void init_shadow_map(struct guest_info * info);
-void free_shadow_map(struct shadow_map * map);
+int v3_add_shadow_mem(struct guest_info * guest_info,
+ addr_t guest_addr_start,
+ addr_t guest_addr_end,
+ addr_t host_addr);
-struct shadow_region * get_shadow_region_by_addr(struct shadow_map * map, addr_t guest_addr);
+int v3_hook_full_mem(struct guest_info * info, addr_t guest_addr_start, addr_t guest_addr_end,
+ int (*read)(addr_t guest_addr, void * dst, uint_t length, void * priv_data),
+ int (*write)(addr_t guest_addr, void * src, uint_t length, void * priv_data),
+ void * priv_data);
-struct shadow_region * get_shadow_region_by_index(struct shadow_map * map, uint_t index);
+int v3_hook_write_mem(struct guest_info * info, addr_t guest_addr_start, addr_t guest_addr_end,
+ addr_t host_addr,
+ int (*write)(addr_t guest_addr, void * src, uint_t length, void * priv_data),
+ void * priv_data);
-host_region_type_t lookup_shadow_map_addr(struct shadow_map * map, addr_t guest_addr, addr_t * host_addr);
-host_region_type_t get_shadow_addr_type(struct guest_info * info, addr_t guest_addr);
-addr_t get_shadow_addr(struct guest_info * info, addr_t guest_addr);
-// Semantics:
-// Adding a region that overlaps with an existing region results is undefined
-// and will probably fail
-int add_shadow_region(struct shadow_map * map, struct shadow_region * entry);
-// Semantics:
-// Deletions result in splitting
-int delete_shadow_region(struct shadow_map * map,
- addr_t guest_start,
- addr_t guest_end);
-void print_shadow_map(struct shadow_map * map);
+void v3_delete_shadow_region(struct guest_info * info, struct v3_shadow_region * reg);
+struct v3_shadow_region * v3_get_shadow_region(struct guest_info * info, addr_t guest_addr);
+addr_t v3_get_shadow_addr(struct v3_shadow_region * reg, addr_t guest_addr);
-struct vmm_mem_hook {
- // Called when data is read from a memory page
- int (*read)(addr_t guest_addr, void * dst, uint_t length, void * priv_data);
-
- // Called when data is written to a memory page
- int (*write)(addr_t guest_addr, void * src, uint_t length, void * priv_data);
- void * priv_data;
- struct shadow_region * region;
-};
-struct vmm_mem_hook * get_mem_hook(struct guest_info * info, addr_t guest_addr);
+void print_shadow_map(struct guest_info * info);
+
+
+
-int hook_guest_mem(struct guest_info * info, addr_t guest_addr_start, addr_t guest_addr_end,
- int (*read)(addr_t guest_addr, void * dst, uint_t length, void * priv_data),
- int (*write)(addr_t guest_addr, void * src, uint_t length, void * priv_data),
- void * priv_data);
-int unhook_guest_mem(struct guest_info * info, addr_t guest_addr);
+const uchar_t * v3_shdw_region_type_to_str(v3_shdw_region_type_t type);
int handle_special_page_fault(struct guest_info * info, addr_t fault_addr, addr_t gp_addr, pf_error_t access_info);
+int v3_handle_mem_wr_hook(struct guest_info * info, addr_t guest_va, addr_t guest_pa,
+ struct v3_shadow_region * reg, pf_error_t access_info);
+int v3_handle_mem_full_hook(struct guest_info * info, addr_t guest_va, addr_t guest_pa,
+ struct v3_shadow_region * reg, pf_error_t access_info);
#endif // ! __V3VEE__
/* Gets the base address needed for a Page Table entry */
-/* Deprecate these :*/
-/*
- #define PD32_BASE_ADDR(x) (((uint_t)x) >> 12)
- #define PT32_BASE_ADDR(x) (((uint_t)x) >> 12)
- #define PD32_4MB_BASE_ADDR(x) (((uint_t)x) >> 22)
-
- #define PML4E64_BASE_ADDR(x) (((ullong_t)x) >> 12)
- #define PDPE64_BASE_ADDR(x) (((ullong_t)x) >> 12)
- #define PDE64_BASE_ADDR(x) (((ullong_t)x) >> 12)
- #define PTE64_BASE_ADDR(x) (((ullong_t)x) >> 12)
-
- // Accessor functions for the page table structures
- #define PDE32_T_ADDR(x) (((x).pt_base_addr) << 12)
- #define PTE32_T_ADDR(x) (((x).page_base_addr) << 12)
- #define PDE32_4MB_T_ADDR(x) (((x).page_base_addr) << 22)
-*/
-/* Replace The above with these... */
+
#define PAGE_BASE_ADDR(x) ((x) >> 12)
#define PAGE_BASE_ADDR_4KB(x) ((x) >> 12)
#define PAGE_BASE_ADDR_2MB(x) ((x) >> 21)
#define BASE_TO_PAGE_ADDR_1GB(x) (((addr_t)x) << 30)
/* *** */
-/* Deprecated */
-/*
- #define PT32_PAGE_OFFSET(x) (((uint_t)x) & 0xfff)
- #define PD32_4MB_PAGE_OFFSET(x) (((uint_t)x) & 0x003fffff)
-
- #define PT32_PAGE_ADDR(x) (((uint_t)x) & 0xfffff000)
- #define PD32_4MB_PAGE_ADDR(x) (((uint_t)x) & 0xffc00000)
-
- #define PT32_PAGE_POWER 12
- #define PAGE_ALIGNED_ADDR(x) (((uint_t) (x)) >> 12)
- //#define PAGE_ADDR(x) (PAGE_ALIGNED_ADDR(x) << 12)
- #define PAGE_POWER 12
- #define PAGE_SIZE 4096
-*/
-/* use these instead */
+
#define PAGE_OFFSET(x) ((x) & 0xfff)
#define PAGE_OFFSET_4KB(x) ((x) & 0xfff)
#define PAGE_OFFSET_2MB(x) ((x) & 0x1fffff)
--- /dev/null
+/*
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National
+ * Science Foundation and the Department of Energy.
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico. You can find out more at
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
+ * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
+ * All rights reserved.
+ *
+ * Author: Jack Lange <jarusl@cs.northwestern.edu>
+ *
+ * This is free software. You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
+
+#ifndef __VMM_PROFILER_H__
+#define __VMM_PROFILER_H__
+
+#ifdef __V3VEE__
+
+#include <palacios/vmm_rbtree.h>
+
+struct guest_info;
+
+
+/* Per-guest VM-exit profiling state.
+ * The SVM run/exit path samples start_time/end_time with rdtscll()
+ * around each exit handler and then calls v3_profile_exit().
+ * Per-exit records live in the rb-tree rooted at 'root'
+ * (presumably keyed by exit code -- confirm in vmm_profiler.c,
+ * which is not visible here). */
+struct v3_profiler {
+    uint_t total_exits;    // running count of recorded VM exits
+
+    ullong_t start_time;   // TSC sampled at exit-handler entry
+    ullong_t end_time;     // TSC sampled at exit-handler completion
+
+    uint_t guest_pf_cnt;   // guest page-fault tally (per field name; updater not visible in this patch)
+
+    struct rb_root root;   // tree of per-exit profile records
+};
+
+
+/* Initialize info->profiler; invoked at guest setup when profiling is
+ * enabled in the VM configuration (see config_ptr->enable_profiling). */
+void v3_init_profiler(struct guest_info * info);
+
+/* Record one VM exit: called with the SVM exit code after the handler
+ * runs, once end_time has been sampled. */
+void v3_profile_exit(struct guest_info * info, uint_t exit_code);
+
+/* Dump the accumulated profile (called periodically from the SVM run loop). */
+void v3_print_profile(struct guest_info * info);
+
+
+#endif // __V3VEE__
+
+#endif // __VMM_PROFILER_H__
--- /dev/null
+/*
+ Red Black Trees
+ (C) 1999 Andrea Arcangeli <andrea@suse.de>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ linux/include/linux/rbtree.h
+
+ To use rbtrees you'll have to implement your own insert and search cores.
+  This avoids the use of callbacks, which would drastically hurt
+  performance.  It is not the cleanest way, but it is how to get both
+  performance and genericity in C (as opposed to C++)...
+
+ Some example of insert and search follows here. The search is a plain
+ normal search over an ordered tree. The insert instead must be implemented
+  in two steps: first, the code must insert the element in
+ order as a red leaf in the tree, then the support library function
+ rb_insert_color() must be called. Such function will do the
+ not trivial work to rebalance the rbtree if necessary.
+
+-----------------------------------------------------------------------
+static inline struct page * rb_search_page_cache(struct inode * inode,
+ unsigned long offset)
+{
+ struct rb_node * n = inode->i_rb_page_cache.rb_node;
+ struct page * page;
+
+ while (n)
+ {
+ page = rb_entry(n, struct page, rb_page_cache);
+
+ if (offset < page->offset)
+ n = n->rb_left;
+ else if (offset > page->offset)
+ n = n->rb_right;
+ else
+ return page;
+ }
+ return NULL;
+}
+
+static inline struct page * __rb_insert_page_cache(struct inode * inode,
+ unsigned long offset,
+ struct rb_node * node)
+{
+ struct rb_node ** p = &inode->i_rb_page_cache.rb_node;
+ struct rb_node * parent = NULL;
+ struct page * page;
+
+ while (*p)
+ {
+ parent = *p;
+ page = rb_entry(parent, struct page, rb_page_cache);
+
+ if (offset < page->offset)
+ p = &(*p)->rb_left;
+ else if (offset > page->offset)
+ p = &(*p)->rb_right;
+ else
+ return page;
+ }
+
+ rb_link_node(node, parent, p);
+
+ return NULL;
+}
+
+static inline struct page * rb_insert_page_cache(struct inode * inode,
+ unsigned long offset,
+ struct rb_node * node)
+{
+ struct page * ret;
+ if ((ret = __rb_insert_page_cache(inode, offset, node)))
+ goto out;
+ rb_insert_color(node, &inode->i_rb_page_cache);
+ out:
+ return ret;
+}
+-----------------------------------------------------------------------
+*/
+
+#ifndef _VMM_RBTREE_H
+#define _VMM_RBTREE_H
+
+#ifdef __V3VEE__
+
+#include <palacios/vmm_types.h>
+
+
+/* Standard offsetof/container_of helpers, as in the Linux kernel. */
+#undef offsetof
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+
+#define container_of(ptr, type, member) ({ \
+ const typeof( ((type *)0)->member ) *__mptr = (ptr); \
+ (type *)( (char *)__mptr - offsetof(type,member) );})
+
+
+
+/* Node header embedded inside client structures.  The parent pointer
+ * and node color share a single word: the low bit of rb_parent_color
+ * holds the color (RB_RED/RB_BLACK) and the remaining bits hold the
+ * parent pointer (see rb_parent()'s '& ~3' mask), which is why nodes
+ * must be aligned to sizeof(long). */
+struct rb_node
+{
+ unsigned long rb_parent_color;
+#define RB_RED 0
+#define RB_BLACK 1
+ struct rb_node *rb_right;
+ struct rb_node *rb_left;
+} __attribute__((aligned(sizeof(long))));
+ /* The alignment might seem pointless, but allegedly CRIS needs it */
+
+struct rb_root
+{
+ struct rb_node *rb_node;
+};
+
+
+/* Accessors for the packed parent-pointer / color word. */
+#define rb_parent(r) ((struct rb_node *)((r)->rb_parent_color & ~3))
+#define rb_color(r) ((r)->rb_parent_color & 1)
+#define rb_is_red(r) (!rb_color(r))
+#define rb_is_black(r) rb_color(r)
+#define rb_set_red(r) do { (r)->rb_parent_color &= ~1; } while (0)
+#define rb_set_black(r) do { (r)->rb_parent_color |= 1; } while (0)
+
+/* Store a new parent pointer, preserving the color bits. */
+static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
+{
+ rb->rb_parent_color = (rb->rb_parent_color & 3) | (unsigned long)p;
+}
+/* Store a new color, preserving the parent pointer. */
+static inline void rb_set_color(struct rb_node *rb, int color)
+{
+ rb->rb_parent_color = (rb->rb_parent_color & ~1) | color;
+}
+
+#define RB_ROOT (struct rb_root) { NULL, }
+#define rb_entry(ptr, type, member) container_of(ptr, type, member)
+
+#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
+/* NOTE(review): Linux's RB_EMPTY_NODE tests rb_parent(node) == node;
+ * here the sense is inverted ('!='), so this actually answers
+ * "is the node linked?".  Verify call sites expect this polarity
+ * before changing it. */
+#define RB_EMPTY_NODE(node) (rb_parent(node) != node)
+#define RB_CLEAR_NODE(node) (rb_set_parent(node, node))
+
+/* Rebalancing/removal primitives (Linux rb_insert_color/rb_erase,
+ * renamed with the v3_ prefix). */
+extern void v3_rb_insert_color(struct rb_node *, struct rb_root *);
+extern void v3_rb_erase(struct rb_node *, struct rb_root *);
+
+/* Find logical next and previous nodes in a tree */
+extern struct rb_node *v3_rb_next(struct rb_node *);
+extern struct rb_node *v3_rb_prev(struct rb_node *);
+extern struct rb_node *v3_rb_first(struct rb_root *);
+extern struct rb_node *v3_rb_last(struct rb_root *);
+
+/* Fast replacement of a single node without remove/rebalance/add/rebalance */
+extern void v3_rb_replace_node(struct rb_node *victim, struct rb_node *new,
+			    struct rb_root *root);
+
+/* Attach 'node' as a red leaf at *rb_link under 'parent'; the caller
+ * must then call v3_rb_insert_color() to rebalance the tree. */
+static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
+				struct rb_node ** rb_link)
+{
+ node->rb_parent_color = (unsigned long )parent;
+ node->rb_left = node->rb_right = NULL;
+
+ *rb_link = node;
+}
+
+
+#endif // __V3VEE__
+
+#endif /* _VMM_RBTREE_H */
v3_reg_t guest_cr0;
-
-
- // Page table struct lookup table
- struct hashtable * pt_cache;
-
- // Guest CR3 to Shadow CR3 Lookup Table
- struct hashtable * cr3_cache;
-
/* SOON TO BE DEPRECATED */
// Hash table that contains a mapping of guest pte addresses to host pte addresses
struct hashtable * cached_ptes;
int v3_activate_shadow_pt(struct guest_info * info);
int v3_activate_passthrough_pt(struct guest_info * info);
-/* TODO: Change to static functions
- * External visibility not needed
- */
-addr_t v3_create_new_shadow_pt();
-int v3_replace_shdw_page32(struct guest_info * info, addr_t location, pte32_t * new_page, pte32_t * old_page);
-/* *** */
-
-
-int v3_replace_shdw_page(struct guest_info * info, addr_t location, void * new_page, void * old_page);
#endif // ! __V3VEE__
#ifdef __V3VEE__
-
#endif // ! __V3VEE__
#endif
int i = 0;
int irq = -1;
- PrintDebug("8259 PIC: getnum: master_irr: 0x%x master_imr: 0x%x\n", i, state->master_irr, state->master_imr);
- PrintDebug("8259 PIC: getnum: slave_irr: 0x%x slave_imr: 0x%x\n", i, state->slave_irr, state->slave_imr);
+ PrintDebug("8259 PIC: getnum: master_irr: 0x%x master_imr: 0x%x\n", state->master_irr, state->master_imr);
+ PrintDebug("8259 PIC: getnum: slave_irr: 0x%x slave_imr: 0x%x\n", state->slave_irr, state->slave_imr);
for (i = 0; i < 16; i++) {
if (i <= 7) {
case 0xff: // reset
PushToOutputQueue(dev, 0xfa, OVERWRITE, COMMAND, KEYBOARD); // ack
state->state = RESET;
- PrintDebug("keyboard: reset complete and acked\n", data);
+ PrintDebug("keyboard: reset complete and acked\n");
break;
case 0xf5: // disable scanning
case 0xf4: // enable scanning
PushToOutputQueue(dev, 0xfa, OVERWRITE, COMMAND, KEYBOARD);
// should do something here... PAD
state->state = NORMAL;
- PrintDebug("keyboard: %s scanning done and acked\n",data==0xf5 ? "disable" : "enable", data);
+ PrintDebug("keyboard: %s scanning done and acked\n",data==0xf5 ? "disable" : "enable");
break;
case 0xfe: // resend
case 0xfd: // set key type make
nvram_state = (struct nvram_internal *)V3_Malloc(sizeof(struct nvram_internal) + 1000);
- PrintDebug("nvram: internal at %x\n", nvram_state);
+ PrintDebug("nvram: internal at %p\n", (void *)nvram_state);
struct vm_device * device = v3_create_device("NVRAM", &dev_ops, nvram_state);
#include <palacios/vmm_lowlevel.h>
#include <palacios/svm_msr.h>
+#include <palacios/vmm_rbtree.h>
+
+#include <palacios/vmm_profiler.h>
extern void v3_stgi();
- if (vm_info->io_map.num_ports > 0) {
- struct vmm_io_hook * iter;
+ if ( !RB_EMPTY_ROOT(&(vm_info->io_map)) ) {
+ struct v3_io_hook * iter;
+ struct rb_node * io_node = v3_rb_first(&(vm_info->io_map));
addr_t io_port_bitmap;
-
+ int i = 0;
+
io_port_bitmap = (addr_t)V3_VAddr(V3_AllocPages(3));
memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
//PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);
- FOREACH_IO_HOOK(vm_info->io_map, iter) {
+ do {
+ iter = rb_entry(io_node, struct v3_io_hook, tree_node);
+
ushort_t port = iter->port;
uchar_t * bitmap = (uchar_t *)io_port_bitmap;
+ PrintDebug("%d: Hooking Port %d\n", i, port);
bitmap += (port / 8);
// PrintDebug("Setting Bit for port 0x%x\n", port);
*bitmap |= 1 << (port % 8);
- }
+
+ i++;
+ } while ((io_node = v3_rb_next(io_node)));
//PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);
ullong_t tmp_tsc;
uint_t vm_cr_low = 0, vm_cr_high = 0;
-
v3_enable_ints();
v3_clgi();
- //PrintDebug("SVM Entry to rip=%p...\n", (void *)info->rip);
+ /*
+ PrintDebug("SVM Entry to CS=%p rip=%p...\n",
+ (void *)(addr_t)info->segments.cs.base,
+ (void *)(addr_t)info->rip);
+ */
v3_get_msr(0xc0000101, &vm_cr_high, &vm_cr_low);
v3_stgi();
- if (num_exits % 25 == 0) {
+ if ((num_exits % 5000) == 0) {
PrintDebug("SVM Exit number %d\n", num_exits);
+ if (info->enable_profiler)
+ v3_print_profile(info);
}
+
if (v3_handle_svm_exit(info) != 0) {
vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
}
v3_print_GPRs(info);
-
-
-
-
PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code);
PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
break;
}
+
}
return 0;
}
#include <palacios/vmm_intr.h>
#include <palacios/vmm_emulator.h>
#include <palacios/svm_msr.h>
+#include <palacios/vmm_profiler.h>
-static const uchar_t * vmexit_code_to_str(uint_t exit_code);
-
int v3_handle_svm_exit(struct guest_info * info) {
vmcb_ctrl_t * guest_ctrl = 0;
exit_code = guest_ctrl->exit_code;
-
+
+
+
+
// Disable printing io exits due to bochs debug messages
//if (!((exit_code == VMEXIT_IOIO) && ((ushort_t)(guest_ctrl->exit_info1 >> 16) == 0x402))) {
}
- // }
- // PrintDebugVMCB((vmcb_t*)(info->vmm_data));
-
- // PrintDebug("SVM Returned:(VMCB=%x)\n", info->vmm_data);
- //PrintDebug("RIP: %x\n", guest_state->rip);
+ if (info->enable_profiler) {
+ rdtscll(info->profiler.start_time);
+ }
//PrintDebug("SVM Returned: Exit Code: %x\n",exit_code);
}
break;
+
+#if 0
+ // Emulation handlers currently not used
case VMEXIT_EXCP1:
{
#ifdef DEBUG_EMULATOR
}
break;
}
-
+
case VMEXIT_VMMCALL:
{
}
break;
}
-
+#endif
case VMEXIT_WBINVD:
// END OF SWITCH (EXIT_CODE)
+ if (info->enable_profiler) {
+ rdtscll(info->profiler.end_time);
+ v3_profile_exit(info, exit_code);
+ }
+
+
+
// Update the low level state
if (v3_intr_pending(info)) {
}
-static const uchar_t VMEXIT_CR0_READ_STR[] = "VMEXIT_CR0_READ";
-static const uchar_t VMEXIT_CR1_READ_STR[] = "VMEXIT_CR1_READ";
-static const uchar_t VMEXIT_CR2_READ_STR[] = "VMEXIT_CR2_READ";
-static const uchar_t VMEXIT_CR3_READ_STR[] = "VMEXIT_CR3_READ";
-static const uchar_t VMEXIT_CR4_READ_STR[] = "VMEXIT_CR4_READ";
-static const uchar_t VMEXIT_CR5_READ_STR[] = "VMEXIT_CR5_READ";
-static const uchar_t VMEXIT_CR6_READ_STR[] = "VMEXIT_CR6_READ";
-static const uchar_t VMEXIT_CR7_READ_STR[] = "VMEXIT_CR7_READ";
-static const uchar_t VMEXIT_CR8_READ_STR[] = "VMEXIT_CR8_READ";
-static const uchar_t VMEXIT_CR9_READ_STR[] = "VMEXIT_CR9_READ";
-static const uchar_t VMEXIT_CR10_READ_STR[] = "VMEXIT_CR10_READ";
-static const uchar_t VMEXIT_CR11_READ_STR[] = "VMEXIT_CR11_READ";
-static const uchar_t VMEXIT_CR12_READ_STR[] = "VMEXIT_CR12_READ";
-static const uchar_t VMEXIT_CR13_READ_STR[] = "VMEXIT_CR13_READ";
-static const uchar_t VMEXIT_CR14_READ_STR[] = "VMEXIT_CR14_READ";
-static const uchar_t VMEXIT_CR15_READ_STR[] = "VMEXIT_CR15_READ";
-static const uchar_t VMEXIT_CR0_WRITE_STR[] = "VMEXIT_CR0_WRITE";
-static const uchar_t VMEXIT_CR1_WRITE_STR[] = "VMEXIT_CR1_WRITE";
-static const uchar_t VMEXIT_CR2_WRITE_STR[] = "VMEXIT_CR2_WRITE";
-static const uchar_t VMEXIT_CR3_WRITE_STR[] = "VMEXIT_CR3_WRITE";
-static const uchar_t VMEXIT_CR4_WRITE_STR[] = "VMEXIT_CR4_WRITE";
-static const uchar_t VMEXIT_CR5_WRITE_STR[] = "VMEXIT_CR5_WRITE";
-static const uchar_t VMEXIT_CR6_WRITE_STR[] = "VMEXIT_CR6_WRITE";
-static const uchar_t VMEXIT_CR7_WRITE_STR[] = "VMEXIT_CR7_WRITE";
-static const uchar_t VMEXIT_CR8_WRITE_STR[] = "VMEXIT_CR8_WRITE";
-static const uchar_t VMEXIT_CR9_WRITE_STR[] = "VMEXIT_CR9_WRITE";
-static const uchar_t VMEXIT_CR10_WRITE_STR[] = "VMEXIT_CR10_WRITE";
-static const uchar_t VMEXIT_CR11_WRITE_STR[] = "VMEXIT_CR11_WRITE";
-static const uchar_t VMEXIT_CR12_WRITE_STR[] = "VMEXIT_CR12_WRITE";
-static const uchar_t VMEXIT_CR13_WRITE_STR[] = "VMEXIT_CR13_WRITE";
-static const uchar_t VMEXIT_CR14_WRITE_STR[] = "VMEXIT_CR14_WRITE";
-static const uchar_t VMEXIT_CR15_WRITE_STR[] = "VMEXIT_CR15_WRITE";
-static const uchar_t VMEXIT_DR0_READ_STR[] = "VMEXIT_DR0_READ";
-static const uchar_t VMEXIT_DR1_READ_STR[] = "VMEXIT_DR1_READ";
-static const uchar_t VMEXIT_DR2_READ_STR[] = "VMEXIT_DR2_READ";
-static const uchar_t VMEXIT_DR3_READ_STR[] = "VMEXIT_DR3_READ";
-static const uchar_t VMEXIT_DR4_READ_STR[] = "VMEXIT_DR4_READ";
-static const uchar_t VMEXIT_DR5_READ_STR[] = "VMEXIT_DR5_READ";
-static const uchar_t VMEXIT_DR6_READ_STR[] = "VMEXIT_DR6_READ";
-static const uchar_t VMEXIT_DR7_READ_STR[] = "VMEXIT_DR7_READ";
-static const uchar_t VMEXIT_DR8_READ_STR[] = "VMEXIT_DR8_READ";
-static const uchar_t VMEXIT_DR9_READ_STR[] = "VMEXIT_DR9_READ";
-static const uchar_t VMEXIT_DR10_READ_STR[] = "VMEXIT_DR10_READ";
-static const uchar_t VMEXIT_DR11_READ_STR[] = "VMEXIT_DR11_READ";
-static const uchar_t VMEXIT_DR12_READ_STR[] = "VMEXIT_DR12_READ";
-static const uchar_t VMEXIT_DR13_READ_STR[] = "VMEXIT_DR13_READ";
-static const uchar_t VMEXIT_DR14_READ_STR[] = "VMEXIT_DR14_READ";
-static const uchar_t VMEXIT_DR15_READ_STR[] = "VMEXIT_DR15_READ";
-static const uchar_t VMEXIT_DR0_WRITE_STR[] = "VMEXIT_DR0_WRITE";
-static const uchar_t VMEXIT_DR1_WRITE_STR[] = "VMEXIT_DR1_WRITE";
-static const uchar_t VMEXIT_DR2_WRITE_STR[] = "VMEXIT_DR2_WRITE";
-static const uchar_t VMEXIT_DR3_WRITE_STR[] = "VMEXIT_DR3_WRITE";
-static const uchar_t VMEXIT_DR4_WRITE_STR[] = "VMEXIT_DR4_WRITE";
-static const uchar_t VMEXIT_DR5_WRITE_STR[] = "VMEXIT_DR5_WRITE";
-static const uchar_t VMEXIT_DR6_WRITE_STR[] = "VMEXIT_DR6_WRITE";
-static const uchar_t VMEXIT_DR7_WRITE_STR[] = "VMEXIT_DR7_WRITE";
-static const uchar_t VMEXIT_DR8_WRITE_STR[] = "VMEXIT_DR8_WRITE";
-static const uchar_t VMEXIT_DR9_WRITE_STR[] = "VMEXIT_DR9_WRITE";
-static const uchar_t VMEXIT_DR10_WRITE_STR[] = "VMEXIT_DR10_WRITE";
-static const uchar_t VMEXIT_DR11_WRITE_STR[] = "VMEXIT_DR11_WRITE";
-static const uchar_t VMEXIT_DR12_WRITE_STR[] = "VMEXIT_DR12_WRITE";
-static const uchar_t VMEXIT_DR13_WRITE_STR[] = "VMEXIT_DR13_WRITE";
-static const uchar_t VMEXIT_DR14_WRITE_STR[] = "VMEXIT_DR14_WRITE";
-static const uchar_t VMEXIT_DR15_WRITE_STR[] = "VMEXIT_DR15_WRITE";
-static const uchar_t VMEXIT_EXCP0_STR[] = "VMEXIT_EXCP0";
-static const uchar_t VMEXIT_EXCP1_STR[] = "VMEXIT_EXCP1";
-static const uchar_t VMEXIT_EXCP2_STR[] = "VMEXIT_EXCP2";
-static const uchar_t VMEXIT_EXCP3_STR[] = "VMEXIT_EXCP3";
-static const uchar_t VMEXIT_EXCP4_STR[] = "VMEXIT_EXCP4";
-static const uchar_t VMEXIT_EXCP5_STR[] = "VMEXIT_EXCP5";
-static const uchar_t VMEXIT_EXCP6_STR[] = "VMEXIT_EXCP6";
-static const uchar_t VMEXIT_EXCP7_STR[] = "VMEXIT_EXCP7";
-static const uchar_t VMEXIT_EXCP8_STR[] = "VMEXIT_EXCP8";
-static const uchar_t VMEXIT_EXCP9_STR[] = "VMEXIT_EXCP9";
-static const uchar_t VMEXIT_EXCP10_STR[] = "VMEXIT_EXCP10";
-static const uchar_t VMEXIT_EXCP11_STR[] = "VMEXIT_EXCP11";
-static const uchar_t VMEXIT_EXCP12_STR[] = "VMEXIT_EXCP12";
-static const uchar_t VMEXIT_EXCP13_STR[] = "VMEXIT_EXCP13";
-static const uchar_t VMEXIT_EXCP14_STR[] = "VMEXIT_EXCP14";
-static const uchar_t VMEXIT_EXCP15_STR[] = "VMEXIT_EXCP15";
-static const uchar_t VMEXIT_EXCP16_STR[] = "VMEXIT_EXCP16";
-static const uchar_t VMEXIT_EXCP17_STR[] = "VMEXIT_EXCP17";
-static const uchar_t VMEXIT_EXCP18_STR[] = "VMEXIT_EXCP18";
-static const uchar_t VMEXIT_EXCP19_STR[] = "VMEXIT_EXCP19";
-static const uchar_t VMEXIT_EXCP20_STR[] = "VMEXIT_EXCP20";
-static const uchar_t VMEXIT_EXCP21_STR[] = "VMEXIT_EXCP21";
-static const uchar_t VMEXIT_EXCP22_STR[] = "VMEXIT_EXCP22";
-static const uchar_t VMEXIT_EXCP23_STR[] = "VMEXIT_EXCP23";
-static const uchar_t VMEXIT_EXCP24_STR[] = "VMEXIT_EXCP24";
-static const uchar_t VMEXIT_EXCP25_STR[] = "VMEXIT_EXCP25";
-static const uchar_t VMEXIT_EXCP26_STR[] = "VMEXIT_EXCP26";
-static const uchar_t VMEXIT_EXCP27_STR[] = "VMEXIT_EXCP27";
-static const uchar_t VMEXIT_EXCP28_STR[] = "VMEXIT_EXCP28";
-static const uchar_t VMEXIT_EXCP29_STR[] = "VMEXIT_EXCP29";
-static const uchar_t VMEXIT_EXCP30_STR[] = "VMEXIT_EXCP30";
-static const uchar_t VMEXIT_EXCP31_STR[] = "VMEXIT_EXCP31";
-static const uchar_t VMEXIT_INTR_STR[] = "VMEXIT_INTR";
-static const uchar_t VMEXIT_NMI_STR[] = "VMEXIT_NMI";
-static const uchar_t VMEXIT_SMI_STR[] = "VMEXIT_SMI";
-static const uchar_t VMEXIT_INIT_STR[] = "VMEXIT_INIT";
-static const uchar_t VMEXIT_VINITR_STR[] = "VMEXIT_VINITR";
-static const uchar_t VMEXIT_CR0_SEL_WRITE_STR[] = "VMEXIT_CR0_SEL_WRITE";
-static const uchar_t VMEXIT_IDTR_READ_STR[] = "VMEXIT_IDTR_READ";
-static const uchar_t VMEXIT_GDTR_READ_STR[] = "VMEXIT_GDTR_READ";
-static const uchar_t VMEXIT_LDTR_READ_STR[] = "VMEXIT_LDTR_READ";
-static const uchar_t VMEXIT_TR_READ_STR[] = "VMEXIT_TR_READ";
-static const uchar_t VMEXIT_IDTR_WRITE_STR[] = "VMEXIT_IDTR_WRITE";
-static const uchar_t VMEXIT_GDTR_WRITE_STR[] = "VMEXIT_GDTR_WRITE";
-static const uchar_t VMEXIT_LDTR_WRITE_STR[] = "VMEXIT_LDTR_WRITE";
-static const uchar_t VMEXIT_TR_WRITE_STR[] = "VMEXIT_TR_WRITE";
-static const uchar_t VMEXIT_RDTSC_STR[] = "VMEXIT_RDTSC";
-static const uchar_t VMEXIT_RDPMC_STR[] = "VMEXIT_RDPMC";
-static const uchar_t VMEXIT_PUSHF_STR[] = "VMEXIT_PUSHF";
-static const uchar_t VMEXIT_POPF_STR[] = "VMEXIT_POPF";
-static const uchar_t VMEXIT_CPUID_STR[] = "VMEXIT_CPUID";
-static const uchar_t VMEXIT_RSM_STR[] = "VMEXIT_RSM";
-static const uchar_t VMEXIT_IRET_STR[] = "VMEXIT_IRET";
-static const uchar_t VMEXIT_SWINT_STR[] = "VMEXIT_SWINT";
-static const uchar_t VMEXIT_INVD_STR[] = "VMEXIT_INVD";
-static const uchar_t VMEXIT_PAUSE_STR[] = "VMEXIT_PAUSE";
-static const uchar_t VMEXIT_HLT_STR[] = "VMEXIT_HLT";
-static const uchar_t VMEXIT_INVLPG_STR[] = "VMEXIT_INVLPG";
-static const uchar_t VMEXIT_INVLPGA_STR[] = "VMEXIT_INVLPGA";
-static const uchar_t VMEXIT_IOIO_STR[] = "VMEXIT_IOIO";
-static const uchar_t VMEXIT_MSR_STR[] = "VMEXIT_MSR";
-static const uchar_t VMEXIT_TASK_SWITCH_STR[] = "VMEXIT_TASK_SWITCH";
-static const uchar_t VMEXIT_FERR_FREEZE_STR[] = "VMEXIT_FERR_FREEZE";
-static const uchar_t VMEXIT_SHUTDOWN_STR[] = "VMEXIT_SHUTDOWN";
-static const uchar_t VMEXIT_VMRUN_STR[] = "VMEXIT_VMRUN";
-static const uchar_t VMEXIT_VMMCALL_STR[] = "VMEXIT_VMMCALL";
-static const uchar_t VMEXIT_VMLOAD_STR[] = "VMEXIT_VMLOAD";
-static const uchar_t VMEXIT_VMSAVE_STR[] = "VMEXIT_VMSAVE";
-static const uchar_t VMEXIT_STGI_STR[] = "VMEXIT_STGI";
-static const uchar_t VMEXIT_CLGI_STR[] = "VMEXIT_CLGI";
-static const uchar_t VMEXIT_SKINIT_STR[] = "VMEXIT_SKINIT";
-static const uchar_t VMEXIT_RDTSCP_STR[] = "VMEXIT_RDTSCP";
-static const uchar_t VMEXIT_ICEBP_STR[] = "VMEXIT_ICEBP";
-static const uchar_t VMEXIT_WBINVD_STR[] = "VMEXIT_WBINVD";
-static const uchar_t VMEXIT_MONITOR_STR[] = "VMEXIT_MONITOR";
-static const uchar_t VMEXIT_MWAIT_STR[] = "VMEXIT_MWAIT";
-static const uchar_t VMEXIT_MWAIT_CONDITIONAL_STR[] = "VMEXIT_MWAIT_CONDITIONAL";
-static const uchar_t VMEXIT_NPF_STR[] = "VMEXIT_NPF";
-static const uchar_t VMEXIT_INVALID_VMCB_STR[] = "VMEXIT_INVALID_VMCB";
-
-
-
-const uchar_t * vmexit_code_to_str(uint_t exit_code) {
+static const char VMEXIT_CR0_READ_STR[] = "VMEXIT_CR0_READ";
+static const char VMEXIT_CR1_READ_STR[] = "VMEXIT_CR1_READ";
+static const char VMEXIT_CR2_READ_STR[] = "VMEXIT_CR2_READ";
+static const char VMEXIT_CR3_READ_STR[] = "VMEXIT_CR3_READ";
+static const char VMEXIT_CR4_READ_STR[] = "VMEXIT_CR4_READ";
+static const char VMEXIT_CR5_READ_STR[] = "VMEXIT_CR5_READ";
+static const char VMEXIT_CR6_READ_STR[] = "VMEXIT_CR6_READ";
+static const char VMEXIT_CR7_READ_STR[] = "VMEXIT_CR7_READ";
+static const char VMEXIT_CR8_READ_STR[] = "VMEXIT_CR8_READ";
+static const char VMEXIT_CR9_READ_STR[] = "VMEXIT_CR9_READ";
+static const char VMEXIT_CR10_READ_STR[] = "VMEXIT_CR10_READ";
+static const char VMEXIT_CR11_READ_STR[] = "VMEXIT_CR11_READ";
+static const char VMEXIT_CR12_READ_STR[] = "VMEXIT_CR12_READ";
+static const char VMEXIT_CR13_READ_STR[] = "VMEXIT_CR13_READ";
+static const char VMEXIT_CR14_READ_STR[] = "VMEXIT_CR14_READ";
+static const char VMEXIT_CR15_READ_STR[] = "VMEXIT_CR15_READ";
+static const char VMEXIT_CR0_WRITE_STR[] = "VMEXIT_CR0_WRITE";
+static const char VMEXIT_CR1_WRITE_STR[] = "VMEXIT_CR1_WRITE";
+static const char VMEXIT_CR2_WRITE_STR[] = "VMEXIT_CR2_WRITE";
+static const char VMEXIT_CR3_WRITE_STR[] = "VMEXIT_CR3_WRITE";
+static const char VMEXIT_CR4_WRITE_STR[] = "VMEXIT_CR4_WRITE";
+static const char VMEXIT_CR5_WRITE_STR[] = "VMEXIT_CR5_WRITE";
+static const char VMEXIT_CR6_WRITE_STR[] = "VMEXIT_CR6_WRITE";
+static const char VMEXIT_CR7_WRITE_STR[] = "VMEXIT_CR7_WRITE";
+static const char VMEXIT_CR8_WRITE_STR[] = "VMEXIT_CR8_WRITE";
+static const char VMEXIT_CR9_WRITE_STR[] = "VMEXIT_CR9_WRITE";
+static const char VMEXIT_CR10_WRITE_STR[] = "VMEXIT_CR10_WRITE";
+static const char VMEXIT_CR11_WRITE_STR[] = "VMEXIT_CR11_WRITE";
+static const char VMEXIT_CR12_WRITE_STR[] = "VMEXIT_CR12_WRITE";
+static const char VMEXIT_CR13_WRITE_STR[] = "VMEXIT_CR13_WRITE";
+static const char VMEXIT_CR14_WRITE_STR[] = "VMEXIT_CR14_WRITE";
+static const char VMEXIT_CR15_WRITE_STR[] = "VMEXIT_CR15_WRITE";
+static const char VMEXIT_DR0_READ_STR[] = "VMEXIT_DR0_READ";
+static const char VMEXIT_DR1_READ_STR[] = "VMEXIT_DR1_READ";
+static const char VMEXIT_DR2_READ_STR[] = "VMEXIT_DR2_READ";
+static const char VMEXIT_DR3_READ_STR[] = "VMEXIT_DR3_READ";
+static const char VMEXIT_DR4_READ_STR[] = "VMEXIT_DR4_READ";
+static const char VMEXIT_DR5_READ_STR[] = "VMEXIT_DR5_READ";
+static const char VMEXIT_DR6_READ_STR[] = "VMEXIT_DR6_READ";
+static const char VMEXIT_DR7_READ_STR[] = "VMEXIT_DR7_READ";
+static const char VMEXIT_DR8_READ_STR[] = "VMEXIT_DR8_READ";
+static const char VMEXIT_DR9_READ_STR[] = "VMEXIT_DR9_READ";
+static const char VMEXIT_DR10_READ_STR[] = "VMEXIT_DR10_READ";
+static const char VMEXIT_DR11_READ_STR[] = "VMEXIT_DR11_READ";
+static const char VMEXIT_DR12_READ_STR[] = "VMEXIT_DR12_READ";
+static const char VMEXIT_DR13_READ_STR[] = "VMEXIT_DR13_READ";
+static const char VMEXIT_DR14_READ_STR[] = "VMEXIT_DR14_READ";
+static const char VMEXIT_DR15_READ_STR[] = "VMEXIT_DR15_READ";
+static const char VMEXIT_DR0_WRITE_STR[] = "VMEXIT_DR0_WRITE";
+static const char VMEXIT_DR1_WRITE_STR[] = "VMEXIT_DR1_WRITE";
+static const char VMEXIT_DR2_WRITE_STR[] = "VMEXIT_DR2_WRITE";
+static const char VMEXIT_DR3_WRITE_STR[] = "VMEXIT_DR3_WRITE";
+static const char VMEXIT_DR4_WRITE_STR[] = "VMEXIT_DR4_WRITE";
+static const char VMEXIT_DR5_WRITE_STR[] = "VMEXIT_DR5_WRITE";
+static const char VMEXIT_DR6_WRITE_STR[] = "VMEXIT_DR6_WRITE";
+static const char VMEXIT_DR7_WRITE_STR[] = "VMEXIT_DR7_WRITE";
+static const char VMEXIT_DR8_WRITE_STR[] = "VMEXIT_DR8_WRITE";
+static const char VMEXIT_DR9_WRITE_STR[] = "VMEXIT_DR9_WRITE";
+static const char VMEXIT_DR10_WRITE_STR[] = "VMEXIT_DR10_WRITE";
+static const char VMEXIT_DR11_WRITE_STR[] = "VMEXIT_DR11_WRITE";
+static const char VMEXIT_DR12_WRITE_STR[] = "VMEXIT_DR12_WRITE";
+static const char VMEXIT_DR13_WRITE_STR[] = "VMEXIT_DR13_WRITE";
+static const char VMEXIT_DR14_WRITE_STR[] = "VMEXIT_DR14_WRITE";
+static const char VMEXIT_DR15_WRITE_STR[] = "VMEXIT_DR15_WRITE";
+static const char VMEXIT_EXCP0_STR[] = "VMEXIT_EXCP0";
+static const char VMEXIT_EXCP1_STR[] = "VMEXIT_EXCP1";
+static const char VMEXIT_EXCP2_STR[] = "VMEXIT_EXCP2";
+static const char VMEXIT_EXCP3_STR[] = "VMEXIT_EXCP3";
+static const char VMEXIT_EXCP4_STR[] = "VMEXIT_EXCP4";
+static const char VMEXIT_EXCP5_STR[] = "VMEXIT_EXCP5";
+static const char VMEXIT_EXCP6_STR[] = "VMEXIT_EXCP6";
+static const char VMEXIT_EXCP7_STR[] = "VMEXIT_EXCP7";
+static const char VMEXIT_EXCP8_STR[] = "VMEXIT_EXCP8";
+static const char VMEXIT_EXCP9_STR[] = "VMEXIT_EXCP9";
+static const char VMEXIT_EXCP10_STR[] = "VMEXIT_EXCP10";
+static const char VMEXIT_EXCP11_STR[] = "VMEXIT_EXCP11";
+static const char VMEXIT_EXCP12_STR[] = "VMEXIT_EXCP12";
+static const char VMEXIT_EXCP13_STR[] = "VMEXIT_EXCP13";
+static const char VMEXIT_EXCP14_STR[] = "VMEXIT_EXCP14";
+static const char VMEXIT_EXCP15_STR[] = "VMEXIT_EXCP15";
+static const char VMEXIT_EXCP16_STR[] = "VMEXIT_EXCP16";
+static const char VMEXIT_EXCP17_STR[] = "VMEXIT_EXCP17";
+static const char VMEXIT_EXCP18_STR[] = "VMEXIT_EXCP18";
+static const char VMEXIT_EXCP19_STR[] = "VMEXIT_EXCP19";
+static const char VMEXIT_EXCP20_STR[] = "VMEXIT_EXCP20";
+static const char VMEXIT_EXCP21_STR[] = "VMEXIT_EXCP21";
+static const char VMEXIT_EXCP22_STR[] = "VMEXIT_EXCP22";
+static const char VMEXIT_EXCP23_STR[] = "VMEXIT_EXCP23";
+static const char VMEXIT_EXCP24_STR[] = "VMEXIT_EXCP24";
+static const char VMEXIT_EXCP25_STR[] = "VMEXIT_EXCP25";
+static const char VMEXIT_EXCP26_STR[] = "VMEXIT_EXCP26";
+static const char VMEXIT_EXCP27_STR[] = "VMEXIT_EXCP27";
+static const char VMEXIT_EXCP28_STR[] = "VMEXIT_EXCP28";
+static const char VMEXIT_EXCP29_STR[] = "VMEXIT_EXCP29";
+static const char VMEXIT_EXCP30_STR[] = "VMEXIT_EXCP30";
+static const char VMEXIT_EXCP31_STR[] = "VMEXIT_EXCP31";
+static const char VMEXIT_INTR_STR[] = "VMEXIT_INTR";
+static const char VMEXIT_NMI_STR[] = "VMEXIT_NMI";
+static const char VMEXIT_SMI_STR[] = "VMEXIT_SMI";
+static const char VMEXIT_INIT_STR[] = "VMEXIT_INIT";
+static const char VMEXIT_VINITR_STR[] = "VMEXIT_VINITR";
+static const char VMEXIT_CR0_SEL_WRITE_STR[] = "VMEXIT_CR0_SEL_WRITE";
+static const char VMEXIT_IDTR_READ_STR[] = "VMEXIT_IDTR_READ";
+static const char VMEXIT_GDTR_READ_STR[] = "VMEXIT_GDTR_READ";
+static const char VMEXIT_LDTR_READ_STR[] = "VMEXIT_LDTR_READ";
+static const char VMEXIT_TR_READ_STR[] = "VMEXIT_TR_READ";
+static const char VMEXIT_IDTR_WRITE_STR[] = "VMEXIT_IDTR_WRITE";
+static const char VMEXIT_GDTR_WRITE_STR[] = "VMEXIT_GDTR_WRITE";
+static const char VMEXIT_LDTR_WRITE_STR[] = "VMEXIT_LDTR_WRITE";
+static const char VMEXIT_TR_WRITE_STR[] = "VMEXIT_TR_WRITE";
+static const char VMEXIT_RDTSC_STR[] = "VMEXIT_RDTSC";
+static const char VMEXIT_RDPMC_STR[] = "VMEXIT_RDPMC";
+static const char VMEXIT_PUSHF_STR[] = "VMEXIT_PUSHF";
+static const char VMEXIT_POPF_STR[] = "VMEXIT_POPF";
+static const char VMEXIT_CPUID_STR[] = "VMEXIT_CPUID";
+static const char VMEXIT_RSM_STR[] = "VMEXIT_RSM";
+static const char VMEXIT_IRET_STR[] = "VMEXIT_IRET";
+static const char VMEXIT_SWINT_STR[] = "VMEXIT_SWINT";
+static const char VMEXIT_INVD_STR[] = "VMEXIT_INVD";
+static const char VMEXIT_PAUSE_STR[] = "VMEXIT_PAUSE";
+static const char VMEXIT_HLT_STR[] = "VMEXIT_HLT";
+static const char VMEXIT_INVLPG_STR[] = "VMEXIT_INVLPG";
+static const char VMEXIT_INVLPGA_STR[] = "VMEXIT_INVLPGA";
+static const char VMEXIT_IOIO_STR[] = "VMEXIT_IOIO";
+static const char VMEXIT_MSR_STR[] = "VMEXIT_MSR";
+static const char VMEXIT_TASK_SWITCH_STR[] = "VMEXIT_TASK_SWITCH";
+static const char VMEXIT_FERR_FREEZE_STR[] = "VMEXIT_FERR_FREEZE";
+static const char VMEXIT_SHUTDOWN_STR[] = "VMEXIT_SHUTDOWN";
+static const char VMEXIT_VMRUN_STR[] = "VMEXIT_VMRUN";
+static const char VMEXIT_VMMCALL_STR[] = "VMEXIT_VMMCALL";
+static const char VMEXIT_VMLOAD_STR[] = "VMEXIT_VMLOAD";
+static const char VMEXIT_VMSAVE_STR[] = "VMEXIT_VMSAVE";
+static const char VMEXIT_STGI_STR[] = "VMEXIT_STGI";
+static const char VMEXIT_CLGI_STR[] = "VMEXIT_CLGI";
+static const char VMEXIT_SKINIT_STR[] = "VMEXIT_SKINIT";
+static const char VMEXIT_RDTSCP_STR[] = "VMEXIT_RDTSCP";
+static const char VMEXIT_ICEBP_STR[] = "VMEXIT_ICEBP";
+static const char VMEXIT_WBINVD_STR[] = "VMEXIT_WBINVD";
+static const char VMEXIT_MONITOR_STR[] = "VMEXIT_MONITOR";
+static const char VMEXIT_MWAIT_STR[] = "VMEXIT_MWAIT";
+static const char VMEXIT_MWAIT_CONDITIONAL_STR[] = "VMEXIT_MWAIT_CONDITIONAL";
+static const char VMEXIT_NPF_STR[] = "VMEXIT_NPF";
+static const char VMEXIT_INVALID_VMCB_STR[] = "VMEXIT_INVALID_VMCB";
+
+
+
+const char * vmexit_code_to_str(uint_t exit_code) {
switch(exit_code) {
case VMEXIT_CR0_READ:
return VMEXIT_CR0_READ_STR;
* redistribute, and modify it as specified in the file "V3VEE_LICENSE".
*/
-
-
-
#include <palacios/svm_io.h>
#include <palacios/vmm_io.h>
#include <palacios/vmm_ctrl_regs.h>
// vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
struct svm_io_info * io_info = (struct svm_io_info *)&(ctrl_area->exit_info1);
- struct vmm_io_hook * hook = v3_get_io_hook(&(info->io_map), io_info->port);
+ struct v3_io_hook * hook = v3_get_io_hook(info, io_info->port);
int read_size = 0;
if (hook == NULL) {
struct svm_io_info * io_info = (struct svm_io_info *)&(ctrl_area->exit_info1);
- struct vmm_io_hook * hook = v3_get_io_hook(&(info->io_map), io_info->port);
+ struct v3_io_hook * hook = v3_get_io_hook(info, io_info->port);
int read_size = 0;
addr_t dst_addr = 0;
// vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
struct svm_io_info * io_info = (struct svm_io_info *)&(ctrl_area->exit_info1);
- struct vmm_io_hook * hook = v3_get_io_hook(&(info->io_map), io_info->port);
+ struct v3_io_hook * hook = v3_get_io_hook(info, io_info->port);
int write_size = 0;
if (hook == NULL) {
struct svm_io_info * io_info = (struct svm_io_info *)&(ctrl_area->exit_info1);
- struct vmm_io_hook * hook = v3_get_io_hook(&(info->io_map), io_info->port);
+ struct v3_io_hook * hook = v3_get_io_hook(info, io_info->port);
int write_size = 0;
addr_t dst_addr = 0;
int guest_pa_to_host_pa(struct guest_info * guest_info, addr_t guest_pa, addr_t * host_pa) {
- // we use the shadow map here...
- host_region_type_t reg_type = lookup_shadow_map_addr(&(guest_info->mem_map), guest_pa, host_pa);
+ struct v3_shadow_region * shdw_reg = v3_get_shadow_region(guest_info, guest_pa);
- if (reg_type != HOST_REGION_PHYSICAL_MEMORY) {
- PrintError("In GPA->HPA: Could not find address in shadow map (addr=%p) (reg_type=%d)\n",
- (void *)guest_pa, reg_type);
+ *host_pa = v3_get_shadow_addr(shdw_reg, guest_pa);
+
+ if ((shdw_reg == NULL) ||
+ (shdw_reg->host_type == SHDW_REGION_INVALID) ||
+ (shdw_reg->host_type == SHDW_REGION_FULL_HOOK)){
+ PrintError("In GPA->HPA: Could not find address in shadow map (addr=%p) (reg_type=%s)\n",
+ (void *)guest_pa, v3_shdw_region_type_to_str(shdw_reg->host_type));
return -1;
}
#include <palacios/vmm_intr.h>
#include <palacios/vmm_config.h>
#include <palacios/vm_guest.h>
-#include <palacios/vmm_decoder.h>
+
v3_cpu_arch_t v3_cpu_type;
struct v3_os_hooks * os_hooks = NULL;
v3_cpu_type = V3_INVALID_CPU;
- v3_init_decoder();
+
if (v3_is_svm_capable()) {
#include <palacios/vmm.h>
#include <palacios/vmm_debug.h>
#include <palacios/vmm_msr.h>
-
+#include <palacios/vmm_decoder.h>
+#include <palacios/vmm_profiler.h>
+#include <palacios/vmm_mem.h>
#include <devices/serial.h>
#include <devices/keyboard.h>
#include <devices/bochs_debug.h>
+
#include <palacios/vmm_host_events.h>
#define USE_GENERIC 1
-static int mem_test_read(addr_t guest_addr, void * dst, uint_t length, void * priv_data) {
- int foo = 20;
+static int passthrough_mem_write(addr_t guest_addr, void * src, uint_t length, void * priv_data) {
- memcpy(dst, &foo, length);
-
- PrintDebug("Passthrough mem read returning: %p (length=%d)\n", (void *)(foo + (guest_addr & 0xfff)), length);
return length;
-}
-
-static int passthrough_mem_read(addr_t guest_addr, void * dst, uint_t length, void * priv_data) {
- memcpy(dst, (void*)guest_addr, length);
- return length;
-}
+ // memcpy((void*)guest_addr, src, length);
+ PrintDebug("Write of %d bytes to %p\n", length, (void *)guest_addr);
+ PrintDebug("Write Value = %p\n", (void *)*(addr_t *)src);
-static int passthrough_mem_write(addr_t guest_addr, void * src, uint_t length, void * priv_data) {
- memcpy((void*)guest_addr, src, length);
return length;
}
// Initialize the subsystem data strutures
v3_init_time(info);
- v3_init_vmm_io_map(info);
+ v3_init_io_map(info);
v3_init_msr_map(info);
v3_init_interrupt_state(info);
v3_init_dev_mgr(info);
- v3_init_emulator(info);
v3_init_host_events(info);
- init_shadow_map(info);
+ v3_init_decoder(info);
+
+ v3_init_shadow_map(info);
if (v3_cpu_type == V3_SVM_REV3_CPU) {
info->shdw_pg_mode = NESTED_PAGING;
setup_devices(info, config_ptr);
+
+ if (config_ptr->enable_profiling) {
+ info->enable_profiler = 1;
+ v3_init_profiler(info);
+ } else {
+ info->enable_profiler = 0;
+ }
+
//v3_hook_io_port(info, 1234, &IO_Read, NULL, info);
// Setup initial cpu register state
PrintDebug("Layout Region %d bytes\n", config_ptr->rombios_size);
memcpy(V3_VAddr(guest_mem), config_ptr->rombios, config_ptr->rombios_size);
- add_shadow_region_passthrough(info, ROMBIOS_START, ROMBIOS_START + (num_pages * PAGE_SIZE), (addr_t)guest_mem);
+ v3_add_shadow_mem(info, ROMBIOS_START, ROMBIOS_START + (num_pages * PAGE_SIZE) - 1, (addr_t)guest_mem);
PrintDebug("Adding Shadow Region (0x%p-0x%p) -> 0x%p\n",
(void *)ROMBIOS_START,
PrintDebug("Layout Region %d bytes\n", config_ptr->vgabios_size);
memcpy(V3_VAddr(guest_mem), config_ptr->vgabios, config_ptr->vgabios_size);
- add_shadow_region_passthrough(info, VGABIOS_START, VGABIOS_START + (num_pages * PAGE_SIZE), (addr_t)guest_mem);
+ v3_add_shadow_mem(info, VGABIOS_START, VGABIOS_START + (num_pages * PAGE_SIZE) - 1, (addr_t)guest_mem);
PrintDebug("Adding Shadow Region (0x%p-0x%p) -> 0x%p\n",
(void *)VGABIOS_START,
}
//
- add_shadow_region_passthrough(info, 0x0, 0xa0000, (addr_t)V3_AllocPages(160));
+ v3_add_shadow_mem(info, 0x0, 0x9ffff, (addr_t)V3_AllocPages(160));
if (1) {
- add_shadow_region_passthrough(info, 0xa0000, 0xc0000, 0xa0000);
+ v3_add_shadow_mem(info, 0xa0000, 0xbffff, 0xa0000);
} else {
- hook_guest_mem(info, 0xa0000, 0xc0000, passthrough_mem_read, passthrough_mem_write, NULL);
+ v3_hook_write_mem(info, 0xa0000, 0xbffff, 0xa0000, passthrough_mem_write, NULL);
}
// TEMP
//add_shadow_region_passthrough(info, 0xc0000, 0xc8000, 0xc0000);
if (1) {
-    add_shadow_region_passthrough(info, 0xc7000, 0xc8000, (addr_t)V3_AllocPages(1));
-    if (add_shadow_region_passthrough(info, 0xc8000, 0xf0000, (addr_t)V3_AllocPages(40)) == -1) {
+    // NOTE(review): the new v3_add_shadow_mem() calls in this patch use an
+    // INCLUSIVE end address (cf. 0x9ffff, 0xbffff, and the "- 1" BIOS regions
+    // above), so the old exclusive upper bounds are converted to last-byte
+    // values here; keeping e.g. 0xc8000 would overlap the first byte of the
+    // following region.  Confirm against v3_add_shadow_mem()'s range check.
+    v3_add_shadow_mem(info, 0xc7000, 0xc7fff, (addr_t)V3_AllocPages(1));
+    if (v3_add_shadow_mem(info, 0xc8000, 0xeffff, (addr_t)V3_AllocPages(40)) == -1) {
      PrintDebug("Error adding shadow region\n");
    }
  } else {
-    add_shadow_region_passthrough(info, 0xc0000, 0xc8000, 0xc0000);
-    add_shadow_region_passthrough(info, 0xc8000, 0xf0000, 0xc8000);
+    v3_add_shadow_mem(info, 0xc0000, 0xc7fff, 0xc0000);
+    v3_add_shadow_mem(info, 0xc8000, 0xeffff, 0xc8000);
  }
  if (1) {
-    add_shadow_region_passthrough(info, 0x100000, 0x1000000, (addr_t)V3_AllocPages(4096));
+    v3_add_shadow_mem(info, 0x100000, 0xffffff, (addr_t)V3_AllocPages(4096));
  } else {
    /* MEMORY HOOK TEST */
-    add_shadow_region_passthrough(info, 0x100000, 0xa00000, (addr_t)V3_AllocPages(2304));
-    hook_guest_mem(info, 0xa00000, 0xa01000, mem_test_read, passthrough_mem_write, NULL);
-    add_shadow_region_passthrough(info, 0xa01000, 0x1000000, (addr_t)V3_AllocPages(1791));
+    v3_add_shadow_mem(info, 0x100000, 0x9fffff, (addr_t)V3_AllocPages(2304));
+    v3_hook_write_mem(info, 0xa00000, 0xa00fff, (addr_t)V3_AllocPages(1), passthrough_mem_write, NULL);
+    v3_add_shadow_mem(info, 0xa01000, 0xffffff, (addr_t)V3_AllocPages(1791));
  }
-  add_shadow_region_passthrough(info, 0x1000000, 0x8000000, (addr_t)V3_AllocPages(32768));
+  v3_add_shadow_mem(info, 0x1000000, 0x7ffffff, (addr_t)V3_AllocPages(32768));
  // test - give linux accesss to PCI space - PAD
-  add_shadow_region_passthrough(info, 0xc0000000,0xffffffff,0xc0000000);
+  v3_add_shadow_mem(info, 0xc0000000,0xffffffff,0xc0000000);
- print_shadow_map(&(info->mem_map));
+ print_shadow_map(info);
return 0;
}
return -1;
}
- if (v3_opcode_cmp(V3_OPCODE_LMSW, (const uchar_t *)(dec_instr.opcode)) == 0) {
+ if (dec_instr.op_type == V3_OP_LMSW) {
+ // if (v3_opcode_cmp(V3_OPCODE_LMSW, (const uchar_t *)(dec_instr.opcode)) == 0) {
if (handle_lmsw(info, &dec_instr) == -1) {
return -1;
}
- } else if (v3_opcode_cmp(V3_OPCODE_MOV2CR, (const uchar_t *)(dec_instr.opcode)) == 0) {
-
+ // } else if (v3_opcode_cmp(V3_OPCODE_MOV2CR, (const uchar_t *)(dec_instr.opcode)) == 0) {
+ } else if (dec_instr.op_type == V3_OP_MOV2CR) {
if (handle_mov_to_cr0(info, &dec_instr) == -1) {
return -1;
}
- } else if (v3_opcode_cmp(V3_OPCODE_CLTS, (const uchar_t *)(dec_instr.opcode)) == 0) {
+ // } else if (v3_opcode_cmp(V3_OPCODE_CLTS, (const uchar_t *)(dec_instr.opcode)) == 0) {
+ } else if (dec_instr.op_type == V3_OP_CLTS) {
if (handle_clts(info, &dec_instr) == -1) {
return -1;
return -1;
}
- if (v3_opcode_cmp(V3_OPCODE_MOVCR2, (const uchar_t *)(dec_instr.opcode)) == 0) {
+ // if (v3_opcode_cmp(V3_OPCODE_MOVCR2, (const uchar_t *)(dec_instr.opcode)) == 0) {
+ if (dec_instr.op_type == V3_OP_MOVCR2) {
struct cr0_32 * dst_reg = (struct cr0_32 *)(dec_instr.dst_operand.operand);
struct cr0_32 * shadow_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
PrintDebug("Shadow CR0: %x\n", *(uint_t*)shadow_cr0);
PrintDebug("returned CR0: %x\n", *(uint_t*)dst_reg);
- } else if (v3_opcode_cmp(V3_OPCODE_SMSW, (const uchar_t *)(dec_instr.opcode)) == 0) {
+ // } else if (v3_opcode_cmp(V3_OPCODE_SMSW, (const uchar_t *)(dec_instr.opcode)) == 0) {
+ } else if (dec_instr.op_type == V3_OP_SMSW) {
struct cr0_real * shadow_cr0 = (struct cr0_real *)&(info->ctrl_regs.cr0);
struct cr0_real * dst_reg = (struct cr0_real *)(dec_instr.dst_operand.operand);
char cr0_val = *(char*)shadow_cr0 & 0x0f;
return -1;
}
- if (v3_opcode_cmp(V3_OPCODE_MOV2CR, (const uchar_t *)(dec_instr.opcode)) == 0) {
+ // if (v3_opcode_cmp(V3_OPCODE_MOV2CR, (const uchar_t *)(dec_instr.opcode)) == 0) {
+ if (dec_instr.op_type == V3_OP_MOV2CR) {
PrintDebug("MOV2CR3 (cpu_mode=%s)\n", v3_cpu_mode_to_str(info->cpu_mode));
if (info->shdw_pg_mode == SHADOW_PAGING) {
return -1;
}
- if (v3_opcode_cmp(V3_OPCODE_MOVCR2, (const uchar_t *)(dec_instr.opcode)) == 0) {
+ // if (v3_opcode_cmp(V3_OPCODE_MOVCR2, (const uchar_t *)(dec_instr.opcode)) == 0) {
+ if (dec_instr.op_type == V3_OP_MOVCR2) {
PrintDebug("MOVCR32 (mode=%s)\n", v3_cpu_mode_to_str(info->cpu_mode));
if (info->shdw_pg_mode == SHADOW_PAGING) {
return -1;
}
- if (v3_opcode_cmp(V3_OPCODE_MOV2CR, (const uchar_t *)(dec_instr.opcode)) != 0) {
+ // if (v3_opcode_cmp(V3_OPCODE_MOV2CR, (const uchar_t *)(dec_instr.opcode)) != 0) {
+ if (dec_instr.op_type != V3_OP_MOV2CR) {
PrintError("Invalid opcode in write to CR4\n");
return -1;
}
}
+// Scan the legacy-prefix bytes at the start of an x86 instruction stream and
+// record each one found in *prefixes.  Stops at the first non-prefix byte.
+//
+// NOTE(review): this routine only ever sets fields to 1, so it assumes the
+// caller has zeroed *prefixes beforehand -- confirm at call sites.  It also
+// reads one byte per iteration with no explicit length bound; the caller must
+// guarantee a terminating non-prefix byte within the buffer.
+void v3_get_prefixes(uchar_t * instr, struct x86_prefixes * prefixes) {
+  while (1) {
+    switch (*instr) {
+    case 0xF0:      // lock
+      prefixes->lock = 1;
+      break;
+
+    case 0xF2:      // REPNE/REPNZ
+      // 0xF2 serves both mnemonics; both flags are set so either name works.
+      prefixes->repnz = 1;
+      prefixes->repne = 1;
+      break;
+
+    case 0xF3:      // REP or REPE/REPZ
+      // 0xF3 serves all three mnemonics; set every alias.
+      prefixes->rep = 1;
+      prefixes->repe = 1;
+      prefixes->repz = 1;
+      break;
+
+    case 0x2E:      // CS override or Branch hint not taken (with Jcc instrs)
+      // Meaning depends on the opcode that follows; record both readings.
+      prefixes->cs_override = 1;
+      prefixes->br_not_taken = 1;
+      break;
+
+    case 0x36:      // SS override
+      prefixes->ss_override = 1;
+      break;
+
+    case 0x3E:      // DS override or Branch hint taken (with Jcc instrs)
+      // Meaning depends on the opcode that follows; record both readings.
+      prefixes->ds_override = 1;
+      prefixes->br_taken = 1;
+      break;
+
+    case 0x26:      // ES override
+      prefixes->es_override = 1;
+      break;
+
+    case 0x64:      // FS override
+      prefixes->fs_override = 1;
+      break;
+
+    case 0x65:      // GS override
+      prefixes->gs_override = 1;
+      break;
+
+    case 0x66:      // operand size override
+      prefixes->op_size = 1;
+      break;
+
+    case 0x67:      // address size override
+      prefixes->addr_size = 1;
+      break;
+
+    default:
+      // First non-prefix byte: the opcode proper starts here.
+      return;
+    }
+
+    instr++;
+  }
+
+}
+
void v3_strip_rep_prefix(uchar_t * instr, int length) {
int read_ctr = 0;
int write_ctr = 0;
#include <palacios/vmm_emulator.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_decoder.h>
-#include <palacios/vmm_debug.h>
-#include <palacios/vmcb.h>
-#include <palacios/vmm_ctrl_regs.h>
-
-static const char VMMCALL[3] = {0x0f, 0x01, 0xd9};
+#include <palacios/vmm_paging.h>
+#include <palacios/vmm_instr_emulator.h>
#ifndef DEBUG_EMULATOR
#undef PrintDebug
#endif
-int v3_init_emulator(struct guest_info * info) {
- struct emulation_state * emulator = &(info->emulator);
-
- emulator->num_emulated_pages = 0;
- INIT_LIST_HEAD(&(emulator->emulated_pages));
-
- emulator->num_saved_pages = 0;
- INIT_LIST_HEAD(&(emulator->saved_pages));
-
- emulator->num_write_regions = 0;
- INIT_LIST_HEAD(&(emulator->write_regions));
- emulator->running = 0;
- emulator->instr_length = 0;
- emulator->tf_enabled = 0;
- return 0;
-}
+// We emulate up to the next 4KB page boundary
+static int emulate_string_write_op(struct guest_info * info, struct x86_instr * dec_instr,
+ addr_t write_gva, addr_t write_gpa, addr_t * dst_addr) {
+ uint_t emulation_length = 0;
+ addr_t tmp_rcx = 0;
+ addr_t src_addr = 0;
-static addr_t get_new_page() {
- void * page = V3_VAddr(V3_AllocPages(1));
- memset(page, 0, PAGE_SIZE);
+ if (dec_instr->op_type == V3_OP_MOVS) {
+ PrintDebug("MOVS emulation\n");
- return (addr_t)page;
-}
+ if (dec_instr->dst_operand.operand != write_gva) {
+ PrintError("Inconsistency between Pagefault and Instruction Decode XED_ADDR=%p, PF_ADDR=%p\n",
+ (void *)dec_instr->dst_operand.operand, (void *)write_gva);
+ return -1;
+ }
-/*
-static int setup_code_page(struct guest_info * info, char * instr, struct basic_instr_info * instr_info ) {
- addr_t code_page_offset = PT32_PAGE_OFFSET(info->rip);
- addr_t code_page = get_new_page();
- struct emulated_page * new_code_page = V3_Malloc(sizeof(struct emulated_page));
- struct saved_page * saved_code_page = V3_Malloc(sizeof(struct saved_page));
+ emulation_length = ( (dec_instr->str_op_length < (0x1000 - PAGE_OFFSET_4KB(write_gva))) ?
+ dec_instr->str_op_length :
+ (0x1000 - PAGE_OFFSET_4KB(write_gva)));
+    /* TODO: also clamp emulation_length so the copy cannot overrun the end of the *source* page */
- saved_code_page->va = PT32_PAGE_ADDR(info->rip);
-
- new_code_page->page_addr = code_page;
- new_code_page->va = PT32_PAGE_ADDR(info->rip);
-
- new_code_page->pte.present = 1;
- new_code_page->pte.writable = 0;
- new_code_page->pte.user_page = 1;
- new_code_page->pte.page_base_addr = PT32_BASE_ADDR(code_page);
-
- memcpy((void *)(code_page + code_page_offset), instr, instr_info->instr_length);
- memcpy((void *)(code_page + code_page_offset + instr_info->instr_length), VMMCALL, 3);
-
-#ifdef DEBUG_EMULATOR
- PrintDebug("New Instr Stream:\n");
- PrintTraceMemDump((void *)(code_page + code_page_offset), 32);
- PrintDebug("rip =%x\n", info->rip);
-#endif
+ PrintDebug("STR_OP_LEN: %d, Page Len: %d\n",
+ (uint_t)dec_instr->str_op_length,
+ (uint_t)(0x1000 - PAGE_OFFSET_4KB(write_gva)));
+ PrintDebug("Emulation length: %d\n", emulation_length);
+ tmp_rcx = emulation_length;
+
+ if (guest_pa_to_host_va(info, write_gpa, dst_addr) == -1) {
+ PrintError("Could not translate write destination to host VA\n");
+ return -1;
+ }
+ // figure out addresses here....
+ if (info->mem_mode == PHYSICAL_MEM) {
+ if (guest_pa_to_host_va(info, dec_instr->src_operand.operand, &src_addr) == -1) {
+ PrintError("Could not translate write Source (Physical) to host VA\n");
+ return -1;
+ }
+ } else {
+ if (guest_va_to_host_va(info, dec_instr->src_operand.operand, &src_addr) == -1) {
+ PrintError("Could not translate write Source (Virtual) to host VA\n");
+ return -1;
+ }
+ }
- v3_replace_shdw_page32(info, new_code_page->va, &(new_code_page->pte), &(saved_code_page->pte));
+ PrintDebug("Dst Operand: %p (size=%d), Src Operand: %p\n",
+ (void *)dec_instr->dst_operand.operand,
+ dec_instr->dst_operand.size,
+ (void *)dec_instr->src_operand.operand);
+ PrintDebug("Dst Addr: %p, Src Addr: %p\n", (void *)(addr_t *)*dst_addr, (void *)src_addr);
+ //return -1;
- list_add(&(new_code_page->page_list), &(info->emulator.emulated_pages));
- info->emulator.num_emulated_pages++;
- list_add(&(saved_code_page->page_list), &(info->emulator.saved_pages));
- info->emulator.num_saved_pages++;
+
- return 0;
-}
-*/
+ if (dec_instr->dst_operand.size == 1) {
+ movs8(dst_addr, &src_addr, &tmp_rcx, (addr_t *)&(info->ctrl_regs.rflags));
+ } else if (dec_instr->dst_operand.size == 2) {
+ movs16(dst_addr, &src_addr, &tmp_rcx, (addr_t *)&(info->ctrl_regs.rflags));
+ } else if (dec_instr->dst_operand.size == 4) {
+ movs32(dst_addr, &src_addr, &tmp_rcx, (addr_t *)&(info->ctrl_regs.rflags));
+ } else {
+ PrintError("Invalid operand length\n");
+ return -1;
+ }
-static int set_stepping(struct guest_info * info) {
- vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
- ctrl_area->exceptions.db = 1;
- info->emulator.tf_enabled = ((struct rflags *)&(info->ctrl_regs.rflags))->tf;
- ((struct rflags *)&(info->ctrl_regs.rflags))->tf = 1;
- return 0;
-}
+ PrintDebug("RDI=%p, RSI=%p, RCX=%p\n",
+ (void *)*(addr_t *)&(info->vm_regs.rdi),
+ (void *)*(addr_t *)&(info->vm_regs.rsi),
+ (void *)*(addr_t *)&(info->vm_regs.rcx));
-static int unset_stepping(struct guest_info * info) {
- vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
- ctrl_area->exceptions.db = 0;
+ info->vm_regs.rdi += emulation_length;
+ info->vm_regs.rsi += emulation_length;
+ info->vm_regs.rcx -= emulation_length;
+
+ PrintDebug("RDI=%p, RSI=%p, RCX=%p\n",
+ (void *)*(addr_t *)&(info->vm_regs.rdi),
+ (void *)*(addr_t *)&(info->vm_regs.rsi),
+ (void *)*(addr_t *)&(info->vm_regs.rcx));
- ((struct rflags *)&(info->ctrl_regs.rflags))->tf = info->emulator.tf_enabled;
+ if (emulation_length == dec_instr->str_op_length) {
+ info->rip += dec_instr->instr_length;
+ }
- if (info->emulator.tf_enabled) {
- // Inject breakpoint exception into guest
+ return emulation_length;
}
- return 0;
+
+
+ return -1;
}
-// get the current instr
-// check if rep + remove
-// put into new page, vmexit after
-// replace new page with current eip page
-//
-int v3_emulate_memory_read(struct guest_info * info, addr_t read_gva,
- int (*read)(addr_t read_addr, void * dst, uint_t length, void * priv_data),
- addr_t read_gpa, void * private_data) {
- struct basic_instr_info instr_info;
+int v3_emulate_write_op(struct guest_info * info, addr_t write_gva, addr_t write_gpa, addr_t * dst_addr) {
+ struct x86_instr dec_instr;
uchar_t instr[15];
- int ret;
- struct emulated_page * data_page = V3_Malloc(sizeof(struct emulated_page));
- addr_t data_addr_offset = PAGE_OFFSET(read_gva);
- pte32_t saved_pte;
+ int ret = 0;
+ addr_t src_addr = 0;
+
- PrintDebug("Emulating Read\n");
+ PrintDebug("Emulating Write for instruction at %p\n", (void *)(addr_t)(info->rip));
+ PrintDebug("GPA=%p, GVA=%p\n", (void *)write_gpa, (void *)write_gva);
if (info->mem_mode == PHYSICAL_MEM) {
ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
}
if (ret == -1) {
- PrintError("Could not read guest memory\n");
return -1;
}
-#ifdef DEBUG_EMULATOR
- PrintDebug("Instr (15 bytes) at %p:\n", (void *)(addr_t)instr);
- PrintTraceMemDump(instr, 15);
-#endif
-
- if (v3_basic_mem_decode(info, (addr_t)instr, &instr_info) == -1) {
- PrintError("Could not do a basic memory instruction decode\n");
- V3_Free(data_page);
+ if (guest_pa_to_host_va(info, write_gpa, dst_addr) == -1) {
+ PrintError("Could not translate write destination to host VA\n");
return -1;
}
- /*
- if (instr_info.has_rep == 1) {
- PrintError("We currently don't handle rep* instructions\n");
- V3_Free(data_page);
+ if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) {
+ PrintError("Decoding Error\n");
+ // Kick off single step emulator
return -1;
}
- */
-
- data_page->page_addr = get_new_page();
- data_page->va = PAGE_ADDR(read_gva);
- data_page->pte.present = 1;
- data_page->pte.writable = 0;
- data_page->pte.user_page = 1;
- data_page->pte.page_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr((void *)(addr_t)(data_page->page_addr)));
-
-
- // Read the data directly onto the emulated page
- ret = read(read_gpa, (void *)(data_page->page_addr + data_addr_offset), instr_info.op_size, private_data);
- if ((ret == -1) || ((uint_t)ret != instr_info.op_size)) {
- PrintError("Read error in emulator\n");
- V3_FreePage((void *)V3_PAddr((void *)(data_page->page_addr)));
- V3_Free(data_page);
- return -1;
+
+ if (dec_instr.is_str_op) {
+ return emulate_string_write_op(info, &dec_instr, write_gva, write_gpa, dst_addr);
}
- v3_replace_shdw_page32(info, data_page->va, &(data_page->pte), &saved_pte);
-
-
- list_add(&(data_page->page_list), &(info->emulator.emulated_pages));
- info->emulator.num_emulated_pages++;
- if (saved_pte.present == 1) {
- struct saved_page * saved_data_page = V3_Malloc(sizeof(struct saved_page));
- saved_data_page->pte = saved_pte;
- saved_data_page->va = PAGE_ADDR(read_gva);
-
- list_add(&(saved_data_page->page_list), &(info->emulator.saved_pages));
- info->emulator.num_saved_pages++;
+ if ((dec_instr.dst_operand.type != MEM_OPERAND) ||
+ (dec_instr.dst_operand.operand != write_gva)) {
+ PrintError("Inconsistency between Pagefault and Instruction Decode XED_ADDR=%p, PF_ADDR=%p\n",
+ (void *)dec_instr.dst_operand.operand, (void *)write_gva);
+ return -1;
}
- // setup_code_page(info, instr, &instr_info);
- set_stepping(info);
-
- info->emulator.running = 1;
- info->run_state = VM_EMULATING;
- info->emulator.instr_length = instr_info.instr_length;
-
- return 0;
-}
-
-
-
-int v3_emulate_memory_write(struct guest_info * info, addr_t write_gva,
- int (*write)(addr_t write_addr, void * src, uint_t length, void * priv_data),
- addr_t write_gpa, void * private_data) {
-
- struct basic_instr_info instr_info;
- uchar_t instr[15];
- int ret;
- struct write_region * write_op = V3_Malloc(sizeof(struct write_region ));
- struct emulated_page * data_page = V3_Malloc(sizeof(struct emulated_page));
- addr_t data_addr_offset = PAGE_OFFSET(write_gva);
- pte32_t saved_pte;
- int i;
-
- PrintDebug("Emulating Write for instruction at 0x%p\n", (void *)(addr_t)(info->rip));
-
- if (info->mem_mode == PHYSICAL_MEM) {
- ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
- } else {
- ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ if (dec_instr.src_operand.type == MEM_OPERAND) {
+ if (info->mem_mode == PHYSICAL_MEM) {
+ if (guest_pa_to_host_va(info, dec_instr.src_operand.operand, &src_addr) == -1) {
+ PrintError("Could not translate write Source (Physical) to host VA\n");
+ return -1;
+ }
+ } else {
+ if (guest_va_to_host_va(info, dec_instr.src_operand.operand, &src_addr) == -1) {
+ PrintError("Could not translate write Source (Virtual) to host VA\n");
+ return -1;
+ }
+ }
+ } else if (dec_instr.src_operand.type == REG_OPERAND) {
+ src_addr = dec_instr.src_operand.operand;
+ } else {
+ src_addr = (addr_t)&(dec_instr.src_operand.operand);
}
- PrintDebug("Instruction is");
- for (i=0;i<15;i++) { PrintDebug(" 0x%x",instr[i]); }
- PrintDebug("\n");
-
- if (v3_basic_mem_decode(info, (addr_t)instr, &instr_info) == -1) {
- PrintError("Could not do a basic memory instruction decode\n");
- V3_Free(write_op);
- V3_Free(data_page);
+ PrintDebug("Dst_Addr Ptr = %p (val=%p), SRC operand = %p\n",
+ (void *)dst_addr, (void *)*dst_addr, (void *)src_addr);
+
+
+ if (dec_instr.dst_operand.size == 1) {
+
+ switch (dec_instr.op_type) {
+ case V3_OP_ADC:
+ adc8((addr_t *)*dst_addr, (addr_t *)src_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_ADD:
+ add8((addr_t *)*dst_addr, (addr_t *)src_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_AND:
+ and8((addr_t *)*dst_addr, (addr_t *)src_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_OR:
+ or8((addr_t *)*dst_addr, (addr_t *)src_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_XOR:
+ xor8((addr_t *)*dst_addr, (addr_t *)src_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_SUB:
+ sub8((addr_t *)*dst_addr, (addr_t *)src_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+
+ case V3_OP_MOV:
+ mov8((addr_t *)*dst_addr, (addr_t *)src_addr);
+ break;
+ case V3_OP_NOT:
+ not8((addr_t *)*dst_addr);
+ break;
+ case V3_OP_XCHG:
+ xchg8((addr_t *)*dst_addr, (addr_t *)src_addr);
+ break;
+
+
+ case V3_OP_INC:
+ inc8((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_DEC:
+ dec8((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_NEG:
+ neg8((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_SETB:
+ setb8((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_SETBE:
+ setbe8((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_SETL:
+ setl8((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_SETLE:
+ setle8((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_SETNB:
+ setnb8((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_SETNBE:
+ setnbe8((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_SETNL:
+ setnl8((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_SETNLE:
+ setnle8((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_SETNO:
+ setno8((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_SETNP:
+ setnp8((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_SETNS:
+ setns8((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_SETNZ:
+ setnz8((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_SETO:
+ seto8((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_SETP:
+ setp8((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_SETS:
+ sets8((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_SETZ:
+ setz8((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+
+ default:
+ PrintError("Unknown 8 bit instruction\n");
+ return -1;
+ }
+
+ } else if (dec_instr.dst_operand.size == 2) {
+
+ switch (dec_instr.op_type) {
+ case V3_OP_ADC:
+ adc16((addr_t *)*dst_addr, (addr_t *)src_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_ADD:
+ add16((addr_t *)*dst_addr, (addr_t *)src_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_AND:
+ and16((addr_t *)*dst_addr, (addr_t *)src_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_OR:
+ or16((addr_t *)*dst_addr, (addr_t *)src_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_XOR:
+ xor16((addr_t *)*dst_addr, (addr_t *)src_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_SUB:
+ sub16((addr_t *)*dst_addr, (addr_t *)src_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+
+
+ case V3_OP_INC:
+ inc16((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_DEC:
+ dec16((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_NEG:
+ neg16((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+
+ case V3_OP_MOV:
+ mov16((addr_t *)*dst_addr, (addr_t *)src_addr);
+ break;
+ case V3_OP_NOT:
+ not16((addr_t *)*dst_addr);
+ break;
+ case V3_OP_XCHG:
+ xchg16((addr_t *)*dst_addr, (addr_t *)src_addr);
+ break;
+
+ default:
+ PrintError("Unknown 16 bit instruction\n");
+ return -1;
+ }
+
+ } else if (dec_instr.dst_operand.size == 4) {
+
+ switch (dec_instr.op_type) {
+ case V3_OP_ADC:
+ adc32((addr_t *)*dst_addr, (addr_t *)src_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_ADD:
+ add32((addr_t *)*dst_addr, (addr_t *)src_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_AND:
+ and32((addr_t *)*dst_addr, (addr_t *)src_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_OR:
+ or32((addr_t *)*dst_addr, (addr_t *)src_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_XOR:
+ xor32((addr_t *)*dst_addr, (addr_t *)src_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_SUB:
+ sub32((addr_t *)*dst_addr, (addr_t *)src_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+
+ case V3_OP_INC:
+ inc32((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_DEC:
+ dec32((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+ case V3_OP_NEG:
+ neg32((addr_t *)*dst_addr, (addr_t *)&(info->ctrl_regs.rflags));
+ break;
+
+ case V3_OP_MOV:
+ mov32((addr_t *)*dst_addr, (addr_t *)src_addr);
+ break;
+ case V3_OP_NOT:
+ not32((addr_t *)*dst_addr);
+ break;
+ case V3_OP_XCHG:
+ xchg32((addr_t *)*dst_addr, (addr_t *)src_addr);
+ break;
+
+ default:
+ PrintError("Unknown 32 bit instruction\n");
+ return -1;
+ }
+
+ } else if (dec_instr.dst_operand.size == 8) {
+ PrintError("64 bit instructions not handled\n");
return -1;
- }
-
- if (instr_info.has_rep==1) {
- PrintDebug("Emulated instruction has rep\n");
- }
-
- /*
- if (instr_info.has_rep == 1) {
- PrintError("We currently don't handle rep* instructions\n");
- V3_Free(write_op);
- V3_Free(data_page);
+ } else {
+ PrintError("Invalid Operation Size\n");
return -1;
}
- */
- data_page->page_addr = get_new_page();
- data_page->va = PAGE_ADDR(write_gva);
- data_page->pte.present = 1;
- data_page->pte.writable = 1;
- data_page->pte.user_page = 1;
- data_page->pte.page_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr((void *)(addr_t)(data_page->page_addr)));
+ info->rip += dec_instr.instr_length;
-
-
- write_op->write = write;
- write_op->write_addr = write_gpa;
- write_op->length = instr_info.op_size;
- write_op->private_data = private_data;
-
- write_op->write_data = (void *)(data_page->page_addr + data_addr_offset);
-
- list_add(&(write_op->write_list), &(info->emulator.write_regions));
- info->emulator.num_write_regions--;
-
- v3_replace_shdw_page32(info, data_page->va, &(data_page->pte), &saved_pte);
-
-
- list_add(&(data_page->page_list), &(info->emulator.emulated_pages));
- info->emulator.num_emulated_pages++;
-
- if (saved_pte.present == 1) {
- struct saved_page * saved_data_page = V3_Malloc(sizeof(struct saved_page));
- saved_data_page->pte = saved_pte;
- saved_data_page->va = PAGE_ADDR(write_gva);
-
- list_add(&(saved_data_page->page_list), &(info->emulator.saved_pages));
- info->emulator.num_saved_pages++;
- }
-
-
- if (info->emulator.running == 0) {
- // setup_code_page(info, instr, &instr_info);
- set_stepping(info);
- info->emulator.running = 1;
- info->run_state = VM_EMULATING;
- info->emulator.instr_length = instr_info.instr_length;
- }
-
- return 0;
+ return dec_instr.dst_operand.size;
}
-
-// end emulation
-int v3_emulation_exit_handler(struct guest_info * info) {
- struct saved_page * svpg, * p_svpg;
- struct emulated_page * empg, * p_empg;
- struct write_region * wr_reg, * p_wr_reg;
- pte32_t dummy_pte;
-
- // Complete the writes
- // delete writes
- // swap out emulated pages with blank dummies
- // swap in saved pages
- // increment rip
-
- PrintDebug("V3 Emulation Exit Handler\n");
-
- list_for_each_entry_safe(wr_reg, p_wr_reg, &(info->emulator.write_regions), write_list) {
- wr_reg->write(wr_reg->write_addr, wr_reg->write_data, wr_reg->length, wr_reg->private_data);
- PrintDebug("Writing \n");
-
- list_del(&(wr_reg->write_list));
- V3_Free(wr_reg);
-
- }
- info->emulator.num_write_regions = 0;
-
-
- *(uint_t *)&dummy_pte = 0;
-
- list_for_each_entry_safe(empg, p_empg, &(info->emulator.emulated_pages), page_list) {
- pte32_t empte32_t;
-
- PrintDebug("wiping page %p\n", (void *)(addr_t)(empg->va));
-
- v3_replace_shdw_page32(info, empg->va, &dummy_pte, &empte32_t);
- V3_FreePage((void *)(V3_PAddr((void *)(empg->page_addr))));
-
- list_del(&(empg->page_list));
- V3_Free(empg);
- }
- info->emulator.num_emulated_pages = 0;
-
- list_for_each_entry_safe(svpg, p_svpg, &(info->emulator.saved_pages), page_list) {
-
- PrintDebug("Setting Saved page %p back\n", (void *)(addr_t)(svpg->va));
- v3_replace_shdw_page32(info, empg->va, &(svpg->pte), &dummy_pte);
-
- list_del(&(svpg->page_list));
- V3_Free(svpg);
- }
- info->emulator.num_saved_pages = 0;
-
- info->run_state = VM_RUNNING;
- info->emulator.running = 0;
- //info->rip += info->emulator.instr_length;
-
-
- PrintDebug("Returning to rip: 0x%p\n", (void *)(addr_t)(info->rip));
-
- info->emulator.instr_length = 0;
-
-
- unset_stepping(info);
-
-
- PrintDebug("returning from emulation\n");
-
- return 0;
-}
+// Add 'value' to the stored value of the entry matching 'key', in place.
+//
+// Returns -1 if a matching entry was found and incremented, 0 if no entry
+// matched.  NOTE(review): this is inverted from the usual 0-on-success
+// convention used elsewhere -- confirm callers test the return accordingly.
+// Only the first matching entry in the bucket chain is updated.
+int hashtable_inc(struct hashtable * htable, addr_t key, addr_t value) {
+  struct hash_entry * tmp_entry;
+  uint_t hash_value;
+  uint_t index;
+
+  hash_value = do_hash(htable, key);
+
+  index = indexFor(htable->table_length, hash_value);
+
+  tmp_entry = htable->table[index];
+
+  while (tmp_entry != NULL) {
+    /* Check hash value to short circuit heavier comparison */
+    if ((hash_value == tmp_entry->hash) && (htable->eq_fn(key, tmp_entry->key))) {
+
+      tmp_entry->value += value;
+      return -1;
+    }
+    tmp_entry = tmp_entry->next;
+  }
+  return 0;
+}
+
+
+// Subtract 'value' from the stored value of the entry matching 'key', in place.
+//
+// Returns -1 if a matching entry was found and decremented, 0 if no entry
+// matched (same inverted convention as hashtable_inc).  No underflow check is
+// performed on tmp_entry->value; the caller is responsible for sane deltas.
+int hashtable_dec(struct hashtable * htable, addr_t key, addr_t value) {
+  struct hash_entry * tmp_entry;
+  uint_t hash_value;
+  uint_t index;
+
+  hash_value = do_hash(htable, key);
+
+  index = indexFor(htable->table_length, hash_value);
+
+  tmp_entry = htable->table[index];
+
+  while (tmp_entry != NULL) {
+    /* Check hash value to short circuit heavier comparison */
+    if ((hash_value == tmp_entry->hash) && (htable->eq_fn(key, tmp_entry->key))) {
+
+      tmp_entry->value -= value;
+      return -1;
+    }
+    tmp_entry = tmp_entry->next;
+  }
+  return 0;
+}
+
+
+
/*****************************************************************************/
/* returns value associated with key */
+++ /dev/null
-#include <stdio.h>
-#include <stdlib.h>
-
-#define addr_t unsigned long
-
-
-
-
-void test() {
- int a = 10;
- int b;
-
- asm ("movl %1, %%eax\n\t"
- "movl %%eax, %0\n\t"
- :"=r"(b) /* output */
- :"r"(a) /* input */
- :"%eax" /* clobbered register */
- );
-
-}
-
-void get_flags(addr_t * flags) {
- addr_t tmp;
- asm ("pushfq\n\t"
- "pop %0\n\t"
- :"=r"(tmp)
- :
- );
-
- *flags = tmp;
-}
-
-void adc64(int * dst, int * src, addr_t * flags) {
- int tmp_dst = *dst, tmp_src = *src;
- addr_t tmp_flags = *flags;
-
- char * inst = "adcl";
-
- // Some of the flags values are not copied out in a pushf, we save them here
- addr_t flags_rsvd = *flags & ~0xfffe7fff;
-
- asm volatile (
- "pushfq\r\n"
- "push %3\r\n"
- "popfq\r\n"
- "adcl %2, %0\r\n"
- "pushfq\r\n"
- "pop %1\r\n"
- "popfq\r\n"
- : "=a"(tmp_dst),"=c"(tmp_flags)
- : "b"(tmp_src),"c"(tmp_flags), "0"(tmp_dst)
- );
-
- *dst = tmp_dst;
- *flags = tmp_flags;
- *flags |= flags_rsvd;
-
-}
-
-
-void adc32(int * dst, int * src, addr_t * flags) {
- int tmp_dst = *dst, tmp_src = *src;
- addr_t tmp_flags = *flags;
-
-
- asm volatile (
- "pushfd\r\n"
- "push %3\r\n"
- "popfd\r\n"
- "adcl %2, %0\r\n"
- "pushfd\r\n"
- "pop %1\r\n"
- "popfd\r\n"
- : "=a"(tmp_dst),"=c"(tmp_flags)
- : "b"(tmp_src),"c"(tmp_flags), "0"(tmp_dst)
- );
-
- *dst = tmp_dst;
- *flags = tmp_flags;
-
-}
-
-
-int main(int argc, char ** argv) {
- addr_t flags;
- int dest = 4;
- int src = 5;
-
- printf("sizeof ulong: %d\n", sizeof(unsigned long));
-
- printf("Getting flags\n");
- get_flags(&flags);
- flags = flags | 0x1;
-
- printf("Flags=0x%x\n", flags);
- test();
- printf("Adding\n");
- adc64(&dest, &src, &flags);
- printf("Result=%d\n", dest);
-
-}
static int default_read(ushort_t port, void * dst, uint_t length, void * priv_data);
-void v3_init_vmm_io_map(struct guest_info * info) {
-  struct vmm_io_map * io_map = &(info->io_map);
-  io_map->num_ports = 0;
-  io_map->head = NULL;
+/* Initialize the guest's I/O port hook map as an empty red-black tree. */
+void v3_init_io_map(struct guest_info * info) {
+  info->io_map.rb_node = NULL;
}
-static int add_io_hook(struct vmm_io_map * io_map, struct vmm_io_hook * io_hook) {
+/*
+ * Link 'hook' into the guest's io_map rb-tree, keyed by port number.
+ * Returns NULL on success, or the pre-existing hook if the port is
+ * already hooked (the tree is left unchanged in that case).
+ * The caller must rebalance with v3_rb_insert_color() -- see insert_io_hook().
+ */
+static inline struct v3_io_hook * __insert_io_hook(struct guest_info * info, struct v3_io_hook * hook) {
+  struct rb_node ** p = &(info->io_map.rb_node);
+  struct rb_node * parent = NULL;
+  struct v3_io_hook * tmp_hook = NULL;
-  if (!(io_map->head)) {
-    io_map->head = io_hook;
-    io_map->num_ports = 1;
-    return 0;
-  } else if (io_map->head->port > io_hook->port) {
-    io_hook->next = io_map->head;
+  while (*p) {
+    parent = *p;
+    tmp_hook = rb_entry(parent, struct v3_io_hook, tree_node);
-    io_map->head->prev = io_hook;
-    io_map->head = io_hook;
-    io_map->num_ports++;
-
-    return 0;
-  } else {
-    struct vmm_io_hook * tmp_hook = io_map->head;
-
-    while ((tmp_hook->next) && 
-	   (tmp_hook->next->port <= io_hook->port)) {
-      tmp_hook = tmp_hook->next;
-    }
-
-    if (tmp_hook->port == io_hook->port) {
-      //tmp_hook->read = io_hook->read;
-      //tmp_hook->write = io_hook->write;
-      //V3_Free(io_hook);
-      return -1;
+    if (hook->port < tmp_hook->port) {
+      p = &(*p)->rb_left;
+    } else if (hook->port > tmp_hook->port) {
+      p = &(*p)->rb_right;
    } else {
-      io_hook->prev = tmp_hook;
-      io_hook->next = tmp_hook->next;
+      // Duplicate port: report the existing hook to the caller.
+      return tmp_hook;
+    }
+  }
+  rb_link_node(&(hook->tree_node), parent, p);
-      if (tmp_hook->next) {
-	tmp_hook->next->prev = io_hook;
-      }
+  return NULL;
+}
-    tmp_hook->next = io_hook;
-    io_map->num_ports++;
-    return 0;
-  }
+/*
+ * Insert an I/O hook and rebalance the rb-tree.
+ * Returns NULL on success, or the already-present hook for a duplicate port.
+ */
+static inline struct v3_io_hook * insert_io_hook(struct guest_info * info, struct v3_io_hook * hook) {
+  struct v3_io_hook * ret;
+
+  if ((ret = __insert_io_hook(info, hook))) {
+    return ret;
  }
-  return -1;
+
+  v3_rb_insert_color(&(hook->tree_node), &(info->io_map));
+
+  return NULL;
}
-static int remove_io_hook(struct vmm_io_map * io_map, struct vmm_io_hook * io_hook) {
-  if (io_map->head == io_hook) {
-    io_map->head = io_hook->next;
-  } else if (io_hook->prev) {
-    io_hook->prev->next = io_hook->next;
-  } else {
-    return -1;
-    // data corruption failure
-  }
-
-  if (io_hook->next) {
-    io_hook->next->prev = io_hook->prev;
-  }
-  io_map->num_ports--;
+/*
+ * Look up the hook for 'port' via a standard rb-tree binary search.
+ * Returns the hook, or NULL if the port is not hooked.
+ */
+struct v3_io_hook * v3_get_io_hook(struct guest_info * info, uint_t port) {
+  struct rb_node * n = info->io_map.rb_node;
+  struct v3_io_hook * hook = NULL;
-  return 0;
+  while (n) {
+    hook = rb_entry(n, struct v3_io_hook, tree_node);
+
+    if (port < hook->port) {
+      n = n->rb_left;
+    } else if (port > hook->port) {
+      n = n->rb_right;
+    } else {
+      return hook;
+    }
+  }
+
+  return NULL;
}
+
int v3_hook_io_port(struct guest_info * info, uint_t port,
int (*read)(ushort_t port, void * dst, uint_t length, void * priv_data),
int (*write)(ushort_t port, void * src, uint_t length, void * priv_data),
void * priv_data) {
- struct vmm_io_map * io_map = &(info->io_map);
- struct vmm_io_hook * io_hook = (struct vmm_io_hook *)V3_Malloc(sizeof(struct vmm_io_hook));
+ struct v3_io_hook * io_hook = (struct v3_io_hook *)V3_Malloc(sizeof(struct v3_io_hook));
io_hook->port = port;
io_hook->write = write;
}
- io_hook->next = NULL;
- io_hook->prev = NULL;
io_hook->priv_data = priv_data;
- if (add_io_hook(io_map, io_hook) != 0) {
+ if (insert_io_hook(info, io_hook)) {
V3_Free(io_hook);
return -1;
}
}
+/*
+ * Remove and free the hook for 'port'.
+ * Returns 0 on success, -1 if the port was not hooked.
+ */
int v3_unhook_io_port(struct guest_info * info, uint_t port) {
-  struct vmm_io_map * io_map = &(info->io_map);
-  struct vmm_io_hook * hook = v3_get_io_hook(io_map, port);
+  struct v3_io_hook * hook = v3_get_io_hook(info, port);
  if (hook == NULL) {
    return -1;
  }
-  remove_io_hook(io_map, hook);
+  v3_rb_erase(&(hook->tree_node), &(info->io_map));
+
+  V3_Free(hook);
+
  return 0;
}
-struct vmm_io_hook * v3_get_io_hook(struct vmm_io_map * io_map, uint_t port) {
- struct vmm_io_hook * tmp_hook;
- FOREACH_IO_HOOK(*io_map, tmp_hook) {
- if (tmp_hook->port == port) {
- return tmp_hook;
- }
- }
- return NULL;
-}
-void v3_print_io_map(struct vmm_io_map * io_map) {
-  struct vmm_io_hook * iter = io_map->head;
+
+/* Dump every hooked I/O port (and its read/write handlers) for debugging. */
+void v3_print_io_map(struct guest_info * info) {
+  struct v3_io_hook * tmp_hook = NULL;
+  struct rb_node * node = v3_rb_first(&(info->io_map));
-  PrintDebug("VMM IO Map (Entries=%d)\n", io_map->num_ports);
+  // The old io_map->num_ports counter is removed by this patch, so the
+  // previous context line referencing it would no longer compile.
+  PrintDebug("VMM IO Map:\n");
+
+  // Empty map: v3_rb_first() returns NULL and the do/while below
+  // would dereference it.
+  if (node == NULL) {
+    return;
+  }
-  while (iter) {
+  do {
+    tmp_hook = rb_entry(node, struct v3_io_hook, tree_node);
+
    PrintDebug("IO Port: %hu (Read=%p) (Write=%p)\n",
-	       iter->port,
-	       (void *)(iter->read), (void *)(iter->write));
-  }
+	       tmp_hook->port,
+	       (void *)(tmp_hook->read), (void *)(tmp_hook->write));
+  } while ((node = v3_rb_next(node)));
}
-void init_shadow_region(struct shadow_region * entry,
- addr_t guest_addr_start,
- addr_t guest_addr_end,
- guest_region_type_t guest_region_type,
- host_region_type_t host_region_type)
-{
- entry->guest_type = guest_region_type;
- entry->guest_start = guest_addr_start;
- entry->guest_end = guest_addr_end;
- entry->host_type = host_region_type;
- entry->host_addr = 0;
- entry->next=entry->prev = NULL;
-}
-
-int add_shadow_region_passthrough( struct guest_info * guest_info,
- addr_t guest_addr_start,
- addr_t guest_addr_end,
- addr_t host_addr)
-{
- struct shadow_region * entry = (struct shadow_region *)V3_Malloc(sizeof(struct shadow_region));
-
- init_shadow_region(entry, guest_addr_start, guest_addr_end,
- GUEST_REGION_PHYSICAL_MEMORY, HOST_REGION_PHYSICAL_MEMORY);
- entry->host_addr = host_addr;
-
- return add_shadow_region(&(guest_info->mem_map), entry);
-}
-int hook_guest_mem(struct guest_info * info, addr_t guest_addr_start, addr_t guest_addr_end,
- int (*read)(addr_t guest_addr, void * dst, uint_t length, void * priv_data),
- int (*write)(addr_t guest_addr, void * src, uint_t length, void * priv_data),
- void * priv_data) {
-
- struct shadow_region * entry = (struct shadow_region *)V3_Malloc(sizeof(struct shadow_region));
- struct vmm_mem_hook * hook = (struct vmm_mem_hook *)V3_Malloc(sizeof(struct vmm_mem_hook));
- memset(hook, 0, sizeof(struct vmm_mem_hook));
- hook->read = read;
- hook->write = write;
- hook->region = entry;
- hook->priv_data = priv_data;
+static inline
+struct v3_shadow_region * insert_shadow_region(struct guest_info * info,
+ struct v3_shadow_region * region);
- init_shadow_region(entry, guest_addr_start, guest_addr_end,
- GUEST_REGION_PHYSICAL_MEMORY, HOST_REGION_HOOK);
- entry->host_addr = (addr_t)hook;
- return add_shadow_region(&(info->mem_map), entry);
+/* Initialize the guest's shadow memory map as an empty red-black tree. */
+void v3_init_shadow_map(struct guest_info * info) {
+  info->mem_map.rb_node = NULL;
}
+/*
+ * Tear down the whole shadow map, erasing and freeing every region.
+ * The next node is fetched before deletion so iteration survives the erase.
+ */
+void v3_delete_shadow_map(struct guest_info * info) {
+  struct rb_node * node = v3_rb_first(&(info->mem_map));
+  struct v3_shadow_region * reg;
+  // NOTE(review): tmp_node is assigned below but never used -- candidate
+  // for removal in a later cleanup.
+  struct rb_node * tmp_node = NULL;
+
+  while (node) {
+    reg = rb_entry(node, struct v3_shadow_region, tree_node);
+    tmp_node = node;
+    node = v3_rb_next(node);
-struct vmm_mem_hook * get_mem_hook(struct guest_info * info, addr_t guest_addr) {
-  struct shadow_region * region = get_shadow_region_by_addr(&(info->mem_map), guest_addr);
-
-  if (region == NULL) {
-    PrintDebug("Could not find shadow region for addr: %p\n", (void *)guest_addr);
-    return NULL;
+    v3_delete_shadow_region(info, reg);
  }
-
-  return (struct vmm_mem_hook *)(region->host_addr);
}
-/* mem_addr is the guest physical memory address */
-static int mem_hook_dispatch(struct guest_info * info,
- addr_t fault_gva, addr_t fault_gpa,
- pf_error_t access_info, struct vmm_mem_hook * hook)
-{
-
- // emulate and then dispatch
- // or dispatch and emulate
-
-
- if (access_info.write == 1) {
- if (v3_emulate_memory_write(info, fault_gva, hook->write, fault_gpa, hook->priv_data) == -1) {
- PrintError("Memory write emulation failed\n");
- return -1;
- }
-
- } else {
- if (v3_emulate_memory_read(info, fault_gva, hook->read, fault_gpa, hook->priv_data) == -1) {
- PrintError("Memory read emulation failed\n");
- return -1;
- }
- }
-
- return 0;
-}
-int handle_special_page_fault(struct guest_info * info,
- addr_t fault_gva, addr_t fault_gpa,
- pf_error_t access_info)
+/*
+ * Map [guest_addr_start, guest_addr_end) directly to host_addr as an
+ * ALLOCATED (non-hooked) region. Returns 0 on success, -1 if the range
+ * overlaps an existing region.
+ */
+int v3_add_shadow_mem( struct guest_info * info,
+		       addr_t               guest_addr_start,
+		       addr_t               guest_addr_end,
+		       addr_t               host_addr) 
{
-  struct shadow_region * reg = get_shadow_region_by_addr(&(info->mem_map), fault_gpa);
+  // NOTE(review): V3_Malloc return value is not checked -- confirm the
+  // host allocator panics internally rather than returning NULL.
+  struct v3_shadow_region * entry = (struct v3_shadow_region *)V3_Malloc(sizeof(struct v3_shadow_region));
-  PrintDebug("Handling Special Page Fault\n");
+  entry->guest_start = guest_addr_start;
+  entry->guest_end = guest_addr_end;
+  entry->host_type = SHDW_REGION_ALLOCATED;
+  entry->host_addr = host_addr;
+  entry->write_hook = NULL;
+  entry->read_hook = NULL;
+  entry->priv_data = NULL;
-  switch (reg->host_type) {
-  case HOST_REGION_HOOK:
-    return mem_hook_dispatch(info, fault_gva, fault_gpa, access_info, (struct vmm_mem_hook *)(reg->host_addr));
-  default:
+  if (insert_shadow_region(info, entry)) {
+    V3_Free(entry);
    return -1;
  }
  return 0;
-
}
-void init_shadow_map(struct guest_info * info) {
-  struct shadow_map * map = &(info->mem_map);
+/*
+ * Hook guest writes to [guest_addr_start, guest_addr_end): reads go
+ * straight to host_addr, writes are dispatched to 'write'.
+ * Returns 0 on success, -1 on overlap with an existing region.
+ */
+int v3_hook_write_mem(struct guest_info * info, addr_t guest_addr_start, addr_t guest_addr_end, 
+		      addr_t host_addr, 
+		      int (*write)(addr_t guest_addr, void * src, uint_t length, void * priv_data),
+		      void * priv_data) {
-  map->num_regions = 0;
-
-  map->head = NULL;
-}
+  struct v3_shadow_region * entry = (struct v3_shadow_region *)V3_Malloc(sizeof(struct v3_shadow_region));
-void free_shadow_map(struct shadow_map * map) {
-  struct shadow_region * cursor = map->head;
-  struct shadow_region * tmp = NULL;
+  entry->guest_start = guest_addr_start;
+  entry->guest_end = guest_addr_end;
+  entry->host_type = SHDW_REGION_WRITE_HOOK;
+  entry->host_addr = host_addr;
+  entry->write_hook = write;
+  entry->read_hook = NULL;
+  entry->priv_data = priv_data;
-  while(cursor) {
-    tmp = cursor;
-    cursor = cursor->next;
-    V3_Free(tmp);
+  if (insert_shadow_region(info, entry)) {
+    V3_Free(entry);
+    return -1;
  }
-  V3_Free(map);
+  return 0;
}
-
-
-
-int add_shadow_region(struct shadow_map * map,
- struct shadow_region * region)
-{
- struct shadow_region * cursor = map->head;
-
- PrintDebug("Adding Shadow Region: (0x%p-0x%p)\n",
- (void *)region->guest_start, (void *)region->guest_end);
-
- if ((!cursor) || (cursor->guest_start >= region->guest_end)) {
- region->prev = NULL;
- region->next = cursor;
- map->num_regions++;
- map->head = region;
- return 0;
- }
-
- while (cursor) {
- // Check if it overlaps with the current cursor
- if ((cursor->guest_end > region->guest_start) && (cursor->guest_start < region->guest_start)) {
- // overlaps not allowed
- return -1;
- }
-
- if (!(cursor->next)) {
- // add to the end of the list
- cursor->next = region;
- region->prev = cursor;
- region->next = NULL;
- map->num_regions++;
- return 0;
- } else if (cursor->next->guest_start >= region->guest_end) {
- // add here
- region->next = cursor->next;
- region->prev = cursor;
-
- cursor->next->prev = region;
- cursor->next = region;
-
- map->num_regions++;
-
- return 0;
- } else if (cursor->next->guest_end <= region->guest_start) {
- cursor = cursor->next;
- } else {
- // This cannot happen!
- // we should panic here
- return -1;
- }
- }
+/*
+ * Fully hook [guest_addr_start, guest_addr_end): both reads and writes
+ * are dispatched to the supplied handlers; no host memory backs the range.
+ * Returns 0 on success, -1 on overlap with an existing region.
+ */
+int v3_hook_full_mem(struct guest_info * info, addr_t guest_addr_start, addr_t guest_addr_end,
+		     int (*read)(addr_t guest_addr, void * dst, uint_t length, void * priv_data),
+		     int (*write)(addr_t guest_addr, void * src, uint_t length, void * priv_data),
+		     void * priv_data) {
-  // This cannot happen
-  // We should panic here
-  return -1;
-}
+  struct v3_shadow_region * entry = (struct v3_shadow_region *)V3_Malloc(sizeof(struct v3_shadow_region));
+  entry->guest_start = guest_addr_start;
+  entry->guest_end = guest_addr_end;
+  entry->host_type = SHDW_REGION_FULL_HOOK;
+  entry->host_addr = (addr_t)NULL;
+  entry->write_hook = write;
+  entry->read_hook = read;
+  entry->priv_data = priv_data;
+
+  if (insert_shadow_region(info, entry)) {
+    V3_Free(entry);
+    return -1;
+  }
-int delete_shadow_region(struct shadow_map * map, 
-			 addr_t guest_start, 
-			 addr_t guest_end) {
-  return -1;
+  return 0;
}
-struct shadow_region *get_shadow_region_by_index(struct shadow_map * map,
- uint_t index) {
- struct shadow_region * reg = map->head;
- uint_t i = 0;
-
- while (reg) {
- if (i == index) {
- return reg;
- }
- reg = reg->next;
- i++;
- }
- return NULL;
-}
+/*
+ * Link 'region' into the mem_map rb-tree, ordered by guest address range.
+ * Returns NULL on success, or the overlapping region (tree unchanged).
+ * Caller must rebalance with v3_rb_insert_color() -- see insert_shadow_region().
+ */
+static inline 
+struct v3_shadow_region * __insert_shadow_region(struct guest_info * info, 
+						 struct v3_shadow_region * region) {
+  struct rb_node ** p = &(info->mem_map.rb_node);
+  struct rb_node * parent = NULL;
+  struct v3_shadow_region * tmp_region;
-struct shadow_region * get_shadow_region_by_addr(struct shadow_map * map, 
-						 addr_t addr) {
-  struct shadow_region * reg = map->head;
+  while (*p) {
+    parent = *p;
+    tmp_region = rb_entry(parent, struct v3_shadow_region, tree_node);
-  while (reg) {
-    if ((reg->guest_start <= addr) && (reg->guest_end > addr)) {
-      return reg;
-    } else if (reg->guest_start > addr) {
-      return NULL;
+    // Ranges are half-open [start, end); anything not strictly left or
+    // right of the current node overlaps it.
+    if (region->guest_end <= tmp_region->guest_start) {
+      p = &(*p)->rb_left;
+    } else if (region->guest_start >= tmp_region->guest_end) {
+      p = &(*p)->rb_right;
    } else {
-      reg = reg->next;
+      return tmp_region;
    }
  }
-  return NULL;
-}
-
-
-host_region_type_t get_shadow_addr_type(struct guest_info * info, addr_t guest_addr) {
-  struct shadow_region * reg = get_shadow_region_by_addr(&(info->mem_map), guest_addr);
-
-  if (!reg) {
-    return HOST_REGION_INVALID;
-  } else {
-    return reg->host_type;
-  }
-}
-addr_t get_shadow_addr(struct guest_info * info, addr_t guest_addr) {
-  struct shadow_region * reg = get_shadow_region_by_addr(&(info->mem_map), guest_addr);
-
-  if (!reg) {
-    return 0;
-  } else {
-    return (guest_addr - reg->guest_start) + reg->host_addr;
-  }
+  rb_link_node(&(region->tree_node), parent, p);
+
+  return NULL;
}
-host_region_type_t lookup_shadow_map_addr(struct shadow_map * map, addr_t guest_addr, addr_t * host_addr) {
-  struct shadow_region * reg = get_shadow_region_by_addr(map, guest_addr);
+/*
+ * Insert a shadow region and rebalance the tree.
+ * Returns NULL on success, or the overlapping region on conflict.
+ */
+static inline 
+struct v3_shadow_region * insert_shadow_region(struct guest_info * info, 
+					       struct v3_shadow_region * region) {
+  struct v3_shadow_region * ret;
-  if (!reg) {
-    // No mapping exists
-    return HOST_REGION_INVALID;
-  } else {
-    switch (reg->host_type) {
-    case HOST_REGION_PHYSICAL_MEMORY:
-      *host_addr = (guest_addr - reg->guest_start) + reg->host_addr;
-      return reg->host_type;
-    case HOST_REGION_MEMORY_MAPPED_DEVICE:
-    case HOST_REGION_UNALLOCATED:
-      // ...
-    default:
-      *host_addr = 0;
-      return reg->host_type;
-    }
+  if ((ret = __insert_shadow_region(info, region))) {
+    return ret;
  }
-}
-
-
-void print_shadow_map(struct shadow_map * map) {
-  struct shadow_region * cur = map->head;
-  int i = 0;
+
+  v3_rb_insert_color(&(region->tree_node), &(info->mem_map));
-  PrintDebug("Memory Layout (regions: %d) \n", map->num_regions);
-
-  while (cur) {
-    PrintDebug("%d:  0x%p - 0x%p (%s) -> ", i, 
-	       (void *)cur->guest_start, (void *)(cur->guest_end - 1),
-	       cur->guest_type == GUEST_REGION_PHYSICAL_MEMORY ? "GUEST_REGION_PHYSICAL_MEMORY" :
-	       cur->guest_type == GUEST_REGION_NOTHING ? "GUEST_REGION_NOTHING" :
-	       cur->guest_type == GUEST_REGION_MEMORY_MAPPED_DEVICE ? "GUEST_REGION_MEMORY_MAPPED_DEVICE" :
-	       "UNKNOWN");
-    if (cur->host_type == HOST_REGION_PHYSICAL_MEMORY || 
-	cur->host_type == HOST_REGION_UNALLOCATED ||
-	cur->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) {
-      PrintDebug("0x%p", (void *)(cur->host_addr));
-    }
-    PrintDebug("(%s)\n",
-	       cur->host_type == HOST_REGION_PHYSICAL_MEMORY ? "HOST_REGION_PHYSICAL_MEMORY" :
-	       cur->host_type == HOST_REGION_UNALLOCATED ? "HOST_REGION_UNALLOACTED" :
-	       cur->host_type == HOST_REGION_HOOK ? "HOST_REGION_HOOK" :
-	       cur->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE ? "HOST_REGION_MEMORY_MAPPED_DEVICE" :
-	       cur->host_type == HOST_REGION_REMOTE ? "HOST_REGION_REMOTE" :
-	       cur->host_type == HOST_REGION_SWAPPED ? "HOST_REGION_SWAPPED" :
-	       "UNKNOWN");
-    cur = cur->next;
-    i++;
-  }
+  return NULL;
}
+
+
+/*
+ * Dispatch a page fault that hit a hooked shadow region to the matching
+ * hook handler. Returns the handler's result, or -1 on error.
+ */
+int handle_special_page_fault(struct guest_info * info, 
+			      addr_t fault_gva, addr_t fault_gpa, 
+			      pf_error_t access_info) 
+{
+  struct v3_shadow_region * reg = v3_get_shadow_region(info, fault_gpa);
+
+  PrintDebug("Handling Special Page Fault\n");
+
+  // v3_get_shadow_region() returns NULL when no region covers fault_gpa;
+  // dereferencing it unchecked would crash the VMM.
+  if (reg == NULL) {
+    PrintError("Special page fault on unmapped GPA %p\n", (void *)fault_gpa);
+    return -1;
+  }
+
+  switch (reg->host_type) {
+  case SHDW_REGION_WRITE_HOOK:
+    return v3_handle_mem_wr_hook(info, fault_gva, fault_gpa, reg, access_info);
+  case SHDW_REGION_FULL_HOOK:
+    return v3_handle_mem_full_hook(info, fault_gva, fault_gpa, reg, access_info);
+  default:
+    return -1;
+  }
-#ifdef VMM_MEM_TEST
-
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <stdarg.h>
-
-
-
-
-
-struct vmm_os_hooks * os_hooks;
-
-void * TestMalloc(uint_t size) {
-  return malloc(size);
-}
-
-void * TestAllocatePages(int size) {
-  return malloc(4096 * size);
-}
-
-
-void TestPrint(const char * fmt, ...) {
-  va_list args;
+  return 0;
-  va_start(args, fmt);
-  vprintf(fmt, args);
-  va_end(args);
}
-int mem_list_add_test_1( vmm_mem_list_t * list) {
-
- uint_t offset = 0;
-
- PrintDebug("\n\nTesting Memory List\n");
-
- init_mem_list(list);
-
- offset = PAGE_SIZE * 6;
- PrintDebug("Adding 0x%x - 0x%x\n", offset, offset + (PAGE_SIZE * 10));
- add_mem_list_pages(list, offset, 10);
- print_mem_list(list);
+/*
+ * Handle a fault on a write-hooked region: emulate the faulting write to
+ * recover the source data and length, then hand it to the region's
+ * write_hook. Returns 0 on success, -1 on emulation or hook failure.
+ */
+int v3_handle_mem_wr_hook(struct guest_info * info, addr_t guest_va, addr_t guest_pa, 
+			  struct v3_shadow_region * reg, pf_error_t access_info) {
+  addr_t write_src_addr = 0;
-  offset = 0;
-  PrintDebug("Adding 0x%x - 0x%x\n", offset, offset + PAGE_SIZE * 4);
-  add_mem_list_pages(list, offset, 4);
-  print_mem_list(list);
-
-  offset = PAGE_SIZE * 20;
-  PrintDebug("Adding 0x%x - 0x%x\n", offset, offset + (PAGE_SIZE * 1));
-  add_mem_list_pages(list, offset, 1);
-  print_mem_list(list);
-
-  offset = PAGE_SIZE * 21;
-  PrintDebug("Adding 0x%x - 0x%x\n", offset, offset + (PAGE_SIZE * 3));
-  add_mem_list_pages(list, offset, 3);
-  print_mem_list(list);
-
-
-  offset = PAGE_SIZE * 10;
-  PrintDebug("Adding 0x%x - 0x%x\n", offset, offset + (PAGE_SIZE * 30));
-  add_mem_list_pages(list, offset, 30);
-  print_mem_list(list);
+  int write_len = v3_emulate_write_op(info, guest_va, guest_pa, &write_src_addr);
+  if (write_len == -1) {
+    PrintError("Emulation failure in write hook\n");
+    return -1;
+  }
-  offset = PAGE_SIZE * 5;
-  PrintDebug("Adding 0x%x - 0x%x\n", offset, offset + (PAGE_SIZE * 1));
-  add_mem_list_pages(list, offset, 1);
-  print_mem_list(list);
-
+  // The hook must consume exactly the emulated length, or we fail the fault.
+  if (reg->write_hook(guest_pa, (void *)write_src_addr, write_len, reg->priv_data) != write_len) {
+    PrintError("Memory write hook did not return correct value\n");
+    return -1;
+  }
  return 0;
}
+/* Full (read+write) hook dispatch -- unimplemented stub, always fails. */
+int v3_handle_mem_full_hook(struct guest_info * info, addr_t guest_va, addr_t guest_pa, 
+			    struct v3_shadow_region * reg, pf_error_t access_info) {
+  return -1;
+}
-int mem_layout_add_test_1(vmm_mem_layout_t * layout) {
-
-
- uint_t start = 0;
- uint_t end = 0;
-
- PrintDebug("\n\nTesting Memory Layout\n");
- init_mem_layout(layout);
- start = 0x6000;
- end = 0x10000;;
- PrintDebug("Adding 0x%x - 0x%x\n", start, end);
- add_guest_mem_range(layout, start, end);
- print_mem_layout(layout);
+/*
+ * Find the shadow region containing guest_addr (half-open ranges).
+ * Returns NULL if no region covers the address.
+ */
+struct v3_shadow_region * v3_get_shadow_region(struct guest_info * info, addr_t guest_addr) {
+  struct rb_node * n = info->mem_map.rb_node;
+  struct v3_shadow_region * reg = NULL;
+  while (n) {
+    reg = rb_entry(n, struct v3_shadow_region, tree_node);
-  start = 0x1000;
-  end = 0x3000;
-  PrintDebug("Adding 0x%x - 0x%x\n", start, end);
-  add_guest_mem_range(layout, start, end);
-  print_mem_layout(layout);
+    if (guest_addr < reg->guest_start) {
+      n = n->rb_left;
+    } else if (guest_addr >= reg->guest_end) {
+      n = n->rb_right;
+    } else {
+      return reg;
+    }
+  }
-  start = 0x2000;
-  end = 0x6000;
-  PrintDebug("Adding 0x%x - 0x%x\n", start, end);
-  add_guest_mem_range(layout, start, end);
-  print_mem_layout(layout);
+  return NULL;
+}
- start = 0x4000;
- end = 0x5000;
- PrintDebug("Adding 0x%x - 0x%x\n", start, end);
- add_guest_mem_range(layout, start, end);
- print_mem_layout(layout);
- start = 0x5000;
- end = 0x7000;
- PrintDebug("Adding 0x%x - 0x%x\n", start, end);
- add_guest_mem_range(layout, start, end);
- print_mem_layout(layout);
+/*
+ * Translate a guest physical address to its host address within 'reg'.
+ * Returns 0 (and logs an error) for NULL, full-hook, or invalid regions,
+ * which have no backing host memory.
+ */
+addr_t v3_get_shadow_addr(struct v3_shadow_region * reg, addr_t guest_addr) {
+  if ((reg) && 
+      (reg->host_type != SHDW_REGION_FULL_HOOK) && 
+      (reg->host_type != SHDW_REGION_INVALID)) {
+    return (guest_addr - reg->guest_start) + reg->host_addr;
+  } else {
+    PrintError("MEM Region Invalid\n");
+    return 0;
+  }
+}
+/* Erase 'reg' from the shadow map and free it; NULL is a safe no-op. */
+void v3_delete_shadow_region(struct guest_info * info, struct v3_shadow_region * reg) {
+  if (reg != NULL) {
+    v3_rb_erase(&(reg->tree_node), &(info->mem_map));
-  return 0;
+    V3_Free(reg);
+  }
}
-int main(int argc, char ** argv) {
- struct vmm_os_hooks dummy_hooks;
- os_hooks = &dummy_hooks;
-
- vmm_mem_layout_t layout;
- vmm_mem_list_t list;
- os_hooks->malloc = &TestMalloc;
- os_hooks->free = &free;
- os_hooks->print_debug = &TestPrint;
- os_hooks->allocate_pages = &TestAllocatePages;
+/* Dump every shadow region (range, host addr, type, hooks) for debugging. */
+void print_shadow_map(struct guest_info * info) {
+  struct rb_node * node = v3_rb_first(&(info->mem_map));
+  struct v3_shadow_region * reg;
+  int i = 0;
+  PrintDebug("Memory Layout:\n");
+
+  // Empty map: v3_rb_first() returns NULL and the do/while below
+  // would dereference it.
+  if (node == NULL) {
+    return;
+  }
+  do {
+    reg = rb_entry(node, struct v3_shadow_region, tree_node);
-  printf("mem_list_add_test_1: %d\n", mem_list_add_test_1(&list));
-  printf("layout_add_test_1: %d\n", mem_layout_add_test_1(&layout));
+    PrintDebug("%d:  0x%p - 0x%p -> 0x%p\n", i, 
+	       (void *)(reg->guest_start), 
+	       (void *)(reg->guest_end - 1), 
+	       (void *)(reg->host_addr));
-  return 0;
+    PrintDebug("\t(%s) (WriteHook = 0x%p) (ReadHook = 0x%p)\n", 
+	       v3_shdw_region_type_to_str(reg->host_type),
+	       (void *)(reg->write_hook), 
+	       (void *)(reg->read_hook));
+
+    i++;
+  } while ((node = v3_rb_next(node)));
}
-#endif
+/* Human-readable names for v3_shdw_region_type_t values (debug output). */
+static const uchar_t SHDW_REGION_INVALID_STR[] = "SHDW_REGION_INVALID";
+static const uchar_t SHDW_REGION_WRITE_HOOK_STR[] = "SHDW_REGION_WRITE_HOOK";
+static const uchar_t SHDW_REGION_FULL_HOOK_STR[] = "SHDW_REGION_FULL_HOOK";
+static const uchar_t SHDW_REGION_ALLOCATED_STR[] = "SHDW_REGION_ALLOCATED";
+/* Map a region type to its name; unknown values map to "SHDW_REGION_INVALID". */
+const uchar_t * v3_shdw_region_type_to_str(v3_shdw_region_type_t type) {
+  switch (type) {
+  case SHDW_REGION_WRITE_HOOK:
+    return SHDW_REGION_WRITE_HOOK_STR;
+  case SHDW_REGION_FULL_HOOK:
+    return SHDW_REGION_FULL_HOOK_STR;
+  case SHDW_REGION_ALLOCATED:
+    return SHDW_REGION_ALLOCATED_STR;
+  default:
+    return SHDW_REGION_INVALID_STR;
+  }
+}
+/* Compile out PrintDebug in this file unless shadow-paging debugging is on. */
+#ifndef DEBUG_SHADOW_PAGING
+#undef PrintDebug
+#define PrintDebug(fmt, args...)
+#endif
+
void delete_page_tables_32(pde32_t * pde) {
pde32_t * create_passthrough_pts_32(struct guest_info * guest_info) {
addr_t current_page_addr = 0;
int i, j;
- struct shadow_map * map = &(guest_info->mem_map);
pde32_t * pde = V3_VAddr(V3_AllocPages(1));
for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
- struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
+ struct v3_shadow_region * region = v3_get_shadow_region(guest_info, current_page_addr);
if (!region ||
- (region->host_type == HOST_REGION_HOOK) ||
- (region->host_type == HOST_REGION_UNALLOCATED) ||
- (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
- (region->host_type == HOST_REGION_REMOTE) ||
- (region->host_type == HOST_REGION_SWAPPED)) {
+ (region->host_type == SHDW_REGION_FULL_HOOK)) {
pte[j].present = 0;
pte[j].writable = 0;
pte[j].user_page = 0;
} else {
addr_t host_addr;
pte[j].present = 1;
- pte[j].writable = 1;
+
+ if (region->host_type == SHDW_REGION_WRITE_HOOK) {
+ pte[j].writable = 0;
+ PrintDebug("Marking Write hook host_addr %p as RO\n", (void *)current_page_addr);
+ } else {
+ pte[j].writable = 1;
+ }
+
pte[j].user_page = 1;
pte[j].write_through = 0;
pte[j].cache_disable = 0;
pdpe32pae_t * create_passthrough_pts_32PAE(struct guest_info * guest_info) {
addr_t current_page_addr = 0;
int i, j, k;
- struct shadow_map * map = &(guest_info->mem_map);
pdpe32pae_t * pdpe = V3_VAddr(V3_AllocPages(1));
memset(pdpe, 0, PAGE_SIZE);
for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
- struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
+ struct v3_shadow_region * region = v3_get_shadow_region(guest_info, current_page_addr);
if (!region ||
- (region->host_type == HOST_REGION_HOOK) ||
- (region->host_type == HOST_REGION_UNALLOCATED) ||
- (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
- (region->host_type == HOST_REGION_REMOTE) ||
- (region->host_type == HOST_REGION_SWAPPED)) {
+ (region->host_type == SHDW_REGION_FULL_HOOK)) {
pte[k].present = 0;
pte[k].writable = 0;
pte[k].user_page = 0;
} else {
addr_t host_addr;
pte[k].present = 1;
- pte[k].writable = 1;
+
+ if (region->host_type == SHDW_REGION_WRITE_HOOK) {
+ pte[k].writable = 0;
+ } else {
+ pte[k].writable = 1;
+ }
+
pte[k].user_page = 1;
pte[k].write_through = 0;
pte[k].cache_disable = 0;
pml4e64_t * create_passthrough_pts_64(struct guest_info * info) {
addr_t current_page_addr = 0;
int i, j, k, m;
- struct shadow_map * map = &(info->mem_map);
pml4e64_t * pml = V3_VAddr(V3_AllocPages(1));
for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
- struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
+ struct v3_shadow_region * region = v3_get_shadow_region(info, current_page_addr);
if (!region ||
- (region->host_type == HOST_REGION_HOOK) ||
- (region->host_type == HOST_REGION_UNALLOCATED) ||
- (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
- (region->host_type == HOST_REGION_REMOTE) ||
- (region->host_type == HOST_REGION_SWAPPED)) {
+ (region->host_type == SHDW_REGION_FULL_HOOK)) {
pte[m].present = 0;
pte[m].writable = 0;
pte[m].user_page = 0;
} else {
addr_t host_addr;
pte[m].present = 1;
- pte[m].writable = 1;
+
+ if (region->host_type == SHDW_REGION_WRITE_HOOK) {
+ pte[m].writable = 0;
+ } else {
+ pte[m].writable = 1;
+ }
+
pte[m].user_page = 1;
pte[m].write_through = 0;
pte[m].cache_disable = 0;
+/*
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National
+ * Science Foundation and the Department of Energy.
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico. You can find out more at
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
+ * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
+ * All rights reserved.
+ *
+ * Author: Jack Lange <jarusl@cs.northwestern.edu>
+ *
+ * This is free software. You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
#ifdef USE_VMM_PAGING_DEBUG
--- /dev/null
+/*
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National
+ * Science Foundation and the Department of Energy.
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico. You can find out more at
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
+ * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
+ * All rights reserved.
+ *
+ * Author: Jack Lange <jarusl@cs.northwestern.edu>
+ *
+ * This is free software. You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
+
+#include <palacios/vmm_types.h>
+#include <palacios/vmm_profiler.h>
+#include <palacios/svm_handler.h>
+#include <palacios/vmm_rbtree.h>
+
+
+/* Per-exit-code profiling record, kept in the profiler's rb-tree. */
+struct exit_event {
+  uint_t exit_code;     // VM exit code this record tracks (tree key)
+  uint_t exit_count;    // number of exits seen with this code
+  uint_t handler_time;  // running (weighted) average handler time
+
+  struct rb_node tree_node;
+};
+
+
+/* Reset all profiler counters and start with an empty exit-event tree. */
+void v3_init_profiler(struct guest_info * info) {
+  info->profiler.total_exits = 0;
+
+  info->profiler.start_time = 0;
+  info->profiler.end_time = 0;  
+  info->profiler.guest_pf_cnt = 0;
+
+  info->profiler.root.rb_node = NULL;
+}
+
+
+
+/*
+ * Link 'evt' into the profiler rb-tree, keyed by exit_code.
+ * Returns NULL on success, or the existing event for a duplicate code
+ * (tree unchanged). Caller must rebalance -- see insert_event().
+ */
+static inline struct exit_event * __insert_event(struct guest_info * info, 
+						 struct exit_event * evt) {
+  struct rb_node ** p = &(info->profiler.root.rb_node);
+  struct rb_node * parent = NULL;
+  struct exit_event * tmp_evt = NULL;
+
+  while (*p) {
+    parent = *p;
+    tmp_evt = rb_entry(parent, struct exit_event, tree_node);
+
+    if (evt->exit_code < tmp_evt->exit_code) {
+      p = &(*p)->rb_left;
+    } else if (evt->exit_code > tmp_evt->exit_code) {
+      p = &(*p)->rb_right;
+    } else {
+      return tmp_evt;
+    }
+  }
+  rb_link_node(&(evt->tree_node), parent, p);
+
+  return NULL;
+}
+
+/*
+ * Insert an exit event and rebalance the tree.
+ * Returns NULL on success, or the already-present event for a duplicate code.
+ */
+static inline struct exit_event * insert_event(struct guest_info * info, 
+					       struct exit_event * evt) {
+  struct exit_event * ret;
+
+  if ((ret = __insert_event(info, evt))) {
+    return ret;
+  }
+
+  v3_rb_insert_color(&(evt->tree_node), &(info->profiler.root));
+
+  return NULL;
+}
+
+
+/* Look up the profiling record for 'exit_code'; NULL if not yet seen. */
+static struct exit_event * get_exit(struct guest_info * info, uint_t exit_code) {
+  struct rb_node * n = info->profiler.root.rb_node;
+  struct exit_event * evt = NULL;
+
+  while (n) {
+    evt = rb_entry(n, struct exit_event, tree_node);
+
+    if (exit_code < evt->exit_code) {
+      n = n->rb_left;
+    } else if (exit_code > evt->exit_code) {
+      n = n->rb_right;
+    } else {
+      return evt;
+    }
+  }
+
+  return NULL;
+}
+
+
+/* Allocate a zero-count profiling record for 'exit_code'.
+ * NOTE(review): V3_Malloc return is not checked -- confirm allocator behavior. */
+static inline struct exit_event * create_exit(uint_t exit_code) {
+  struct exit_event * evt = V3_Malloc(sizeof(struct exit_event));
+
+  evt->exit_code = exit_code;
+  evt->exit_count = 0;
+  evt->handler_time = 0;
+
+  return evt;
+}
+
+/*
+ * Record one VM exit: fold its handler time (end_time - start_time) into
+ * a moving average weighted 99:1 toward history, and bump the per-exit
+ * and total counters. Creates the per-code record on first sight.
+ */
+void v3_profile_exit(struct guest_info * info, uint_t exit_code) {
+  uint_t time = (info->profiler.end_time - info->profiler.start_time);
+  struct exit_event * evt = get_exit(info, exit_code);
+
+  if (evt == NULL) {
+    evt = create_exit(exit_code);
+    insert_event(info, evt);
+  }
+
+  // Integer moving average. The original used floating point
+  // ((x * .99) + (t * .01)), which is unsafe in the VM-exit path: the
+  // VMM does not save/restore FPU state here, so FP use could corrupt
+  // guest FPU context.
+  evt->handler_time = ((evt->handler_time * 99) + time) / 100;
+
+  evt->exit_count++;
+  
+  info->profiler.total_exits++;
+}
+
+
+/* Print the guest-PF count plus per-exit-code counts and average times. */
+void v3_print_profile(struct guest_info * info) {
+  struct exit_event * evt = NULL;
+  struct rb_node * node = v3_rb_first(&(info->profiler.root));
+  
+  PrintDebug("GUEST_PF: %u\n", info->profiler.guest_pf_cnt);
+
+  // If no exits were ever profiled the tree is empty and v3_rb_first()
+  // returns NULL; the do/while below would dereference it.
+  if (node == NULL) {
+    return;
+  }
+
+  do {
+    evt = rb_entry(node, struct exit_event, tree_node);
+    const char * code_str = vmexit_code_to_str(evt->exit_code);
+    
+    PrintDebug("%s:%sCnt=%u,%sTime=%u\n", 
+	       code_str,
+	       (strlen(code_str) > 14) ? "\t" : "\t\t",
+	       evt->exit_count,
+	       (evt->exit_count >= 100) ? "\t" : "\t\t",
+	       evt->handler_time);
+
+  } while ((node = v3_rb_next(node)));
+}
--- /dev/null
+/*
+ Red Black Trees
+ (C) 1999 Andrea Arcangeli <andrea@suse.de>
+ (C) 2002 David Woodhouse <dwmw2@infradead.org>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ linux/lib/rbtree.c
+*/
+
+#include <palacios/vmm_rbtree.h>
+
+
+/*
+ * Left-rotate around 'node': its right child takes node's place and
+ * node becomes that child's left child. In-order sequence is preserved.
+ * (Verbatim from linux/lib/rbtree.c.)
+ */
+static void __rb_rotate_left(struct rb_node *node, struct rb_root *root)
+{
+	struct rb_node *right = node->rb_right;
+	struct rb_node *parent = rb_parent(node);
+
+	if ((node->rb_right = right->rb_left))
+		rb_set_parent(right->rb_left, node);
+	right->rb_left = node;
+
+	rb_set_parent(right, parent);
+
+	if (parent)
+	{
+		if (node == parent->rb_left)
+			parent->rb_left = right;
+		else
+			parent->rb_right = right;
+	}
+	else
+		root->rb_node = right;
+	rb_set_parent(node, right);
+}
+
+/*
+ * Right-rotate around 'node': mirror image of __rb_rotate_left().
+ * (Verbatim from linux/lib/rbtree.c.)
+ */
+static void __rb_rotate_right(struct rb_node *node, struct rb_root *root)
+{
+	struct rb_node *left = node->rb_left;
+	struct rb_node *parent = rb_parent(node);
+
+	if ((node->rb_left = left->rb_right))
+		rb_set_parent(left->rb_right, node);
+	left->rb_right = node;
+
+	rb_set_parent(left, parent);
+
+	if (parent)
+	{
+		if (node == parent->rb_right)
+			parent->rb_right = left;
+		else
+			parent->rb_left = left;
+	}
+	else
+		root->rb_node = left;
+	rb_set_parent(node, left);
+}
+
+void v3_rb_insert_color(struct rb_node *node, struct rb_root *root) /* standard red-black insert fixup: node was just linked in and colored red */
+{
+    struct rb_node *parent, *gparent;
+
+    while ((parent = rb_parent(node)) && rb_is_red(parent)) /* loop while a red-red violation remains */
+    {
+        gparent = rb_parent(parent); /* parent is red, hence not the root, so gparent exists */
+
+        if (parent == gparent->rb_left)
+        {
+            {
+                register struct rb_node *uncle = gparent->rb_right;
+                if (uncle && rb_is_red(uncle)) /* red uncle: recolor and retry from the grandparent */
+                {
+                    rb_set_black(uncle);
+                    rb_set_black(parent);
+                    rb_set_red(gparent);
+                    node = gparent;
+                    continue;
+                }
+            }
+
+            if (parent->rb_right == node) /* inner child: rotate so node becomes an outer child */
+            {
+                register struct rb_node *tmp;
+                __rb_rotate_left(parent, root);
+                tmp = parent;
+                parent = node;
+                node = tmp;
+            }
+
+            rb_set_black(parent); /* outer child: recolor, then rotate the grandparent */
+            rb_set_red(gparent);
+            __rb_rotate_right(gparent, root);
+        } else { /* mirror image of the cases above */
+            {
+                register struct rb_node *uncle = gparent->rb_left;
+                if (uncle && rb_is_red(uncle))
+                {
+                    rb_set_black(uncle);
+                    rb_set_black(parent);
+                    rb_set_red(gparent);
+                    node = gparent;
+                    continue;
+                }
+            }
+
+            if (parent->rb_left == node)
+            {
+                register struct rb_node *tmp;
+                __rb_rotate_right(parent, root);
+                tmp = parent;
+                parent = node;
+                node = tmp;
+            }
+
+            rb_set_black(parent);
+            rb_set_red(gparent);
+            __rb_rotate_left(gparent, root);
+        }
+    }
+
+    rb_set_black(root->rb_node); /* invariant: the root is always black */
+}
+
+
+static void __rb_erase_color(struct rb_node *node, struct rb_node *parent,
+                             struct rb_root *root) /* restore black-height after a black node was unlinked; node (NULL counts as black) carries the extra black */
+{
+    struct rb_node *other;
+
+    while ((!node || rb_is_black(node)) && node != root->rb_node)
+    {
+        if (parent->rb_left == node)
+        {
+            other = parent->rb_right; /* sibling; non-NULL while black-heights are unbalanced */
+            if (rb_is_red(other)) /* case 1: red sibling - rotate so the sibling is black */
+            {
+                rb_set_black(other);
+                rb_set_red(parent);
+                __rb_rotate_left(parent, root);
+                other = parent->rb_right;
+            }
+            if ((!other->rb_left || rb_is_black(other->rb_left)) &&
+                (!other->rb_right || rb_is_black(other->rb_right)))
+            { /* case 2: black sibling, both nephews black - push the extra black up */
+                rb_set_red(other);
+                node = parent;
+                parent = rb_parent(node);
+            }
+            else
+            {
+                if (!other->rb_right || rb_is_black(other->rb_right))
+                { /* case 3: far nephew black - rotate sibling to reach case 4 */
+                    struct rb_node *o_left;
+                    if ((o_left = other->rb_left)) /* assignment intended */
+                        rb_set_black(o_left);
+                    rb_set_red(other);
+                    __rb_rotate_right(other, root);
+                    other = parent->rb_right;
+                }
+                rb_set_color(other, rb_color(parent)); /* case 4: rotate the parent; balance is restored */
+                rb_set_black(parent);
+                if (other->rb_right)
+                    rb_set_black(other->rb_right);
+                __rb_rotate_left(parent, root);
+                node = root->rb_node; /* done - force loop exit */
+                break;
+            }
+        }
+        else
+        { /* mirror image: node is the right child */
+            other = parent->rb_left;
+            if (rb_is_red(other))
+            {
+                rb_set_black(other);
+                rb_set_red(parent);
+                __rb_rotate_right(parent, root);
+                other = parent->rb_left;
+            }
+            if ((!other->rb_left || rb_is_black(other->rb_left)) &&
+                (!other->rb_right || rb_is_black(other->rb_right)))
+            {
+                rb_set_red(other);
+                node = parent;
+                parent = rb_parent(node);
+            }
+            else
+            {
+                if (!other->rb_left || rb_is_black(other->rb_left))
+                {
+                    register struct rb_node *o_right;
+                    if ((o_right = other->rb_right)) /* assignment intended */
+                        rb_set_black(o_right);
+                    rb_set_red(other);
+                    __rb_rotate_left(other, root);
+                    other = parent->rb_left;
+                }
+                rb_set_color(other, rb_color(parent));
+                rb_set_black(parent);
+                if (other->rb_left)
+                    rb_set_black(other->rb_left);
+                __rb_rotate_right(parent, root);
+                node = root->rb_node;
+                break;
+            }
+        }
+    }
+    if (node)
+        rb_set_black(node); /* absorb the extra black */
+}
+
+void v3_rb_erase(struct rb_node *node, struct rb_root *root) /* unlink node from the tree, then rebalance if a black node was removed */
+{
+    struct rb_node *child, *parent;
+    int color;
+
+    if (!node->rb_left)
+        child = node->rb_right; /* at most one child: splice it straight in */
+    else if (!node->rb_right)
+        child = node->rb_left;
+    else
+    {
+        struct rb_node *old = node, *left;
+
+        node = node->rb_right; /* two children: locate the in-order successor */
+        while ((left = node->rb_left) != NULL)
+            node = left;
+        child = node->rb_right; /* successor has no left child by construction */
+        parent = rb_parent(node);
+        color = rb_color(node); /* the color actually leaving the tree is the successor's */
+
+        if (child)
+            rb_set_parent(child, parent);
+        if (parent == old) { /* successor is old's direct right child */
+            parent->rb_right = child;
+            parent = node;
+        } else
+            parent->rb_left = child;
+
+        node->rb_parent_color = old->rb_parent_color; /* successor assumes old's position and color */
+        node->rb_right = old->rb_right;
+        node->rb_left = old->rb_left;
+
+        if (rb_parent(old))
+        {
+            if (rb_parent(old)->rb_left == old)
+                rb_parent(old)->rb_left = node;
+            else
+                rb_parent(old)->rb_right = node;
+        } else
+            root->rb_node = node;
+
+        rb_set_parent(old->rb_left, node); /* old had two children, so rb_left is non-NULL */
+        if (old->rb_right)
+            rb_set_parent(old->rb_right, node);
+        goto color;
+    }
+
+    parent = rb_parent(node);
+    color = rb_color(node);
+
+    if (child)
+        rb_set_parent(child, parent);
+    if (parent)
+    {
+        if (parent->rb_left == node)
+            parent->rb_left = child;
+        else
+            parent->rb_right = child;
+    }
+    else
+        root->rb_node = child;
+
+ color:
+    if (color == RB_BLACK)
+        __rb_erase_color(child, parent, root); /* removing a black node broke the black-height invariant */
+}
+
+
+/*
+ * This function returns the first node (in sort order) of the tree.
+ */
+struct rb_node *v3_rb_first(struct rb_root *root) /* leftmost (smallest, in sort order) node; NULL for an empty tree */
+{
+    struct rb_node *n;
+
+    n = root->rb_node;
+    if (!n)
+        return NULL;
+    while (n->rb_left)
+        n = n->rb_left;
+    return n;
+}
+
+
+struct rb_node *v3_rb_last(struct rb_root *root) /* rightmost (largest, in sort order) node; NULL for an empty tree */
+{
+    struct rb_node *n;
+
+    n = root->rb_node;
+    if (!n)
+        return NULL;
+    while (n->rb_right)
+        n = n->rb_right;
+    return n;
+}
+
+
+struct rb_node *v3_rb_next(struct rb_node *node) /* in-order successor; NULL if node is the tree maximum */
+{
+    struct rb_node *parent;
+
+    /* If we have a right-hand child, go down and then left as far
+       as we can. */
+    if (node->rb_right) {
+        node = node->rb_right;
+        while (node->rb_left)
+            node=node->rb_left;
+        return node;
+    }
+
+    /* No right-hand children.  Everything down and left is
+       smaller than us, so any 'next' node must be in the general
+       direction of our parent. Go up the tree; any time the
+       ancestor is a right-hand child of its parent, keep going
+       up. First time it's a left-hand child of its parent, said
+       parent is our 'next' node. */
+    while ((parent = rb_parent(node)) && node == parent->rb_right)
+        node = parent;
+
+    return parent; /* NULL when we walked off the root */
+}
+
+
+struct rb_node *v3_rb_prev(struct rb_node *node) /* in-order predecessor; NULL if node is the tree minimum */
+{
+    struct rb_node *parent;
+
+    /* If we have a left-hand child, go down and then right as far
+       as we can. */
+    if (node->rb_left) {
+        node = node->rb_left;
+        while (node->rb_right)
+            node=node->rb_right;
+        return node;
+    }
+
+    /* No left-hand children. Go up till we find an ancestor which
+       is a right-hand child of its parent */
+    while ((parent = rb_parent(node)) && node == parent->rb_left)
+        node = parent;
+
+    return parent; /* NULL when we walked off the root */
+}
+
+
+void v3_rb_replace_node(struct rb_node *victim, struct rb_node *new,
+                        struct rb_root *root) /* splice 'new' into victim's position; no rebalancing, so new must sort where victim did (caller's responsibility) */
+{
+    struct rb_node *parent = rb_parent(victim);
+
+    /* Set the surrounding nodes to point to the replacement */
+    if (parent) {
+        if (victim == parent->rb_left)
+            parent->rb_left = new;
+        else
+            parent->rb_right = new;
+    } else {
+        root->rb_node = new; /* victim was the root */
+    }
+    if (victim->rb_left)
+        rb_set_parent(victim->rb_left, new);
+    if (victim->rb_right)
+        rb_set_parent(victim->rb_right, new);
+
+    /* Copy the pointers/colour from the victim to the replacement */
+    *new = *victim;
+}
+
-DEFINE_HASHTABLE_INSERT(add_cr3_to_cache, addr_t, struct hashtable *);
-DEFINE_HASHTABLE_SEARCH(find_cr3_in_cache, addr_t, struct hashtable *);
-DEFINE_HASHTABLE_REMOVE(del_cr3_from_cache, addr_t, struct hashtable *, 0);
+//DEFINE_HASHTABLE_INSERT(add_cr3_to_cache, addr_t, struct hashtable *);
+//DEFINE_HASHTABLE_SEARCH(find_cr3_in_cache, addr_t, struct hashtable *);
+//DEFINE_HASHTABLE_REMOVE(del_cr3_from_cache, addr_t, struct hashtable *, 0);
DEFINE_HASHTABLE_INSERT(add_pte_map, addr_t, addr_t);
DEFINE_HASHTABLE_SEARCH(find_pte_map, addr_t, addr_t);
-DEFINE_HASHTABLE_REMOVE(del_pte_map, addr_t, addr_t, 0);
+//DEFINE_HASHTABLE_REMOVE(del_pte_map, addr_t, addr_t, 0);
return (key1 == key2);
}
-static uint_t cr3_hash_fn(addr_t key) {
- return hash_long(key, 32);
-}
-
-static int cr3_equals(addr_t key1, addr_t key2) {
- return (key1 == key2);
-}
+static addr_t create_new_shadow_pt();
+static void inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
+static int is_guest_pf(pt_access_status_t guest_access, pt_access_status_t shadow_access);
+#include "vmm_shadow_paging_32.h"
+#include "vmm_shadow_paging_32pae.h"
+#include "vmm_shadow_paging_64.h"
-static int activate_shadow_pt_32(struct guest_info * info);
-static int activate_shadow_pt_32pae(struct guest_info * info);
-static int activate_shadow_pt_64(struct guest_info * info);
-static int handle_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
-static int handle_shadow_pagefault_32pae(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
-static int handle_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
-
-
-static int cache_page_tables_32(struct guest_info * info, addr_t pde);
-static int cache_page_tables_64(struct guest_info * info, addr_t pde);
-
int v3_init_shadow_page_state(struct guest_info * info) {
struct shadow_page_state * state = &(info->shdw_pg_state);
state->guest_cr3 = 0;
state->guest_cr0 = 0;
- state->cr3_cache = create_hashtable(0, &cr3_hash_fn, &cr3_equals);
-
- state->cached_cr3 = 0;
state->cached_ptes = NULL;
-
- return 0;
-}
-
-
-
-
-
-int v3_cache_page_tables(struct guest_info * info, addr_t cr3) {
- switch(v3_get_cpu_mode(info)) {
- case PROTECTED:
- return cache_page_tables_32(info, CR3_TO_PDE32_PA(cr3));
- default:
- return -1;
- }
-}
-
-static int cache_page_tables_32(struct guest_info * info, addr_t pde) {
- struct shadow_page_state * state = &(info->shdw_pg_state);
- addr_t pde_host_addr;
- pde32_t * tmp_pde;
- struct hashtable * pte_cache = NULL;
- int i = 0;
-
- if (pde == state->cached_cr3) {
- return 1;
- }
-
- if (state->cached_ptes != NULL) {
- hashtable_destroy(state->cached_ptes, 0, 0);
- state->cached_ptes = NULL;
- }
-
- state->cached_cr3 = pde;
-
- pte_cache = create_hashtable(0, &pte_hash_fn, &pte_equals);
- state->cached_ptes = pte_cache;
-
- if (guest_pa_to_host_va(info, pde, &pde_host_addr) == -1) {
- PrintError("Could not lookup host address of guest PDE\n");
- return -1;
- }
-
- tmp_pde = (pde32_t *)pde_host_addr;
-
- add_pte_map(pte_cache, pde, pde_host_addr);
-
-
- for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
- if ((tmp_pde[i].present) && (tmp_pde[i].large_page == 0)) {
- addr_t pte_host_addr;
-
- if (guest_pa_to_host_va(info, (addr_t)(BASE_TO_PAGE_ADDR(tmp_pde[i].pt_base_addr)), &pte_host_addr) == -1) {
- PrintError("Could not lookup host address of guest PDE\n");
- return -1;
- }
-
- add_pte_map(pte_cache, (addr_t)(BASE_TO_PAGE_ADDR(tmp_pde[i].pt_base_addr)), pte_host_addr);
- }
- }
-
- return 0;
-
-}
-
-
-static int cache_page_tables_64(struct guest_info * info, addr_t pde) {
- return -1;
-}
-
-
-int v3_replace_shdw_page32(struct guest_info * info, addr_t location, pte32_t * new_page, pte32_t * old_page) {
- pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
- pde32_t * shadow_pde = (pde32_t *)&(shadow_pd[PDE32_INDEX(location)]);
-
- if (shadow_pde->large_page == 0) {
- pte32_t * shadow_pt = (pte32_t *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr);
- pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(location)]);
-
- //if (shadow_pte->present == 1) {
- *(uint_t *)old_page = *(uint_t *)shadow_pte;
- //}
-
- *(uint_t *)shadow_pte = *(uint_t *)new_page;
-
- } else {
- // currently unhandled
- PrintError("Replacing large shadow pages not implemented\n");
- return -1;
- }
-
- return 0;
-}
-
-
-
-
-
-// We assume that shdw_pg_state.guest_cr3 is pointing to the page tables we want to activate
-// We also assume that the CPU mode has not changed during this page table transition
-static int activate_shadow_pt_32(struct guest_info * info) {
- struct cr3_32 * shadow_cr3 = (struct cr3_32 *)&(info->ctrl_regs.cr3);
- struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3);
- int cached = 0;
-
- // Check if shadow page tables are in the cache
- cached = cache_page_tables_32(info, CR3_TO_PDE32_PA(*(addr_t *)guest_cr3));
-
- if (cached == -1) {
- PrintError("CR3 Cache failed\n");
- return -1;
- } else if (cached == 0) {
- addr_t shadow_pt;
-
- PrintDebug("New CR3 is different - flushing shadow page table %p\n", shadow_cr3 );
- delete_page_tables_32(CR3_TO_PDE32_VA(*(uint_t*)shadow_cr3));
-
- shadow_pt = v3_create_new_shadow_pt();
-
- shadow_cr3->pdt_base_addr = (addr_t)V3_PAddr((void *)(addr_t)PAGE_BASE_ADDR(shadow_pt));
- PrintDebug( "Created new shadow page table %p\n", (void *)(addr_t)shadow_cr3->pdt_base_addr );
- } else {
- PrintDebug("Reusing cached shadow Page table\n");
- }
-
- shadow_cr3->pwt = guest_cr3->pwt;
- shadow_cr3->pcd = guest_cr3->pcd;
+ state->cached_cr3 = 0;
return 0;
}
-static int activate_shadow_pt_32pae(struct guest_info * info) {
- PrintError("Activating 32 bit PAE page tables not implemented\n");
- return -1;
-}
-
-static int activate_shadow_pt_64_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
- PrintDebug("CB: Page: %p->%p (host_ptr=%p), Type: %s\n", (void *)vaddr, (void *)page_pa, (void *)page_ptr, v3_page_type_to_str(type));
- return 0;
-}
-
-
-static int activate_shadow_pt_64(struct guest_info * info) {
- // struct cr3_64 * shadow_cr3 = (struct cr3_64 *)&(info->ctrl_regs.cr3);
- struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->shdw_pg_state.guest_cr3);
- int cached = 0;
-
- v3_walk_guest_pt_64(info, info->shdw_pg_state.guest_cr3, activate_shadow_pt_64_cb, NULL);
-
-
-
- return -1;
-
-
- // Check if shadow page tables are in the cache
- cached = cache_page_tables_64(info, CR3_TO_PDE32_PA(*(addr_t *)guest_cr3));
- /*
- if (cached == -1) {
- PrintError("CR3 Cache failed\n");
- return -1;
- } else if (cached == 0) {
- addr_t shadow_pt;
-
- PrintDebug("New CR3 is different - flushing shadow page table %p\n", shadow_cr3 );
- delete_page_tables_32(CR3_TO_PDE32_VA(*(uint_t*)shadow_cr3));
-
- shadow_pt = v3_create_new_shadow_pt();
-
- shadow_cr3->pml4t_base_addr = (addr_t)V3_PAddr((void *)(addr_t)PAGE_BASE_ADDR(shadow_pt));
- PrintDebug( "Created new shadow page table %p\n", (void *)(addr_t)shadow_cr3->pml4t_base_addr );
- } else {
- PrintDebug("Reusing cached shadow Page table\n");
- }
-
- shadow_cr3->pwt = guest_cr3->pwt;
- shadow_cr3->pcd = guest_cr3->pcd;
-
- return 0;
- */
-}
// Reads the guest CR3 register
if (info->mem_mode == PHYSICAL_MEM) {
// If paging is not turned on we need to handle the special cases
-
-#ifdef DEBUG_SHADOW_PAGING
- PrintHostPageTree(info->cpu_mode, fault_addr, info->ctrl_regs.cr3);
- PrintGuestPageTree(info, fault_addr, info->shdw_pg_state.guest_cr3);
-#endif
-
return handle_special_page_fault(info, fault_addr, fault_addr, error_code);
} else if (info->mem_mode == VIRTUAL_MEM) {
}
}
-addr_t v3_create_new_shadow_pt() {
+
+int v3_handle_shadow_invlpg(struct guest_info * info) { /* emulate a guest INVLPG: fetch+decode the instruction, then dispatch to the paging-mode-specific handler */
+    uchar_t instr[15]; /* 15 bytes = maximum x86 instruction length */
+    struct x86_instr dec_instr;
+    int ret = 0;
+    addr_t vaddr = 0;
+
+    if (info->mem_mode != VIRTUAL_MEM) {
+        // Paging must be turned on...
+        // should handle with some sort of fault I think
+        PrintError("ERROR: INVLPG called in non paged mode\n");
+        return -1;
+    }
+
+    if (info->mem_mode == PHYSICAL_MEM) { /* NOTE(review): dead branch - mem_mode is always VIRTUAL_MEM past the check above */
+        ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+    } else {
+        ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+    }
+
+    if (ret == -1) {
+        PrintError("Could not read instruction into buffer\n");
+        return -1;
+    }
+
+    if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) {
+        PrintError("Decoding Error\n");
+        return -1;
+    }
+
+    if ((dec_instr.op_type != V3_OP_INVLPG) ||
+        (dec_instr.num_operands != 1) ||
+        (dec_instr.dst_operand.type != MEM_OPERAND)) { /* INVLPG takes exactly one memory operand */
+        PrintError("Decoder Error: Not a valid INVLPG instruction...\n");
+        return -1;
+    }
+
+    vaddr = dec_instr.dst_operand.operand; /* virtual address whose translation must be invalidated */
+
+    info->rip += dec_instr.instr_length; /* skip the emulated instruction before re-entry */
+
+    switch (info->cpu_mode) { /* delegate to the handler matching the guest paging mode */
+        case PROTECTED:
+            return handle_shadow_invlpg_32(info, vaddr);
+        case PROTECTED_PAE:
+            return handle_shadow_invlpg_32pae(info, vaddr);
+        case LONG:
+        case LONG_32_COMPAT:
+        case LONG_16_COMPAT:
+            return handle_shadow_invlpg_64(info, vaddr);
+        default:
+            PrintError("Invalid CPU mode: %d\n", info->cpu_mode);
+            return -1;
+    }
+}
+
+
+
+
+static addr_t create_new_shadow_pt() {
void * host_pde = 0;
host_pde = V3_VAddr(V3_AllocPages(1));
static void inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
+    if (info->enable_profiler) { /* tally faults reflected into the guest - presumably for the VMM profiler; confirm against profiler output */
+        info->profiler.guest_pf_cnt++;
+    }
+
    info->ctrl_regs.cr2 = fault_addr;
    v3_raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);
}
}
-
-
-/*
- * *
- * *
- * * 64 bit Page table fault handlers
- * *
- * *
- */
-
-static int handle_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
- pt_access_status_t guest_access;
- pt_access_status_t shadow_access;
- int ret;
- PrintDebug("64 bit shadow page fault\n");
-
- ret = v3_check_guest_pt_32(info, info->shdw_pg_state.guest_cr3, fault_addr, error_code, &guest_access);
-
- PrintDebug("Guest Access Check: %d (access=%d)\n", ret, guest_access);
-
- ret = v3_check_host_pt_32(info->ctrl_regs.cr3, fault_addr, error_code, &shadow_access);
-
- PrintDebug("Shadow Access Check: %d (access=%d)\n", ret, shadow_access);
-
-
- PrintError("64 bit shadow paging not implemented\n");
- return -1;
-}
-
-
-/*
- * *
- * *
- * * 32 bit PAE Page table fault handlers
- * *
- * *
- */
-
-static int handle_shadow_pagefault_32pae(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
- PrintError("32 bit PAE shadow paging not implemented\n");
- return -1;
-}
-
-
-
-
-
-
-
-/*
- * *
- * *
- * * 32 bit Page table fault handlers
- * *
- * *
- */
-static int handle_large_pagefault_32(struct guest_info * info,
- addr_t fault_addr, pf_error_t error_code,
- pte32_t * shadow_pt, pde32_4MB_t * large_guest_pde);
-
-static int handle_shadow_pte32_fault(struct guest_info * info,
- addr_t fault_addr,
- pf_error_t error_code,
- pte32_t * shadow_pt,
- pte32_t * guest_pt);
-
-
-static int handle_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
- pde32_t * guest_pd = NULL;
- pde32_t * shadow_pd = CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
- addr_t guest_cr3 = CR3_TO_PDE32_PA(info->shdw_pg_state.guest_cr3);
- pt_access_status_t guest_pde_access;
- pt_access_status_t shadow_pde_access;
- pde32_t * guest_pde = NULL;
- pde32_t * shadow_pde = (pde32_t *)&(shadow_pd[PDE32_INDEX(fault_addr)]);
-
- PrintDebug("Shadow page fault handler: %p\n", (void*) fault_addr );
-
- if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
- PrintError("Invalid Guest PDE Address: 0x%p\n", (void *)guest_cr3);
- return -1;
- }
-
- guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(fault_addr)]);
-
-
- // Check the guest page permissions
- guest_pde_access = v3_can_access_pde32(guest_pd, fault_addr, error_code);
-
- // Check the shadow page permissions
- shadow_pde_access = v3_can_access_pde32(shadow_pd, fault_addr, error_code);
-
- /* Was the page fault caused by the Guest's page tables? */
- if (is_guest_pf(guest_pde_access, shadow_pde_access) == 1) {
- PrintDebug("Injecting PDE pf to guest: (guest access error=%d) (pf error code=%d)\n",
- *(uint_t *)&guest_pde_access, *(uint_t *)&error_code);
- inject_guest_pf(info, fault_addr, error_code);
- return 0;
- }
-
-
- if (shadow_pde_access == PT_ACCESS_NOT_PRESENT)
- {
- pte32_t * shadow_pt = (pte32_t *)v3_create_new_shadow_pt();
-
- shadow_pde->present = 1;
- shadow_pde->user_page = guest_pde->user_page;
- // shadow_pde->large_page = guest_pde->large_page;
- shadow_pde->large_page = 0;
-
-
- // VMM Specific options
- shadow_pde->write_through = 0;
- shadow_pde->cache_disable = 0;
- shadow_pde->global_page = 0;
- //
-
- guest_pde->accessed = 1;
-
- shadow_pde->pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(shadow_pt));
-
- if (guest_pde->large_page == 0) {
- shadow_pde->writable = guest_pde->writable;
- } else {
- // ?? What if guest pde is dirty a this point?
- ((pde32_4MB_t *)guest_pde)->dirty = 0;
- shadow_pde->writable = 0;
- }
- }
- else if (shadow_pde_access == PT_ACCESS_OK)
- {
- //
- // PTE fault
- //
- pte32_t * shadow_pt = (pte32_t *)V3_VAddr( (void*)(addr_t) BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr) );
-
- if (guest_pde->large_page == 0) {
- pte32_t * guest_pt = NULL;
- if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t*)&guest_pt) == -1) {
- // Machine check the guest
- PrintDebug("Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
- v3_raise_exception(info, MC_EXCEPTION);
- return 0;
- }
-
- if (handle_shadow_pte32_fault(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) {
- PrintError("Error handling Page fault caused by PTE\n");
- return -1;
- }
- } else if (guest_pde->large_page == 1) {
- if (handle_large_pagefault_32(info, fault_addr, error_code, shadow_pt, (pde32_4MB_t *)guest_pde) == -1) {
- PrintError("Error handling large pagefault\n");
- return -1;
- }
- }
- }
- else if ((shadow_pde_access == PT_ACCESS_WRITE_ERROR) &&
- (guest_pde->large_page == 1) &&
- (((pde32_4MB_t *)guest_pde)->dirty == 0))
- {
- //
- // Page Directory Entry marked read-only
- // Its a large page and we need to update the dirty bit in the guest
- //
-
- PrintDebug("Large page write error... Setting dirty bit and returning\n");
- ((pde32_4MB_t *)guest_pde)->dirty = 1;
- shadow_pde->writable = guest_pde->writable;
- return 0;
-
- }
- else if (shadow_pde_access == PT_ACCESS_USER_ERROR)
- {
- //
- // Page Directory Entry marked non-user
- //
- PrintDebug("Shadow Paging User access error (shadow_pde_access=0x%x, guest_pde_access=0x%x)\n",
- shadow_pde_access, guest_pde_access);
- inject_guest_pf(info, fault_addr, error_code);
- return 0;
- }
- else
- {
- // inject page fault in guest
- inject_guest_pf(info, fault_addr, error_code);
- PrintDebug("Unknown Error occurred (shadow_pde_access=%d)\n", shadow_pde_access);
- PrintDebug("Manual Says to inject page fault into guest\n");
-#ifdef DEBUG_SHADOW_PAGING
- PrintDebug("Guest PDE: (access=%d)\n\t", guest_pde_access);
- PrintPTEntry(PAGE_PD32, fault_addr, guest_pde);
- PrintDebug("Shadow PDE: (access=%d)\n\t", shadow_pde_access);
- PrintPTEntry(PAGE_PD32, fault_addr, shadow_pde);
-#endif
-
- return 0;
- }
-
- PrintDebug("Returning end of PDE function (rip=%p)\n", (void *)(addr_t)(info->rip));
- return 0;
-}
-
-
-
-/* The guest status checks have already been done,
- * only special case shadow checks remain
- */
-static int handle_large_pagefault_32(struct guest_info * info,
- addr_t fault_addr, pf_error_t error_code,
- pte32_t * shadow_pt, pde32_4MB_t * large_guest_pde)
-{
- pt_access_status_t shadow_pte_access = v3_can_access_pte32(shadow_pt, fault_addr, error_code);
- pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
-
- if (shadow_pte_access == PT_ACCESS_OK) {
- // Inconsistent state...
- // Guest Re-Entry will flush tables and everything should now workd
- PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
- return 0;
- }
-
-
- if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
- // Get the guest physical address of the fault
- addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_4MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_4MB(fault_addr);
- host_region_type_t host_page_type = get_shadow_addr_type(info, guest_fault_pa);
-
-
- if (host_page_type == HOST_REGION_INVALID) {
- // Inject a machine check in the guest
- PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
- v3_raise_exception(info, MC_EXCEPTION);
- return 0;
- }
-
- if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
- struct shadow_page_state * state = &(info->shdw_pg_state);
- addr_t shadow_pa = get_shadow_addr(info, guest_fault_pa);
-
- shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
-
- shadow_pte->present = 1;
-
- /* We are assuming that the PDE entry has precedence
- * so the Shadow PDE will mirror the guest PDE settings,
- * and we don't have to worry about them here
- * Allow everything
- */
- shadow_pte->user_page = 1;
-
- if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_fault_pa)) != NULL) {
- // Check if the entry is a page table...
- PrintDebug("Marking page as Guest Page Table (large page)\n");
- shadow_pte->vmm_info = PT32_GUEST_PT;
- shadow_pte->writable = 0;
- } else {
- shadow_pte->writable = 1;
- }
-
-
- //set according to VMM policy
- shadow_pte->write_through = 0;
- shadow_pte->cache_disable = 0;
- shadow_pte->global_page = 0;
- //
-
- } else {
- // Handle hooked pages as well as other special pages
- if (handle_special_page_fault(info, fault_addr, guest_fault_pa, error_code) == -1) {
- PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
- return -1;
- }
- }
- } else if ((shadow_pte_access == PT_ACCESS_WRITE_ERROR) &&
- (shadow_pte->vmm_info == PT32_GUEST_PT)) {
-
- struct shadow_page_state * state = &(info->shdw_pg_state);
- PrintDebug("Write operation on Guest PAge Table Page (large page)\n");
- state->cached_cr3 = 0;
- shadow_pte->writable = 1;
-
- } else {
- PrintError("Error in large page fault handler...\n");
- PrintError("This case should have been handled at the top level handler\n");
- return -1;
- }
-
- PrintDebug("Returning from large page fault handler\n");
- return 0;
-}
-
-
-
-
-/*
- * We assume the the guest pte pointer has already been translated to a host virtual address
- */
-static int handle_shadow_pte32_fault(struct guest_info * info,
- addr_t fault_addr,
- pf_error_t error_code,
- pte32_t * shadow_pt,
- pte32_t * guest_pt) {
-
- pt_access_status_t guest_pte_access;
- pt_access_status_t shadow_pte_access;
- pte32_t * guest_pte = (pte32_t *)&(guest_pt[PTE32_INDEX(fault_addr)]);;
- pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
-
-
- // Check the guest page permissions
- guest_pte_access = v3_can_access_pte32(guest_pt, fault_addr, error_code);
-
- // Check the shadow page permissions
- shadow_pte_access = v3_can_access_pte32(shadow_pt, fault_addr, error_code);
-
-#ifdef DEBUG_SHADOW_PAGING
- PrintDebug("Guest PTE: (access=%d)\n\t", guest_pte_access);
- PrintPTEntry(PAGE_PT32, fault_addr, guest_pte);
- PrintDebug("Shadow PTE: (access=%d)\n\t", shadow_pte_access);
- PrintPTEntry(PAGE_PT32, fault_addr, shadow_pte);
-#endif
-
- /* Was the page fault caused by the Guest's page tables? */
- if (is_guest_pf(guest_pte_access, shadow_pte_access) == 1) {
- PrintDebug("Access error injecting pf to guest (guest access error=%d) (pf error code=%d)\n",
- guest_pte_access, *(uint_t*)&error_code);
- inject_guest_pf(info, fault_addr, error_code);
- return 0;
- }
-
-
- if (shadow_pte_access == PT_ACCESS_OK) {
- // Inconsistent state...
- // Guest Re-Entry will flush page tables and everything should now work
- PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
- return 0;
- }
-
-
- if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
-
- addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);
-
- // Page Table Entry Not Present
- PrintDebug("guest_pa =%p\n", (void *)guest_pa);
-
- host_region_type_t host_page_type = get_shadow_addr_type(info, guest_pa);
-
- if (host_page_type == HOST_REGION_INVALID) {
- // Inject a machine check in the guest
- PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_pa);
- v3_raise_exception(info, MC_EXCEPTION);
- return 0;
- }
-
- // else...
-
- if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
- struct shadow_page_state * state = &(info->shdw_pg_state);
- addr_t shadow_pa = get_shadow_addr(info, guest_pa);
-
- shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
-
- shadow_pte->present = guest_pte->present;
- shadow_pte->user_page = guest_pte->user_page;
-
- //set according to VMM policy
- shadow_pte->write_through = 0;
- shadow_pte->cache_disable = 0;
- shadow_pte->global_page = 0;
- //
-
- guest_pte->accessed = 1;
-
- if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_pa)) != NULL) {
- // Check if the entry is a page table...
- PrintDebug("Marking page as Guest Page Table %d\n", shadow_pte->writable);
- shadow_pte->vmm_info = PT32_GUEST_PT;
- }
-
- if (guest_pte->dirty == 1) {
- shadow_pte->writable = guest_pte->writable;
- } else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
- shadow_pte->writable = guest_pte->writable;
- guest_pte->dirty = 1;
-
- if (shadow_pte->vmm_info == PT32_GUEST_PT) {
- // Well that was quick...
- struct shadow_page_state * state = &(info->shdw_pg_state);
- PrintDebug("Immediate Write operation on Guest PAge Table Page\n");
- state->cached_cr3 = 0;
- }
-
- } else if ((guest_pte->dirty == 0) && (error_code.write == 0)) { // was =
- shadow_pte->writable = 0;
- }
-
-
-
- } else {
- // Page fault handled by hook functions
- if (handle_special_page_fault(info, fault_addr, guest_pa, error_code) == -1) {
- PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
- return -1;
- }
- }
-
- } else if ((shadow_pte_access == PT_ACCESS_WRITE_ERROR) &&
- (guest_pte->dirty == 0)) {
-
- PrintDebug("Shadow PTE Write Error\n");
- guest_pte->dirty = 1;
- shadow_pte->writable = guest_pte->writable;
-
- if (shadow_pte->vmm_info == PT32_GUEST_PT) {
- struct shadow_page_state * state = &(info->shdw_pg_state);
- PrintDebug("Write operation on Guest PAge Table Page\n");
- state->cached_cr3 = 0;
- }
-
- return 0;
-
- } else {
- // Inject page fault into the guest
- inject_guest_pf(info, fault_addr, error_code);
- PrintError("PTE Page fault fell through... Not sure if this should ever happen\n");
- PrintError("Manual Says to inject page fault into guest\n");
- return -1;
- }
-
- PrintDebug("Returning end of function\n");
- return 0;
-}
-
-
-
-
-
-
-/* Currently Does not work with Segmentation!!! */
-int v3_handle_shadow_invlpg(struct guest_info * info)
-{
- if (info->mem_mode != VIRTUAL_MEM) {
- // Paging must be turned on...
- // should handle with some sort of fault I think
- PrintError("ERROR: INVLPG called in non paged mode\n");
- return -1;
- }
-
-
- if (info->cpu_mode != PROTECTED) {
- PrintError("Unsupported CPU mode (mode=%s)\n", v3_cpu_mode_to_str(info->cpu_mode));
- return -1;
- }
-
- uchar_t instr[15];
- int index = 0;
-
- int ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
- if (ret != 15) {
- PrintError("Could not read instruction 0x%p (ret=%d)\n", (void *)(addr_t)(info->rip), ret);
- return -1;
- }
-
-
- /* Can INVLPG work with Segments?? */
- while (is_prefix_byte(instr[index])) {
- index++;
- }
-
-
- if( (instr[index + 0] != (uchar_t) 0x0f) ||
- (instr[index + 1] != (uchar_t) 0x01) ) {
- PrintError("invalid Instruction Opcode\n");
- PrintTraceMemDump(instr, 15);
- return -1;
- }
-
- addr_t first_operand;
- addr_t second_operand;
- addr_t guest_cr3 = CR3_TO_PDE32_PA(info->shdw_pg_state.guest_cr3);
-
- pde32_t * guest_pd = NULL;
-
- if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
- PrintError("Invalid Guest PDE Address: 0x%p\n", (void *)guest_cr3);
- return -1;
- }
-
- index += 2;
-
- v3_operand_type_t addr_type = decode_operands32(&(info->vm_regs), instr + index, &index, &first_operand, &second_operand, REG32);
-
- if (addr_type != MEM_OPERAND) {
- PrintError("Invalid Operand type\n");
- return -1;
- }
-
- pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
- pde32_t * shadow_pde = (pde32_t *)&shadow_pd[PDE32_INDEX(first_operand)];
- pde32_t * guest_pde;
-
- //PrintDebug("PDE Index=%d\n", PDE32_INDEX(first_operand));
- //PrintDebug("FirstOperand = %x\n", first_operand);
-
- PrintDebug("Invalidating page for %p\n", (void *)first_operand);
-
- guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(first_operand)]);
-
- if (guest_pde->large_page == 1) {
- shadow_pde->present = 0;
- PrintDebug("Invalidating Large Page\n");
- } else
- if (shadow_pde->present == 1) {
- pte32_t * shadow_pt = (pte32_t *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr);
- pte32_t * shadow_pte = (pte32_t *) V3_VAddr( (void*) &shadow_pt[PTE32_INDEX(first_operand)] );
-
-#ifdef DEBUG_SHADOW_PAGING
- PrintDebug("Setting not present\n");
- PrintPTEntry(PAGE_PT32, first_operand, shadow_pte);
-#endif
-
- shadow_pte->present = 0;
- }
-
- info->rip += index;
-
- return 0;
-}
-
-
--- /dev/null
+
+static int cache_page_tables_32(struct guest_info * info, addr_t pde) { /* (re)build the hashtable of guest page-table pages (guest PA -> host VA); returns 1 on cache hit, 0 after rebuild, -1 on error */
+    struct shadow_page_state * state = &(info->shdw_pg_state);
+    addr_t pde_host_addr;
+    pde32_t * tmp_pde;
+    struct hashtable * pte_cache = NULL;
+    int i = 0;
+
+    if (pde == state->cached_cr3) { /* same page directory as last time: cache still valid */
+        return 1;
+    }
+
+    if (state->cached_ptes != NULL) { /* drop the stale cache built for the previous CR3 */
+        hashtable_destroy(state->cached_ptes, 0, 0);
+        state->cached_ptes = NULL;
+    }
+
+    state->cached_cr3 = pde;
+
+    pte_cache = create_hashtable(0, &pte_hash_fn, &pte_equals);
+    state->cached_ptes = pte_cache;
+
+    if (guest_pa_to_host_va(info, pde, &pde_host_addr) == -1) {
+        PrintError("Could not lookup host address of guest PDE\n");
+        return -1;
+    }
+
+    tmp_pde = (pde32_t *)pde_host_addr;
+
+    add_pte_map(pte_cache, pde, pde_host_addr); /* track the page directory page itself as well */
+
+
+    for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
+        if ((tmp_pde[i].present) && (tmp_pde[i].large_page == 0)) { /* large pages have no PTE page to track */
+            addr_t pte_host_addr;
+
+            if (guest_pa_to_host_va(info, (addr_t)(BASE_TO_PAGE_ADDR(tmp_pde[i].pt_base_addr)), &pte_host_addr) == -1) {
+                PrintError("Could not lookup host address of guest PDE\n"); /* NOTE(review): message should say PTE - this lookup is for a page-table page */
+                return -1;
+            }
+
+            add_pte_map(pte_cache, (addr_t)(BASE_TO_PAGE_ADDR(tmp_pde[i].pt_base_addr)), pte_host_addr);
+        }
+    }
+
+    return 0;
+
+}
+
+
+
+// We assume that shdw_pg_state.guest_cr3 is pointing to the page tables we want to activate
+// We also assume that the CPU mode has not changed during this page table transition
+static inline int activate_shadow_pt_32(struct guest_info * info) {
+ struct cr3_32 * shadow_cr3 = (struct cr3_32 *)&(info->ctrl_regs.cr3);
+ struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3);
+ int cached = 0;
+
+ // Check if shadow page tables are in the cache
+ cached = cache_page_tables_32(info, CR3_TO_PDE32_PA(*(addr_t *)guest_cr3));
+
+ if (cached == -1) {
+ PrintError("CR3 Cache failed\n");
+ return -1;
+ } else if (cached == 0) {
+ addr_t shadow_pt;
+
+ PrintDebug("New CR3 is different - flushing shadow page table %p\n", shadow_cr3 );
+ delete_page_tables_32(CR3_TO_PDE32_VA(*(uint_t*)shadow_cr3));
+
+ shadow_pt = create_new_shadow_pt();
+
+ shadow_cr3->pdt_base_addr = (addr_t)V3_PAddr((void *)(addr_t)PAGE_BASE_ADDR(shadow_pt));
+ PrintDebug( "Created new shadow page table %p\n", (void *)(addr_t)shadow_cr3->pdt_base_addr );
+ } else {
+ PrintDebug("Reusing cached shadow Page table\n");
+ }
+
+ shadow_cr3->pwt = guest_cr3->pwt;
+ shadow_cr3->pcd = guest_cr3->pcd;
+
+ return 0;
+}
+
+/*
+ * *
+ * *
+ * * 32 bit Page table fault handlers
+ * *
+ * *
+ */
+static int handle_large_pagefault_32(struct guest_info * info,
+ addr_t fault_addr, pf_error_t error_code,
+ pte32_t * shadow_pt, pde32_4MB_t * large_guest_pde);
+
+static int handle_shadow_pte32_fault(struct guest_info * info,
+ addr_t fault_addr,
+ pf_error_t error_code,
+ pte32_t * shadow_pt,
+ pte32_t * guest_pt);
+
+
+static inline int handle_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
+ pde32_t * guest_pd = NULL;
+ pde32_t * shadow_pd = CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
+ addr_t guest_cr3 = CR3_TO_PDE32_PA(info->shdw_pg_state.guest_cr3);
+ pt_access_status_t guest_pde_access;
+ pt_access_status_t shadow_pde_access;
+ pde32_t * guest_pde = NULL;
+ pde32_t * shadow_pde = (pde32_t *)&(shadow_pd[PDE32_INDEX(fault_addr)]);
+
+ PrintDebug("Shadow page fault handler: %p\n", (void*) fault_addr );
+
+ if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
+ PrintError("Invalid Guest PDE Address: 0x%p\n", (void *)guest_cr3);
+ return -1;
+ }
+
+ guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(fault_addr)]);
+
+
+ // Check the guest page permissions
+ guest_pde_access = v3_can_access_pde32(guest_pd, fault_addr, error_code);
+
+ // Check the shadow page permissions
+ shadow_pde_access = v3_can_access_pde32(shadow_pd, fault_addr, error_code);
+
+ /* Was the page fault caused by the Guest's page tables? */
+ if (is_guest_pf(guest_pde_access, shadow_pde_access) == 1) {
+ PrintDebug("Injecting PDE pf to guest: (guest access error=%d) (pf error code=%d)\n",
+ *(uint_t *)&guest_pde_access, *(uint_t *)&error_code);
+ inject_guest_pf(info, fault_addr, error_code);
+ return 0;
+ }
+
+
+ if (shadow_pde_access == PT_ACCESS_NOT_PRESENT)
+ {
+ pte32_t * shadow_pt = (pte32_t *)create_new_shadow_pt();
+
+ shadow_pde->present = 1;
+ shadow_pde->user_page = guest_pde->user_page;
+ // shadow_pde->large_page = guest_pde->large_page;
+ shadow_pde->large_page = 0;
+
+
+ // VMM Specific options
+ shadow_pde->write_through = 0;
+ shadow_pde->cache_disable = 0;
+ shadow_pde->global_page = 0;
+ //
+
+ guest_pde->accessed = 1;
+
+ shadow_pde->pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(shadow_pt));
+
+ if (guest_pde->large_page == 0) {
+ pte32_t * guest_pt = NULL;
+ shadow_pde->writable = guest_pde->writable;
+
+ if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t*)&guest_pt) == -1) {
+ // Machine check the guest
+ PrintDebug("Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
+ v3_raise_exception(info, MC_EXCEPTION);
+ return 0;
+ }
+
+ if (handle_shadow_pte32_fault(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) {
+ PrintError("Error handling Page fault caused by PTE\n");
+ return -1;
+ }
+ } else {
+      // ?? What if guest pde is dirty at this point?
+ ((pde32_4MB_t *)guest_pde)->dirty = 0;
+ shadow_pde->writable = 0;
+ }
+ }
+ else if (shadow_pde_access == PT_ACCESS_OK)
+ {
+ //
+ // PTE fault
+ //
+ pte32_t * shadow_pt = (pte32_t *)V3_VAddr( (void*)(addr_t) BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr) );
+
+ if (guest_pde->large_page == 0) {
+ pte32_t * guest_pt = NULL;
+
+ if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t*)&guest_pt) == -1) {
+ // Machine check the guest
+ PrintDebug("Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
+ v3_raise_exception(info, MC_EXCEPTION);
+ return 0;
+ }
+
+ if (handle_shadow_pte32_fault(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) {
+ PrintError("Error handling Page fault caused by PTE\n");
+ return -1;
+ }
+ } else if (guest_pde->large_page == 1) {
+ if (handle_large_pagefault_32(info, fault_addr, error_code, shadow_pt, (pde32_4MB_t *)guest_pde) == -1) {
+ PrintError("Error handling large pagefault\n");
+ return -1;
+ }
+ }
+ }
+ else if ((shadow_pde_access == PT_ACCESS_WRITE_ERROR) &&
+ (guest_pde->large_page == 1) &&
+ (((pde32_4MB_t *)guest_pde)->dirty == 0))
+ {
+ //
+ // Page Directory Entry marked read-only
+ // Its a large page and we need to update the dirty bit in the guest
+ //
+
+ PrintDebug("Large page write error... Setting dirty bit and returning\n");
+ ((pde32_4MB_t *)guest_pde)->dirty = 1;
+ shadow_pde->writable = guest_pde->writable;
+ return 0;
+
+ }
+ else if (shadow_pde_access == PT_ACCESS_USER_ERROR)
+ {
+ //
+ // Page Directory Entry marked non-user
+ //
+ PrintDebug("Shadow Paging User access error (shadow_pde_access=0x%x, guest_pde_access=0x%x)\n",
+ shadow_pde_access, guest_pde_access);
+ inject_guest_pf(info, fault_addr, error_code);
+ return 0;
+ }
+ else
+ {
+ // inject page fault in guest
+ inject_guest_pf(info, fault_addr, error_code);
+ PrintDebug("Unknown Error occurred (shadow_pde_access=%d)\n", shadow_pde_access);
+ PrintDebug("Manual Says to inject page fault into guest\n");
+#ifdef DEBUG_SHADOW_PAGING
+ PrintDebug("Guest PDE: (access=%d)\n\t", guest_pde_access);
+ PrintPTEntry(PAGE_PD32, fault_addr, guest_pde);
+ PrintDebug("Shadow PDE: (access=%d)\n\t", shadow_pde_access);
+ PrintPTEntry(PAGE_PD32, fault_addr, shadow_pde);
+#endif
+
+ return 0;
+ }
+
+ PrintDebug("Returning end of PDE function (rip=%p)\n", (void *)(addr_t)(info->rip));
+ return 0;
+}
+
+
+
+/* The guest status checks have already been done,
+ * only special case shadow checks remain
+ */
+static int handle_large_pagefault_32(struct guest_info * info,
+ addr_t fault_addr, pf_error_t error_code,
+ pte32_t * shadow_pt, pde32_4MB_t * large_guest_pde)
+{
+ pt_access_status_t shadow_pte_access = v3_can_access_pte32(shadow_pt, fault_addr, error_code);
+ pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
+ addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_4MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_4MB(fault_addr);
+
+ struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_fault_pa);
+
+
+ if ((shdw_reg == NULL) ||
+ (shdw_reg->host_type == SHDW_REGION_INVALID)) {
+ // Inject a machine check in the guest
+ PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
+ v3_raise_exception(info, MC_EXCEPTION);
+ return -1;
+ }
+
+ if (shadow_pte_access == PT_ACCESS_OK) {
+ // Inconsistent state...
+    // Guest Re-Entry will flush tables and everything should now work
+ PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
+ return 0;
+ }
+
+
+ if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
+ // Get the guest physical address of the fault
+
+ if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
+ (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
+ struct shadow_page_state * state = &(info->shdw_pg_state);
+ addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, guest_fault_pa);
+
+ shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
+
+ shadow_pte->present = 1;
+
+ /* We are assuming that the PDE entry has precedence
+ * so the Shadow PDE will mirror the guest PDE settings,
+ * and we don't have to worry about them here
+ * Allow everything
+ */
+ shadow_pte->user_page = 1;
+
+ if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_fault_pa)) != NULL) {
+ // Check if the entry is a page table...
+ PrintDebug("Marking page as Guest Page Table (large page)\n");
+ shadow_pte->vmm_info = PT32_GUEST_PT;
+ shadow_pte->writable = 0;
+ } else if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
+ shadow_pte->writable = 0;
+ } else {
+ shadow_pte->writable = 1;
+ }
+
+ //set according to VMM policy
+ shadow_pte->write_through = 0;
+ shadow_pte->cache_disable = 0;
+ shadow_pte->global_page = 0;
+ //
+
+ } else {
+ // Handle hooked pages as well as other special pages
+ // if (handle_special_page_fault(info, fault_addr, guest_fault_pa, error_code) == -1) {
+
+ if (v3_handle_mem_full_hook(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
+ PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
+ return -1;
+ }
+ }
+ } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
+
+ if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
+
+ if (v3_handle_mem_wr_hook(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
+ PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
+ return -1;
+ }
+ } else if (shadow_pte->vmm_info == PT32_GUEST_PT) {
+ struct shadow_page_state * state = &(info->shdw_pg_state);
+ PrintDebug("Write operation on Guest PAge Table Page (large page)\n");
+ state->cached_cr3 = 0;
+ shadow_pte->writable = 1;
+ }
+
+ } else {
+ PrintError("Error in large page fault handler...\n");
+ PrintError("This case should have been handled at the top level handler\n");
+ return -1;
+ }
+
+ PrintDebug("Returning from large page fault handler\n");
+ return 0;
+}
+
+
+
+
+/*
+ * We assume that the guest pte pointer has already been translated to a host virtual address
+ */
+static int handle_shadow_pte32_fault(struct guest_info * info,
+ addr_t fault_addr,
+ pf_error_t error_code,
+ pte32_t * shadow_pt,
+ pte32_t * guest_pt) {
+
+ pt_access_status_t guest_pte_access;
+ pt_access_status_t shadow_pte_access;
+ pte32_t * guest_pte = (pte32_t *)&(guest_pt[PTE32_INDEX(fault_addr)]);;
+ pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
+ addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);
+
+ struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_pa);
+
+ if ((shdw_reg == NULL) ||
+ (shdw_reg->host_type == SHDW_REGION_INVALID)) {
+ // Inject a machine check in the guest
+ PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_pa);
+ v3_raise_exception(info, MC_EXCEPTION);
+ return 0;
+ }
+
+ // Check the guest page permissions
+ guest_pte_access = v3_can_access_pte32(guest_pt, fault_addr, error_code);
+
+ // Check the shadow page permissions
+ shadow_pte_access = v3_can_access_pte32(shadow_pt, fault_addr, error_code);
+
+#ifdef DEBUG_SHADOW_PAGING
+ PrintDebug("Guest PTE: (access=%d)\n\t", guest_pte_access);
+ PrintPTEntry(PAGE_PT32, fault_addr, guest_pte);
+ PrintDebug("Shadow PTE: (access=%d)\n\t", shadow_pte_access);
+ PrintPTEntry(PAGE_PT32, fault_addr, shadow_pte);
+#endif
+
+ /* Was the page fault caused by the Guest's page tables? */
+ if (is_guest_pf(guest_pte_access, shadow_pte_access) == 1) {
+ PrintDebug("Access error injecting pf to guest (guest access error=%d) (pf error code=%d)\n",
+ guest_pte_access, *(uint_t*)&error_code);
+ inject_guest_pf(info, fault_addr, error_code);
+ return 0;
+ }
+
+
+ if (shadow_pte_access == PT_ACCESS_OK) {
+ // Inconsistent state...
+ // Guest Re-Entry will flush page tables and everything should now work
+ PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
+ return 0;
+ }
+
+
+ if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
+ // Page Table Entry Not Present
+ PrintDebug("guest_pa =%p\n", (void *)guest_pa);
+
+ if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
+ (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
+ struct shadow_page_state * state = &(info->shdw_pg_state);
+ addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, guest_pa);
+
+ shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
+
+ shadow_pte->present = guest_pte->present;
+ shadow_pte->user_page = guest_pte->user_page;
+
+ //set according to VMM policy
+ shadow_pte->write_through = 0;
+ shadow_pte->cache_disable = 0;
+ shadow_pte->global_page = 0;
+ //
+
+ guest_pte->accessed = 1;
+
+ if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_pa)) != NULL) {
+ // Check if the entry is a page table...
+ PrintDebug("Marking page as Guest Page Table %d\n", shadow_pte->writable);
+ shadow_pte->vmm_info = PT32_GUEST_PT;
+ }
+
+ if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
+ shadow_pte->writable = 0;
+ } else if (guest_pte->dirty == 1) {
+ shadow_pte->writable = guest_pte->writable;
+ } else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
+ shadow_pte->writable = guest_pte->writable;
+ guest_pte->dirty = 1;
+
+ if (shadow_pte->vmm_info == PT32_GUEST_PT) {
+ // Well that was quick...
+ struct shadow_page_state * state = &(info->shdw_pg_state);
+ PrintDebug("Immediate Write operation on Guest PAge Table Page\n");
+ state->cached_cr3 = 0;
+ }
+
+ } else if ((guest_pte->dirty == 0) && (error_code.write == 0)) { // was =
+ shadow_pte->writable = 0;
+ }
+
+ } else {
+ // Page fault handled by hook functions
+
+ if (v3_handle_mem_full_hook(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
+ PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
+ return -1;
+ }
+ }
+ } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
+ guest_pte->dirty = 1;
+
+ if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
+ if (v3_handle_mem_wr_hook(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
+ PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
+ return -1;
+ }
+ } else {
+ PrintDebug("Shadow PTE Write Error\n");
+ shadow_pte->writable = guest_pte->writable;
+ }
+
+ if (shadow_pte->vmm_info == PT32_GUEST_PT) {
+ struct shadow_page_state * state = &(info->shdw_pg_state);
+ PrintDebug("Write operation on Guest PAge Table Page\n");
+ state->cached_cr3 = 0;
+ }
+
+ return 0;
+
+ } else {
+ // Inject page fault into the guest
+ inject_guest_pf(info, fault_addr, error_code);
+ PrintError("PTE Page fault fell through... Not sure if this should ever happen\n");
+ PrintError("Manual Says to inject page fault into guest\n");
+ return -1;
+ }
+
+ PrintDebug("Returning end of function\n");
+ return 0;
+}
+
+
+
+/* If we start to optimize we should look up the guest pages in the cache... */
+static inline int handle_shadow_invlpg_32(struct guest_info * info, addr_t vaddr) {
+ pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
+ pde32_t * shadow_pde = (pde32_t *)&shadow_pd[PDE32_INDEX(vaddr)];
+
+ addr_t guest_cr3 = CR3_TO_PDE32_PA(info->shdw_pg_state.guest_cr3);
+ pde32_t * guest_pd = NULL;
+ pde32_t * guest_pde;
+
+ if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
+ PrintError("Invalid Guest PDE Address: 0x%p\n", (void *)guest_cr3);
+ return -1;
+ }
+
+ guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(vaddr)]);
+
+ if (guest_pde->large_page == 1) {
+ shadow_pde->present = 0;
+ PrintDebug("Invalidating Large Page\n");
+ } else if (shadow_pde->present == 1) {
+ pte32_t * shadow_pt = (pte32_t *)(addr_t)BASE_TO_PAGE_ADDR_4KB(shadow_pde->pt_base_addr);
+ pte32_t * shadow_pte = (pte32_t *) V3_VAddr( (void*) &shadow_pt[PTE32_INDEX(vaddr)] );
+
+ PrintDebug("Setting not present\n");
+
+ shadow_pte->present = 0;
+ }
+ return 0;
+}
--- /dev/null
+
+
+
+static inline int activate_shadow_pt_32pae(struct guest_info * info) {
+ PrintError("Activating 32 bit PAE page tables not implemented\n");
+ return -1;
+}
+
+
+
+
+
+
+/*
+ * *
+ * *
+ * * 32 bit PAE Page table fault handlers
+ * *
+ * *
+ */
+
+static inline int handle_shadow_pagefault_32pae(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
+ PrintError("32 bit PAE shadow paging not implemented\n");
+ return -1;
+}
+
+
+static inline int handle_shadow_invlpg_32pae(struct guest_info * info, addr_t vaddr) {
+ PrintError("32 bit PAE shadow paging not implemented\n");
+ return -1;
+}
+
+
+
+
--- /dev/null
+
+static inline int activate_shadow_pt_64(struct guest_info * info) {
+ // struct cr3_64 * shadow_cr3 = (struct cr3_64 *)&(info->ctrl_regs.cr3);
+
+ return -1;
+}
+
+
+
+
+
+
+/*
+ * *
+ * *
+ * * 64 bit Page table fault handlers
+ * *
+ * *
+ */
+
+static inline int handle_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
+ pt_access_status_t guest_access;
+ pt_access_status_t shadow_access;
+ int ret;
+ PrintDebug("64 bit shadow page fault\n");
+
+ ret = v3_check_guest_pt_32(info, info->shdw_pg_state.guest_cr3, fault_addr, error_code, &guest_access);
+
+ PrintDebug("Guest Access Check: %d (access=%d)\n", ret, guest_access);
+
+ ret = v3_check_host_pt_32(info->ctrl_regs.cr3, fault_addr, error_code, &shadow_access);
+
+ PrintDebug("Shadow Access Check: %d (access=%d)\n", ret, shadow_access);
+
+
+ PrintError("64 bit shadow paging not implemented\n");
+ return -1;
+}
+
+
+static inline int handle_shadow_invlpg_64(struct guest_info * info, addr_t vaddr) {
+ PrintError("64 bit shadow paging not implemented\n");
+ return -1;
+}
#include <xed/xed-interface.h>
#include "vm_guest.h"
#include "test.h"
+
#else
+
#include <palacios/vmm_decoder.h>
#include <palacios/vmm_xed.h>
#include <xed/xed-interface.h>
#include <palacios/vm_guest.h>
#include <palacios/vmm.h>
-
-
#endif
-static xed_state_t decoder_state;
+
+static uint_t tables_inited = 0;
+
#define GPR_REGISTER 0
#define SEGMENT_REGISTER 1
switch (length) { \
case 1: \
mask = mask_1; \
+ break; \
case 2: \
mask = mask_2; \
+ break; \
case 4: \
mask = mask_4; \
+ break; \
case 8: \
mask = mask_8; \
+ break; \
} \
val & mask;}) \
-// This returns a pointer to a V3_OPCODE_[*] array defined in vmm_decoder.h
-static int get_opcode(xed_iform_enum_t iform, addr_t * opcode);
+
+static v3_op_type_t get_opcode(xed_iform_enum_t iform);
static int xed_reg_to_v3_reg(struct guest_info * info, xed_reg_enum_t xed_reg, addr_t * v3_reg, uint_t * reg_len);
static int get_memory_operand(struct guest_info * info, xed_decoded_inst_t * xed_instr, uint_t index, struct x86_operand * operand);
-int v3_init_decoder() {
- xed_tables_init();
- xed_state_zero(&decoder_state);
+int v3_init_decoder(struct guest_info * info) {
+ // Global library initialization, only do it once
+ if (tables_inited == 0) {
+ xed_tables_init();
+ tables_inited = 1;
+ }
+
+ xed_state_t * decoder_state = (xed_state_t *)V3_Malloc(sizeof(xed_state_t));
+ xed_state_zero(decoder_state);
+
+ info->decoder_state = decoder_state;
+
return 0;
}
xed_error_enum_t xed_error;
- if (set_decoder_mode(info, &decoder_state) == -1) {
+ if (set_decoder_mode(info, info->decoder_state) == -1) {
PrintError("Could not set decoder mode\n");
return -1;
}
- xed_decoded_inst_zero_set_mode(&xed_instr, &decoder_state);
+ xed_decoded_inst_zero_set_mode(&xed_instr, info->decoder_state);
xed_error = xed_decode(&xed_instr,
REINTERPRET_CAST(const xed_uint8_t *, instr_ptr),
}
+static int decode_string_op(struct guest_info * info,
+ xed_decoded_inst_t * xed_instr, const xed_inst_t * xi,
+ struct x86_instr * instr) {
+
+ PrintDebug("String operation\n");
+
+ if (instr->op_type == V3_OP_MOVS) {
+ instr->num_operands = 2;
+
+ if (get_memory_operand(info, xed_instr, 0, &(instr->dst_operand)) == -1) {
+ PrintError("Could not get Destination memory operand\n");
+ return -1;
+ }
+
+ if (get_memory_operand(info, xed_instr, 1, &(instr->src_operand)) == -1) {
+ PrintError("Could not get Source memory operand\n");
+ return -1;
+ }
+
+ if (instr->prefixes.rep == 1) {
+ addr_t reg_addr = 0;
+ uint_t reg_length = 0;
+
+ xed_reg_to_v3_reg(info, xed_decoded_inst_get_reg(xed_instr, XED_OPERAND_REG0), ®_addr, ®_length);
+ instr->str_op_length = MASK(*(addr_t *)reg_addr, reg_length);
+ } else {
+ instr->str_op_length = 1;
+ }
+
+ }
+
+ return 0;
+}
+
+
int v3_decode(struct guest_info * info, addr_t instr_ptr, struct x86_instr * instr) {
xed_decoded_inst_t xed_instr;
xed_error_enum_t xed_error;
+ v3_get_prefixes((uchar_t *)instr_ptr, &(instr->prefixes));
- if (set_decoder_mode(info, &decoder_state) == -1) {
+ if (set_decoder_mode(info, info->decoder_state) == -1) {
PrintError("Could not set decoder mode\n");
return -1;
}
-
-
- xed_decoded_inst_zero_set_mode(&xed_instr, &decoder_state);
+ xed_decoded_inst_zero_set_mode(&xed_instr, info->decoder_state);
xed_error = xed_decode(&xed_instr,
REINTERPRET_CAST(const xed_uint8_t *, instr_ptr),
const xed_inst_t * xi = xed_decoded_inst_inst(&xed_instr);
instr->instr_length = xed_decoded_inst_get_length(&xed_instr);
- instr->num_operands = xed_decoded_inst_noperands(&xed_instr);
+
xed_iform_enum_t iform = xed_decoded_inst_get_iform_enum(&xed_instr);
+#ifdef DEBUG_XED
+ xed_iclass_enum_t iclass = xed_decoded_inst_get_iclass(&xed_instr);
+
+ PrintDebug("iform=%s, iclass=%s\n", xed_iform_enum_t2str(iform), xed_iclass_enum_t2str(iclass));
+#endif
- PrintDebug("iform=%s\n", xed_iform_enum_t2str(iform));
+ if ((instr->op_type = get_opcode(iform)) == V3_INVALID_OP) {
+ PrintError("Could not get opcode. (iform=%s)\n", xed_iform_enum_t2str(iform));
+ return -1;
+ }
+
+
+ // We special case the string operations...
+ if (xed_decoded_inst_get_category(&xed_instr) == XED_CATEGORY_STRINGOP) {
+ instr->is_str_op = 1;
+ return decode_string_op(info, &xed_instr, xi, instr);
+ } else {
+ instr->is_str_op = 0;
+ instr->str_op_length = 0;
+ }
+
+
+ instr->num_operands = xed_decoded_inst_noperands(&xed_instr);
if (instr->num_operands > 3) {
PrintDebug("Special Case Not Handled\n");
-
-
- if (get_opcode(iform, &(instr->opcode)) == -1) {
- PrintError("Could not get opcode. (iform=%s)\n", xed_iform_enum_t2str(iform));
- return -1;
- }
-
-
-
-
//PrintDebug("Number of operands: %d\n", instr->num_operands);
//PrintDebug("INSTR length: %d\n", instr->instr_length);
xed_reg,
&(v3_op->operand),
&(v3_op->size));
-
+
if (v3_reg_type == -1) {
PrintError("First operand is an Unhandled Operand: %s\n", xed_reg_enum_t2str(xed_reg));
v3_op->type = INVALID_OPERAND;
case XED_OPERAND_MEM0:
{
- /*
- struct x86_operand * operand = &(instr->dst_operand);
-
- if (xed_decoded_inst_mem_read(&xed_instr, 0)) {
- operand = &(instr->src_operand);
- } else if (xed_decoded_inst_mem_written(&xed_instr, 0)) {
- operand = &(instr->dst_operand);
- }
- */
-
if (get_memory_operand(info, &xed_instr, 0, v3_op) == -1) {
PrintError("Could not get first memory operand\n");
return -1;
case XED_OPERAND_MEM0:
{
-
- /*
- if (xed_decoded_inst_mem_read(&xed_instr, 0)) {
- v3_op = &(instr->src_operand);
- } else if (xed_decoded_inst_mem_written(&xed_instr, 0)) {
- v3_op = &(instr->dst_operand);
- }
- */
-
if (get_memory_operand(info, &xed_instr, 0, v3_op) == -1) {
PrintError("Could not get first memory operand\n");
return -1;
if (disp_bits) {
xed_int64_t xed_disp = xed_decoded_inst_get_memory_displacement(xed_instr, op_index);
- mem_op.displacement_size = disp_bits / 8;
+ mem_op.displacement_size = disp_bits;
mem_op.displacement = xed_disp;
-
}
operand->type = MEM_OPERAND;
- PrintDebug("Struct: Seg=%x, base=%x, index=%x, scale=%x, displacement=%x\n",
- mem_op.segment, mem_op.base, mem_op.index, mem_op.scale, mem_op.displacement);
+ PrintDebug("Struct: Seg=%p, base=%p, index=%p, scale=%p, displacement=%p (size=%d)\n",
+ (void *)mem_op.segment, (void*)mem_op.base, (void *)mem_op.index,
+ (void *)mem_op.scale, (void *)(addr_t)mem_op.displacement, mem_op.displacement_size);
+
+ PrintDebug("operand size: %d\n", operand->size);
- seg = mem_op.segment;
+ seg = MASK(mem_op.segment, mem_op.segment_size);
base = MASK(mem_op.base, mem_op.base_size);
index = MASK(mem_op.index, mem_op.index_size);
scale = mem_op.scale;
displacement = MASK(mem_op.displacement, mem_op.displacement_size);
- PrintDebug("Seg=%x, base=%x, index=%x, scale=%x, displacement=%x\n", seg, base, index, scale, displacement);
+ PrintDebug("Seg=%p, base=%p, index=%p, scale=%p, displacement=%p\n",
+ (void *)seg, (void *)base, (void *)index, (void *)scale, (void *)(addr_t)displacement);
operand->operand = seg + base + (scale * index) + displacement;
return 0;
return CTRL_REGISTER;
case XED_REG_CR4:
*v3_reg = (addr_t)&(info->ctrl_regs.cr4);
- *reg_len = 4;
+ *reg_len = 4;
return CTRL_REGISTER;
case XED_REG_CR8:
*v3_reg = (addr_t)&(info->ctrl_regs.cr8);
-static int get_opcode(xed_iform_enum_t iform, addr_t * opcode) {
+static v3_op_type_t get_opcode(xed_iform_enum_t iform) {
switch (iform) {
+
+ /* Control Instructions */
+
case XED_IFORM_MOV_CR_GPR64_CR:
case XED_IFORM_MOV_CR_GPR32_CR:
- *opcode = (addr_t)&V3_OPCODE_MOVCR2;
- break;
+ return V3_OP_MOVCR2;
case XED_IFORM_MOV_CR_CR_GPR64:
case XED_IFORM_MOV_CR_CR_GPR32:
- *opcode = (addr_t)&V3_OPCODE_MOV2CR;
- break;
+ return V3_OP_MOV2CR;
case XED_IFORM_SMSW_GPRv:
- *opcode = (addr_t)&V3_OPCODE_SMSW;
- break;
+ return V3_OP_SMSW;
case XED_IFORM_LMSW_GPR16:
- *opcode = (addr_t)&V3_OPCODE_LMSW;
- break;
+ return V3_OP_LMSW;
case XED_IFORM_CLTS:
- *opcode = (addr_t)&V3_OPCODE_CLTS;
- break;
+ return V3_OP_CLTS;
+
+ case XED_IFORM_INVLPG_MEMb:
+ return V3_OP_INVLPG;
+
+
+ /* Data Instructions */
+
+ case XED_IFORM_ADC_MEMv_GPRv:
+ case XED_IFORM_ADC_MEMv_IMM:
+ case XED_IFORM_ADC_MEMb_GPR8:
+ case XED_IFORM_ADC_MEMb_IMM:
+ return V3_OP_ADC;
+
+ case XED_IFORM_ADD_MEMv_GPRv:
+ case XED_IFORM_ADD_MEMb_IMM:
+ case XED_IFORM_ADD_MEMb_GPR8:
+ case XED_IFORM_ADD_MEMv_IMM:
+ return V3_OP_ADD;
+
+ case XED_IFORM_AND_MEMv_IMM:
+ case XED_IFORM_AND_MEMb_GPR8:
+ case XED_IFORM_AND_MEMv_GPRv:
+ case XED_IFORM_AND_MEMb_IMM:
+ return V3_OP_AND;
+
+ case XED_IFORM_SUB_MEMv_IMM:
+ case XED_IFORM_SUB_MEMb_GPR8:
+ case XED_IFORM_SUB_MEMb_IMM:
+ case XED_IFORM_SUB_MEMv_GPRv:
+ return V3_OP_SUB;
+
+ case XED_IFORM_MOV_MEMv_GPRv:
+ case XED_IFORM_MOV_MEMb_GPR8:
+ case XED_IFORM_MOV_MEMb_AL:
+ case XED_IFORM_MOV_MEMv_IMM:
+ case XED_IFORM_MOV_MEMb_IMM:
+ return V3_OP_MOV;
+
+ case XED_IFORM_DEC_MEMv:
+ case XED_IFORM_DEC_MEMb:
+ return V3_OP_DEC;
+ case XED_IFORM_INC_MEMb:
+ case XED_IFORM_INC_MEMv:
+ return V3_OP_INC;
+ case XED_IFORM_OR_MEMv_IMM:
+ case XED_IFORM_OR_MEMb_IMM:
+ case XED_IFORM_OR_MEMv_GPRv:
+ case XED_IFORM_OR_MEMb_GPR8:
+ return V3_OP_OR;
+
+ case XED_IFORM_XOR_MEMv_GPRv:
+ case XED_IFORM_XOR_MEMb_IMM:
+ case XED_IFORM_XOR_MEMb_GPR8:
+ case XED_IFORM_XOR_MEMv_IMM:
+ return V3_OP_XOR;
+
+ case XED_IFORM_NEG_MEMb:
+ case XED_IFORM_NEG_MEMv:
+ return V3_OP_NEG;
+
+ case XED_IFORM_NOT_MEMv:
+ case XED_IFORM_NOT_MEMb:
+ return V3_OP_NOT;
+
+ case XED_IFORM_XCHG_MEMv_GPRv:
+ case XED_IFORM_XCHG_MEMb_GPR8:
+ return V3_OP_XCHG;
+
+ case XED_IFORM_SETB_MEMb:
+ return V3_OP_SETB;
+
+ case XED_IFORM_SETBE_MEMb:
+ return V3_OP_SETBE;
+
+ case XED_IFORM_SETL_MEMb:
+ return V3_OP_SETL;
+
+ case XED_IFORM_SETLE_MEMb:
+ return V3_OP_SETLE;
+
+ case XED_IFORM_SETNB_MEMb:
+ return V3_OP_SETNB;
+
+ case XED_IFORM_SETNBE_MEMb:
+ return V3_OP_SETNBE;
+
+ case XED_IFORM_SETNL_MEMb:
+ return V3_OP_SETNL;
+
+ case XED_IFORM_SETNLE_MEMb:
+ return V3_OP_SETNLE;
+
+ case XED_IFORM_SETNO_MEMb:
+ return V3_OP_SETNO;
+
+ case XED_IFORM_SETNP_MEMb:
+ return V3_OP_SETNP;
+
+ case XED_IFORM_SETNS_MEMb:
+ return V3_OP_SETNS;
+
+ case XED_IFORM_SETNZ_MEMb:
+ return V3_OP_SETNZ;
+
+ case XED_IFORM_SETO_MEMb:
+ return V3_OP_SETO;
+
+ case XED_IFORM_SETP_MEMb:
+ return V3_OP_SETP;
+
+ case XED_IFORM_SETS_MEMb:
+ return V3_OP_SETS;
+
+ case XED_IFORM_SETZ_MEMb:
+ return V3_OP_SETZ;
+
+
+ case XED_IFORM_MOVSB:
+ case XED_IFORM_MOVSW:
+ case XED_IFORM_MOVSD:
+ case XED_IFORM_MOVSQ:
+ return V3_OP_MOVS;
default:
- *opcode = 0;
- return -1;
+ return V3_INVALID_OP;
}
-
- return 0;
}