Palacios Public Git Repository

To checkout Palacios execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


significant refactoring of RTM emulation code
Kyle Hale [Tue, 25 Mar 2014 00:09:37 +0000 (19:09 -0500)]
This is the first cut at a refactor of the transactional memory
extension. Most changes have been to reduce large function sizes, and
to reduce footprint in changes to Palacios core files. There are
several bug fixes as well, and this gets us to the point where we
can run thousand-transaction benchmarks on single-core.

At this point our main limitations are yet-to-be implemented
architectural features (coming soon) and limitations in the
quix86 decoder.

Signed-off-by: Maciek Swiech <dotpyfe@u.northwestern.edu>

palacios/include/extensions/tm_cache.h [new file with mode: 0644]
palacios/include/extensions/tm_util.h [new file with mode: 0644]
palacios/include/extensions/trans_mem.h [new file with mode: 0644]
palacios/src/extensions/Kconfig
palacios/src/extensions/Makefile
palacios/src/extensions/ext_trans_mem.c [new file with mode: 0644]
palacios/src/extensions/tm_util.c [new file with mode: 0644]
palacios/src/palacios/mmu/vmm_shdw_pg_tlb_64.h
palacios/src/palacios/svm.c
palacios/src/palacios/svm_handler.c
palacios/src/palacios/vmm_quix86.c

diff --git a/palacios/include/extensions/tm_cache.h b/palacios/include/extensions/tm_cache.h
new file mode 100644 (file)
index 0000000..79d7c3c
--- /dev/null
@@ -0,0 +1,156 @@
+/*
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National 
+ * Science Foundation and the Department of Energy.  
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico.  You can find out more at 
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2012, NWU EECS 441 Transactional Memory Team
+ * Copyright (c) 2012, The V3VEE Project <http://www.v3vee.org> 
+ * All rights reserved.
+ *
+ * Author: Maciek Swiech <dotpyfe@u.northwestern.edu>
+ *         Kyle Hale <kh@u.northwestern.edu>
+ *
+ * This is free software.  You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ *
+ */
+
+#ifndef __TM_CACHE_H__
+#define __TM_CACHE_H__
+
+#ifdef __V3VEE__
+
+// Conflict-detection result codes returned by the emulated cache
+enum TM_ERR_E {
+    TM_OK  = 0, // no conflict
+    TM_WAR = 1, // write-after-read conflict
+    TM_WAW = 2, // write-after-write conflict
+    TM_RAW = 3  // read-after-write conflict
+};
+
+// Operation types recorded in the redo log (see struct rec below)
+enum TM_OP {
+    TM_READ  = 0,
+    TM_WRITE = 1,
+    TM_BEGIN = 2, // transaction start
+    TM_ABORT = 3, // transaction rollback
+    TM_END   = 4  // transaction commit
+};
+
+// one record in the redo log linked list
+struct rec {
+    enum TM_OP op;             // kind of access/event this record represents
+    addr_t vcorenum;           // virtual core that performed the operation
+    addr_t physaddr;           // guest physical address touched
+    addr_t datalen;            // length of the access in bytes
+    struct list_head rec_node; // linkage in the redo-log list
+};
+
+// MESI-style cache-line state bits plus per-line read/write-set markers
+struct flag_bits {
+    uint8_t m : 1; // modified
+    uint8_t e : 1; // exclusive
+    uint8_t s : 1; // shared
+    uint8_t i : 1; // invalid (original comment said "exclusive" -- copy-paste error)
+    uint8_t ws : 1; // line is in a transaction's write set -- TODO confirm
+    uint8_t rs : 1; // line is in a transaction's read set -- TODO confirm
+} __attribute__((packed));
+
+// A single emulated cache line: address tag plus its state flags.
+// (Removed a stray second semicolon after the 'flag' member.)
+struct cache_line {
+    uint64_t tag;
+    struct flag_bits * flag;
+};
+
+// Geometry/configuration of the emulated cache
+struct cache_spec {
+    uint64_t line_size;         // line size in bytes
+    uint64_t size;              // cache size in kb
+    uint64_t num_lines;
+    uint64_t associativity;
+    enum cache_policy policy;   // NOTE(review): enum cache_policy is not declared in this header -- confirm it is defined before first use
+};
+
+// cache hardware we are emulating
+// Function-pointer "object" interface: init populates this box from a spec;
+// read/write return a TM_ERR_E conflict code; invalidate returns a count
+// (inferred from the signatures only -- confirm against the implementation).
+struct cache_box {
+    int (*init) (struct cache_spec * spec, struct cache_box * self);
+    struct cache_spec * spec;
+    struct cache_line ** cache_table;
+
+    enum TM_ERR_E (*read)  (struct guest_info *core, addr_t hva, addr_t len, struct cache_box * self);
+    enum TM_ERR_E (*write) (struct guest_info *core, addr_t hva, addr_t len, struct cache_box * self);
+    uint64_t (*invalidate) (struct guest_info *core, addr_t hva, addr_t len, struct cache_box * self);
+};
+
+// redo logger
+// TODO: dont need this anymore?
+/*
+struct logger {
+    // emulated cache
+    struct cache_box *model;
+    lock_t   global_lock;
+    uint64_t loglen;
+    uint64_t num_trans_active;
+
+    enum TM_ERR_E (*read) (struct guest_info *core, addr_t hva, addr_t len);
+    enum TM_ERR_E (*write) (struct guest_info *core, addr_t hva, addr_t len);
+
+    log_rec  *head;
+};
+*/
+/*
+ * error = handle_start_tx(logger,vcorenum);
+ * error = handle_abort(logger,vcorenum);
+ * error = handle_commit(logger,vcorenum);
+ *
+ * should_abort = handle_write(logger, vcorenum, physaddr, data, datalen);
+ * should_abort = handle_read(logger, vcorenum, physaddr, *data, datalen);
+ *
+ */
+
+/* FN SKEL
+ *
+ * handle_start_tx(logger,vcorenum) {
+ *  logger.record(BEGIN,vcorenum)
+ * }
+ *
+ * handle_abort(logger,vcorenum) {
+ *  logger.record(ABORT,vcorenum)
+ * }
+ *
+ * handle_commit(logger,vcorenum) {
+ *  logger.record(END,vcorenum)
+ *  logger.commit(vcorenum)
+ * }
+ *
+ * record(head,type,vcorenum,physaddr,datalen,data) {
+ *  new rec = {type, vcorenum, physaddr, datalen, data,head}
+ *  head = new rec
+ *  err = conflict_check(head,vcorenum)
+ * }
+ *
+ * read(logger,core,addr,*data,datalen) {
+ *  logger.record(READ,vcorenum)
+ *
+ *  // hmm, we want the most recent entry, should we keep track of tail as
+ *  // well?? or just keep a separate log of current values?
+ *  cur = head
+ *  while cur {
+ *    if cur->addr == addr
+ *      data = cur->data
+ *      return
+ *    cur = cur->next
+ *  }
+ *
+ *  read_mem(data)
+ *  return
+ * }
+ *
+ * write(logger,core,addr,data,datalen) {
+ *  logger.record(WRITE,vcorenum,data)
+ * }
+ *
+ */
+
+#endif // ! __V3VEE__
+
+#endif
diff --git a/palacios/include/extensions/tm_util.h b/palacios/include/extensions/tm_util.h
new file mode 100644 (file)
index 0000000..3af8cfe
--- /dev/null
@@ -0,0 +1,87 @@
+/* 
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National 
+ * Science Foundation and the Department of Energy.  
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico.  You can find out more at 
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
+ * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
+ * All rights reserved.
+ *
+ * Author:  Maciek Swiech <dotpyfe@u.northwestern.edu>
+ *          Marcel Flores <marcel-flores@u.northwestern.edu>
+ *          Zachary Bischof <zbischof@u.northwestern.edu>
+ *          Kyle C. Hale <kh@u.northwestern.edu>
+ *
+ * This is free software.  You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
+
+#ifndef __TM_UTIL_H
+#define __TM_UTIL_H
+
+// new printing macros
+// used like TM_ERR(core, ABORT, "couldnt wangle the dangle");
+
+#define TM_ERR(core, label, msg, ...) \
+    do {                              \
+        typeof (core) _core = (core);  \
+        PrintError(_core->vm_info, _core, "TM %10s | " msg , #label, ##__VA_ARGS__); \
+    } while (0);
+
+#define TM_DBG(core, label, msg, ...) \
+    do {                              \
+        typeof (core) _core = (core);  \
+        PrintDebug(_core->vm_info, _core, "TM %10s | " msg , #label, ##__VA_ARGS__); \
+    } while (0);
+
+// One recorded guest memory access inside a transaction.
+struct mem_op {
+    addr_t   guest_addr; // guest virtual address of the access
+    uint64_t data;       // value associated with the access
+    int      current;    // 0 = value pending staging/update (see update_list, stage_entry)
+
+    struct list_head op_node; // linkage in a core's read or write list
+};
+
+void v3_clear_tm_lists(struct v3_trans_mem * tm);
+
+// note memory location touched in the list, avoids duplicate creation
+int add_mem_op_to_list(struct list_head * list, addr_t guest_addr);
+
+// searches for address in the list, returns pointer to elt if found, null
+struct mem_op * list_contains_guest_addr(struct list_head * list, addr_t guest_addr);
+
+// checks for current = 0 in list, updates to new value from staging page
+int update_list(struct v3_trans_mem * tm, struct list_head * list);
+
+// writes value to staging page, sets current = 0
+int stage_entry(struct v3_trans_mem * tm, struct list_head * list, addr_t guest_addr);
+
+// adds entry to list if it doesnt exist, used in copying
+int copy_add_entry(struct list_head * list, addr_t guest_addr, uint64_t data);
+
+// if TM block successfully finishes, commits the list
+int commit_list(struct guest_info * core, struct v3_trans_mem * tm);
+
+// copy other lists to core's global lists
+int v3_copy_lists(struct guest_info *core);
+
+// set TM_MODE to TM_ON
+int v3_set_tm(struct v3_trans_mem * tm);
+
+// set TM_MODE to TM_OFF, clear data structures
+int v3_clr_tm(struct v3_trans_mem * tm);
+
+// clear the vtlb on a core
+int v3_clr_vtlb(struct guest_info *core);
+
+// set TM_STATE to TM_ABORT
+int v3_tm_set_abrt(struct v3_trans_mem * tm);
+
+// free the staging page of the core
+int v3_free_staging_page(struct v3_trans_mem * tm);
+
+#endif
diff --git a/palacios/include/extensions/trans_mem.h b/palacios/include/extensions/trans_mem.h
new file mode 100644 (file)
index 0000000..f8d980e
--- /dev/null
@@ -0,0 +1,271 @@
+/*
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National 
+ * Science Foundation and the Department of Energy.  
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico.  You can find out more at 
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2012, NWU EECS 441 Transactional Memory Team
+ * Copyright (c) 2012, The V3VEE Project <http://www.v3vee.org> 
+ * All rights reserved.
+ *
+ * Author: Maciek Swiech <dotpyfe@u.northwestern.edu>
+ *          Marcel Flores <marcel-flores@u.northwestern.edu>
+ *          Zachary Bischof <zbischof@u.northwestern.edu>
+ *          Kyle C. Hale <kh@u.northwestern.edu>
+ *
+ * This is free software.  You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ *
+ *
+ *
+ * We claim that we can have a single, shared "cache"-like box
+ * that handles all writes and reads when TM is on on any core.  The
+ * idea is that if TM is on on any core, we redirect reads/writes
+ * that we get to the box, and it records them internally for 
+ * future playback, and tells us whether an abort condition has
+ * occurred or not:
+ *
+ * error = handle_start_tx(boxstate,vcorenum);
+ * error = handle_abort(boxstate,vcorenum);
+ * error = handle_commit(boxstate,vcorenum);
+ *
+ * should_abort = handle_write(boxstate, vcorenum, physaddr, data, datalen);
+ * should_abort = handle_read(boxstate, vcorenum,physaddr, *data, datalen);
+ *
+ * One implementation:
+ *
+ * struct rec {
+ *    enum {READ,WRITE,BEGIN,ABORT,END} op;
+ *    addr_t vcorenum,
+ *           physaddr,
+ *           datalen ;
+ *    struct rec *next;
+ * }
+ *
+ * struct cache_model {
+ *    void *init(xml spec);  // make a cache, return ptr to state
+ *    int write(void *priv, physaddr, datalen, int (*change_cb(int core,
+ *                             physaddrstart, len));
+ *    // similar for read
+ *
+ * // Idea is that we pass writes to cache model, it calls us back to say which
+ * lines on which cores have changed
+ * }
+ *
+ *
+ * struct boxstate {
+ *    struct cache_model *model; //
+ *    lock_t     global_lock; // any handle_* func acquires this first
+ *    uint64_t   loglen;
+ *    uint64_t   numtransactionsactive;
+ *    struct rec *first;
+ * }
+ *
+ * int handle_write(box,vcore,physaddr,data,datalen) {
+ *
+ */
+
+#ifndef __TRANS_MEM_H__
+#define __TRANS_MEM_H__
+
+#include <palacios/vmm_lock.h>
+#include <palacios/vmcb.h>
+#include <palacios/vmm_paging.h>
+
+#define MAX_CORES 32
+
+#define TM_KICKBACK_CALL 0x1337
+
+/* Lock-guarded hashtable helpers. 'h' must be an expression whose final
+ * token names the table, with a companion '<table>_lock' v3_lock_t declared
+ * alongside it (reached by token-pasting h##_lock). */
+#define HTABLE_SEARCH(h, k) ({ addr_t ret; v3_lock(h##_lock); ret = v3_htable_search((h), (k)); v3_unlock(h##_lock); ret; })
+#define HTABLE_INSERT(h, k, v) ({ addr_t ret; v3_lock(h##_lock); ret = v3_htable_insert((h), (k), (addr_t)(v)); v3_unlock(h##_lock); ret; })
+
+#define INSTR_INJECT_LEN 10
+#define INSTR_BUF_SZ  15
+#define ERR_STORE_MUST_ABORT -2
+#define ERR_STORE_FAIL -1
+#define ERR_DECODE_FAIL -1
+#define ERR_TRANS_FAULT_FAIL 0
+#define TRANS_FAULT_OK 1
+#define TRANS_HCALL_FAIL -1 
+#define TRANS_HCALL_OK 0
+
+/* conflict checking codes */
+#define ERR_CHECK_FAIL -1
+#define CHECK_MUST_ABORT -2
+#define CHECK_IS_CONFLICT 1
+#define CHECK_NO_CONFLICT 0
+
+/* RTM instruction handling */
+#define XBEGIN_INSTR_LEN 0x6
+#define XEND_INSTR_LEN   0x3
+#define XABORT_INSTR_LEN 0x3
+#define XTEST_INSTR_LEN  0x3
+
+
+// Per-address read/write access bits -- presumably the values stored in the
+// access_type hashtable (see struct v3_trans_mem); confirm.
+struct v3_tm_access_type {
+    uint8_t r : 1;
+    uint8_t w : 1;
+} __attribute__((packed));
+
+// (gva, core id, core-local transaction number) tuple; packed so it can be
+// hashed/compared as raw bytes -- TODO confirm intended use as a hash key.
+struct v3_ctxt_tuple {
+    void * gva;
+    void * core_id;
+    void * core_lt;
+} __attribute__((packed));
+
+/* 441-tm: Are we currently in a transaction */
+enum TM_MODE_E { 
+    TM_OFF = 0, 
+    TM_ON = 1,
+};
+
+/* 441-tm: Current state of the transaction state machine */
+enum TM_STATE_E {
+    TM_NULL = 0,
+    TM_IFETCH = 1,
+    TM_EXEC = 2
+//    TM_ABORT = 3
+};
+
+typedef enum v3_tm_op {
+    OP_TYPE_READ,
+    OP_TYPE_WRITE
+} v3_tm_op_t;
+
+/* Per-core transactional memory state, registered as extension core state
+ * under the name "trans_mem" (see v3_get_ext_core_state callers). */
+struct v3_trans_mem {
+    /* current transaction */
+    uint64_t t_num;
+
+    /* 441-tm: linked list to store core's reads and writes */
+    struct list_head trans_r_list;
+    struct list_head trans_w_list;
+
+    /* 441-tm: hash tables of addresses */
+    struct hashtable * addr_ctxt;       // records the core transaction context at time of address use
+    v3_lock_t addr_ctxt_lock;
+    uint64_t addr_ctxt_entries;
+
+    struct hashtable * access_type;     // hashes addr:corenum:t_num for each address use
+    v3_lock_t access_type_lock;
+    uint64_t access_type_entries;
+
+    /* 441-tm: lets remember things about the next instruction */
+    uint8_t dirty_instr_flag;  // set once an instruction has been overwritten with a vmmcall stub
+    addr_t  dirty_hva;         // host VA of the overwritten bytes
+    addr_t  dirty_gva;         // guest VA of the overwritten bytes
+    uchar_t dirty_instr[15];   // saved original bytes, restored later
+    int     cur_instr_len;
+
+    enum TM_MODE_E TM_MODE;    // TM_ON while this core is inside a transaction
+    enum TM_STATE_E TM_STATE;  // instruction-emulation state machine
+    uint64_t TM_ABORT;         // abort request flag; may be set by other cores
+
+    struct shadow_page_data * staging_page;  // page used to stage transactional writes
+
+    /* 441-tm: Remember the failsafe addr */
+    addr_t  fail_call;
+
+    /* 441-tm: Save the rax we are about to ruin */
+    v3_reg_t clobbered_rax;
+
+    // branching instrs
+    int to_branch;   // nonzero if the next emulated instruction is a taken branch
+    addr_t offset;   // branch displacement
+
+    // timing info
+    uint64_t entry_time;
+    uint64_t exit_time;
+    uint64_t entry_exits;
+
+    // cache box
+    struct cache_box * box;
+
+    struct guest_info * ginfo;  // back-pointer to the owning core
+
+};
+
+
+/* VM-wide transactional memory state shared across cores
+ * (held in the file-scope tm_global_state in ext_trans_mem.c). */
+struct v3_tm_state {
+    v3_lock_t lock;
+    enum TM_MODE_E TM_MODE;    // presumably TM_ON while any core transacts -- confirm
+    uint64_t cores_active;     // presumably count of cores in transactions -- confirm
+
+    uint64_t  * last_trans;    // presumably per-core last transaction number -- confirm
+};
+
+/* Chain node holding a pointer to a core-local transaction number --
+ * presumably used when garbage-collecting stale hash entries (tm_hash_gc);
+ * confirm against the implementation. */
+struct hash_chain {
+    uint64_t * curr_lt;
+
+    struct list_head lt_node;
+};
+
+// called from #PF handler, stages entries, catches reads / writes
+addr_t v3_handle_trans_mem_fault(struct guest_info *core, 
+                                 addr_t fault_addr, 
+                                 pf_error_t error);
+
+// restores instruction after core->rip
+int v3_restore_dirty_instr(struct guest_info *core);
+
+// restores instruction after core->rip
+int v3_restore_abort_instr(struct guest_info *core);
+
+// handles abort cleanup, called from INT/EXCP or XABORT
+int v3_handle_trans_abort(struct guest_info *core);
+
+// record a memory access in hashes
+int tm_record_access (struct v3_trans_mem * tm, 
+                      uint8_t write, 
+                      addr_t gva);
+
+// garbage collect hash recordings
+int tm_hash_gc (struct v3_trans_mem * tm);
+
+// check address for conflicts
+int tm_check_conflict(struct   v3_vm_info * vm_info,
+                      addr_t   gva,
+                      v3_tm_op_t op_type,
+                      uint64_t core_num, 
+                      uint64_t curr_ctxt);
+
+// increment transaction number
+int v3_tm_inc_tnum(struct v3_trans_mem * tm);
+
+
+/* exception-related functions */
+int v3_tm_handle_exception(struct guest_info * info, addr_t exit_code);
+
+void v3_tm_set_excp_intercepts(vmcb_ctrl_t * ctrl_area);
+
+void v3_tm_check_intr_state(struct guest_info * info, 
+        vmcb_ctrl_t * guest_ctrl, 
+        vmcb_saved_state_t * guest_state);
+
+
+/* paging-related functions */
+int v3_tm_handle_pf_64 (struct guest_info * info,
+                        pf_error_t error_code,
+                        addr_t fault_addr,
+                        addr_t * page_to_use);
+
+void v3_tm_handle_usr_tlb_miss(struct guest_info * info,
+                               pf_error_t error_code,
+                               addr_t page_to_use,
+                               addr_t * shadow_pa);
+
+void v3_tm_handle_read_fault(struct guest_info * info,
+                             pf_error_t error_code,
+                             pte64_t * shadow_pte);
+
+#include <palacios/vmm_decoder.h>
+
+/* decoding-related functions */
+int v3_tm_decode_rtm_instrs(struct guest_info * info, 
+                            addr_t instr_ptr, 
+                            struct x86_instr * instr);
+
+
+#endif
index eb646bc..897d368 100644 (file)
@@ -53,4 +53,20 @@ config DEBUG_EXT_CPU_MAPPER_EDF
        default n
        depends on DEBUG_ON && EXT_CPU_MAPPER_EDF
 
+config TM_FUNC
+        bool "Enable Intel RTM Emulation Support"
+        default n
+        help
+          Enable Palacios to emulate Intel's hardware
+          transactional memory extensions. This is the
+          Restricted Transactional Memory (RTM) featureset,
+          part of Intel's TSX extensions.
+
+config DEBUG_TM_FUNC
+    bool "Enable RTM debugging output"
+    depends on TM_FUNC
+    default n
+    help
+        Enable Transactional Memory debugging output
+
 endmenu
index 1c66658..9ba073c 100644 (file)
@@ -7,3 +7,6 @@ obj-$(V3_CONFIG_EXT_MACH_CHECK) += ext_mcheck.o
 obj-$(V3_CONFIG_EXT_VMWARE) += ext_vmware.o
 obj-$(V3_CONFIG_EXT_SCHED_EDF) += ext_sched_edf.o
 obj-$(V3_CONFIG_EXT_CPU_MAPPER_EDF) += ext_cpu_mapper_edf.o
+
+obj-$(V3_CONFIG_TM_FUNC) += ext_trans_mem.o \
+                                           tm_util.o
diff --git a/palacios/src/extensions/ext_trans_mem.c b/palacios/src/extensions/ext_trans_mem.c
new file mode 100644 (file)
index 0000000..4b417e1
--- /dev/null
@@ -0,0 +1,2285 @@
+/*
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National 
+ * Science Foundation and the Department of Energy.  
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico.  You can find out more at 
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2012, NWU EECS 441 Transactional Memory Team
+ * Copyright (c) 2012, The V3VEE Project <http://www.v3vee.org> 
+ * All rights reserved.
+ *
+ * Author:  Maciek Swiech <dotpyfe@u.northwestern.edu>
+ *          Kyle C. Hale <kh@u.northwestern.edu>
+ *          Marcel Flores <marcel-flores@u.northwestern.edu>
+ *          Zachary Bischof <zbischof@u.northwestern.edu>
+ *          
+ *
+ * This is free software.  You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
+
+#include <palacios/vmm_mem.h>
+#include <palacios/vmm.h>
+#include <palacios/vmcb.h>
+#include <palacios/vmm_decoder.h>
+#include <palacios/vm_guest_mem.h>
+#include <palacios/vmm_ctrl_regs.h>
+#include <palacios/vmm_paging.h>
+#include <palacios/vmm_direct_paging.h>
+#include <palacios/svm.h>
+#include <palacios/svm_handler.h>
+#include <palacios/vmm_excp.h>
+#include <palacios/vmm_extensions.h>
+#include <palacios/vmm_sprintf.h>
+#include <palacios/vmm_hashtable.h>
+
+#include <extensions/trans_mem.h>
+#include <extensions/tm_util.h>
+
+#if !V3_CONFIG_DEBUG_TM_FUNC
+#undef PrintDebug
+#define PrintDebug(fmt, args...)
+#endif
+
+/* TODO LIST: 
+ * - save/restore register state on XBEGIN/XABORT
+ * - put status codes in RAX
+ * - Implement proper exceptions for failed XBEGINS etc.
+ */
+
+/* this includes a mov to rax */
+static const char * vmmcall_bytes = "\x48\xc7\xc0\x37\x13\x00\x00\x0f\x01\xd9"; 
+static struct v3_tm_state * tm_global_state = NULL;
+
+
+/* Translate core->rip (linearized through CS) to a host virtual address,
+ * using the guest-physical or guest-virtual path depending on mem_mode.
+ * NOTE(review): the v3_gpa_to_hva / v3_gva_to_hva return codes are ignored,
+ * and *target is left unmodified if neither mem_mode matches or translation
+ * fails -- callers must tolerate that. */
+static void 
+tm_translate_rip (struct guest_info * core, addr_t * target) 
+{
+
+    if (core->mem_mode == PHYSICAL_MEM) {
+        v3_gpa_to_hva(core, 
+                get_addr_linear(core, core->rip, &(core->segments.cs)), 
+                target);
+    } else if (core->mem_mode == VIRTUAL_MEM) {
+        v3_gva_to_hva(core, 
+                get_addr_linear(core, core->rip, &(core->segments.cs)), 
+                target);
+    }
+
+}
+
+
+/* Read 'size' bytes of guest memory at 'addr' (linearized through CS) into
+ * dst, choosing the physical- or virtual-memory reader by mem_mode.
+ * NOTE(review): read errors are not checked -- dst may be stale on failure. */
+static void 
+tm_read_instr (struct guest_info * core, 
+                           addr_t addr, 
+                           uchar_t * dst, 
+                           uint_t size) 
+{
+
+    if (core->mem_mode == PHYSICAL_MEM) {
+        v3_read_gpa_memory(core, 
+                get_addr_linear(core, addr , &(core->segments.cs)), 
+                size, 
+                dst);
+
+    } else { 
+       v3_read_gva_memory(core, 
+                get_addr_linear(core, addr, &(core->segments.cs)), 
+                size, 
+                dst);
+    }
+
+}
+
+
+/* Decode failure is treated as catastrophic: request an abort (TM_ABORT = 1)
+ * on every *other* core currently in transactional context; the local core's
+ * abort is handled by the caller. Returns 0 on success, -1 if a remote
+ * core's TM extension state cannot be found. */
+static int 
+tm_handle_decode_fail (struct guest_info * core) 
+{
+    addr_t cur_rip;
+    uint_t core_num;
+
+    tm_translate_rip(core, &cur_rip);
+
+#ifdef V3_CONFIG_DEBUG_TM_FUNC
+    v3_dump_mem((uint8_t *)cur_rip, INSTR_BUF_SZ);
+#endif
+
+    /* If we can't decode an instruction, we treat it as a catastrophic event, aborting *everyone* */
+    for (core_num = 0; core_num < core->vm_info->num_cores; core_num++ ) {
+        struct v3_trans_mem * remote_tm;
+
+        /* skip local core */
+        if (core_num == core->vcpu_id) {
+            continue;
+        }
+        
+        remote_tm = v3_get_ext_core_state(&(core->vm_info->cores[core_num]), "trans_mem");
+        if (!remote_tm) {
+            PrintError(core->vm_info, core,"++ TM DECODE ++ couldnt get remote_tm\n");
+            return -1;
+        }
+
+        /* skip cores who aren't in transactional context */
+        if (remote_tm->TM_MODE == TM_OFF) {
+            continue;
+        }
+
+        PrintDebug(core->vm_info, core,"++ TM DECODE ++ setting abort for core %d due to decoding error\n", core_num);
+        remote_tm->TM_ABORT = 1;
+    }
+
+    return 0;
+}
+                                  
+
+/* special casing for control-flow instructions
+ * Evaluates the branch condition for the conditional jumps we emulate,
+ * records the displacement and taken/not-taken decision in tm, and computes
+ * *instr_location = address of the instruction that will execute next.
+ * returns 1 if the branch is taken, 0 otherwise (never fails).
+ * (Refactor: the six cases previously duplicated identical bookkeeping;
+ * it is now shared below the switch.)
+ */
+static int 
+tm_handle_ctrl_flow (struct guest_info * core,
+                                 struct v3_trans_mem * tm,
+                                 addr_t * instr_location,
+                                 struct x86_instr * struct_instr)
+
+{
+    struct rflags * flags = (struct rflags *)&(core->ctrl_regs.rflags);
+    addr_t offset  = 0;
+    int to_jmp     = 0;
+    int is_branch  = 1;
+
+    /* per-opcode condition evaluation only; bookkeeping is shared */
+    switch (struct_instr->op_type) {
+
+        case V3_OP_JLE:
+            PrintDebug(core->vm_info, core, "!!++ JLE\n");
+            to_jmp = (flags->zf || flags->sf != flags->of);
+            break;
+        case V3_OP_JAE:
+            PrintDebug(core->vm_info, core,"!!++ JAE\n");
+            to_jmp = (flags->cf == 0);
+            break;
+        case V3_OP_JMP:
+            PrintDebug(core->vm_info, core,"!!++ JMP\n");
+            to_jmp = 1;
+            break;
+        case V3_OP_JNZ:
+            PrintDebug(core->vm_info, core,"!!++ JNZ\n");
+            to_jmp = (flags->zf == 0);
+            break;
+        case V3_OP_JL:
+            PrintDebug(core->vm_info, core,"!!++ JL\n");
+            to_jmp = (flags->sf != flags->of);
+            break;
+        case V3_OP_JNS:
+            PrintDebug(core->vm_info, core,"!!++ JNS\n");
+            to_jmp = (flags->sf == 0);
+            break;
+        default:
+            is_branch = 0;
+            break;
+    }
+
+    if (is_branch) {
+        offset = struct_instr->dst_operand.operand;
+
+        *instr_location = core->rip + tm->cur_instr_len + (to_jmp ? offset : 0);
+        tm->offset    = offset;
+        tm->to_branch = to_jmp;
+    } else {
+        /* not a branch we emulate: next instruction follows sequentially */
+        *instr_location = core->rip + tm->cur_instr_len;
+    }
+
+    return to_jmp;
+}
+
+
+/* entry points :
+ *
+ * called inside #UD and VMMCALL handlers
+ * only affects global state in case of quix86 fall over
+ *  -> set other cores TM_ABORT to 1, return -2
+ */
+/* Fetch and decode the instruction at core->rip, record its length, locate
+ * where the *next* instruction begins (following the branch if the current
+ * instruction is a taken jump), and save the INSTR_INJECT_LEN bytes found
+ * there into tm->dirty_instr so they can be restored after we overwrite
+ * them with a vmmcall stub.
+ * Returns 0 on success, -1 on unrecoverable error, or ERR_STORE_MUST_ABORT
+ * when the local transaction must abort (decode failure or REP prefix). */
+static int 
+v3_store_next_instr (struct guest_info * core, struct v3_trans_mem * tm) 
+{
+    struct x86_instr struct_instr;
+    uchar_t cur_instr[INSTR_BUF_SZ];
+    addr_t  instr_location;
+
+    // Fetch the current instruction
+    tm_read_instr(core, core->rip, cur_instr, INSTR_BUF_SZ);
+
+    PrintDebug(core->vm_info, core,"-- TM STORE -- storing next instruction, current rip: %llx\n", (uint64_t)core->rip);
+
+    /* Attempt to decode current instruction to determine its length */
+    if (v3_decode(core, (addr_t)cur_instr, &struct_instr) == ERR_DECODE_FAIL) {
+        
+        PrintError(core->vm_info, core,"++ TM Error ++ Could not decode currrent instruction (at %llx)\n", (uint64_t)core->rip);
+
+        /* this will attempt to abort all the remote cores */
+        if (tm_handle_decode_fail(core) == -1) {
+            PrintError(core->vm_info, core, "++ TM Error ++ Could not handle failed decode\n");
+            return -1;
+        }
+
+        /* we need to trigger a local abort */
+        return ERR_STORE_MUST_ABORT;
+    }
+
+
+    /* we can't currently handle REP prefixes, abort */
+    if (struct_instr.op_type != V3_INVALID_OP &&
+            (struct_instr.prefixes.repne ||
+             struct_instr.prefixes.repnz ||
+             struct_instr.prefixes.rep   ||
+             struct_instr.prefixes.repe  ||
+             struct_instr.prefixes.repz)) {
+
+        PrintError(core->vm_info, core,"Encountered REP prefix, aborting\n");
+        return ERR_STORE_MUST_ABORT;
+    }
+
+    tm->cur_instr_len = struct_instr.instr_length;
+
+    /* handle jump instructions */
+    tm_handle_ctrl_flow(core, tm, &instr_location, &struct_instr);
+
+    /* save next 10 bytes after current instruction, we'll put vmmcall here */
+    tm_read_instr(core, instr_location, cur_instr, INSTR_INJECT_LEN);
+
+    /* store the next instruction and its length in info */
+    memcpy(tm->dirty_instr, cur_instr, INSTR_INJECT_LEN);
+
+    return 0;
+}
+
+
+/* Overwrite the next-instruction bytes with the "mov rax, 0x1337; vmmcall"
+ * stub (vmmcall_bytes) so the guest traps back to the VMM after executing
+ * exactly one instruction. RAX is saved first since the stub clobbers it.
+ * Fails (-1) if either the emulated or the injected instruction would
+ * straddle a 4KB page boundary, or if the target gva cannot be translated. */
+static int 
+v3_overwrite_next_instr (struct guest_info * core, struct v3_trans_mem * tm) 
+{
+    addr_t ptr;
+
+    // save rax
+    tm->clobbered_rax = (core->vm_regs).rax;
+
+    ptr = core->rip;
+
+    /* we can't currently handle instructions that span page boundaries */
+    if ((ptr + tm->cur_instr_len) % PAGE_SIZE_4KB < (ptr % PAGE_SIZE_4KB)) {
+        PrintError(core->vm_info, core,"++ TM OVERWRITE ++ emulated instr straddling page boundary\n");
+        return -1;
+    }
+
+    ptr = core->rip + tm->cur_instr_len + (tm->to_branch ? tm->offset : 0);
+
+    if ((ptr + INSTR_INJECT_LEN) % PAGE_SIZE_4KB < (ptr % PAGE_SIZE_4KB)) {
+        PrintError(core->vm_info, core,"++ TM OVERWRITE ++ injected instr straddling page boundary\n");
+        return -1;
+    }
+
+    if (v3_gva_to_hva(core,
+                get_addr_linear(core, ptr, &(core->segments.cs)),
+                &ptr) == -1) {
+
+        PrintError(core->vm_info, core,"++ TM Error ++ Calculating next rip hva failed\n");
+        return -1;
+    }
+
+    PrintDebug(core->vm_info, core,"-- TM REPLACE -- Replacing next instruction at addr %llx with vmm hyper call, len=%d\n",
+            core->rip + tm->cur_instr_len + (tm->to_branch ? tm->offset : 0), (int)tm->cur_instr_len );
+
+    /* Copy VMM call into the memory address of beginning of next instruction (ptr) */
+    memcpy((char*)ptr, vmmcall_bytes, INSTR_INJECT_LEN);
+
+    /* KCH: flag that we've dirtied an instruction, and store its host address */
+    tm->dirty_instr_flag = 1;
+    tm->dirty_gva        = core->rip + tm->cur_instr_len + (tm->to_branch ? tm->offset : 0);
+    tm->dirty_hva        = ptr;
+    tm->to_branch        = 0;
+
+    return 0;
+}
+
+
+/* entry points:
+ *
+ * this should only be called if TM_STATE == TM_NULL, additionally we check if our dirtied flag if set
+ *
+ * Undoes v3_overwrite_next_instr: puts the saved instruction bytes back,
+ * restores the clobbered RAX, and rewinds RIP to the restored instruction.
+ * Returns 0 on success (or nothing to restore), -1 on missing TM state.
+ */
+int 
+v3_restore_dirty_instr (struct guest_info * core) 
+{
+    struct v3_trans_mem * tm = (struct v3_trans_mem *)v3_get_ext_core_state(core, "trans_mem");
+
+    /* guard against missing extension state, as the other entry points do */
+    if (!tm) {
+        PrintError(core->vm_info, core,"++ TM RESTORE ++ couldnt get tm core state\n");
+        return -1;
+    }
+
+    /* Restore next instruction, transition to IFETCH state */
+    PrintDebug(core->vm_info, core,"-- TM RESTORE -- Restoring next instruction.\n");
+
+    /* check if we've actually done an instruction overwrite */
+    if (!(tm->dirty_instr_flag)) {
+        PrintDebug(core->vm_info, core,"++ TM RESTORE ++ nothing to restore here...\n");
+        return 0;
+    }
+
+    // Actually restore instruction
+    memcpy((char*)tm->dirty_hva, tm->dirty_instr, INSTR_INJECT_LEN);
+
+    // Put rax back
+    (core->vm_regs).rax = tm->clobbered_rax; 
+
+    // Scoot rip back up
+    PrintDebug(core->vm_info, core,"--  TM RESTORE -- RIP in vmmcall: %llx\n", core->rip);
+    core->rip = tm->dirty_gva;
+
+    // clean up
+    tm->dirty_instr_flag = 0;
+    tm->dirty_gva = 0;
+    tm->dirty_hva = 0;
+    memset(tm->dirty_instr, 0, 15);
+
+    PrintDebug(core->vm_info, core,"-- TM RESTORE -- RIP after scooting it back up: %llx\n", core->rip);
+
+    return 0;
+}
+
+
+/* Handle a #PF caused by an instruction fetch while in a transaction: store
+ * the next instruction's bytes, overwrite them with the vmmcall stub, and
+ * advance the TM state machine to TM_EXEC.
+ * Returns TRANS_FAULT_OK or ERR_TRANS_FAULT_FAIL. */
+static addr_t 
+tm_handle_fault_ifetch (struct guest_info * core, 
+                        struct v3_trans_mem * tm)
+{
+    int sto;
+
+    PrintDebug(core->vm_info, core,"-- TM IFETCH -- Page fault caused by IFETCH: rip is the same as faulting address, we must be at an ifetch.\n");
+
+    sto = v3_store_next_instr(core, tm);
+
+    if (sto == ERR_STORE_FAIL) {
+        PrintError(core->vm_info, core,"++ TM EXIT ++ Could not store next instruction in transaction\n");
+        return ERR_TRANS_FAULT_FAIL;
+    } else if (sto == ERR_STORE_MUST_ABORT) {
+        PrintDebug(core->vm_info, core,"-- TM EXIT -- aborting for some reason\n");
+        /* NOTE(review): return value of v3_handle_trans_abort is ignored */
+        v3_handle_trans_abort(core);
+        return TRANS_FAULT_OK;
+    }
+
+    if (v3_overwrite_next_instr(core, tm) == -1) {
+        PrintError(core->vm_info, core,"++ TM PF ++ problem overwriting instruction\n");
+        return ERR_TRANS_FAULT_FAIL;
+    }
+
+    tm->TM_STATE = TM_EXEC;
+
+    return TRANS_FAULT_OK;
+}
+
+
+/* Handle a #PF caused by a transactional read: record the address in the
+ * core's read list and access hash. If the address was previously written
+ * in this transaction, stage the buffered value and return the staging page
+ * for the fault handler to map in; otherwise return TRANS_FAULT_OK (or
+ * ERR_TRANS_FAULT_FAIL on error). */
+static addr_t
+tm_handle_fault_read (struct guest_info * core, 
+                      struct v3_trans_mem * tm,
+                      addr_t fault_addr,
+                      pf_error_t error)
+
+{
+    // This page fault was caused by a read to memory in the current instruction for a core in TM mode
+    PrintDebug(core->vm_info, core,"-- TM DATA -- Page fault caused by read.\n");
+    PrintDebug(core->vm_info, core,"-- TM PF -- Adding %p to read list and hash\n", (void*)fault_addr);
+
+    if (add_mem_op_to_list(&(tm->trans_r_list), fault_addr) == -1) {
+        PrintError(core->vm_info, core,"++ TM PF ++ problem adding to list\n");
+        return ERR_TRANS_FAULT_FAIL;
+    }
+
+    if (tm_record_access(tm, error.write, fault_addr) == -1) {
+        PrintError(core->vm_info, core,"++ TM PF ++ problem recording access\n");
+        return ERR_TRANS_FAULT_FAIL;
+    }
+
+    /* if we have previously written to this address, we need to update our
+     * staging page and map it in */
+    if (list_contains_guest_addr(&(tm->trans_w_list), fault_addr)) {
+
+        PrintDebug(core->vm_info, core,"-- TM PF -- Saw a read from something in the write list\n");
+
+        /* write the value from linked list to staging page */
+        if (stage_entry(tm, &(tm->trans_w_list), fault_addr) == -1) {
+            PrintError(core->vm_info, core, "could not stage entry!\n");
+            return ERR_TRANS_FAULT_FAIL;
+        }
+
+        /* Hand it the staging page */
+        return (addr_t)(tm->staging_page);                
+
+    } else {
+
+        //Add it to the read set
+        addr_t shadow_addr = 0;
+
+        PrintDebug(core->vm_info, core,"-- TM PF -- Saw a read from a fresh address\n");
+
+        /* NOTE(review): shadow_addr is never used after this call -- the
+         * translation only validates that the gva is mappable */
+        if (v3_gva_to_hva(core, (uint64_t)fault_addr, &shadow_addr) == -1) {
+            PrintError(core->vm_info, core,"Could not translate gva to hva for transaction read\n");
+            return ERR_TRANS_FAULT_FAIL;
+        }
+
+    }
+
+    return TRANS_FAULT_OK;
+}
+
+
+/* Handle a data-write page fault taken inside a transaction (TM_ON /
+ * TM_EXEC). Records the write in the write list and conflict hash, copies the
+ * current memory value onto the staging page (so read-modify-write
+ * instructions see the original value), and returns the staging page so the
+ * write is redirected there instead of to real guest memory.
+ */
+static addr_t
+tm_handle_fault_write (struct guest_info * core,
+                       struct v3_trans_mem * tm,
+                       addr_t fault_addr,
+                       pf_error_t error)
+{
+        void * data_loc;
+        addr_t virt_data_loc;
+        addr_t shadow_addr = 0;
+
+        PrintDebug(core->vm_info, core,"-- TM DATA -- Page fault cause by write\n");
+        PrintDebug(core->vm_info, core,"-- TM PF -- Adding %p to write list and hash\n", (void*)fault_addr);
+
+        /* log the write in this transaction's write set */
+        if (add_mem_op_to_list(&(tm->trans_w_list), fault_addr) == -1) {
+            PrintError(core->vm_info, core,"could not add to list!\n");
+            return ERR_TRANS_FAULT_FAIL;
+        }
+
+        /* record the access in the cross-core conflict-detection hash */
+        if (tm_record_access(tm, error.write, fault_addr) == -1) {
+            PrintError(core->vm_info, core,"could not record access!\n");
+            return ERR_TRANS_FAULT_FAIL;
+        }
+
+        if (v3_gva_to_hva(core, (uint64_t)fault_addr, &shadow_addr) == -1) {
+            PrintError(core->vm_info, core,"could not translate gva to hva for transaction read\n");
+            return ERR_TRANS_FAULT_FAIL;
+        }
+
+        // Copy existing values to the staging page, populating that field
+        // This avoids errors in optimized code such as ++, where the original
+        // value is not read, but simply incremented
+        data_loc = (void*)((addr_t)(tm->staging_page) + (shadow_addr % PAGE_SIZE_4KB)); 
+        
+        if (v3_hpa_to_hva((addr_t)(data_loc), &virt_data_loc) == -1) {
+            PrintError(core->vm_info, core,"Could not convert address on staging page to virt addr\n");
+            return ERR_TRANS_FAULT_FAIL;
+        }
+
+        PrintDebug(core->vm_info, core,"\tValue being copied (core %d): %p\n", core->vcpu_id, *((void**)(virt_data_loc)));
+        //memcpy((void*)virt_data_loc, (void*)shadow_addr, sizeof(uint64_t));
+        /* copy one 64-bit word of the current value onto the staging page */
+        *(uint64_t*)virt_data_loc = *(uint64_t*)shadow_addr;
+
+        /* map the staging page in place of the real page */
+        return (addr_t)(tm->staging_page);                
+}
+
+
+/* Handle an instruction-fetch fault on a core that is NOT itself in a
+ * transaction while some other core in the system is (tms->TM_MODE == TM_ON).
+ * The core must still single-step (store/overwrite the next instruction) and
+ * record its accesses so conflicts with remote transactions can be detected.
+ */
+static addr_t
+tm_handle_fault_extern_ifetch (struct guest_info * core,
+                               struct v3_trans_mem * tm,
+                               addr_t fault_addr,
+                               pf_error_t error)
+{
+    int sto;
+
+    // system is in tm state, record the access
+    PrintDebug(core->vm_info, core,"-- TM IFETCH -- Page fault caused by IFETCH: we are not in TM, recording.\n");
+
+    sto = v3_store_next_instr(core,tm);
+
+    if (sto == ERR_STORE_FAIL) {
+        PrintError(core->vm_info, core,"++ TM Error ++ Could not store next instruction in transaction\n");
+        return ERR_TRANS_FAULT_FAIL;
+
+    } else if (sto == ERR_STORE_MUST_ABORT) {
+        /* decode failed: abort rather than keep single-stepping */
+        PrintError(core->vm_info, core,"++ TM IFETCH ++ decode failed, going out of single stepping\n");
+        v3_handle_trans_abort(core);
+        return TRANS_FAULT_OK;
+    }
+
+    if (v3_overwrite_next_instr(core, tm) == -1) {
+        PrintError(core->vm_info, core,"could not overwrite next instr!\n");
+        return ERR_TRANS_FAULT_FAIL;
+    }
+
+    tm->TM_STATE = TM_EXEC;
+
+    /* log the access so remote transactions can detect conflicts with us */
+    if (tm_record_access(tm, error.write, fault_addr) == -1) {
+        PrintError(core->vm_info, core,"could not record access!\n");
+        return ERR_TRANS_FAULT_FAIL;
+    }
+
+    return TRANS_FAULT_OK;
+}
+
+
+/* Handle a data access fault on a non-transactional core while another core
+ * in the system is transactional: just record the access for conflict
+ * detection and let the mapping proceed normally.
+ */
+static addr_t
+tm_handle_fault_extern_access (struct guest_info * core,
+                               struct v3_trans_mem * tm,
+                               addr_t fault_addr,
+                               pf_error_t error)
+{
+    PrintDebug(core->vm_info, core,"-- TM PF HANDLE -- recording access\n");
+    if (tm_record_access(tm, error.write, fault_addr) == -1) {
+        PrintError(core->vm_info, core,"could not record access!\n");
+        return ERR_TRANS_FAULT_FAIL;
+    }
+
+    return TRANS_FAULT_OK;
+}
+
+
+/* Fault taken while no core is in transactional mode anymore: undo any
+ * instruction overwrite left over from single-stepping and resume normally.
+ */
+static addr_t
+tm_handle_fault_tmoff (struct guest_info * core)
+{
+    PrintDebug(core->vm_info, core, "-- TM PF HANDLE -- in pf handler but noone is in tm mode anymore (core %d), i should try to eliminate hypercalls\n", core->vcpu_id);
+
+    /* put the original instruction bytes back */
+    if (v3_restore_dirty_instr(core) == -1) {
+        PrintError(core->vm_info, core,"could not restore dirty instr!\n");
+        return ERR_TRANS_FAULT_FAIL;
+    }
+
+    return TRANS_FAULT_OK;
+}
+
+
+/* entry points:
+ *
+ * called from MMU - should mean at least tms->TM_MODE is on
+ *
+ * tm->on : ifetch -> store instr, overwrite instr
+ *          r/w    -> record hash, write log, store instr, overwrite instr
+ * tm->off: ifetch -> store instr, overwrite instr
+ *          r/w    -> record hash, store instr, overwrite instr
+ *
+ *          returns ERR_TRANS_FAULT_FAIL on error
+ *          TRANS_FAULT_OK when things are fine
+ *          addr when we're passing back a staging page
+ *
+ */
+/* Top-level TM shadow page fault dispatcher. Classifies the fault by
+ * (core TM mode, system TM mode, ifetch vs. read vs. write) and delegates to
+ * the matching handler above. See the block comment above for the entry-point
+ * contract and return values.
+ */
+addr_t 
+v3_handle_trans_mem_fault (struct guest_info * core, 
+                                  addr_t fault_addr, 
+                                  pf_error_t error) 
+{
+    struct v3_trans_mem * tm = (struct v3_trans_mem *)v3_get_ext_core_state(core, "trans_mem");
+    struct v3_tm_state * tms = (struct v3_tm_state *)v3_get_extension_state(core->vm_info, "trans_mem");
+
+    if (!tm) {
+        PrintError(core->vm_info, core, "+++ TM ERROR +++ : coudln't get core state\n");
+        return ERR_TRANS_FAULT_FAIL;
+    }
+
+    if (!tms) {
+        PrintError(core->vm_info, core, "+++ TM ERROR +++ : couldn't get vm trans_mem state\n");
+        return ERR_TRANS_FAULT_FAIL;
+    }
+
+    PrintDebug(core->vm_info, core,"++ TM PF ++ PF handler core->mode : %d, system->mode : %d\n", tm->TM_MODE, tms->TM_MODE);
+
+    /* this core is transactional and faulted on its own RIP: ifetch */
+    if ((tm->TM_MODE == TM_ON) && 
+        ((void *)fault_addr == (void *)(core->rip))) {
+
+        return tm_handle_fault_ifetch(core, tm);
+
+    /* transactional core, executing, data read */
+    } else if ((tm->TM_MODE == TM_ON)    && 
+               (tm->TM_STATE == TM_EXEC) && 
+               (error.write == 0)) {
+
+        return tm_handle_fault_read(core, tm, fault_addr, error);
+
+    /* transactional core, executing, data write */
+    } else if ((tm->TM_MODE == TM_ON)    && 
+               (tm->TM_STATE == TM_EXEC) && 
+               (error.write == 1)) {
+
+        return tm_handle_fault_write(core, tm, fault_addr, error);
+
+
+    /* some OTHER core is transactional; this is our ifetch */
+    } else if ((tms->TM_MODE == TM_ON) &&
+              ((void *)fault_addr == (void *)(core->rip))) {
+
+        return tm_handle_fault_extern_ifetch(core, tm, fault_addr, error);
+
+    /* some other core is transactional; this is our data access */
+    } else if ((tms->TM_MODE == TM_ON) && 
+               (tm->TM_STATE == TM_EXEC)) {
+
+        return tm_handle_fault_extern_access(core, tm, fault_addr, error);
+    } else {
+
+        return tm_handle_fault_tmoff(core);
+
+    }
+
+    /* not reachable: every branch above returns */
+    return TRANS_FAULT_OK;
+}
+
+
+/* Hypercall taken while the whole system is out of transactional mode:
+ * restore the overwritten instruction and reset this core's TM state.
+ * It is an error for the core to still think it is in a transaction.
+ */
+static int 
+tm_handle_hcall_tmoff (struct guest_info * core, struct v3_trans_mem * tm)
+{
+    if (tm->TM_MODE == TM_ON) {
+        PrintError(core->vm_info, core,"++ TM EXIT ++ we are in tm mode but system is not!\n");
+        return TRANS_HCALL_FAIL;
+    }
+
+    // we got to an exit when things were off!
+    PrintDebug(core->vm_info, core,"-- TM EXIT -- system is off, restore the instruction and go away\n");
+
+    if (v3_restore_dirty_instr(core) == -1) {
+        PrintError(core->vm_info, core,"could not restore dirty instr!\n");
+        return TRANS_HCALL_FAIL;
+    }
+
+    tm->TM_STATE = TM_NULL;
+
+    return TRANS_HCALL_OK;
+}
+
+
+/* Hypercall path taken when the decode stage flagged an abort (TM_ABORT set):
+ * clear the flag and run the abort handler, which rolls back and redirects
+ * RIP to the transaction's fallback address.
+ */
+static int
+tm_handle_hcall_dec_abort (struct guest_info * core, 
+                           struct v3_trans_mem * tm)
+{
+    // only ever get here from TM DECODE
+    PrintDebug(core->vm_info, core,"-- TM EXIT -- we are in ABORT, call the abort handler\n");
+    tm->TM_ABORT = 0;
+
+    v3_handle_trans_abort(core);
+
+    PrintDebug(core->vm_info, core,"-- TM EXIT -- RIP after abort: %p\n", ((void*)(core->rip)));
+
+    return TRANS_HCALL_OK;
+}
+
+
+/* After an instruction finished executing (TM_EXEC), go back to the
+ * instruction-fetch phase and flush the shadow page tables so the next
+ * fetch faults back into the TM handler.
+ */
+static int
+tm_handle_hcall_ifetch_start (struct guest_info * core, 
+                              struct v3_trans_mem * tm)
+{
+    tm->TM_STATE = TM_IFETCH;
+
+    PrintDebug(core->vm_info, core,"-- TM EXIT -- VMEXIT after TM_EXEC, blast away VTLB and go into TM_IFETCH\n");
+
+    // Finally, invalidate the shadow page table 
+    v3_invalidate_shadow_pts(core);
+
+    return TRANS_HCALL_OK;
+}
+
+
+/* Walk one access list (read or write set) and check every address against
+ * the other cores' recorded accesses. Aborts the transaction on conflict.
+ *
+ * Returns TRANS_HCALL_OK (no conflict), CHECK_MUST_ABORT (conflict found and
+ * abort handled), or TRANS_HCALL_FAIL on error. Note the mixed return
+ * domains; callers must distinguish all three.
+ */
+static int 
+tm_check_list_conflict (struct guest_info * core,
+                        struct v3_trans_mem * tm,
+                        struct list_head * access_list,
+                        v3_tm_op_t op_type)
+{
+    struct mem_op * curr = NULL;
+    struct mem_op * tmp  = NULL;
+    int conflict = 0;
+
+    list_for_each_entry_safe(curr, tmp, access_list, op_node) {
+
+        conflict = tm_check_conflict(tm->ginfo->vm_info, curr->guest_addr, op_type, core->vcpu_id, tm->t_num);
+
+        if (conflict == ERR_CHECK_FAIL) {
+
+            PrintError(core->vm_info, core,"++ TM EXIT ++ error checking for conflicts\n");
+            return TRANS_HCALL_FAIL;
+
+        } else if (conflict == CHECK_IS_CONFLICT) {
+
+            PrintDebug(core->vm_info, core,"-- TM EXIT -- we have a conflict, aborting\n");
+            v3_handle_trans_abort(core);
+            return CHECK_MUST_ABORT;
+
+        }
+
+    }
+
+    return TRANS_HCALL_OK;
+}
+
+
+/* Check this transaction's write set, then its read set, for conflicts with
+ * other cores. If a conflict aborts the transaction, report OK (the abort
+ * handler already did the cleanup); otherwise return to the ifetch phase.
+ */
+static int 
+tm_handle_hcall_check_conflicts (struct guest_info * core,
+                                 struct v3_trans_mem * tm)
+{
+    int ret;
+
+    PrintDebug(core->vm_info, core,"-- TM EXIT -- still TM_ON\n");
+    PrintDebug(core->vm_info, core,"-- TM EXIT -- checking for conflicts\n");
+
+    if ((ret = tm_check_list_conflict(core, tm, &(tm->trans_w_list), OP_TYPE_WRITE)) == TRANS_HCALL_FAIL) {
+        return TRANS_HCALL_FAIL;
+    } else if (ret == CHECK_MUST_ABORT) {
+        /* conflict found; abort already handled, nothing more to do */
+        return TRANS_HCALL_OK;
+    }
+    
+    if ((ret = tm_check_list_conflict(core, tm, &(tm->trans_r_list), OP_TYPE_READ)) == TRANS_HCALL_FAIL) {
+        return TRANS_HCALL_FAIL;
+    } else if (ret == CHECK_MUST_ABORT) {
+        return TRANS_HCALL_OK;
+    }
+
+    /* no conflicts: continue the transaction at the next instruction fetch */
+    tm->TM_STATE = TM_IFETCH;
+
+    return TRANS_HCALL_OK;
+}
+
+
+/* trans mem hypercall handler 
+ * entry points:
+ *
+ * running mime (tm or tms on)
+ *   update record log
+ *   restore instr
+ *   overwrite instr
+ *   check for conflicts
+ *   flush vtlb
+ * abort (due to quix86)
+ *   restore instr
+ *   set all to abort
+ */
+/* Trans-mem hypercall handler (see the block comment above for entry points).
+ * Commits the previous instruction's staged writes back to the write list,
+ * restores the overwritten instruction, then dispatches on abort / exec /
+ * mode state.
+ *
+ * Returns TRANS_HCALL_OK on success, TRANS_HCALL_FAIL on error.
+ */
+static int 
+tm_handle_hcall (struct guest_info * core, 
+                 unsigned int hcall_id, 
+                 void * priv_data) 
+{
+    struct v3_trans_mem * tm = (struct v3_trans_mem *)v3_get_ext_core_state(core, "trans_mem");
+    struct v3_tm_state * tms = (struct v3_tm_state *)v3_get_extension_state(core->vm_info, "trans_mem");
+
+    /* Guard against missing extension state, as v3_handle_trans_mem_fault
+     * does; previously tm/tms were dereferenced unchecked here. */
+    if (!tm) {
+        PrintError(core->vm_info, core, "+++ TM ERROR +++ : coudln't get core state\n");
+        return TRANS_HCALL_FAIL;
+    }
+
+    if (!tms) {
+        PrintError(core->vm_info, core, "+++ TM ERROR +++ : couldn't get vm trans_mem state\n");
+        return TRANS_HCALL_FAIL;
+    }
+
+    if (tms->TM_MODE == TM_OFF) {
+        return tm_handle_hcall_tmoff(core, tm);
+    }
+
+    // Previous instruction has finished, copy staging page back into linked list!
+    if (update_list(tm, &(tm->trans_w_list)) == -1) {
+        PrintError(core->vm_info, core,"could not update_list!\n");
+        return TRANS_HCALL_FAIL;
+    }
+
+    // Done handling previous instruction, must put back the next instruction, reset %rip and go back to IFETCH state
+    PrintDebug(core->vm_info, core,"-- TM EXIT -- saw VMEXIT, need to restore previous state and proceed\n");
+
+    if (v3_restore_dirty_instr(core) == -1) {
+        PrintError(core->vm_info, core,"could not restore dirty instr!\n");
+        return TRANS_HCALL_FAIL;
+    }
+    
+    /* Check TM_STATE */
+    if (tm->TM_ABORT == 1 && 
+        tms->TM_MODE == TM_ON) {
+
+        return tm_handle_hcall_dec_abort(core, tm);
+
+    } else if (tm->TM_STATE == TM_EXEC) {
+        return tm_handle_hcall_ifetch_start(core, tm);
+    }
+
+    /* Check TM_MODE */
+    if (tm->TM_MODE == TM_ON && 
+        tms->TM_MODE == TM_ON) {
+
+        return tm_handle_hcall_check_conflicts(core, tm);
+
+    } else if (tm->TM_MODE == TM_OFF) {
+        PrintDebug(core->vm_info, core,"-- TM EXIT -- we are in TM_OFF\n");
+    }
+
+    return TRANS_HCALL_OK;
+}
+
+
+/* Advance this core's transaction number, both in the global last_trans
+ * array (under the global TM lock) and in the core-local counter, and verify
+ * the two stay in sync. Returns 0 on success, -1 on mismatch.
+ */
+int 
+v3_tm_inc_tnum (struct v3_trans_mem * tm) 
+{
+    addr_t irqstate;
+    uint64_t new_ctxt;
+    uint64_t * lt;
+
+    lt = tm_global_state->last_trans;
+
+    // grab global last_trans
+    irqstate = v3_lock_irqsave(tm_global_state->lock);
+    new_ctxt = ++(lt[tm->ginfo->vcpu_id]);
+    v3_unlock_irqrestore(tm_global_state->lock, irqstate);
+
+    /* local counter is only touched by this core, so no lock needed */
+    tm->t_num++;
+    /*
+    PrintDebug(tm->ginfo->vm_info, tm->ginfo,"-- TM INC TNUM -- global state is |%d|%d|, my tnum is %d\n", (int)lt[0],
+                                                                        (int)lt[1], (int)tm->t_num);
+                                                                        */
+    /* sanity check: global and local counters must advance in lockstep */
+    if (new_ctxt != tm->t_num) {
+        PrintError(tm->ginfo->vm_info, tm->ginfo,"++ TM INC TNUM ++ misaligned global and local context value\n");
+        return -1;
+    }
+
+    return 0;
+}
+
+
+/* Abort the current transaction on this core: discard the staging page,
+ * flush the VTLB that may still map it, drop the read/write sets, and (if
+ * the core was transactional) redirect RIP to the XABORT fallback address,
+ * leave TM mode and bump the transaction number. Finishes with a garbage
+ * collection pass over the conflict hashes.
+ *
+ * Returns 0 on success, -1 on error.
+ */
+int 
+v3_handle_trans_abort (struct guest_info * core) 
+{
+    struct v3_trans_mem * tm = (struct v3_trans_mem *)v3_get_ext_core_state(core, "trans_mem");
+
+    /* guard against missing core state, as the other entry points do */
+    if (!tm) {
+        PrintError(core->vm_info, core, "+++ TM ERROR +++ : coudln't get core state\n");
+        return -1;
+    }
+
+    // Free the staging page
+    if (v3_free_staging_page(tm) == -1) {
+        PrintError(core->vm_info, core,"++ TM ABORT ++ problem freeing staging page\n");
+        return -1;
+    }
+
+    // Clear the VTLB which still has our staging page in it
+    if (v3_clr_vtlb(core) == -1) {
+        PrintError(core->vm_info, core,"++ TM ABORT ++ problem clearing vtlb\n");
+        return -1;
+    }
+
+    // Free the lists
+    v3_clear_tm_lists(tm);
+
+    PrintDebug(core->vm_info, core,"-- TM ABORT -- handler - TM_MODE: %d | RIP: %llx | XABORT RIP: %llx\n", tm->TM_MODE, (uint64_t)core->rip, (uint64_t)tm->fail_call);
+
+    if (tm->TM_MODE == TM_ON) {
+        PrintDebug(core->vm_info, core,"-- TM ABORT -- Setting RIP to %llx\n", (uint64_t)tm->fail_call);
+        core->rip = tm->fail_call;
+
+        // Turn TM off
+        v3_clr_tm(tm);
+
+        // transaction # ++
+        v3_tm_inc_tnum(tm);
+    }
+    
+  
+    // time to garbage collect
+    if (tm_hash_gc(tm) == -1) {
+        PrintError(core->vm_info, core,"could not gc!\n");
+        return -1;
+    }
+
+    return 0;
+}
+
+
+/* Hash function for the addr_ctxt table: hash the key as a pointer-sized value. */
+static uint_t 
+tm_hash_fn (addr_t key) 
+{
+    uint_t hash_val = v3_hash_long(key, sizeof(void *));
+    return hash_val;
+}
+
+
+/* Key equality for the addr_ctxt table: plain value comparison. */
+static int 
+tm_eq_fn (addr_t key1, addr_t key2) 
+{
+    return (key1 == key2) ? 1 : 0;
+}
+
+
+/* Hash function for the access_type table: hash the key as an addr_t. */
+static uint_t 
+tm_hash_buf_fn (addr_t key) 
+{
+    uint_t hash_val = v3_hash_long(key, sizeof(addr_t));
+    return hash_val;
+}
+
+
+/* Key equality for the access_type table: plain value comparison. */
+static int 
+tm_eq_buf_fn(addr_t key1, addr_t key2) 
+{
+    if (key1 == key2) {
+        return 1;
+    }
+
+    return 0;
+}
+
+
+/* this checks if the remote access was done on the same
+ * local transaction number as the current one */
+/* this checks if the remote access was done on the same
+ * local transaction number as the current one.
+ *
+ * For every other core, build the (gva, core, last-trans) tuple key and look
+ * it up in that core's access_type table. A hit means the remote core touched
+ * this address in its current context; apply the RTM conflict matrix
+ * (write conflicts with anything, read conflicts with remote writes).
+ *
+ * Returns CHECK_IS_CONFLICT / CHECK_NO_CONFLICT / ERR_CHECK_FAIL.
+ * NOTE(review): curr_ctxt is not referenced in this body — callers filter on
+ * it before calling; confirm that is intentional.
+ */
+static int 
+tm_check_context (struct v3_vm_info * vm,
+                  addr_t gva,
+                  uint64_t core_num,
+                  uint64_t curr_ctxt,
+                  uint64_t * curr_lt,
+                  v3_tm_op_t op_type)
+{
+    uint64_t  core_id_sub;
+    struct v3_tm_access_type * type = NULL;
+
+    for (core_id_sub = 0; core_id_sub < vm->num_cores; core_id_sub++) {
+        struct v3_trans_mem * remote_tm;
+        void * buf[3];
+        addr_t key;
+
+        /* skip the core that's doing the checking */
+        if (core_id_sub == core_num) {
+            continue;
+        }
+
+        remote_tm = v3_get_ext_core_state(&(vm->cores[core_id_sub]), "trans_mem");
+        if (!remote_tm) {
+            PrintError(vm, VCORE_NONE, "Could not get ext core state for core %llu\n", core_id_sub);
+            return ERR_CHECK_FAIL;
+        }
+
+        /* key is the (gva, remote core, remote last-trans) tuple */
+        buf[0] = (void *)gva;
+        buf[1] = (void *)core_id_sub;
+        buf[2] = (void *)curr_lt[core_id_sub];
+
+        key = v3_hash_buffer((uchar_t*)buf, sizeof(void*)*3);
+
+        type = (struct v3_tm_access_type *)HTABLE_SEARCH(remote_tm->access_type, key);
+
+        if (type) {
+            // conflict!
+            if ( (op_type == OP_TYPE_WRITE && (type->w || type->r)) || // so basically if write?
+                    (op_type != OP_TYPE_WRITE && type->w)) {
+                return CHECK_IS_CONFLICT;
+            }
+        }
+    }
+
+    return CHECK_NO_CONFLICT;
+}
+
+
+/* check all the contexts in the list for a conflict */
+static int 
+tm_check_all_contexts (struct v3_vm_info * vm,
+                       struct list_head * hash_list,
+                       addr_t   gva,
+                       v3_tm_op_t  op_type,
+                       uint64_t core_num, 
+                       uint64_t curr_ctxt) 
+{
+    struct hash_chain * curr = NULL;
+    struct hash_chain * tmp  = NULL;
+    uint64_t * curr_lt       = NULL;
+    int ret = 0;
+
+    list_for_each_entry_safe(curr, tmp, hash_list, lt_node) {
+
+        curr_lt = curr->curr_lt;
+
+        if (curr_lt[core_num] == curr_ctxt) {
+
+            ret = tm_check_context(vm, gva, core_num, curr_ctxt, curr_lt, op_type);
+
+            if (ret == ERR_CHECK_FAIL) {
+                return ERR_CHECK_FAIL;
+            } else if (ret == CHECK_IS_CONFLICT) {
+                return CHECK_IS_CONFLICT;
+            }
+
+        }
+
+    }
+
+    return CHECK_NO_CONFLICT;
+}
+
+
+/* The following access patterns trigger an abort:
+ * We: Read     |   Anyone Else: Write
+ * We: Write    |   Anyone Else: Read, Write
+ *
+ * (pg 8-2 of haswell manual)
+ *
+ * returns ERR_CHECK_FAIL on error
+ * returns CHECK_IS_CONFLICT if there is a conflict
+ * returns CHECK_NO_CONFLICT  if there isn't
+ */
+/* Top-level conflict check for one guest address (contract documented in the
+ * block comment above): for every other core that has recorded accesses to
+ * gva, check all of that core's recorded contexts against our current one.
+ */
+int 
+tm_check_conflict (struct v3_vm_info * vm,
+                   addr_t gva,
+                   v3_tm_op_t op_type,
+                   uint64_t core_num, 
+                   uint64_t curr_ctxt) 
+{
+    uint64_t core_id;
+
+    /* loop over other cores -> core_id */
+    for (core_id = 0; core_id < vm->num_cores; core_id++) {
+
+        struct guest_info * core = NULL;
+        struct v3_trans_mem * tm = NULL;
+        struct list_head * hash_list;
+
+        /* only check other cores */
+        if (core_id == core_num) {
+            continue;
+        }
+
+        core = &(vm->cores[core_id]);
+        tm = (struct v3_trans_mem*)v3_get_ext_core_state(core, "trans_mem");
+        
+        if (!tm) {
+            PrintError(vm, VCORE_NONE, "+++ TM ERROR +++ Couldn't get core state for core %llu\n", core_id);
+            return ERR_CHECK_FAIL;
+        }
+
+        /* this core didn't access the address, move on */
+        if (!(hash_list = (struct list_head *)HTABLE_SEARCH(tm->addr_ctxt, gva))) {
+            continue;
+
+        } else {
+
+            /* loop over chained hash for gva, find fields with curr_ctxt -> curr_lt*/
+            int ret = tm_check_all_contexts(vm, hash_list, gva, op_type, core_num, curr_ctxt);
+
+            if (ret == ERR_CHECK_FAIL) {
+                return ERR_CHECK_FAIL;
+            } else if (ret == CHECK_IS_CONFLICT) {
+                return CHECK_IS_CONFLICT;
+            } 
+
+        }
+    }
+
+    return CHECK_NO_CONFLICT;
+}
+
+
+/* Decide whether a recorded context can be garbage collected.
+ * lt_copy is a snapshot of every core's last transaction number; tmoff is
+ * nonzero when no core is currently in a transaction (then everything is
+ * collectable). Returns 1 if collectable, 0 otherwise.
+ */
+static int 
+tm_need_to_gc (struct v3_trans_mem * tm,
+               struct hash_chain * curr,
+               uint64_t * lt_copy,
+               uint64_t tmoff)
+{
+    uint64_t to_gc = 1;
+    uint64_t i;
+
+    /* if none of the cores are in transactional context, 
+     * we know we can collect this context 
+     */
+    if (!tmoff) {
+
+        for (i = 0; i < tm->ginfo->vm_info->num_cores; i++) {
+            /* if *any* of the cores are active in a transaction 
+             * number that is current (listed in this context),
+             * we know we can't collect this context, as it 
+             * will be needed when that core's transaction ends
+             */
+            if (curr->curr_lt[i] >= lt_copy[i]) {
+                to_gc = 0;
+                break;
+            }
+        }
+
+    }
+    return to_gc;
+}
+
+
+/* Unlink a stale context from its hash chain and free it (including its
+ * last-trans snapshot array, which the chain owns). */
+static void 
+tm_del_stale_ctxt (struct hash_chain * curr)
+{
+        list_del(&(curr->lt_node));
+        V3_Free(curr->curr_lt);
+        V3_Free(curr);
+}
+
+
+/* Remove one entry from the access_type table and keep the local entry
+ * count in sync. The 0 flag asks the htable not to free the value here. */
+static void 
+tm_del_acc_entry (struct v3_trans_mem * tm, addr_t key)
+{
+    v3_htable_remove(tm->access_type, key, 0);
+    (tm->access_type_entries)--;
+}
+
+
+/* Collect one stale context: for every core, remove the corresponding
+ * (gva, core, last-trans) entry from access_type, then delete the context
+ * itself from the chain. Bails out (-1) if the GC pass exceeds the cycle
+ * budget or the tables are inconsistent.
+ * NOTE(review): ctxt_iter is unused here — advancing happens in the caller.
+ */
+static int 
+tm_collect_context (struct v3_trans_mem * tm, 
+                    struct hashtable_iter * ctxt_iter, 
+                    struct hash_chain * curr, 
+                    uint64_t * begin_time,
+                    uint64_t * end_time,
+                    addr_t gva)
+{
+        uint64_t i;
+
+        for (i = 0; i < tm->ginfo->vm_info->num_cores; i++) {
+            void * buf[3];
+            struct v3_tm_access_type * type;
+            addr_t key;
+
+            /* hard cycle budget so GC cannot stall the core indefinitely */
+            rdtscll(*end_time);
+            if ((*end_time - *begin_time) > 100000000) {
+                PrintError(tm->ginfo->vm_info, tm->ginfo,"++ GC ++ time threshhold exceeded, exiting!!!\n");
+                return -1;
+            }
+            
+            buf[0] = (void *)gva;
+            buf[1] = (void *)i;
+            buf[2] = (void *)curr->curr_lt[i];
+
+            key = v3_hash_buffer((uchar_t*)buf, sizeof(void*)*3);
+
+            type = (struct v3_tm_access_type *)v3_htable_search(tm->access_type, key);
+
+            if (!type) { // something has gone terribly wrong
+                PrintError(tm->ginfo->vm_info, tm->ginfo,"++ TM GC ++ could not find accesstype entry to gc, THIS! IS! WRONG!\n");
+                return -1;
+            }
+
+            /* delete the access type entry */
+            tm_del_acc_entry(tm, key);
+        }
+
+        /* delete the stale context */
+        tm_del_stale_ctxt(curr);
+
+        return 0;
+}
+
+
+/* GC all collectable contexts for the address at the iterator's current
+ * position. Advances the iterator exactly once: either by removing the hash
+ * entry (when its chain becomes empty) or by stepping past it. Called with
+ * both TM hash locks held. Returns 0 on success, -1 on error.
+ */
+static int 
+tm_collect_all_contexts (struct v3_trans_mem * tm,
+                         struct hashtable_iter * ctxt_iter,
+                         uint64_t tmoff,
+                         uint64_t * lt_copy,
+                         uint64_t * begin_time,
+                         uint64_t * end_time)
+{
+    struct hash_chain * tmp;
+    struct hash_chain * curr;
+    struct list_head * chain_list;
+    addr_t gva;
+
+    gva = (addr_t)v3_htable_get_iter_key(ctxt_iter);
+    
+    chain_list = (struct list_head *)v3_htable_get_iter_value(ctxt_iter);
+
+    /* this is a chained hash, so for each address, we will have
+     * a list of contexts. We now check each context to see
+     * whether or not it can be collected
+     */
+    list_for_each_entry_safe(curr, tmp, chain_list, lt_node) {
+
+        uint64_t to_gc = tm_need_to_gc(tm, curr, lt_copy, tmoff);
+
+        /* not garbage, go on to the next context in the list */
+        if (!to_gc) {
+            PrintDebug(tm->ginfo->vm_info, tm->ginfo,"-- TM GC -- not garbage collecting entries for address %llx\n", (uint64_t)gva);
+            continue;
+        }
+
+        PrintDebug(tm->ginfo->vm_info, tm->ginfo,"-- TM GC -- garbage collecting entries for address %llx\n", (uint64_t)gva);
+
+        /* found one, delete corresponding entries in access_type */
+        if (tm_collect_context(tm, ctxt_iter, curr, begin_time, end_time, gva) == -1) {
+            PrintError(tm->ginfo->vm_info, tm->ginfo, "++ TM GC ++ ERROR collecting context\n");
+            return -1;
+        }
+
+    }
+
+    /* if context list (hash chain) is now empty, remove the hash entry */
+    if (list_empty(chain_list)) {
+        v3_htable_iter_remove(ctxt_iter, 0);
+        (tm->addr_ctxt_entries)--;
+    } else {
+        v3_htable_iter_advance(ctxt_iter);
+    }
+
+    /* give the CPU away NONONO NEVER YIELD WHILE HOLDING A LOCK */
+    //V3_Yield();
+
+    return 0;
+}
+
+
+/* Garbage collect stale entries from the addr_ctxt and access_type hashes.
+ * Snapshots every core's last transaction number under the global lock, then
+ * walks addr_ctxt (holding both hash locks) collecting contexts that no
+ * in-flight transaction can still need. Returns 0 on success, -1 on error.
+ */
+int 
+tm_hash_gc (struct v3_trans_mem * tm) 
+{
+    addr_t irqstate, irqstate2;
+    int rc = 0;
+    uint64_t begin_time, end_time, tmoff;
+    uint64_t * lt_copy;
+    struct v3_tm_state * tms = NULL;
+    struct hashtable_iter * ctxt_iter = NULL;
+
+    tms = (struct v3_tm_state *)v3_get_extension_state(tm->ginfo->vm_info, "trans_mem");
+    if (!tms) {
+        PrintError(tm->ginfo->vm_info, tm->ginfo,"++ %d : TM GC ++ could not alloc tms\n", tm->ginfo->vcpu_id);
+        return -1;
+    }
+
+    PrintDebug(tm->ginfo->vm_info, tm->ginfo,"-- TM GC -- beginning garbage collection\n");
+    PrintDebug(tm->ginfo->vm_info, tm->ginfo,"\t %d entries in addr_ctxt (pre)\n", (int)v3_htable_count(tm->addr_ctxt));
+    PrintDebug(tm->ginfo->vm_info, tm->ginfo,"\t %d entries in access_type (pre)\n", (int)v3_htable_count(tm->access_type));
+
+    /* nonzero when no core is in a transaction: everything is collectable */
+    tmoff = (tms->cores_active == 0);
+
+    lt_copy = V3_Malloc(sizeof(uint64_t)*(tm->ginfo->vm_info->num_cores));
+    if (!lt_copy) {
+        PrintError(tm->ginfo->vm_info, tm->ginfo,"++ TM GC ++ Could not allocate space for lt_copy\n");
+        return -1;
+    }
+
+    memset(lt_copy, 0, sizeof(uint64_t)*(tm->ginfo->vm_info->num_cores));
+
+    rdtscll(begin_time);
+
+    /* lt_copy holds the last transaction number for each core */
+    irqstate = v3_lock_irqsave(tm_global_state->lock);
+    memcpy(lt_copy, tm_global_state->last_trans, sizeof(uint64_t)*(tm->ginfo->vm_info->num_cores));
+    v3_unlock_irqrestore(tm_global_state->lock, irqstate);
+
+    /* lock both hashes */
+    irqstate = v3_lock_irqsave(tm->addr_ctxt_lock);
+    irqstate2 = v3_lock_irqsave(tm->access_type_lock);
+
+    /* loop over hash entries in addr_ctxt */
+    ctxt_iter = v3_create_htable_iter(tm->addr_ctxt);
+    if (!ctxt_iter) {
+        PrintError(tm->ginfo->vm_info, tm->ginfo,"++ TM GC ++ could not create htable iterator\n");
+        rc = -1;
+        goto out;
+    }
+
+    /* we check each address stored in the hash */
+    while (ctxt_iter->entry) {
+        /* NOTE: this call advances the hash iterator */
+        if (tm_collect_all_contexts(tm, ctxt_iter, tmoff, lt_copy, &begin_time, &end_time) == -1) {
+            rc = -1;
+            goto out1;
+        }
+    }
+
+out1:
+    v3_destroy_htable_iter(ctxt_iter);
+out:
+    V3_Free(lt_copy);
+    /* Unlock in reverse acquisition order, restoring each lock with the
+     * irqstate it was saved with (previously irqstate/irqstate2 were
+     * swapped here, restoring the wrong saved IRQ flags). */
+    v3_unlock_irqrestore(tm->access_type_lock, irqstate2);
+    v3_unlock_irqrestore(tm->addr_ctxt_lock, irqstate);
+
+    rdtscll(end_time);
+
+    if (rc == -1) {
+        PrintError(tm->ginfo->vm_info, tm->ginfo,"++ TM GC ++ garbage collection failed, time spent: %d cycles\n", (int)(end_time - begin_time));
+    } else {
+        PrintDebug(tm->ginfo->vm_info, tm->ginfo,"++ TM GC ++ ended garbage collection succesfuly, time spent: %d cycles\n", (int)(end_time - begin_time));
+    }
+
+    PrintDebug(tm->ginfo->vm_info, tm->ginfo,"\t %d entries in addr_ctxt (post)\n", (int)v3_htable_count(tm->addr_ctxt));
+    PrintDebug(tm->ginfo->vm_info, tm->ginfo,"\t %d entries in access_type (post)\n", (int)v3_htable_count(tm->access_type));
+
+    return rc;
+}
+    
+
+/* TODO: break out the for loops in these functions */
+static int
+tm_update_ctxt_list (struct v3_trans_mem * tm, 
+                     uint64_t * lt_copy,
+                     addr_t gva,
+                     uint8_t write,
+                     struct list_head * hash_list)
+{
+    struct hash_chain * curr = NULL;
+    struct hash_chain * tmp  = NULL;
+    uint64_t num_cores = tm->ginfo->vm_info->num_cores;
+    uint64_t core_id;
+    uint_t new_le = 1;
+    uint_t new_e;
+
+    list_for_each_entry_safe(curr, tmp, hash_list, lt_node) {
+        uint_t i;
+        uint8_t same = 1;
+
+        for (i = 0; i < num_cores; i++) {
+            if (curr->curr_lt[i] != lt_copy[i]) {
+                same = 0;
+                break;
+            }
+        }
+
+        if (same) {
+            new_le = 0;
+            break;
+        }
+
+    }
+
+    if (new_le) {
+        struct hash_chain * new_l = V3_Malloc(sizeof(struct hash_chain));
+
+        if (!new_l) {
+            PrintError(tm->ginfo->vm_info, tm->ginfo,"++ %d : TM HASH ++ Could not allocate new list\n", tm->ginfo->vcpu_id);
+            return -1;
+        }
+
+        memset(new_l, 0, sizeof(struct hash_chain));
+
+        new_l->curr_lt = lt_copy;
+
+        list_add_tail(&(new_l->lt_node), hash_list);
+    }
+
+    for (core_id = 0; core_id < num_cores; core_id++) {
+        struct v3_tm_access_type * type;
+        struct v3_ctxt_tuple tup;
+        tup.gva     = (void*)gva;
+        tup.core_id = (void*)core_id;
+        tup.core_lt = (void*)lt_copy[core_id];
+        addr_t key;
+
+        key = v3_hash_buffer((uchar_t*)&tup, sizeof(struct v3_ctxt_tuple));
+
+        new_e = 0;
+
+        type = (struct v3_tm_access_type *)HTABLE_SEARCH(tm->access_type, key);
+
+        if (!type) {
+            // no entry yet
+            new_e = 1;
+            type = V3_Malloc(sizeof(struct v3_tm_access_type));
+
+            if (!type) {
+                PrintError(tm->ginfo->vm_info, tm->ginfo,"could not allocate type access struct\n");
+                return -1;
+            }
+        }
+
+        if (write) {
+            type->w = 1;
+        } else {
+            type->r = 1;
+        }
+
+        if (new_e) {
+            if (HTABLE_INSERT(tm->access_type, key, type) == 0) {
+                PrintError(tm->ginfo->vm_info, tm->ginfo,"TM: problem inserting new mem access in htable\n");
+                return -1;
+            }
+            (tm->access_type_entries)++;
+        }
+    }
+
+    return 0;
+}
+
+
+/* no entry in addr-ctxt yet, create one */
+static int
+tm_create_ctxt_key (struct v3_trans_mem * tm,
+                    uint64_t * lt_copy,
+                    addr_t gva,
+                    uint8_t write)
+{
+    struct list_head * hash_list = NULL;
+    struct hash_chain * new_l = NULL;
+    uint64_t num_cores = tm->ginfo->vm_info->num_cores;
+
+    hash_list = (struct list_head *)V3_Malloc(sizeof(struct list_head));
+
+    if (!hash_list) {
+        PrintError(tm->ginfo->vm_info, tm->ginfo,"++ TM HASH ++ Problem allocating hash_list\n");
+        return -1;
+    }
+
+    INIT_LIST_HEAD(hash_list);
+
+    new_l = V3_Malloc(sizeof(struct hash_chain));
+
+    if (!new_l) {
+        PrintError(tm->ginfo->vm_info, tm->ginfo,"++ TM HASH ++ Problem allocating hash_chain\n");
+        goto out_err;
+    }
+
+    memset(new_l, 0, sizeof(struct hash_chain));
+
+    new_l->curr_lt = lt_copy;
+
+    /* add the context to the hash chain */
+    list_add_tail(&(new_l->lt_node), hash_list);
+
+    if (!(HTABLE_INSERT(tm->addr_ctxt, gva, hash_list))) {
+        PrintError(tm->ginfo->vm_info, tm->ginfo,"++ TM HASH CHAIN ++ problem inserting new chain into hash\n");
+        goto out_err1;
+    }
+
+    (tm->addr_ctxt_entries)++;
+
+    uint64_t core_id;
+    /* TODO: we need a way to unwind and deallocate for all cores on failure here */
+    for (core_id = 0; core_id < num_cores; core_id++) {
+        struct v3_tm_access_type * type = NULL;
+        struct v3_ctxt_tuple tup;
+        tup.gva     = (void*)gva;
+        tup.core_id = (void*)core_id;
+        tup.core_lt = (void*)lt_copy[core_id];
+        addr_t key;
+
+        type = V3_Malloc(sizeof(struct v3_tm_access_type));
+
+        if (!type) {
+            PrintError(tm->ginfo->vm_info, tm->ginfo,"could not allocate access type struct\n");
+            goto out_err1;
+        }
+
+        if (write) {
+            type->w = 1;
+        } else {
+            type->r = 1;
+        }
+
+        key = v3_hash_buffer((uchar_t*)&tup, sizeof(struct v3_ctxt_tuple));
+
+        if (HTABLE_INSERT(tm->access_type, key, type) == 0) {
+            PrintError(tm->ginfo->vm_info, tm->ginfo,"TM: problem inserting new mem access in htable\n");
+            goto out_err1;
+        }
+        (tm->access_type_entries)++;
+    }
+
+    return 0;
+
+out_err1:
+    list_del(&(new_l->lt_node));
+    V3_Free(new_l);
+out_err:
+    V3_Free(hash_list);
+    return -1;
+}
+
+
+/* entry points:
+ *
+ * called during MIME execution
+ * record memory access in conflict logs
+ *   this locks the table during insertion
+ */
+int 
+tm_record_access (struct  v3_trans_mem * tm, 
+                  uint8_t write,
+                  addr_t  gva) 
+{
+    uint64_t * lt_copy;
+    struct list_head * hash_list;
+    addr_t irqstate;
+    uint64_t num_cores;
+
+    num_cores = tm->ginfo->vm_info->num_cores;
+
+    PrintDebug(tm->ginfo->vm_info, tm->ginfo,"-- TM REC -- recording addr %llx, addr-ctxt.cnt = %d, access-type.cnt = %d\n", (uint64_t)gva,
+                                        (int)v3_htable_count(tm->addr_ctxt), (int)v3_htable_count(tm->access_type));
+    PrintDebug(tm->ginfo->vm_info, tm->ginfo,"\tWe think that addr-ctxt.cnt = %d, access-type.cnt = %d\n",(int)tm->addr_ctxt_entries,(int)tm->access_type_entries);
+
+    lt_copy = V3_Malloc(sizeof(uint64_t)*num_cores);
+    if (!lt_copy) {
+        PrintError(tm->ginfo->vm_info, tm->ginfo,"Allocating array failed\n");
+        return -1;
+    }
+
+    memset(lt_copy, 0, sizeof(uint64_t)*num_cores);
+
+    irqstate = v3_lock_irqsave(tm_global_state->lock);
+    memcpy(lt_copy, tm_global_state->last_trans, sizeof(uint64_t)*num_cores);
+    v3_unlock_irqrestore(tm_global_state->lock, irqstate);
+
+    if (!(hash_list = (struct list_head *)HTABLE_SEARCH(tm->addr_ctxt, gva))) {
+        /* we haven't created a context list for this address yet, go do it */
+        return tm_create_ctxt_key(tm, lt_copy, gva, write);
+
+    } else {
+        /* we have a context list for this addres already, do we need to create a new context? */
+        return tm_update_ctxt_list(tm, lt_copy, gva, write, hash_list);
+    }
+
+    return 0;
+}
+
+
+static int 
+init_trans_mem (struct v3_vm_info * vm, 
+                v3_cfg_tree_t * cfg, 
+                void ** priv_data) 
+{
+    struct v3_tm_state * tms; 
+     
+    PrintDebug(vm, VCORE_NONE, "Trans Mem. Init\n");
+
+    tms = V3_Malloc(sizeof(struct v3_tm_state));
+    if (!tms) {
+        PrintError(vm, VCORE_NONE, "Problem allocating v3_tm_state\n");
+        return -1;
+    }
+
+    memset(tms, 0, sizeof(struct v3_tm_state));
+
+    if (v3_register_hypercall(vm, TM_KICKBACK_CALL, tm_handle_hcall, NULL) == -1) {
+      PrintError(vm, VCORE_NONE, "TM could not register hypercall\n");
+      goto out_err;
+    }
+
+    v3_lock_init(&(tms->lock));
+
+    tms->TM_MODE      = TM_OFF;
+    tms->cores_active = 0;
+
+    uint64_t * lt = V3_Malloc(sizeof(uint64_t) * vm->num_cores);
+    if (!lt) {
+        PrintError(vm, VCORE_NONE, "Problem allocating last_trans array\n");
+        goto out_err1;
+    }
+
+    memset(lt, 0, sizeof(uint64_t) * vm->num_cores);
+
+    int i;
+    for (i = 0; i < vm->num_cores; i++) {
+        lt[i] = 0;
+    }
+
+    tms->last_trans = lt;
+
+    *priv_data = tms;
+    tm_global_state = tms;
+
+    return 0;
+
+out_err1:
+    v3_lock_deinit(&(tms->lock));
+    v3_remove_hypercall(vm, TM_KICKBACK_CALL);
+out_err:
+    V3_Free(tms);
+    return -1;
+}
+
+
+static int 
+init_trans_mem_core (struct guest_info * core, 
+                     void * priv_data, 
+                     void ** core_data) 
+{
+    struct v3_trans_mem * tm = V3_Malloc(sizeof(struct v3_trans_mem));
+    PrintDebug(core->vm_info, core, "Trans Mem. Core Init\n");
+
+    if (!tm) {
+        PrintError(core->vm_info, core, "Problem allocating TM state\n");
+        return -1;
+    }
+
+    memset(tm, 0, sizeof(struct v3_trans_mem));
+
+    INIT_LIST_HEAD(&tm->trans_r_list);
+    INIT_LIST_HEAD(&tm->trans_w_list);
+
+    tm->addr_ctxt  = v3_create_htable(0, tm_hash_fn, tm_eq_fn);
+    if (!(tm->addr_ctxt)) {
+        PrintError(core->vm_info, core,"++ TM INIT ++ problem creating addr_ctxt\n");
+        goto out_err;
+    }
+
+    tm->access_type = v3_create_htable(0, tm_hash_buf_fn, tm_eq_buf_fn);
+    if (!(tm->access_type)) {
+        PrintError(core->vm_info, core,"++ TM INIT ++ problem creating access_type\n");
+        goto out_err1;
+    }
+    
+    v3_lock_init(&(tm->addr_ctxt_lock));
+    v3_lock_init(&(tm->access_type_lock));
+
+    tm->TM_STATE = TM_NULL;
+    tm->TM_MODE  = TM_OFF;
+    tm->TM_ABORT = 0;
+    tm->ginfo    = core;
+    tm->t_num = 0;
+    tm->to_branch = 0;
+    tm->offset = 0;
+    tm->access_type_entries = 0;
+    tm->addr_ctxt_entries = 0;
+    tm->dirty_instr_flag = 0;
+
+    /* TODO: Cache Model */
+    //tm->box = (struct cache_box *)V3_Malloc(sizeof(struct cache_box *));
+    //tm->box->init = init_cache;
+    //tm->box->init(sample_spec, tm->box);
+
+    *core_data = tm;
+
+    return 0;
+
+out_err1:
+    v3_free_htable(tm->addr_ctxt, 0, 0);
+out_err:
+    V3_Free(tm);
+    return -1;
+}
+
+
+static int 
+deinit_trans_mem (struct v3_vm_info * vm, void * priv_data) 
+{
+    struct v3_tm_state * tms = (struct v3_tm_state *)priv_data;
+
+    if (v3_remove_hypercall(vm, TM_KICKBACK_CALL) == -1) {
+        PrintError(vm, VCORE_NONE, "Problem removing TM hypercall\n");
+        return -1;
+    }
+
+    v3_lock_deinit(&(tms->lock));
+
+    if (tms) {
+        V3_Free(tms);
+    }
+
+    return 0;
+}
+
+
+static int 
+deinit_trans_mem_core (struct guest_info * core, 
+                       void * priv_data, 
+                       void * core_data) 
+{
+    struct v3_trans_mem * tm = (struct v3_trans_mem *)core_data;
+    struct hashtable_iter * ctxt_iter = NULL;
+
+    v3_clear_tm_lists(tm);
+
+    if (tm->staging_page) {
+        PrintError(core->vm_info, core,"++ TM DEINIT CORE ++ WARNING: staging page not freed!\n");
+    }
+
+    ctxt_iter = v3_create_htable_iter(tm->addr_ctxt);
+    if (!ctxt_iter) {
+        PrintError(core->vm_info, core, "++ TM DEINIT CORE ++ could not create htable iterator\n");
+        return -1;
+    }
+
+    /* delete all context entries for each hashed address */
+    while (ctxt_iter->entry) {
+        struct hash_chain * tmp;
+        struct hash_chain * curr;
+        struct list_head * chain_list;
+        addr_t gva;
+
+        gva = (addr_t)v3_htable_get_iter_key(ctxt_iter);
+        chain_list = (struct list_head *)v3_htable_get_iter_value(ctxt_iter);
+
+        /* delete the context */
+        list_for_each_entry_safe(curr, tmp, chain_list, lt_node) {
+            tm_del_stale_ctxt(curr);
+        }
+
+        v3_htable_iter_advance(ctxt_iter);
+    }
+
+    v3_destroy_htable_iter(ctxt_iter);
+
+    /* we've already deleted the values in this one */
+    v3_free_htable(tm->addr_ctxt, 0, 0);
+
+    /* KCH WARNING: we may not want to free access type values here */
+    v3_free_htable(tm->access_type, 1, 0);
+
+    v3_lock_deinit(&(tm->addr_ctxt_lock));
+    v3_lock_deinit(&(tm->access_type_lock));
+
+    if (tm) {
+        V3_Free(tm);
+    }
+
+    return 0;
+}
+
+
/* extension registration table: VM-wide and per-core init/deinit hooks.
 * No global init and no entry/exit hooks are needed — TM exits are
 * dispatched directly from the SVM exit handler. */
static struct v3_extension_impl trans_mem_impl = {
    .name = "trans_mem",
    .init = NULL,
    .vm_init = init_trans_mem,
    .vm_deinit = deinit_trans_mem,
    .core_init = init_trans_mem_core,
    .core_deinit = deinit_trans_mem_core,
    .on_entry = NULL,
    .on_exit = NULL
};
+
+register_extension(&trans_mem_impl);
+
+
/* XEND: commit the current transaction.
 *
 * entry conditions
 * tms->on => commit our list, free sp, clear our lists, clr_tm will handle global state, then gc
 * tms->off => commit our list, free sp, clear our lists, clr_tm will handle global state, then gc
 *
 * Returns 0 on success (including the #UD-injection path), -1 on failure.
 */
static int 
tm_handle_xend (struct guest_info * core,
                struct v3_trans_mem * tm)
{
    /* timestamp the transaction exit for stats */
    rdtscll(tm->exit_time);

    // Error checking! make sure that we have gotten here in a legitimate manner
    if (tm->TM_MODE != TM_ON) {
        /* XEND outside a transaction: tear down any partial state and
         * reflect #UD into the guest */
        TM_ERR(core, UD, "Encountered XEND while not in a transactional region\n");
        v3_free_staging_page(tm);
        v3_clr_vtlb(core);
        v3_clear_tm_lists(tm);
        v3_raise_exception(core, UD_EXCEPTION);
        return 0;
    }

    /* Our transaction finished! */
    /* Copy over data from the staging page */
    TM_DBG(core, UD,"Copying data from our staging page back into 'real' memory\n");

    /* atomically write the buffered transactional stores back to guest memory */
    if (commit_list(core, tm) == -1) {
        TM_ERR(core,UD,"error commiting tm list to memory\n");
        return -1;
    }

    TM_DBG(core,UD,"Freeing staging page and internal data structures\n");

    // Free the staging page
    if (v3_free_staging_page(tm) == -1) {
        TM_ERR(core,XEND,"couldnt free staging page\n");
        return -1;
    }

    // clear vtlb, as it may still contain our staging page
    if (v3_clr_vtlb(core) == -1) {
        TM_ERR(core,XEND,"couldnt clear vtlb\n");
        return -1;
    }

    // Clear the lists
    v3_clear_tm_lists(tm);

    /* Set the state and advance the RIP */
    TM_DBG(core,XEND,"advancing rip to %llx\n", core->rip + XEND_INSTR_LEN);
    core->rip += XEND_INSTR_LEN; 

    /* take this core out of TM mode (updates global state too) */
    v3_clr_tm(tm);

    // time to garbage collect
    v3_tm_inc_tnum(tm);
    if (tm_hash_gc(tm) == -1) {
        TM_ERR(core,XEND,"could not gc!\n");
        return -1;
    }

    return 0;
}
+
+
+/* entry conditions
+ * tms->on => handle our abort code, handle_trans_abort will clear necessary state
+ * tms->off => handle our abort code, handle_trans_abort will clear necessary state
+ */
+static int
+tm_handle_xabort (struct guest_info * core,
+                  struct v3_trans_mem * tm)
+{
+        /* TODO: this probably needs to move somewhere else */
+        rdtscll(tm->exit_time);
+
+        // Error checking! make sure that we have gotten here in a legitimate manner
+        if (tm->TM_MODE != TM_ON) {
+            TM_DBG(core, UD, "We got here while not in a transactional core!\n");
+            v3_raise_exception(core, UD_EXCEPTION);
+        }
+
+        TM_DBG(core,UD,"aborting\n");
+
+        if (tm->TM_STATE != TM_NULL) {
+            v3_restore_dirty_instr(core);
+        }
+
+        // Handle the exit
+        v3_handle_trans_abort(core);
+
+        return 0;
+}
+
+
+/* entry conditions
+ * tms->on => we set up our running env, set_tm will clear other vtlb's to start single stepping
+ * tms->off => we set up our running env, set_tm will not clear anyone elses vtlb
+ */
+static int
+tm_handle_xbegin (struct guest_info * core,
+                  struct v3_trans_mem * tm,
+                  uchar_t * instr)
+{
+    sint32_t rel_addr = 0;
+
+    if (tm->TM_MODE == TM_ON) {
+        TM_ERR(core,UD,"We got here while already in a transactional region!");
+        v3_raise_exception(core, UD_EXCEPTION);
+    }
+
+    rdtscll(tm->entry_time);
+    tm->entry_exits = core->num_exits;
+
+    /* set the tm_mode for this core */
+    v3_set_tm(tm);
+
+    TM_DBG(core,UD,"Set the system in TM Mode, save fallback address");
+
+    // Save the fail_call address (first 2 bytes = opcode, last 4 = fail call addr)
+    rel_addr = *(sint32_t*)(instr+2);
+    tm->fail_call = core->rip + XBEGIN_INSTR_LEN + rel_addr;
+
+    TM_DBG(core,UD,"we set fail_call to %llx, rip is %llx, rel_addr is %x", (uint64_t)tm->fail_call,(uint64_t)core->rip,rel_addr);
+
+    /* flush the shadow page tables */
+    TM_DBG(core,UD,"Throwing out the shadow table");
+    v3_clr_vtlb(core);
+
+    // Increase RIP, ready to go to next instruction
+    core->rip += XBEGIN_INSTR_LEN; 
+
+    return 0;
+}
+
+
+/* entry conditions
+ * tms->on => we set up our running env, set_tm will clear other vtlb's to start single stepping
+ * tms->off => we set up our running env, set_tm will not clear anyone elses vtlb
+ */
+static int
+tm_handle_xtest (struct guest_info * core,
+                 struct v3_trans_mem * tm)
+{
+    // if we are in tm mode, set zf to 0, otherwise 1
+    if (tm->TM_MODE == TM_ON) {
+        core->ctrl_regs.rflags &= ~(1ULL << 6);
+    } else {
+        core->ctrl_regs.rflags |= (1ULL << 6);
+    }
+
+    core->rip += XTEST_INSTR_LEN;
+
+    return 0;
+}
+
+
+/* instructions:
+ * XBEGIN c7 f8 rel32
+ * XABORT c6 f8 imm8
+ * XEND   0f 01 d5
+ */
+static int 
+tm_handle_ud (struct guest_info * core) 
+{
+    struct v3_trans_mem * tm = (struct v3_trans_mem *)v3_get_ext_core_state(core, "trans_mem");
+    uchar_t instr[INSTR_BUF_SZ];
+    uint8_t byte1, byte2, byte3;
+
+    tm_read_instr(core, (addr_t)core->rip, instr, INSTR_BUF_SZ);
+
+    byte1 = *(uint8_t *)((addr_t)instr);
+    byte2 = *(uint8_t *)((addr_t)instr + 1);
+    byte3 = *(uint8_t *)((addr_t)instr + 2);
+
+
+    if (byte1 == 0xc7 && byte2 == 0xf8) {  /* third byte is an immediate */
+
+        TM_DBG(core,UD,"Encountered Haswell-specific XBEGIN %x %x %d at %llx", byte1, byte2, byte3, (uint64_t)core->rip);
+
+        if (tm_handle_xbegin(core, tm, instr) == -1) {
+            TM_ERR(core, UD, "Problem handling XBEGIN\n");
+            return -1;
+        }
+
+    } else if (byte1 == 0xc6 && byte2 == 0xf8) { /* third byte is an immediate */
+
+        TM_DBG(core, UD, "Encountered Haswell-specific XABORT %x %x %d at %llx\n", byte1, byte2, byte3, (uint64_t)core->rip);
+
+        if (tm_handle_xabort(core, tm) == -1) {
+            TM_ERR(core, UD, "Problem handling XABORT\n");
+            return -1;
+        }
+
+    } else if (byte1 == 0x0f && byte2 == 0x01 && byte3 == 0xd5) {
+
+        TM_DBG(core, UD, "Encountered Haswell-specific XEND %x %x %d at %llx\n", byte1, byte2, byte3, (uint64_t)core->rip);
+
+        if (tm_handle_xend(core, tm) == -1) {
+            TM_ERR(core, UD, "Problem handling XEND\n");
+            return -1;
+        }
+
+
+    } else if (byte1 == 0x0f && byte2 == 0x01 && byte3 == 0xd6) {  /* third byte is an immediate */
+
+        TM_DBG(core,UD,"Encountered Haswell-specific XTEST %x %x %x at %llx\n", byte1, byte2, byte3, (uint64_t)core->rip);
+
+        if (tm_handle_xtest(core, tm) == -1) {
+            TM_ERR(core, UD, "Problem handling XTEST\n");
+            return -1;
+        }
+
+    } else {
+
+        /* oh no, this is still unknown, pass the error back to the guest! */
+        TM_DBG(core,UD,"Encountered:%x %x %x\n", byte1, byte2, byte3);
+        v3_raise_exception(core, UD_EXCEPTION);
+    }
+
+    return 0;
+}
+
+
+int 
+v3_tm_handle_exception (struct guest_info * info,
+                        addr_t exit_code)
+{
+    struct v3_trans_mem * tm = (struct v3_trans_mem *)v3_get_ext_core_state(info, "trans_mem");
+
+    if (!tm) {
+        PrintError(info->vm_info, info, "++ TM ERR ++ TM extension state not found\n");
+        return -1;
+    } 
+
+    switch (exit_code) {
+        /* any of these exceptions should abort current transactions */
+        case SVM_EXIT_EXCP6:
+            if (tm_handle_ud(info) == -1) {
+                return -1;
+            }
+            break;
+        case SVM_EXIT_EXCP0:
+            if (tm->TM_MODE != TM_ON) {
+                v3_raise_exception(info, DE_EXCEPTION);
+            }
+            else {
+                PrintDebug(info->vm_info, info,"-- TM EXCP -- aborting due to DE exception\n");
+                v3_handle_trans_abort(info);
+            }
+            break;
+        case SVM_EXIT_EXCP1:
+            if (tm->TM_MODE != TM_ON) {
+                v3_raise_exception(info, DB_EXCEPTION);
+            }
+            else {
+                PrintDebug(info->vm_info, info,"-- TM EXCP -- aborting due to DB exception\n");
+                v3_handle_trans_abort(info);
+            }
+            break;
+        case SVM_EXIT_EXCP3:
+            if (tm->TM_MODE != TM_ON) {
+                v3_raise_exception(info, BP_EXCEPTION);
+            }
+            else {
+                PrintDebug(info->vm_info, info,"-- TM EXCP -- aborting due to BP exception\n");
+                v3_handle_trans_abort(info);
+            }
+            break;
+        case SVM_EXIT_EXCP4:
+            if (tm->TM_MODE != TM_ON) {
+                v3_raise_exception(info, OF_EXCEPTION);
+            }
+            else {
+                PrintDebug(info->vm_info, info,"-- TM EXCP -- aborting due to OF exception\n");
+                v3_handle_trans_abort(info);
+            }
+            break;
+        case SVM_EXIT_EXCP5:
+            if (tm->TM_MODE != TM_ON) {
+                v3_raise_exception(info, BR_EXCEPTION);
+            }
+            else {
+                PrintDebug(info->vm_info, info,"-- TM EXCP -- aborting due to BR exception\n");
+                v3_handle_trans_abort(info);
+            }
+            break;
+        case SVM_EXIT_EXCP7:
+            if (tm->TM_MODE != TM_ON) {
+                v3_raise_exception(info, NM_EXCEPTION);
+            }
+            else {
+                PrintDebug(info->vm_info, info,"-- TM EXCP -- aborting due to NM exception\n");
+                v3_handle_trans_abort(info);
+            }
+            break;
+        case SVM_EXIT_EXCP10:
+            if (tm->TM_MODE != TM_ON) {
+                v3_raise_exception(info, TS_EXCEPTION);
+            }
+            else {
+                PrintDebug(info->vm_info, info,"-- TM EXCP -- aborting due to TS exception\n");
+                v3_handle_trans_abort(info);
+            }
+            break;
+        case SVM_EXIT_EXCP11:
+            if (tm->TM_MODE != TM_ON) {
+                v3_raise_exception(info, NP_EXCEPTION);
+            }
+            else {
+                PrintDebug(info->vm_info, info,"-- TM EXCP -- aborting due to NP exception\n");
+                v3_handle_trans_abort(info);
+            }
+            break;
+        case SVM_EXIT_EXCP12:
+            if (tm->TM_MODE != TM_ON) {
+                v3_raise_exception(info, SS_EXCEPTION);
+            }
+            else {
+                PrintDebug(info->vm_info, info,"-- TM EXCP -- aborting due to SS exception\n");
+                v3_handle_trans_abort(info);
+            }
+            break;
+        case SVM_EXIT_EXCP13:
+            if (tm->TM_MODE != TM_ON) {
+                v3_raise_exception(info, GPF_EXCEPTION);
+            }
+            else {
+                PrintDebug(info->vm_info, info,"-- TM EXCP -- aborting due to GPF exception\n");
+                v3_handle_trans_abort(info);
+            }
+            break;
+        case SVM_EXIT_EXCP16:
+            if (tm->TM_MODE != TM_ON) {
+                v3_raise_exception(info, MF_EXCEPTION);
+            }
+            else {
+                PrintDebug(info->vm_info, info,"-- TM EXCP -- aborting due to MF exception\n");
+                v3_handle_trans_abort(info);
+            }
+            break;
+        case SVM_EXIT_EXCP17:
+            if (tm->TM_MODE != TM_ON) {
+                v3_raise_exception(info, AC_EXCEPTION);
+            }
+            else {
+                PrintDebug(info->vm_info, info,"-- TM EXCP -- aborting due to AC exception\n");
+                v3_handle_trans_abort(info);
+            }
+            break;
+        case SVM_EXIT_EXCP19:
+            if (tm->TM_MODE != TM_ON) {
+                v3_raise_exception(info, XF_EXCEPTION);
+            }
+            else {
+                PrintDebug(info->vm_info, info,"-- TM EXCP -- aborting due to XF exception\n");
+                v3_handle_trans_abort(info);
+            }
+            break;
+
+            PrintDebug(info->vm_info, info,"-- TM EXCP -- exception # %d\n", (int)exit_code - 0x40);
+    }
+
+    return 0;
+}
+
+
/* Turn on VMCB interception for every exception vector that must abort a
 * running transaction (dispatched in v3_tm_handle_exception). The numeric
 * comments are the x86 exception vector numbers. */
void 
v3_tm_set_excp_intercepts (vmcb_ctrl_t * ctrl_area) 
{
    ctrl_area->exceptions.de = 1; // 0  : divide by zero
    ctrl_area->exceptions.db = 1; // 1  : debug
    ctrl_area->exceptions.bp = 1; // 3  : breakpoint
    ctrl_area->exceptions.of = 1; // 4  : overflow
    ctrl_area->exceptions.br = 1; // 5  : bound range
    ctrl_area->exceptions.ud = 1; // 6  : undefined opcode
    ctrl_area->exceptions.nm = 1; // 7  : device not available
    ctrl_area->exceptions.ts = 1; // 10 : invalid tss
    ctrl_area->exceptions.np = 1; // 11 : segment not present
    ctrl_area->exceptions.ss = 1; // 12 : stack
    ctrl_area->exceptions.gp = 1; // 13 : general protection
    ctrl_area->exceptions.mf = 1; // 16 : x87 exception pending
    ctrl_area->exceptions.ac = 1; // 17 : alignment check
    ctrl_area->exceptions.xf = 1; // 19 : simd floating point
}
+
+
+extern void v3_stgi();
+extern void v3_clgi();
+
/* 441-tm: if we are in TM mode, we need to check for any interrupts here,
 * and if there are any, need to do some aborting! Make sure not to die here
 * if we are already 'aborting', this results in infiloop
 *
 * NOTE(review): the interrupt-abort body below is currently compiled out
 * by the `0 &&` guard (see the TODO), so on the normal path this function
 * is a no-op; only the missing-extension error path toggles GIF.
 */
void 
v3_tm_check_intr_state (struct guest_info * info, 
                        vmcb_ctrl_t * guest_ctrl,
                        vmcb_saved_state_t * guest_state)
                        
{
    struct v3_trans_mem * tm = (struct v3_trans_mem *)v3_get_ext_core_state(info, "trans_mem");

    if (!tm) {
        PrintError(info->vm_info, info,"++ : SVM ++ TM extension state not found\n");
        v3_stgi();
        return;
    }

    /* TODO: work this in */
    if (0 && (tm->TM_MODE == TM_ON) && 
             (tm->TM_ABORT != 1)) {

        /* a virtual IRQ or pending event injection means the guest would
         * observe an interrupt mid-transaction — abort instead */
        if (guest_ctrl->guest_ctrl.V_IRQ ||
            guest_ctrl->EVENTINJ.valid) {

            rdtscll(tm->exit_time);
            /* NOTE(review): entry_time - exit_time is negative here since
             * exit_time is sampled later — looks like the operands are
             * swapped; confirm before enabling this path */
            PrintDebug(info->vm_info, info,"+=+ TM INTR DEBUG +=+ %lld exits happened, time delta is %lld",(info->num_exits - tm->entry_exits),(tm->entry_time - tm->exit_time));

            // We do indeed have pending interrupts
            v3_stgi();
            PrintDebug(info->vm_info, info,"-- ITR -- we have a pending interrupt!\n");

            v3_handle_trans_abort(info);
            // Copy new RIP state into arch dependent structure
            guest_state->rip = info->rip;
            PrintDebug(info->vm_info, info,"currently guest state rip is %llx\n",(uint64_t)guest_state->rip);
            v3_clgi();
        }

    }

}
+
+
+int
+v3_tm_handle_pf_64 (struct guest_info * info,
+                    pf_error_t error_code,
+                    addr_t fault_addr,
+                    addr_t * page_to_use)
+{
+    struct v3_trans_mem * tm = (struct v3_trans_mem *)v3_get_ext_core_state(info, "trans_mem");
+    struct v3_tm_state * tms = (struct v3_tm_state *)v3_get_extension_state(info->vm_info, "trans_mem");
+
+    if (!tm) {
+        PrintError(info->vm_info, info, "+++ TM +++ ERROR: couldn't get tm core state\n");
+        return -1;
+    }
+
+    if (!tms) {
+        PrintError(info->vm_info, info, "+++ TM +++ ERROR: couldn't get tm global state\n");
+        return -1;
+    }
+
+    if ((tms->TM_MODE == TM_ON) && 
+            (error_code.user == 1)) {
+
+        PrintDebug(info->vm_info, info,"++ TM #PF ++ Core reporting in, got a #PF (tms->mode is %d)\n", tms->TM_MODE);
+
+        *page_to_use = v3_handle_trans_mem_fault(info, fault_addr,  error_code);
+
+        if (*page_to_use == ERR_TRANS_FAULT_FAIL){
+            PrintError(info->vm_info, info, "could not handle transaction page fault\n");
+            return -1;
+        }
+
+        if ((tm->TM_MODE == TM_ON) && 
+                (tm->staging_page == NULL)) {
+
+            tm->staging_page = V3_AllocPages(1);
+
+            if (!(tm->staging_page)) {
+                PrintError(info->vm_info, info,"++ TM MMU ++ Problem allocating staging page\n");
+                return -1;
+            }
+
+            PrintDebug(info->vm_info, info,"-- TM MMU -- Created staging page at %p\n", (void *)tm->staging_page);
+        }
+    }
+
+    return 0;
+}
+
+
+void 
+v3_tm_handle_usr_tlb_miss (struct guest_info * info,
+                           pf_error_t error_code,
+                           addr_t page_to_use,
+                           addr_t * shadow_pa)
+{
+    struct v3_trans_mem * tm = (struct v3_trans_mem *)v3_get_ext_core_state(info, "trans_mem");
+
+    /* TLB miss from user */
+    if ((tm->TM_MODE == TM_ON) && 
+            (error_code.user == 1)) {
+
+        if (page_to_use > TRANS_FAULT_OK) {
+            PrintDebug(info->vm_info, info, "-- TM MMU -- Using alternate page at: %llx\n", (uint64_t)page_to_use);
+            *shadow_pa = page_to_use;
+        }
+
+    }
+
+}
+
+
+void
+v3_tm_handle_read_fault (struct guest_info * info,
+                         pf_error_t error_code,
+                         pte64_t * shadow_pte)
+{
+    struct v3_trans_mem * tm = (struct v3_trans_mem *)v3_get_ext_core_state(info, "trans_mem");
+    struct v3_tm_state * tms = (struct v3_tm_state *)v3_get_extension_state(info->vm_info, "trans_mem");
+
+    // If we are about to read, make it read only 
+    if ((tms->TM_MODE == TM_ON) && 
+        (tm->TM_STATE == TM_EXEC) && 
+        (error_code.write == 0) && 
+        (error_code.user == 1)) {
+
+        PrintDebug(info->vm_info, info, "-- TM MMU -- Flagging the page read only\n");
+        shadow_pte->writable = 0;
+    }
+}
+
+
+int 
+v3_tm_decode_rtm_instrs (struct guest_info * info,
+                         addr_t instr_ptr,
+                         struct x86_instr * instr)
+{
+    uint8_t byte1, byte2, byte3;
+    struct v3_trans_mem * tm = (struct v3_trans_mem *)v3_get_ext_core_state(info, "trans_mem");
+
+    if (tm->TM_MODE == TM_ON) {
+
+        byte1 = *(uint8_t *)(instr_ptr);
+        byte2 = *(uint8_t *)(instr_ptr + 1);
+        byte3 = *(uint8_t *)(instr_ptr + 2);
+
+        if (byte1 == 0xc7 && 
+            byte2 == 0xf8) {  /* third byte is an immediate */
+
+            TM_DBG(info, DECODE,"Decoding XBEGIN %x %x %d\n", byte1, byte2, byte3);
+            instr->instr_length = 6;
+            return 0;
+
+        } else if (byte1 == 0xc6 && 
+                   byte2 == 0xf8) { /* third byte is an immediate */
+
+            TM_DBG(info, DECODE, "Decoding XABORT %x %x %d\n", byte1, byte2, byte3);
+            instr->instr_length = 3;
+            return 0;
+
+        } else if (byte1 == 0x0f && 
+                   byte2 == 0x01 && 
+                   byte3 == 0xd5) {
+
+            TM_DBG(info, DECODE, "Decoding XEND %x %x %x\n", byte1, byte2, byte3);
+            instr->instr_length = 3;
+            return 0;
+
+        }
+
+    }
+
+    return 0;
+}
+
diff --git a/palacios/src/extensions/tm_util.c b/palacios/src/extensions/tm_util.c
new file mode 100644 (file)
index 0000000..2401441
--- /dev/null
@@ -0,0 +1,320 @@
+/* 
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National 
+ * Science Foundation and the Department of Energy.  
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico.  You can find out more at 
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
+ * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
+ * All rights reserved.
+ *
+ * Author:  Maciek Swiech <dotpyfe@u.northwestern.edu>
+ *          Marcel Flores <marcel-flores@u.northwestern.edu>
+ *          Zachary Bischof <zbischof@u.northwestern.edu>
+ *          Kyle C. Hale <kh@u.northwestern.edu>
+ *
+ * This is free software.  You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
+
+#include <palacios/vmm_mem.h>
+#include <palacios/vmm.h>
+#include <palacios/vmcb.h>
+#include <palacios/vmm_decoder.h>
+#include <palacios/vm_guest_mem.h>
+#include <palacios/vmm_ctrl_regs.h>
+#include <palacios/vmm_direct_paging.h>
+#include <palacios/svm.h>
+#include <palacios/vmm_excp.h>
+#include <palacios/vmm_list.h>
+#include <palacios/vmm_hashtable.h>
+
+#include <extensions/trans_mem.h>
+#include <extensions/tm_util.h>
+
+extern void v3_stgi();
+extern void v3_clgi();
+
+#if !V3_CONFIG_DEBUG_TM_FUNC
+#undef PrintDebug
+#define PrintDebug(fmt, args...)
+#endif
+
+/* TM Read/Write List data structure and API *********************************
+ */
+
+static void free_mem_op_list (struct list_head * list) {
+    struct mem_op * curr = NULL;
+    struct mem_op * tmp  = NULL;
+
+    list_for_each_entry_safe(curr, tmp, list, op_node) {
+        list_del(&(curr->op_node));
+        V3_Free(curr);
+    }
+}
+
+
+void v3_clear_tm_lists (struct v3_trans_mem * tm) {
+    free_mem_op_list(&(tm->trans_w_list));
+    free_mem_op_list(&(tm->trans_r_list));
+}
+
+
+int add_mem_op_to_list (struct list_head * list, addr_t guest_addr) {
+    struct mem_op * new;
+
+    new = list_contains_guest_addr(list, guest_addr);
+
+    if (new) {
+        new->current = 0;
+        return 0;
+    }
+
+    new = (struct mem_op *)V3_Malloc(sizeof(struct mem_op));
+    if (!new) {
+        return -1;
+    }
+
+    new->guest_addr = guest_addr;
+    new->current    = 0;
+
+    list_add_tail(&(new->op_node), list);
+
+    return 0;
+}
+
+
+struct mem_op * list_contains_guest_addr (struct list_head * list, addr_t guest_addr) {
+    struct mem_op * curr = NULL;
+    struct mem_op * tmp = NULL;
+
+    list_for_each_entry_safe(curr, tmp, list, op_node) {
+        if (curr->guest_addr == guest_addr) {
+            return curr;
+        }
+    }
+
+    return NULL;
+}
+
+
+
+
+int update_list(struct v3_trans_mem * tm, struct list_head * list) {
+    struct mem_op * curr = NULL;
+    struct mem_op * tmp  = NULL;
+    void * sp_loc;
+    addr_t v_sp_loc;
+
+    list_for_each_entry_safe(curr, tmp, list, op_node) {
+        if (!curr->current) {
+            /* we do not have the most current entry! grab it from the staging
+             * page
+             */
+            sp_loc = (void *)((addr_t)(tm->staging_page) + (curr->guest_addr % PAGE_SIZE));
+            if ((curr->guest_addr % PAGE_SIZE_4KB) > (PAGE_SIZE_4KB - 8)) {
+                PrintError(tm->ginfo->vm_info, tm->ginfo,"++ TM UDATE LIST ++ data ref spans page boundary\n");
+                return -1;
+            }
+
+            if (v3_hpa_to_hva((addr_t)(sp_loc), &v_sp_loc) == -1) {
+                PrintError(tm->ginfo->vm_info, tm->ginfo,"Could not convert address on staging page to virtual address\n");
+                return -1;
+            }
+
+            memcpy((void*)(&(curr->data)), (void*)v_sp_loc, sizeof(uint64_t));
+            curr->current = 1;
+        }
+    }
+
+    return 0;
+}
+
+
+int stage_entry (struct v3_trans_mem * tm, struct list_head * list, addr_t guest_addr) {
+    void * sp_loc;
+    addr_t v_sp_loc;
+    struct mem_op * curr = list_contains_guest_addr(list, guest_addr);
+
+    if (!curr) {
+        PrintDebug(tm->ginfo->vm_info, tm->ginfo,"tried to stage entry from addr %p that doesn't exist in this list\n", (void*)guest_addr);
+        return -1;
+    }
+
+    sp_loc = (void*)((addr_t)(tm->staging_page) + (guest_addr % PAGE_SIZE_4KB));
+
+    if ((curr->guest_addr % PAGE_SIZE_4KB) > (PAGE_SIZE_4KB - 8)) {
+        PrintError(tm->ginfo->vm_info, tm->ginfo,"++ TM UDATE LIST ++ data ref spans page boundary\n");
+        return -1;
+    }
+
+    if (v3_hpa_to_hva((addr_t)(sp_loc), &v_sp_loc) == -1) {
+        PrintError(tm->ginfo->vm_info, tm->ginfo,"Could not convert address on staging page to virt addr\n");
+        return -1;
+    }
+
+    /* write data back to the data page */
+    memcpy((void*)v_sp_loc,(void*)(&(curr->data)), sizeof(uint64_t));
+
+    /* mark entry as not current so we grab it back later */
+    curr->current = 0;
+    return 0;
+}
+
+
+int copy_add_entry(struct list_head * list, addr_t guest_addr, uint64_t data){
+    struct mem_op * new;
+
+    // Don't repeatedly add
+    new = list_contains_guest_addr(list, guest_addr);
+
+    if (new) {
+        new->current = 1;
+        new->data = data;
+    } else {
+        new = (struct mem_op*)V3_Malloc(sizeof(struct mem_op));
+
+        if (!new) {
+            return -1;
+        }
+
+        new->guest_addr = guest_addr;
+        new->current = 1;
+        new->data = data;
+        list_add_tail(&(new->op_node), list);
+    }
+    return 0;
+}
+
+
+int commit_list(struct guest_info * core, struct v3_trans_mem * tm) {
+    // We should not be interruptable here, needs to happen atomically
+    PrintDebug(core->vm_info, core,"-- TM COMMIT -- commiting data\n");
+    v3_clgi();
+
+    struct mem_op * curr = NULL;
+    struct mem_op * tmp  = NULL;
+
+    list_for_each_entry_safe(curr, tmp, &(tm->trans_w_list), op_node) {
+        addr_t v_ga_loc;
+        
+        if (v3_gva_to_hva(core, (addr_t)(curr->guest_addr), &v_ga_loc) == -1) {
+            PrintError(core->vm_info, core,"Could not translate gva to hva\n");
+            return -1;
+        }        
+
+        PrintDebug(core->vm_info, core,"\tValue being copied: %p\n", (void*)(curr->data));
+        memcpy((void*)v_ga_loc, (void*)(&(curr->data)) , sizeof(uint64_t));
+    }
+
+    v3_stgi();
+    return 0;
+}
+
+
+int v3_copy_lists(struct guest_info *core) {
+    PrintError(core->vm_info, core, "TM: unimplemented (%s)\n", __FUNCTION__);
+    return -1;
+}
+
+
+/* TM State functions ********************************************************
+ *
+ * int v3_set_tm(struct guest_info *core)
+ * int v3_clr_tm(struct guest_info *core)
+ * int v3_clr_vtlb(struct guest_info *core)
+ * int v3_tm_set_abrt(struct guest_info *core)
+ *
+ */
+
+int v3_set_tm (struct v3_trans_mem * tm) {
+    struct v3_tm_state * tms = (struct v3_tm_state *)v3_get_extension_state(tm->ginfo->vm_info, "trans_mem");
+    if (tm->TM_MODE == TM_ON) {
+        PrintError(tm->ginfo->vm_info, tm->ginfo,"++ TM SET ++ tried to set tm but it was already on\n");
+        return -1;
+    }
+
+    tm->TM_MODE = TM_ON;
+    tm->TM_STATE = TM_NULL;
+
+    addr_t flags;
+    enum TM_MODE_E sys_tm;
+    
+    flags = v3_lock_irqsave(tms->lock);
+    (tms->cores_active)++;
+    sys_tm = tms->TM_MODE;
+    v3_unlock_irqrestore(tms->lock, flags);
+
+    // need to flush everyone elses VTLB to get them to start single stepping IF THEY ARENT ALREADY
+
+    if (sys_tm == TM_OFF) {
+        int core_num;
+        for (core_num = 0; core_num < tm->ginfo->vm_info->num_cores; core_num++) {
+            if (core_num == tm->ginfo->vcpu_id) {
+                continue;
+            }
+
+            struct guest_info * r_core = &(tm->ginfo->vm_info->cores[core_num]);
+
+            // TODO: what if this happens at an inopportune time?
+            v3_clr_vtlb(r_core);
+        }
+    }
+    flags = v3_lock_irqsave(tms->lock);
+    tms->TM_MODE = TM_ON;
+    v3_unlock_irqrestore(tms->lock, flags);
+
+    return 0;
+}
+
+int v3_clr_tm (struct v3_trans_mem * tm) {
+    PrintDebug(tm->ginfo->vm_info, tm->ginfo,"++ CLR TM ++ clearing tm state\n");
+
+    struct v3_tm_state * tms = (struct v3_tm_state *)v3_get_extension_state(tm->ginfo->vm_info, "trans_mem");
+    tm->TM_MODE = TM_OFF;
+    tm->TM_STATE = TM_NULL;
+    tm->cur_instr_len = -1;
+
+    // last core to turn off?
+    addr_t flags;
+    int num_act;
+    
+    flags = v3_lock_irqsave(tms->lock);
+    num_act = --(tms->cores_active);
+    v3_unlock_irqrestore(tms->lock, flags);
+
+    if (num_act == 0) {
+        PrintDebug(tm->ginfo->vm_info, tm->ginfo,"++ CLR TM ++ we are the last tm->ginfo in TM, turn off system state\n");
+        tms->TM_MODE = TM_OFF;
+    }
+    return 1;
+}
+
+int v3_clr_vtlb (struct guest_info * core) {
+    PrintDebug(core->vm_info, core,"++ TM VTLB ++ flushing core %d's VTLB\n", core->vcpu_id);
+    v3_invalidate_shadow_pts(core);
+    return 0;
+}
+
+/*
+int v3_tm_set_abrt(struct v3_trans_mem * tm) {
+    tm->TM_STATE = TM_ABORT;
+    return 0;
+}
+*/
+
+/* TM extra ******************************************************************
+ */
+
+int v3_free_staging_page(struct v3_trans_mem * tm) {
+    if (!(tm->staging_page)) {
+        PrintDebug(tm->ginfo->vm_info, tm->ginfo,"++ %d : TM FREE ++ tried to dealloc null staging page\n", tm->ginfo->vcpu_id);
+        return 0;
+    }
+    V3_FreePages(tm->staging_page, 1);
+    tm->staging_page = NULL; 
+    return 0;
+}
index 76d22f9..0d809af 100644 (file)
@@ -17,6 +17,9 @@
  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
  */
 
+#ifdef V3_CONFIG_TM_FUNC
+#include <extensions/trans_mem.h>
+#endif
 
 static inline int activate_shadow_pt_64(struct guest_info * info) {
     struct cr3_64 * shadow_cr3 = (struct cr3_64 *)&(info->ctrl_regs.cr3);
@@ -465,11 +468,17 @@ static int handle_pte_shadow_pagefault_64(struct guest_info * info, addr_t fault
        return 0;
     }
 
-
     if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Page Table Entry Not Present
        PrintDebug(info->vm_info, info, "guest_pa =%p\n", (void *)guest_pa);
 
+#ifdef V3_CONFIG_TM_FUNC
+        addr_t page_to_use;
+        if (v3_tm_handle_pf_64(info, error_code, fault_addr, &page_to_use) == -1) {
+            return -1;
+        }
+#endif
+
        if ((shdw_reg->flags.alloced == 1) ||
            (shdw_reg->flags.read == 1)) {
            addr_t shadow_pa = 0;
@@ -479,6 +488,10 @@ static int handle_pte_shadow_pagefault_64(struct guest_info * info, addr_t fault
                return -1;
            }
 
+#ifdef V3_CONFIG_TM_FUNC
+            v3_tm_handle_usr_tlb_miss(info, error_code, page_to_use, &shadow_pa);
+#endif
+
            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
       
            shadow_pte->present = guest_pte->present;
@@ -507,6 +520,9 @@ static int handle_pte_shadow_pagefault_64(struct guest_info * info, addr_t fault
                shadow_pte->writable = 0;
            }
 
+#ifdef V3_CONFIG_TM_FUNC
+        v3_tm_handle_read_fault(info, error_code, shadow_pte);
+#endif
        } else {
            // Pass to unhandled call back
            if (shdw_reg->unhandled(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
index 05d4b7a..ede619b 100644 (file)
@@ -53,6 +53,9 @@
 
 #include <palacios/vmm_sprintf.h>
 
+#ifdef V3_CONFIG_TM_FUNC
+#include <extensions/trans_mem.h>
+#endif
 
 #ifndef V3_CONFIG_DEBUG_SVM
 #undef PrintDebug
@@ -60,6 +63,7 @@
 #endif
 
 
+
 uint32_t v3_last_exit;
 
 // This is a global pointer to the host's VMCB
@@ -155,6 +159,10 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
       
       ctrl_area->exceptions.nmi = 1;
     */
+
+#ifdef V3_CONFIG_TM_FUNC
+    v3_tm_set_excp_intercepts(ctrl_area);
+#endif
     
 
     ctrl_area->instrs.NMI = 1;
@@ -677,6 +685,10 @@ int v3_svm_enter(struct guest_info * info) {
     update_irq_entry_state(info);
 #endif
 
+#ifdef V3_CONFIG_TM_FUNC
+    v3_tm_check_intr_state(info, guest_ctrl, guest_state);
+#endif
+
 
     /* ** */
 
index 7411e4f..48509be 100644 (file)
 #include <palacios/vmm_cpuid.h>
 #include <palacios/vmm_direct_paging.h>
 
+#ifdef V3_CONFIG_TM_FUNC
+#include <extensions/trans_mem.h>
+#endif
+
 #ifndef V3_CONFIG_DEBUG_SVM
 #undef PrintDebug
 #define PrintDebug(fmt, args...)
@@ -56,8 +60,6 @@ int v3_handle_svm_exit(struct guest_info * info, addr_t exit_code, addr_t exit_i
     }
 #endif
 
-
-
     //    PrintDebug(info->vm_info, info, "SVM Returned: Exit Code: %p\n", (void *)exit_code); 
 
     switch (exit_code) {
@@ -202,6 +204,28 @@ int v3_handle_svm_exit(struct guest_info * info, addr_t exit_code, addr_t exit_i
            }
            break;
        } 
+
+#ifdef V3_CONFIG_TM_FUNC
+    case SVM_EXIT_EXCP6:
+    case SVM_EXIT_EXCP0:
+    case SVM_EXIT_EXCP1:
+    case SVM_EXIT_EXCP3:
+    case SVM_EXIT_EXCP4:
+    case SVM_EXIT_EXCP5:
+    case SVM_EXIT_EXCP7:
+    case SVM_EXIT_EXCP10:
+    case SVM_EXIT_EXCP11:
+    case SVM_EXIT_EXCP12:
+    case SVM_EXIT_EXCP13:
+    case SVM_EXIT_EXCP16:
+    case SVM_EXIT_EXCP17:
+    case SVM_EXIT_EXCP19:
+        if (v3_tm_handle_exception(info, exit_code) == -1) {
+            return -1;
+        }
+        break;
+#endif
+
        case SVM_EXIT_NPF: {
            addr_t fault_addr = exit_info2;
            pf_error_t * error_code = (pf_error_t *)&(exit_info1);
index acd9a64..befc9d1 100644 (file)
 #include <extensions/trans_mem.h>
 #endif
 
+#ifdef V3_CONFIG_DEBUG_TM_FUNC
+#define PrintTMDebug(...) V3_Print(__VA_ARGS__)
+#else
+#define PrintTMDebug(...)
+#endif
+
 #ifndef V3_CONFIG_DEBUG_DECODER
 #undef PrintDebug
 #define PrintDebug(fmt, args...)
@@ -152,35 +158,15 @@ int v3_decode(struct guest_info * info, addr_t instr_ptr, struct x86_instr * ins
     qx86_insn qx86_inst;
     uint8_t inst_buf[QX86_INSN_SIZE_MAX];
 
-    /* 441-tm: add 'escape' trap for Haswell instructions, dont want to stumble
-     * on them!
-     */
+    memset(instr, 0, sizeof(struct x86_instr));
+    memset(&qx86_inst, 0, sizeof(qx86_inst));
+
 #ifdef V3_CONFIG_TM_FUNC
-    {
-        struct v3_trans_mem * tm = (struct v3_trans_mem *)v3_get_ext_core_state(info, "trans_mem");
-        if (tm->TM_MODE == TM_ON) {
-          int byte1 = *(uint8_t *)(instr_ptr);
-          int byte2 = *(uint8_t *)(instr_ptr + 1);
-          int byte3 = *(uint8_t *)(instr_ptr + 2);
-          if (byte1 == 0xc7 && byte2 == 0xf8) {  /* third byte is an immediate */
-            //V3_Print("Decoding  %x %x %d\n", byte1, byte2, byte3);
-            instr->instr_length = 6;
-            return 0;
-          } else if (byte1 == 0xc6 && byte2 == 0xf8) { /* third byte is an immediate */
-            //V3_Print("Decoding XABORT %x %x %d\n", byte1, byte2, byte3);
-            instr->instr_length = 3;
-            return 0;
-          } else if (byte1 == 0x0f && byte2 == 0x01 && byte3 == 0xd5) {
-            //V3_Print("Decoding XEND %x %x %x\n", byte1, byte2, byte3);
-            instr->instr_length = 3;
-            return 0;
-          }
-        }
+    if (v3_tm_decode_rtm_instrs(info, instr_ptr, instr) == -1) {
+        return -1;
     }
 #endif
 
-    memset(instr, 0, sizeof(struct x86_instr));
-    memset(&qx86_inst, 0, sizeof(qx86_inst));
 
     v3_get_prefixes((uchar_t *)instr_ptr, &(instr->prefixes));
 
@@ -237,7 +223,7 @@ int v3_decode(struct guest_info * info, addr_t instr_ptr, struct x86_instr * ins
 
     // 441 - dump memory for quix86 debugging
     if ((instr->op_type = get_opcode(&qx86_inst,info)) == V3_INVALID_OP) {
-        PrintError(info->vm_info, info, "++==++ QX86 DECODE ++==++\n");
+        PrintError(info->vm_info, info, "++==++ QX86 DECODE ++==++, guest RIP: %llx\n", info->rip);
         v3_dump_mem((void *)instr_ptr, 15);
         PrintError(info->vm_info, info, "Could not get opcode. (mnemonic=%s)\n",
                 qx86_minfo(qx86_inst.mnemonic)->name);