}
+int v3_basic_mem_decode(struct guest_info * info, addr_t instr_ptr, uint_t * size, uint_t * instr_length) {
+ xed_decoded_inst_t xed_instr;
+ xed_error_enum_t xed_error;
+
+
+ if (set_decoder_mode(info, &decoder_state) == -1) {
+ PrintError("Could not set decoder mode\n");
+ return -1;
+ }
+
+
+ xed_decoded_inst_zero_set_mode(&xed_instr, &decoder_state);
+
+ xed_error = xed_decode(&xed_instr,
+ REINTERPRET_CAST(const xed_uint8_t *, instr_ptr),
+ XED_MAX_INSTRUCTION_BYTES);
+
+ if (xed_error != XED_ERROR_NONE) {
+ PrintError("Xed error: %s\n", xed_error_enum_t2str(xed_error));
+ return -1;
+ }
+
+ *instr_length = xed_decoded_inst_get_length(&xed_instr);
+
+
+ if (xed_decoded_inst_number_of_memory_operands(&xed_instr) == 0) {
+ PrintError("Tried to decode memory operation with no memory operands\n");
+ return -1;
+ }
+
+ *size = xed_decoded_inst_get_memory_operand_length(&xed_instr,0);
+
+ return 0;
+}
+
+
+
int v3_decode(struct guest_info * info, addr_t instr_ptr, struct x86_instr * instr) {
xed_decoded_inst_t xed_instr;
xed_error_enum_t xed_error;
# Makefile for GeekOS kernel, userspace, and tools
# Copyright (c) 2004,2005 David H. Hovemeyer <daveho@cs.umd.edu>
-# $Revision: 1.53 $
+# $Revision: 1.54 $
# This is free software. You are permitted to use,
# redistribute, and modify it as specified in the file "COPYING".
vmm_intr.c vmm_time.c \
vmm_shadow_paging.c vm_guest_mem.c \
vm_dev.c vmm_dev_mgr.c vmm_decoder.c \
- svm_halt.c svm_pause.c vmm_config.c \
+ svm_halt.c svm_pause.c vmm_config.c vmm_hashtable.c \
+ vmm_string.c \
$(DECODER_SRCS)
#\
# vmx.c vmcs_gen.c vmcs.c
/*
* String library
* Copyright (c) 2001,2004 David H. Hovemeyer <daveho@cs.umd.edu>
- * $Revision: 1.2 $
+ * $Revision: 1.3 $
*
* This is free software. You are permitted to use,
* redistribute, and modify it as specified in the file "COPYING".
int fprintf(FILE *file, char *fmt, ...);
//int fflush(FILE *file);
+
+
void abort (void) __attribute__ ((__noreturn__));
#define _tolower(c) ((c) + 'a' - 'A')
typedef enum {SHADOW_PAGING, NESTED_PAGING} vmm_paging_mode_t;
+typedef enum {VM_RUNNING, VM_STOPPED, VM_SUSPENDED, VM_ERROR, VM_EMULATING} vm_operating_mode_t;
+
+
typedef enum {REAL, /*UNREAL,*/ PROTECTED, PROTECTED_PAE, LONG, LONG_32_COMPAT, LONG_16_COMPAT} vm_cpu_mode_t;
typedef enum {PHYSICAL_MEM, VIRTUAL_MEM} vm_mem_mode_t;
+
+
struct guest_info {
ullong_t rip;
+ vm_operating_mode_t run_state;
void * vmm_data;
};
#include <palacios/vmm.h>
-typedef enum {INVALID_OPERAND, REG_OPERAND, MEM_OPERAND} operand_type_t;
+typedef enum {INVALID_OPERAND, REG_OPERAND, MEM_OPERAND, IMM_OPERAND} operand_type_t;
struct x86_operand {
addr_t operand;
uint_t instr_length;
addr_t opcode; // a pointer to the V3_OPCODE_[*] arrays defined below
uint_t num_operands;
- struct x86_operand first_operand;
- struct x86_operand second_operand;
+ struct x86_operand dst_operand;
+ struct x86_operand src_operand;
struct x86_operand third_operand;
void * decoder_data;
};
int v3_encode(struct guest_info * info, struct x86_instr * instr, char * instr_buf);
+/*
+ * Gets the operand size for a memory operation
+ *
+ */
+int v3_basic_mem_decode(struct guest_info * info, addr_t instr_ptr, uint_t * size, uint_t * instr_len);
--- /dev/null
+/* Copyright (C) 2002 Christopher Clark <firstname.lastname@cl.cam.ac.uk> */
+/* Modifications made by Jack Lange <jarusl@cs.northwestern.edu> */
+
+#ifndef __VMM_HASHTABLE_H__
+#define __VMM_HASHTABLE_H__
+
+#ifdef __V3VEE__
+
+struct hashtable;
+
+/* Example of use:
+ *
+ * struct hashtable *h;
+ * struct some_key *k;
+ * struct some_value *v;
+ *
+ * static uint_t hash_from_key_fn( void *k );
+ * static int keys_equal_fn ( void *key1, void *key2 );
+ *
+ * h = create_hashtable(16, hash_from_key_fn, keys_equal_fn);
+ * k = (struct some_key *) malloc(sizeof(struct some_key));
+ * v = (struct some_value *) malloc(sizeof(struct some_value));
+ *
+ * (initialise k and v to suitable values)
+ *
+ * if (! hashtable_insert(h,k,v) )
+ * { exit(-1); }
+ *
+ * if (NULL == (found = hashtable_search(h,k) ))
+ * { printf("not found!"); }
+ *
+ * if (NULL == (found = hashtable_remove(h,k) ))
+ * { printf("Not found\n"); }
+ *
+ */
+
+/* Macros may be used to define type-safe(r) hashtable access functions, with
+ * methods specialized to take known key and value types as parameters.
+ *
+ * Example:
+ *
+ * Insert this at the start of your file:
+ *
+ * DEFINE_HASHTABLE_INSERT(insert_some, struct some_key, struct some_value);
+ * DEFINE_HASHTABLE_SEARCH(search_some, struct some_key, struct some_value);
+ * DEFINE_HASHTABLE_REMOVE(remove_some, struct some_key, struct some_value);
+ *
+ * This defines the functions 'insert_some', 'search_some' and 'remove_some'.
+ * These operate just like hashtable_insert etc., with the same parameters,
+ * but their function signatures have 'struct some_key *' rather than
+ * 'void *', and hence can generate compile time errors if your program is
+ * supplying incorrect data as a key (and similarly for value).
+ *
+ * Note that the hash and key equality functions passed to create_hashtable
+ * still take 'void *' parameters instead of 'some key *'. This shouldn't be
+ * a difficult issue as they're only defined and passed once, and the other
+ * functions will ensure that only valid keys are supplied to them.
+ *
+ * The cost for this checking is increased code size and runtime overhead
+ * - if performance is important, it may be worth switching back to the
+ * unsafe methods once your program has been debugged with the safe methods.
+ * This just requires switching to some simple alternative defines - eg:
+ * #define insert_some hashtable_insert
+ *
+ */
+
+ulong_t hash_long(ulong_t val, uint_t bits);
+ulong_t hash_buffer(uchar_t * msg, uint_t length);
+
+
+
+
+#define DEFINE_HASHTABLE_INSERT(fnname, keytype, valuetype) \
+ int fnname (struct hashtable * htable, keytype * key, valuetype * value) { \
+ return hashtable_insert(htable, key, value); \
+ }
+
+#define DEFINE_HASHTABLE_SEARCH(fnname, keytype, valuetype) \
+ valuetype * fnname (struct hashtable * htable, keytype * key) { \
+ return (valuetype *) (hashtable_search(htable, key)); \
+ }
+
+#define DEFINE_HASHTABLE_REMOVE(fnname, keytype, valuetype) \
+ valuetype * fnname (struct hashtable * htable, keytype * key) { \
+ return (valuetype *) (hashtable_remove(htable, key)); \
+ }
+
+
+
+
+
+
+struct hashtable * create_hashtable(uint_t min_size,
+ uint_t (*hashfunction) (void * key),
+ int (*key_eq_fn) (void * key1, void * key2));
+
+void hashtable_destroy(struct hashtable * htable, int free_values);
+
+/*
+ * returns non-zero for successful insertion
+ *
+ * This function will cause the table to expand if the insertion would take
+ * the ratio of entries to table size over the maximum load factor.
+ *
+ * This function does not check for repeated insertions with a duplicate key.
+ * The value returned when using a duplicate key is undefined -- when
+ * the hashtable changes size, the order of retrieval of duplicate key
+ * entries is reversed.
+ * If in doubt, remove before insert.
+ */
+int hashtable_insert(struct hashtable * htable, void * key, void * value);
+
+int hashtable_change(struct hashtable * htable, void * key, void * value);
+
+
+// returns the value associated with the key, or NULL if none found
+void * hashtable_search(struct hashtable * htable, void * key);
+
+// returns the value associated with the key, or NULL if none found
+void * hashtable_remove(struct hashtable * htable, void * key);
+
+uint_t hashtable_count(struct hashtable * htable);
+
+ /* ************ */
+ /* ITERATOR API */
+/* ************ */
+
+#define DEFINE_HASHTABLE_ITERATOR_SEARCH(fnname, keytype) \
+ int fnname (struct hashtable_itr * iter, struct hashtable * htable, keytype * key) { \
+ return (hashtable_iterator_search(iter, htable, key)); \
+ }
+
+
+
+/*****************************************************************************/
+/* This struct is only concrete here to allow the inlining of two of the
+ * accessor functions. */
+struct hashtable_iter {
+ struct hashtable * htable;
+ struct hash_entry * entry;
+ struct hash_entry * parent;
+ uint_t index;
+};
+
+
+struct hashtable_iter * create_hashtable_iterator(struct hashtable * htable);
+
+/* - return the value of the (key,value) pair at the current position */
+//extern inline
+void * hashtable_get_iter_key(struct hashtable_iter * iter);
+/* {
+ return iter->entry->key;
+ }
+*/
+
+
+/* value - return the value of the (key,value) pair at the current position */
+//extern inline
+void * hashtable_get_iter_value(struct hashtable_iter * iter);
+/* {
+ return iter->entry->value;
+ }
+*/
+
+
+
+/* returns zero if advanced to end of table */
+int hashtable_iterator_advance(struct hashtable_iter * iter);
+
+/* remove current element and advance the iterator to the next element
+ * NB: if you need the value to free it, read it before
+ * removing. ie: beware memory leaks!
+ * returns zero if advanced to end of table
+ */
+int hashtable_iterator_remove(struct hashtable_iter * iter);
+
+
+/* search - overwrite the supplied iterator, to point to the entry
+ * matching the supplied key.
+ * returns zero if not found. */
+int hashtable_iterator_search(struct hashtable_iter * iter, struct hashtable * htable, void * key);
+
+
+
+
+#endif // ! __V3VEE__
+
+
+#endif /* __VMM_HASHTABLE_H__ */
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/*
+ * Copyright (c) 2002, Christopher Clark
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of the original author; nor the names of any contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+ * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
/*
* String library
* Copyright (c) 2001,2004 David H. Hovemeyer <daveho@cs.umd.edu>
- * $Revision: 1.1 $
+ * $Revision: 1.2 $
*
* This is free software. You are permitted to use,
* redistribute, and modify it as specified in the file "COPYING".
#ifndef STRING_H
#define STRING_H
+#ifdef __V3VEE__
+
#include <stddef.h>
void* memset(void* s, int c, size_t n);
char *strpbrk(const char *s, const char *accept);
+double ceil(double x);
+
+
+#endif // !__V3VEE__
#endif /* STRING_H */
Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);
+ info->run_state = VM_STOPPED;
+
// info->rip = 0;
info->vm_regs.rdi = 0;
vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
uint_t num_exits = 0;
+
+
PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
//PrintDebugVMCB((vmcb_t*)(info->vmm_data));
+ info->run_state = VM_RUNNING;
+
while (1) {
ullong_t tmp_tsc;
addr_t host_addr;
addr_t linear_addr = 0;
+ info->run_state = VM_ERROR;
+
PrintDebug("SVM ERROR!!\n");
PrintDebug("RIP: %x\n", guest_state->rip);
+
// First Attempt = 494 lines
// current = 106 lines
int handle_cr0_write(struct guest_info * info) {
if (opcode_cmp(V3_OPCODE_LMSW, (const uchar_t *)(dec_instr.opcode)) == 0) {
struct cr0_real *real_cr0 = (struct cr0_real*)&(info->ctrl_regs.cr0);
- struct cr0_real *new_cr0 = (struct cr0_real *)(dec_instr.first_operand.operand);
+ struct cr0_real *new_cr0 = (struct cr0_real *)(dec_instr.src_operand.operand);
uchar_t new_cr0_val;
PrintDebug("LMSW\n");
} else {
// 32 bit registers
struct cr0_32 *real_cr0 = (struct cr0_32*)&(info->ctrl_regs.cr0);
- struct cr0_32 *new_cr0= (struct cr0_32 *)(dec_instr.second_operand.operand);
+ struct cr0_32 *new_cr0= (struct cr0_32 *)(dec_instr.src_operand.operand);
- PrintDebug("OperandVal = %x, length=%d\n", *new_cr0, dec_instr.first_operand.size);
+ PrintDebug("OperandVal = %x, length=%d\n", *new_cr0, dec_instr.dst_operand.size);
PrintDebug("Old CR0=%x\n", *real_cr0);
}
if (opcode_cmp(V3_OPCODE_MOVCR2, (const uchar_t *)(dec_instr.opcode)) == 0) {
- struct cr0_32 * virt_cr0 = (struct cr0_32 *)(dec_instr.first_operand.operand);
+ struct cr0_32 * virt_cr0 = (struct cr0_32 *)(dec_instr.dst_operand.operand);
struct cr0_32 * real_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
PrintDebug("MOVCR2\n");
PrintDebug("returned CR0: %x\n", *(uint_t*)virt_cr0);
} else if (opcode_cmp(V3_OPCODE_SMSW, (const uchar_t *)(dec_instr.opcode)) == 0) {
struct cr0_real *real_cr0= (struct cr0_real*)&(info->ctrl_regs.cr0);
- struct cr0_real *virt_cr0 = (struct cr0_real *)(dec_instr.first_operand.operand);
+ struct cr0_real *virt_cr0 = (struct cr0_real *)(dec_instr.dst_operand.operand);
char cr0_val = *(char*)real_cr0 & 0x0f;
PrintDebug("SMSW\n");
PrintDebug("CR3 at 0x%x\n", &(info->ctrl_regs.cr3));
if (info->shdw_pg_mode == SHADOW_PAGING) {
- struct cr3_32 * new_cr3 = (struct cr3_32 *)(dec_instr.second_operand.operand);
+ struct cr3_32 * new_cr3 = (struct cr3_32 *)(dec_instr.src_operand.operand);
struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3);
struct cr3_32 * shadow_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.shadow_cr3);
PrintDebug("Old Shadow CR3=%x; Old Guest CR3=%x\n",
*(uint_t*)shadow_cr3, *(uint_t*)guest_cr3);
- if (!CR3_32_SAME_BASE(new_cr3, guest_cr3)) {
+ if (1 || !CR3_32_SAME_BASE(new_cr3, guest_cr3)) {
addr_t shadow_pt;
if (opcode_cmp(V3_OPCODE_MOVCR2, (const uchar_t *)(dec_instr.opcode)) == 0) {
PrintDebug("MOVCR32\n");
- struct cr3_32 * virt_cr3 = (struct cr3_32 *)(dec_instr.first_operand.operand);
+ struct cr3_32 * virt_cr3 = (struct cr3_32 *)(dec_instr.dst_operand.operand);
PrintDebug("CR3 at 0x%x\n", &(info->ctrl_regs.cr3));
--- /dev/null
+/* Copyright (C) 2004 Christopher Clark <firstname.lastname@cl.cam.ac.uk> */
+/* Modifications made by Jack Lange <jarusl@cs.northwestern.edu> */
+
+#include <palacios/vmm.h>
+#include <palacios/vmm_hashtable.h>
+#include <palacios/vmm_string.h>
+
+
+
+
+
/* One chained entry in a bucket: key/value pair plus the cached full
 * (pre-modulo) hash, which is used to short-circuit key comparisons and
 * to re-bucket entries on resize without re-hashing. */
struct hash_entry {
    void * key;
    void * value;
    uint_t hash;                  // full hash of key, as produced by do_hash()
    struct hash_entry * next;     // next entry in this bucket's chain
};

/* Separately-chained hash table. */
struct hashtable {
    uint_t table_length;          // number of buckets (always a prime from primes[])
    struct hash_entry ** table;   // bucket array; NULL slot == empty chain
    uint_t entry_count;           // number of stored entries
    uint_t load_limit;            // entry_count threshold that triggers expansion
    uint_t prime_index;           // current index into the primes[] size table
    uint_t (*hash_fn) (void * key);              // user-supplied hash function
    int (*eq_fn) (void * key1, void * key2);     // user-supplied key equality predicate
};
+
+
+
+/* HASH FUNCTIONS */
+
+
+
+uint_t do_hash(struct hashtable * htable, void * key) {
+ /* Aim to protect against poor hash functions by adding logic here
+ * - logic taken from java 1.4 hashtable source */
+ uint_t i = htable->hash_fn(key);
+ i += ~(i << 9);
+ i ^= ((i >> 14) | (i << 18)); /* >>> */
+ i += (i << 4);
+ i ^= ((i >> 10) | (i << 22)); /* >>> */
+
+ return i;
+}
+
+
+/* HASH AN UNSIGNED LONG */
+/* LINUX UNSIGNED LONG HASH FUNCTION */
+#ifdef __V3_32BIT__
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define GOLDEN_RATIO_PRIME 0x9e370001UL
+#define BITS_PER_LONG 32
+#elif defined(__V3_64BIT__)
+/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
+#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
+#define BITS_PER_LONG 64
+#else
+#error Define GOLDEN_RATIO_PRIME for your wordsize.
+#endif
+
/* Multiplicative hash of an unsigned long (Linux kernel style):
 * computes hash = val * GOLDEN_RATIO_PRIME and returns the top 'bits'
 * bits of the product. */
ulong_t hash_long(ulong_t val, uint_t bits) {
    ulong_t hash = val;

#ifdef __V3_64BIT__
    /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
    /* Shift/add/subtract sequence computing hash * GOLDEN_RATIO_PRIME
     * without a 64-bit multiply; the statement order is significant --
     * do not reorder. */
    ulong_t n = hash;
    n <<= 18;
    hash -= n;
    n <<= 33;
    hash -= n;
    n <<= 3;
    hash += n;
    n <<= 3;
    hash -= n;
    n <<= 4;
    hash += n;
    n <<= 2;
    hash += n;
#else
    /* On some cpus multiply is faster, on others gcc will do shifts */
    hash *= GOLDEN_RATIO_PRIME;
#endif

    /* High bits are more random, so use them. */
    return hash >> (BITS_PER_LONG - bits);
}
+
+/* HASH GENERIC MEMORY BUFFER */
+/* ELF HEADER HASH FUNCTION */
+ulong_t hash_buffer(uchar_t * msg, uint_t length) {
+ ulong_t hash = 0;
+ ulong_t temp = 0;
+ int i;
+
+ for (i = 0; i < length; i++) {
+ hash = (hash << 4) + *(msg + i) + i;
+ if ((temp = (hash & 0xF0000000))) {
+ hash ^= (temp >> 24);
+ }
+ hash &= ~temp;
+ }
+ return hash;
+}
+
+
+
+/*****************************************************************************/
+/* indexFor */
+static inline uint_t indexFor(uint_t table_length, uint_t hash_value) {
+ return (hash_value % table_length);
+};
+
+/* Only works if table_length == 2^N */
+/*
+ static inline uint_t indexFor(uint_t table_length, uint_t hashvalue)
+ {
+ return (hashvalue & (table_length - 1u));
+ }
+*/
+
+/*****************************************************************************/
+#define freekey(X) V3_Free(X)
+/*define freekey(X) ; */
+
+
+static void * tmp_realloc(void * old_ptr, uint_t old_size, uint_t new_size) {
+ void * new_buf = V3_Malloc(new_size);
+
+ if (new_buf == NULL) {
+ return NULL;
+ }
+
+ memcpy(new_buf, old_ptr, old_size);
+ V3_Free(old_ptr);
+
+ return new_buf;
+}
+
+
+/*
+Credit for primes table: Aaron Krowne
+ http://br.endernet.org/~akrowne/
+ http://planetmath.org/encyclopedia/GoodHashTablePrimes.html
+*/
+static const uint_t primes[] = {
+ 53, 97, 193, 389,
+ 769, 1543, 3079, 6151,
+ 12289, 24593, 49157, 98317,
+ 196613, 393241, 786433, 1572869,
+ 3145739, 6291469, 12582917, 25165843,
+ 50331653, 100663319, 201326611, 402653189,
+ 805306457, 1610612741 };
+
+
+const uint_t prime_table_length = sizeof(primes) / sizeof(primes[0]);
+
+const float max_load_factor = 0.65;
+
+/*****************************************************************************/
/* Allocate a hash table with at least min_size buckets.
 * The bucket count is rounded up to the next prime in primes[], and
 * load_limit is set to size * max_load_factor (entries beyond that
 * trigger expansion on insert).
 * Returns NULL on out-of-memory or if min_size > 2^30 (2^30 is below
 * the largest entry in primes[], so the prime search always succeeds). */
struct hashtable * create_hashtable(uint_t min_size,
				    uint_t (*hash_fn) (void *),
				    int (*eq_fn) (void *, void *)) {
    struct hashtable * htable;
    uint_t prime_index;
    uint_t size = primes[0];

    /* Check requested hashtable isn't too large */
    if (min_size > (1u << 30)) {
	return NULL;
    }

    /* Enforce size as prime: first prime strictly greater than min_size */
    for (prime_index = 0; prime_index < prime_table_length; prime_index++) {
	if (primes[prime_index] > min_size) {
	    size = primes[prime_index];
	    break;
	}
    }

    htable = (struct hashtable *)V3_Malloc(sizeof(struct hashtable));

    if (htable == NULL) {
	return NULL; /*oom*/
    }

    htable->table = (struct hash_entry **)V3_Malloc(sizeof(struct hash_entry*) * size);

    if (htable->table == NULL) {
	V3_Free(htable);
	return NULL; /*oom*/
    }

    /* empty buckets are NULL chains */
    memset(htable->table, 0, size * sizeof(struct hash_entry *));

    htable->table_length = size;
    htable->prime_index = prime_index;
    htable->entry_count = 0;
    htable->hash_fn = hash_fn;
    htable->eq_fn = eq_fn;
    htable->load_limit = (uint_t) ceil((double)(size * max_load_factor));

    return htable;
}
+
+
+
+/*****************************************************************************/
+static int hashtable_expand(struct hashtable * htable) {
+ /* Double the size of the table to accomodate more entries */
+ struct hash_entry ** new_table;
+ struct hash_entry * tmp_entry;
+ struct hash_entry ** entry_ptr;
+ uint_t new_size;
+ uint_t i;
+ uint_t index;
+
+ /* Check we're not hitting max capacity */
+ if (htable->prime_index == (prime_table_length - 1)) {
+ return 0;
+ }
+
+ new_size = primes[++(htable->prime_index)];
+
+ new_table = (struct hash_entry **)V3_Malloc(sizeof(struct hash_entry*) * new_size);
+
+ if (new_table != NULL) {
+ memset(new_table, 0, new_size * sizeof(struct hash_entry *));
+ /* This algorithm is not 'stable'. ie. it reverses the list
+ * when it transfers entries between the tables */
+
+ for (i = 0; i < htable->table_length; i++) {
+
+ while ((tmp_entry = htable->table[i]) != NULL) {
+ htable->table[i] = tmp_entry->next;
+
+ index = indexFor(new_size, tmp_entry->hash);
+
+ tmp_entry->next = new_table[index];
+
+ new_table[index] = tmp_entry;
+ }
+ }
+
+ V3_Free(htable->table);
+
+ htable->table = new_table;
+ } else {
+ /* Plan B: realloc instead */
+
+ //new_table = (struct hash_entry **)realloc(htable->table, new_size * sizeof(struct hash_entry *));
+ new_table = (struct hash_entry **)tmp_realloc(htable->table, primes[htable->prime_index - 1],
+ new_size * sizeof(struct hash_entry *));
+
+ if (new_table == NULL) {
+ (htable->prime_index)--;
+ return 0;
+ }
+
+ htable->table = new_table;
+
+ memset(new_table[htable->table_length], 0, new_size - htable->table_length);
+
+ for (i = 0; i < htable->table_length; i++) {
+
+ for (entry_ptr = &(new_table[i]), tmp_entry = *entry_ptr;
+ tmp_entry != NULL;
+ tmp_entry = *entry_ptr) {
+
+ index = indexFor(new_size, tmp_entry->hash);
+
+ if (i == index) {
+ entry_ptr = &(tmp_entry->next);
+ } else {
+ *entry_ptr = tmp_entry->next;
+ tmp_entry->next = new_table[index];
+ new_table[index] = tmp_entry;
+ }
+ }
+ }
+ }
+
+ htable->table_length = new_size;
+
+ htable->load_limit = (uint_t) ceil(new_size * max_load_factor);
+
+ return -1;
+}
+
+/*****************************************************************************/
/* Number of entries currently stored in the table. */
uint_t hashtable_count(struct hashtable * htable) {
    return htable->entry_count;
}
+
+/*****************************************************************************/
/* Insert a key/value pair, pushing the new entry onto the head of its
 * bucket chain. Returns non-zero (-1) on success, 0 on OOM.
 * Duplicate keys are NOT checked for (see header comment).
 * Ownership: the table takes ownership of 'key' (freed on remove /
 * destroy); value ownership depends on the destroy/change flags. */
int hashtable_insert(struct hashtable * htable, void * key, void * value) {
    /* This method allows duplicate keys - but they shouldn't be used */
    uint_t index;
    struct hash_entry * new_entry;

    /* optimistic increment; rolled back below if the malloc fails */
    if (++(htable->entry_count) > htable->load_limit) {
	/* Ignore the return value. If expand fails, we should
	 * still try cramming just this value into the existing table
	 * -- we may not have memory for a larger table, but one more
	 * element may be ok. Next time we insert, we'll try expanding again.*/
	hashtable_expand(htable);
    }

    new_entry = (struct hash_entry *)V3_Malloc(sizeof(struct hash_entry));

    if (new_entry == NULL) {
	(htable->entry_count)--;   /* roll back the optimistic increment */
	return 0; /*oom*/
    }

    new_entry->hash = do_hash(htable, key);

    index = indexFor(htable->table_length, new_entry->hash);

    new_entry->key = key;
    new_entry->value = value;

    /* push onto the head of the chain */
    new_entry->next = htable->table[index];

    htable->table[index] = new_entry;

    return -1;
}
+
+
+
/* Replace the value stored under 'key'.
 * The OLD value is released with V3_Free -- this assumes values are
 * heap allocations owned by the table (NOTE(review): confirm callers
 * never store non-heap values before relying on this).
 * Returns -1 if the key was found and updated, 0 if not present. */
int hashtable_change(struct hashtable * htable, void * key, void * value) {
    struct hash_entry * tmp_entry;
    uint_t hash_value;
    uint_t index;

    hash_value = do_hash(htable, key);

    index = indexFor(htable->table_length, hash_value);

    tmp_entry = htable->table[index];

    while (tmp_entry != NULL) {
	/* Check hash value to short circuit heavier comparison */
	if ((hash_value == tmp_entry->hash) && (htable->eq_fn(key, tmp_entry->key))) {
	    V3_Free(tmp_entry->value);   /* free the replaced value */
	    tmp_entry->value = value;

	    return -1;
	}
	tmp_entry = tmp_entry->next;
    }
    return 0;
}
+
+
+
+
+/*****************************************************************************/
+/* returns value associated with key */
+void * hashtable_search(struct hashtable * htable, void * key) {
+ struct hash_entry * cursor;
+ uint_t hash_value;
+ uint_t index;
+
+ hash_value = do_hash(htable, key);
+
+ index = indexFor(htable->table_length, hash_value);
+
+ cursor = htable->table[index];
+
+ while (cursor != NULL) {
+ /* Check hash value to short circuit heavier comparison */
+ if ((hash_value == cursor->hash) &&
+ (htable->eq_fn(key, cursor->key))) {
+ return cursor->value;
+ }
+
+ cursor = cursor->next;
+ }
+
+ return NULL;
+}
+
+/*****************************************************************************/
+/* returns value associated with key */
/* Remove 'key' from the table.
 * Returns the stored value (the caller now owns it), or NULL if the key
 * was not found. The key pointer stored in the table is freed here. */
void * hashtable_remove(struct hashtable * htable, void * key) {
    /* TODO: consider compacting the table when the load factor drops enough,
     * or provide a 'compact' method. */

    struct hash_entry * cursor;
    struct hash_entry ** entry_ptr;   // address of the link pointing at cursor
    void * value;
    uint_t hash_value;
    uint_t index;

    hash_value = do_hash(htable, key);

    index = indexFor(htable->table_length, hash_value);

    entry_ptr = &(htable->table[index]);
    cursor = *entry_ptr;

    while (cursor != NULL) {
	/* Check hash value to short circuit heavier comparison */
	if ((hash_value == cursor->hash) &&
	    (htable->eq_fn(key, cursor->key))) {

	    /* unlink from the chain, then free key and entry */
	    *entry_ptr = cursor->next;
	    htable->entry_count--;
	    value = cursor->value;

	    freekey(cursor->key);
	    V3_Free(cursor);

	    return value;
	}

	entry_ptr = &(cursor->next);
	cursor = cursor->next;
    }
    return NULL;
}
+
+/*****************************************************************************/
+/* destroy */
+void hashtable_destroy(struct hashtable * htable, int free_values) {
+ uint_t i;
+ struct hash_entry * cursor;;
+ struct hash_entry **table = htable->table;
+
+ if (free_values) {
+ for (i = 0; i < htable->table_length; i++) {
+ cursor = table[i];
+
+ while (cursor != NULL) {
+ struct hash_entry * tmp;
+
+ tmp = cursor;
+ cursor = cursor->next;
+
+ freekey(tmp->key);
+ V3_Free(tmp->value);
+ V3_Free(tmp);
+ }
+ }
+ } else {
+ for (i = 0; i < htable->table_length; i++) {
+ cursor = table[i];
+
+ while (cursor != NULL) {
+ struct hash_entry * tmp;
+
+ tmp = cursor;
+ cursor = cursor->next;
+
+ freekey(tmp->key);
+ V3_Free(tmp);
+ }
+ }
+ }
+
+ V3_Free(htable->table);
+ V3_Free(htable);
+}
+
+
+
+
+/* HASH TABLE ITERATORS */
+
+
+
/* Allocate an iterator positioned at the first entry of the table
 * (scanning buckets in order). For an empty table, entry stays NULL and
 * index == table_length, which acts as the end sentinel.
 * Returns NULL on OOM. */
struct hashtable_iter * create_hashtable_iterator(struct hashtable * htable) {
    uint_t i;
    uint_t table_length;

    struct hashtable_iter * iter = (struct hashtable_iter *)V3_Malloc(sizeof(struct hashtable_iter));

    if (iter == NULL) {
	return NULL;
    }

    iter->htable = htable;
    iter->entry = NULL;
    iter->parent = NULL;
    table_length = htable->table_length;
    iter->index = table_length;   // end sentinel until an entry is found

    if (htable->entry_count == 0) {
	return iter;
    }

    /* find the first non-empty bucket */
    for (i = 0; i < table_length; i++) {
	if (htable->table[i] != NULL) {
	    iter->entry = htable->table[i];
	    iter->index = i;
	    break;
	}
    }

    return iter;
}
+
+
/* Key of the (key,value) pair at the iterator's current position.
 * Caller must ensure the iterator points at a valid entry. */
void * hashtable_get_iter_key(struct hashtable_iter * iter) {
    return iter->entry->key;
}

/* Value of the (key,value) pair at the iterator's current position.
 * Caller must ensure the iterator points at a valid entry. */
void * hashtable_get_iter_value(struct hashtable_iter * iter) {
    return iter->entry->value;
}
+
+
+/* advance - advance the iterator to the next element
+ * returns zero if advanced to end of table */
/* Advance the iterator to the next entry: first along the current
 * chain, then to the head of the next non-empty bucket.
 * Returns -1 on success, 0 when the end of the table is reached
 * (iter->entry becomes NULL). */
int hashtable_iterator_advance(struct hashtable_iter * iter) {
    uint_t j;
    uint_t table_length;
    struct hash_entry ** table;
    struct hash_entry * next;

    if (iter->entry == NULL) {
	return 0; /* already at end -- stupidity check */
    }

    /* next entry in the same chain? */
    next = iter->entry->next;

    if (next != NULL) {
	iter->parent = iter->entry;
	iter->entry = next;
	return -1;
    }

    /* otherwise scan forward for the next non-empty bucket; a chain
     * head has no parent */
    table_length = iter->htable->table_length;
    iter->parent = NULL;

    if (table_length <= (j = ++(iter->index))) {
	iter->entry = NULL;
	return 0;
    }

    table = iter->htable->table;

    while ((next = table[j]) == NULL) {
	if (++j >= table_length) {
	    iter->index = table_length;   // park at the end sentinel
	    iter->entry = NULL;
	    return 0;
	}
    }

    iter->index = j;
    iter->entry = next;

    return -1;
}
+
+
+/* remove - remove the entry at the current iterator position
+ * and advance the iterator, if there is a successive
+ * element.
+ * If you want the value, read it before you remove:
+ * beware memory leaks if you don't.
+ * Returns zero if end of iteration. */
/* Remove the entry at the current iterator position and advance to the
 * next entry. The removed entry's KEY is freed here; the VALUE is not
 * -- read it before calling if you own it (memory-leak hazard).
 * Returns 0 when the iteration has reached the end, non-zero otherwise. */
int hashtable_iterator_remove(struct hashtable_iter * iter) {
    struct hash_entry * remember_entry;
    struct hash_entry * remember_parent;
    int ret;

    /* Do the removal */
    if ((iter->parent) == NULL) {
	/* element is head of a chain */
	iter->htable->table[iter->index] = iter->entry->next;
    } else {
	/* element is mid-chain */
	iter->parent->next = iter->entry->next;
    }

    /* iter->entry is now unlinked from the hashtable */
    remember_entry = iter->entry;
    iter->htable->entry_count--;
    freekey(remember_entry->key);

    /* Advance the iterator; if advance() set parent to the entry we just
     * unlinked, restore the real (still-linked) parent. */
    remember_parent = iter->parent;
    ret = hashtable_iterator_advance(iter);

    if (iter->parent == remember_entry) {
	iter->parent = remember_parent;
    }

    V3_Free(remember_entry);
    return ret;
}
+
+
+/* returns zero if not found */
+int hashtable_iterator_search(struct hashtable_iter * iter,
+ struct hashtable * htable, void * key) {
+ struct hash_entry * entry;
+ struct hash_entry * parent;
+ uint_t hash_value;
+ uint_t index;
+
+ hash_value = do_hash(htable, key);
+ index = indexFor(htable->table_length, hash_value);
+
+ entry = htable->table[index];
+ parent = NULL;
+
+ while (entry != NULL) {
+ /* Check hash value to short circuit heavier comparison */
+ if ((hash_value == entry->hash) &&
+ (htable->eq_fn(key, entry->key))) {
+ iter->index = index;
+ iter->entry = entry;
+ iter->parent = parent;
+ iter->htable = htable;
+ return -1;
+ }
+ parent = entry;
+ entry = entry->next;
+ }
+ return 0;
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/*
+ * Copyright (c) 2002, Christopher Clark
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of the original author; nor the names of any contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+ * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
int mem_hook_dispatch(struct guest_info * info, addr_t mem_addr, pf_error_t access_info, struct vmm_mem_hook * hook) {
+ // emulate and then dispatch
+ // or dispatch and emulate
+
+
if (access_info.write == 1) {
void * src = NULL;
uint_t length = 0;
/*
* String library
* Copyright (c) 2001,2004 David H. Hovemeyer <daveho@cs.umd.edu>
- * $Revision: 1.1 $
+ * $Revision: 1.2 $
*
* This is free software. You are permitted to use,
* redistribute, and modify it as specified in the file "COPYING".
#include <palacios/vmm_string.h>
+#include <palacios/vmm.h>
-extern void *Malloc(size_t size);
+
/* Minimal ceil() replacement (no libm in the VMM): returns the smallest
 * integral value >= x.
 * BUGFIX: the original returned (int)(x + epsilon) + 1 for every
 * non-integral x, which is wrong for negative values (ceil(-2.5) gave
 * -1 instead of -2) and could round a value just below an integer up
 * TWO steps. Exact truncate-and-adjust needs no epsilon. */
double ceil(double x) {
    double truncated = (double)(long long)x;   /* rounds toward zero */

    if (truncated == x) {
	return truncated;   /* already integral */
    }

    /* truncation already rounded negative x up; bump positive x */
    return (x > 0) ? (truncated + 1) : truncated;
}
+
+#if 0
void* memset(void* s, int c, size_t n)
{
unsigned char* p = (unsigned char*) s;
return s;
}
+
void* memcpy(void *dst, const void* src, size_t n)
{
unsigned char* d = (unsigned char*) dst;
return dst;
}
+
int memcmp(const void *s1_, const void *s2_, size_t n)
{
const signed char *s1 = s1_, *s2 = s2_;
{
char *ret;
- ret = Malloc(strlen(s1) + 1);
+ ret = V3_Malloc(strlen(s1) + 1);
strcpy(ret, s1);
return ret;
return 0;
}
+#endif
+#ifdef __DECODER_TEST__
+#include "vmm_decoder.h"
+#include "vmm_xed.h"
+#include <xed/xed-interface.h>
+#include "vm_guest.h"
+#include "test.h"
+#else
#include <palacios/vmm_decoder.h>
#include <palacios/vmm_xed.h>
#include <xed/xed-interface.h>
#include <palacios/vm_guest.h>
+#include <palacios/vmm.h>
+
+#endif
static xed_state_t decoder_state;
+#define GPR_REGISTER 0
+#define SEGMENT_REGISTER 1
+#define CTRL_REGISTER 2
+#define DEBUG_REGISTER 3
+
+
+
+/* Disgusting mask hack...
+   I can't think right now, so we'll do it this way...
+*/
+static const ullong_t mask_1 = 0x00000000000000ffLL;
+static const ullong_t mask_2 = 0x000000000000ffffLL;
+static const ullong_t mask_4 = 0x00000000ffffffffLL;
+static const ullong_t mask_8 = 0xffffffffffffffffLL;
+
+
+/* Mask 'val' down to its low 'length' bytes (length in {1,2,4,8};
+ * any other length yields 0).
+ * Each case must break: without the breaks every case fell through
+ * to mask_8 and the macro returned 'val' completely unmasked. */
+#define MASK(val, length) ({		\
+	ullong_t mask = 0x0LL;		\
+	switch (length) {		\
+	case 1:				\
+	    mask = mask_1;		\
+	    break;			\
+	case 2:				\
+	    mask = mask_2;		\
+	    break;			\
+	case 4:				\
+	    mask = mask_4;		\
+	    break;			\
+	case 8:				\
+	    mask = mask_8;		\
+	    break;			\
+	}				\
+	val & mask;})			\
+
+/* Decomposed x86 memory operand.  The effective address is computed as
+ *   segment + base + (scale * index) + displacement
+ * (see get_memory_operand below).  Each *_size field records the width,
+ * in bytes, of the register or displacement the component came from. */
+struct memory_operand {
+    uint_t segment_size;
+    addr_t segment;
+    uint_t base_size;
+    addr_t base;
+    uint_t index_size;
+    addr_t index;
+    addr_t scale;
+    uint_t displacement_size;
+    ullong_t displacement;
+};
+
+
// This returns a pointer to a V3_OPCODE_[*] array defined in vmm_decoder.h
static int get_opcode(xed_iform_enum_t iform, addr_t * opcode);
static int xed_reg_to_v3_reg(struct guest_info * info, xed_reg_enum_t xed_reg, addr_t * v3_reg, uint_t * reg_len);
-
+static int get_memory_operand(struct guest_info * info, xed_decoded_inst_t * xed_instr, uint_t index, struct x86_operand * operand);
static int set_decoder_mode(struct guest_info * info, xed_state_t * state) {
switch (info->cpu_mode) {
return 0;
}
-
+/* Return 1 if xed_reg names an x86 flags register
+ * (FLAGS/EFLAGS/RFLAGS), 0 otherwise.  Used to detect the implicit
+ * flags operand that many instructions carry as a third operand. */
+int is_flags_reg(xed_reg_enum_t xed_reg) {
+  switch (xed_reg) {
+  case XED_REG_FLAGS:
+  case XED_REG_EFLAGS:
+  case XED_REG_RFLAGS:
+    return 1;
+  default:
+    return 0;
+  }
+}
}
+/* Minimal decode of the instruction at instr_ptr (a host-virtual
+ * pointer to the guest's instruction bytes): report the total
+ * instruction length and the size in bytes of its first memory
+ * operand.  Fails if the bytes don't decode or the instruction has
+ * no memory operands.  Returns 0 on success, -1 on failure. */
+int v3_basic_mem_decode(struct guest_info * info, addr_t instr_ptr, uint_t * size, uint_t * instr_length) {
+ xed_decoded_inst_t xed_instr;
+ xed_error_enum_t xed_error;
+
+
+ // Configure xed for the guest's current CPU mode (real/protected/long).
+ if (set_decoder_mode(info, &decoder_state) == -1) {
+ PrintError("Could not set decoder mode\n");
+ return -1;
+ }
+
+
+ xed_decoded_inst_zero_set_mode(&xed_instr, &decoder_state);
+
+ xed_error = xed_decode(&xed_instr,
+ REINTERPRET_CAST(const xed_uint8_t *, instr_ptr),
+ XED_MAX_INSTRUCTION_BYTES);
+
+ if (xed_error != XED_ERROR_NONE) {
+ PrintError("Xed error: %s\n", xed_error_enum_t2str(xed_error));
+ return -1;
+ }
+
+ *instr_length = xed_decoded_inst_get_length(&xed_instr);
+
+
+ if (xed_decoded_inst_number_of_memory_operands(&xed_instr) == 0) {
+ PrintError("Tried to decode memory operation with no memory operands\n");
+ return -1;
+ }
+
+ // Only the first memory operand's access width is reported.
+ *size = xed_decoded_inst_get_memory_operand_length(&xed_instr,0);
+
+ return 0;
+}
+
+
+
int v3_decode(struct guest_info * info, addr_t instr_ptr, struct x86_instr * instr) {
xed_decoded_inst_t xed_instr;
xed_error_enum_t xed_error;
xed_iform_enum_t iform = xed_decoded_inst_get_iform_enum(&xed_instr);
+
+ PrintDebug("iform=%s\n", xed_iform_enum_t2str(iform));
+
+
+ if (instr->num_operands > 3) {
+ PrintDebug("Special Case Not Handled\n");
+ return -1;
+ // special case
+ } else if (instr->num_operands == 3) {
+ const xed_operand_t * op = xed_inst_operand(xi, 2);
+ xed_operand_enum_t op_enum = xed_operand_name(op);
+
+ if ((!xed_operand_is_register(op_enum)) ||
+ (!is_flags_reg(xed_decoded_inst_get_reg(&xed_instr, op_enum)))) {
+ // special case
+ PrintDebug("Special Case not handled\n");
+ return -1;
+ }
+ }
+
+
+
+
+
if (get_opcode(iform, &(instr->opcode)) == -1) {
- PrintError("Could not get opcode. (iform=%s)\n", xed_iform_enum_t2str(iform));
+ PrintDebug("Could not get opcode. (iform=%s)\n", xed_iform_enum_t2str(iform));
return -1;
}
- PrintDebug("Number of operands: %d\n", instr->num_operands);
- PrintDebug("INSTR length: %d\n", instr->instr_length);
+
+ //PrintDebug("Number of operands: %d\n", instr->num_operands);
+ //PrintDebug("INSTR length: %d\n", instr->instr_length);
// set first operand
if (instr->num_operands >= 1) {
const xed_operand_t * op = xed_inst_operand(xi, 0);
- xed_operand_type_enum_t op_type = xed_operand_type(op);
xed_operand_enum_t op_enum = xed_operand_name(op);
+ struct x86_operand * v3_op = NULL;
+
+ if (xed_operand_written(op)) {
+ v3_op = &(instr->dst_operand);
+ } else {
+ v3_op = &(instr->src_operand);
+ }
+
if (xed_operand_is_register(op_enum)) {
xed_reg_enum_t xed_reg = xed_decoded_inst_get_reg(&xed_instr, op_enum);
- if (xed_reg_to_v3_reg(info,
- xed_reg,
- &(instr->first_operand.operand),
- &(instr->first_operand.size)) == -1) {
-
+ int v3_reg_type = xed_reg_to_v3_reg(info,
+ xed_reg,
+ &(v3_op->operand),
+ &(v3_op->size));
+
+ if (v3_reg_type == -1) {
PrintError("First operand is an Unhandled Operand: %s\n", xed_reg_enum_t2str(xed_reg));
- instr->first_operand.type = INVALID_OPERAND;
+ v3_op->type = INVALID_OPERAND;
return -1;
+ } else if (v3_reg_type == SEGMENT_REGISTER) {
+ struct v3_segment * seg_reg = (struct v3_segment *)(v3_op->operand);
+ v3_op->operand = (addr_t)&(seg_reg->selector);
}
- instr->first_operand.type = REG_OPERAND;
- PrintDebug("First Operand: xed_reg=0x%x\n", instr->first_operand.operand);
-
+ v3_op->type = REG_OPERAND;
} else {
- PrintError("Unhandled first operand type %s\n", xed_operand_type_enum_t2str(op_type));
- return -1;
+
+ switch (op_enum) {
+
+ case XED_OPERAND_MEM0:
+ {
+ /*
+ struct x86_operand * operand = &(instr->dst_operand);
+
+ if (xed_decoded_inst_mem_read(&xed_instr, 0)) {
+ operand = &(instr->src_operand);
+ } else if (xed_decoded_inst_mem_written(&xed_instr, 0)) {
+ operand = &(instr->dst_operand);
+ }
+ */
+
+ if (get_memory_operand(info, &xed_instr, 0, v3_op) == -1) {
+ PrintError("Could not get first memory operand\n");
+ return -1;
+ }
+ }
+ break;
+
+ case XED_OPERAND_MEM1:
+ case XED_OPERAND_IMM1:
+ // illegal
+ PrintError("Illegal Operand Order\n");
+ return -1;
+
+
+ case XED_OPERAND_IMM0:
+ case XED_OPERAND_AGEN:
+ case XED_OPERAND_PTR:
+ case XED_OPERAND_RELBR:
+ default:
+ PrintError("Unhandled Operand Type\n");
+ return -1;
+
+
+ }
}
}
// set second operand
if (instr->num_operands >= 2) {
const xed_operand_t * op = xed_inst_operand(xi, 1);
- xed_operand_type_enum_t op_type = xed_operand_type(op);
+ // xed_operand_type_enum_t op_type = xed_operand_type(op);
xed_operand_enum_t op_enum = xed_operand_name(op);
+ struct x86_operand * v3_op;
+
+ if (xed_operand_written(op)) {
+ v3_op = &(instr->dst_operand);
+ } else {
+ v3_op = &(instr->src_operand);
+ }
+
+
if (xed_operand_is_register(op_enum)) {
xed_reg_enum_t xed_reg = xed_decoded_inst_get_reg(&xed_instr, op_enum);
- if (xed_reg_to_v3_reg(info,
- xed_reg,
- &(instr->second_operand.operand),
- &(instr->second_operand.size)) == -1) {
-
+ int v3_reg_type = xed_reg_to_v3_reg(info,
+ xed_reg,
+ &(v3_op->operand),
+ &(v3_op->size));
+ if (v3_reg_type == -1) {
PrintError("Second operand is an Unhandled Operand: %s\n", xed_reg_enum_t2str(xed_reg));
- instr->second_operand.type = INVALID_OPERAND;
+ v3_op->type = INVALID_OPERAND;
return -1;
+ } else if (v3_reg_type == SEGMENT_REGISTER) {
+ struct v3_segment * seg_reg = (struct v3_segment *)(v3_op->operand);
+ v3_op->operand = (addr_t)&(seg_reg->selector);
}
- instr->second_operand.type = REG_OPERAND;
+ v3_op->type = REG_OPERAND;
- PrintDebug("Second Operand: xed_reg=0x%x\n", instr->second_operand.operand);
+
} else {
- PrintError("Unhandled second operand type %s\n", xed_operand_type_enum_t2str(op_type));
- return -1;
+
+ switch (op_enum) {
+
+ case XED_OPERAND_MEM0:
+ {
+
+ /*
+ if (xed_decoded_inst_mem_read(&xed_instr, 0)) {
+ v3_op = &(instr->src_operand);
+ } else if (xed_decoded_inst_mem_written(&xed_instr, 0)) {
+ v3_op = &(instr->dst_operand);
+ }
+ */
+
+ if (get_memory_operand(info, &xed_instr, 0, v3_op) == -1) {
+ PrintError("Could not get first memory operand\n");
+ return -1;
+ }
+ }
+ break;
+
+ case XED_OPERAND_IMM0:
+ {
+ instr->src_operand.size = xed_decoded_inst_get_immediate_width(&xed_instr);
+
+ if (instr->src_operand.size > 4) {
+ PrintError("Unhandled 64 bit immediates\n");
+ return -1;
+ }
+ instr->src_operand.operand = xed_decoded_inst_get_unsigned_immediate(&xed_instr);
+
+ instr->src_operand.type = IMM_OPERAND;
+
+ }
+ break;
+
+ case XED_OPERAND_MEM1:
+ case XED_OPERAND_IMM1:
+ // illegal
+ PrintError("Illegal Operand Order\n");
+ return -1;
+
+ case XED_OPERAND_AGEN:
+ case XED_OPERAND_PTR:
+ case XED_OPERAND_RELBR:
+ default:
+ PrintError("Unhandled Operand Type\n");
+ return -1;
+ }
}
+
}
// set third operand
if (instr->num_operands >= 3) {
const xed_operand_t * op = xed_inst_operand(xi, 2);
- xed_operand_type_enum_t op_type = xed_operand_type(op);
+ // xed_operand_type_enum_t op_type = xed_operand_type(op);
xed_operand_enum_t op_enum = xed_operand_name(op);
if (xed_operand_is_register(op_enum)) {
xed_reg_enum_t xed_reg = xed_decoded_inst_get_reg(&xed_instr, op_enum);
- if (xed_reg_to_v3_reg(info,
- xed_reg,
- &(instr->third_operand.operand),
- &(instr->third_operand.size)) == -1) {
-
+ int v3_reg_type = xed_reg_to_v3_reg(info,
+ xed_reg,
+ &(instr->third_operand.operand),
+ &(instr->third_operand.size));
+
+ if (v3_reg_type == -1) {
PrintError("Third operand is an Unhandled Operand: %s\n", xed_reg_enum_t2str(xed_reg));
instr->third_operand.type = INVALID_OPERAND;
return -1;
+ } else if (v3_reg_type == SEGMENT_REGISTER) {
+ struct v3_segment * seg_reg = (struct v3_segment *)(instr->third_operand.operand);
+ instr->third_operand.operand = (addr_t)&(seg_reg->selector);
}
+
+
instr->third_operand.type = REG_OPERAND;
- PrintDebug("Third Operand: xed_reg=0x%x\n", instr->third_operand.operand);
+
} else {
- PrintError("Unhandled third operand type %s\n", xed_operand_type_enum_t2str(op_type));
+ // PrintError("Unhandled third operand type %s\n", xed_operand_type_enum_t2str(op_type));
return -1;
}
}
- /*
- PrintDebug("category: %s\n", xed_category_enum_t2str(xed_decoded_inst_get_category(&xed_instr)));;
- PrintDebug("ISA-extension:%s\n ",xed_extension_enum_t2str(xed_decoded_inst_get_extension(&xed_instr)));
- PrintDebug(" instruction-length: %d\n ", xed_decoded_inst_get_length(&xed_instr));
- PrintDebug(" operand-size:%d\n ", xed_operand_values_get_effective_operand_width(xed_decoded_inst_operands_const(&xed_instr)));
- PrintDebug("address-size:%d\n ", xed_operand_values_get_effective_address_width(xed_decoded_inst_operands_const(&xed_instr)));
- PrintDebug("iform-enum-name:%s\n ",xed_iform_enum_t2str(xed_decoded_inst_get_iform_enum(&xed_instr)));
- PrintDebug("iform-enum-name-dispatch (zero based):%d\n ", xed_decoded_inst_get_iform_enum_dispatch(&xed_instr));
- PrintDebug("iclass-max-iform-dispatch: %d\n ", xed_iform_max_per_iclass(xed_decoded_inst_get_iclass(&xed_instr)));
- */
- // operands
- // print_operands(&xed_instr);
-
- // memops
- // print_memops(&xed_instr);
-
- // flags
- //print_flags(&xed_instr);
-
- // attributes
- //print_attributes(&xed_instr);*/
-
return 0;
}
+
+
+
+/* Resolve memory operand 'op_index' of a decoded instruction into a
+ * flat address:
+ *   operand->operand = seg.base + base + (scale * index) + displacement
+ *   operand->size    = width of the memory access in bytes
+ * Returns 0 on success, -1 if any referenced register is unhandled. */
+static int get_memory_operand(struct guest_info * info, xed_decoded_inst_t * xed_instr, uint_t op_index, struct x86_operand * operand) {
+    struct memory_operand mem_op;
+
+    addr_t seg;
+    addr_t base;
+    addr_t scale;
+    addr_t index;
+    ullong_t displacement;
+    // struct v3_segment * seg_reg;
+
+    memset((void*)&mem_op, '\0', sizeof(struct memory_operand));
+
+    // Segment (override or default): we want the segment's base address,
+    // not its selector.
+    xed_reg_enum_t xed_seg = xed_decoded_inst_get_seg_reg(xed_instr, op_index);
+    if (xed_seg != XED_REG_INVALID) {
+	struct v3_segment *tmp_segment;
+	if (xed_reg_to_v3_reg(info, xed_seg, (addr_t *)&tmp_segment, &(mem_op.segment_size)) == -1) {
+	    PrintError("Unhandled Segment Register\n");
+	    return -1;
+	}
+	mem_op.segment = tmp_segment->base;
+    }
+
+    // Base register contents, if the operand has one.
+    xed_reg_enum_t xed_base = xed_decoded_inst_get_base_reg(xed_instr, op_index);
+    if (xed_base != XED_REG_INVALID) {
+	addr_t base_reg;
+	if (xed_reg_to_v3_reg(info, xed_base, &base_reg, &(mem_op.base_size)) == -1) {
+	    PrintError("Unhandled Base register\n");
+	    return -1;
+	}
+	mem_op.base = *(addr_t *)base_reg;
+    }
+
+    // Index register and scale.  NOTE(review): only honored for
+    // op_index 0 — confirm scaled-index forms never appear on a
+    // second memory operand.
+    xed_reg_enum_t xed_idx = xed_decoded_inst_get_index_reg(xed_instr, op_index);
+    if ((op_index == 0) && (xed_idx != XED_REG_INVALID)) {
+	addr_t index_reg;
+
+	if (xed_reg_to_v3_reg(info, xed_idx, &index_reg, &(mem_op.index_size)) == -1) {
+	    PrintError("Unhandled Index Register\n");
+	    return -1;
+	}
+
+	mem_op.index= *(addr_t *)index_reg;
+
+	xed_uint_t xed_scale = xed_decoded_inst_get_scale(xed_instr, op_index);
+	if (xed_scale != 0) {
+	    mem_op.scale = xed_scale;
+	}
+    }
+
+    // Displacement; xed reports its encoded width in bits.
+    xed_uint_t disp_bits = xed_decoded_inst_get_memory_displacement_width(xed_instr, op_index);
+    if (disp_bits) {
+	xed_int64_t xed_disp = xed_decoded_inst_get_memory_displacement(xed_instr, op_index);
+
+	mem_op.displacement_size = disp_bits / 8;
+	mem_op.displacement = xed_disp;
+    }
+
+    operand->type = MEM_OPERAND;
+    operand->size = xed_decoded_inst_get_memory_operand_length(xed_instr, op_index);
+
+    // Cast debug arguments explicitly: mem_op.displacement is a ullong_t,
+    // and passing it to a %x conversion is undefined behavior (and skews
+    // the remaining varargs where ullong_t is wider than int).
+    PrintDebug("Struct: Seg=%x, base=%x, index=%x, scale=%x, displacement=%x\n",
+	       (uint_t)mem_op.segment, (uint_t)mem_op.base, (uint_t)mem_op.index,
+	       (uint_t)mem_op.scale, (uint_t)mem_op.displacement);
+
+    seg = mem_op.segment;
+    base = MASK(mem_op.base, mem_op.base_size);
+    index = MASK(mem_op.index, mem_op.index_size);
+    scale = mem_op.scale;
+    // NOTE(review): MASKing a sign-extended negative displacement back to
+    // its encoded width discards the sign bits in the wider address —
+    // verify negative displacements are handled as intended.
+    displacement = MASK(mem_op.displacement, mem_op.displacement_size);
+
+    PrintDebug("Seg=%x, base=%x, index=%x, scale=%x, displacement=%x\n",
+	       (uint_t)seg, (uint_t)base, (uint_t)index, (uint_t)scale, (uint_t)displacement);
+
+    operand->operand = seg + base + (scale * index) + displacement;
+    return 0;
+}
+
+
+
+
static int xed_reg_to_v3_reg(struct guest_info * info, xed_reg_enum_t xed_reg, addr_t * v3_reg, uint_t * reg_len) {
switch (xed_reg) {
case XED_REG_RAX:
*v3_reg = (addr_t)&(info->vm_regs.rax);
*reg_len = 8;
- break;
+ return GPR_REGISTER;
case XED_REG_EAX:
*v3_reg = (addr_t)&(info->vm_regs.rax);
*reg_len = 4;
- break;
+ return GPR_REGISTER;
case XED_REG_AX:
*v3_reg = (addr_t)&(info->vm_regs.rax);
*reg_len = 2;
- break;
+ return GPR_REGISTER;
case XED_REG_AH:
*v3_reg = (addr_t)(&(info->vm_regs.rax)) + 1;
*reg_len = 1;
- break;
+ return GPR_REGISTER;
case XED_REG_AL:
*v3_reg = (addr_t)&(info->vm_regs.rax);
*reg_len = 1;
- break;
+ return GPR_REGISTER;
case XED_REG_RCX:
*v3_reg = (addr_t)&(info->vm_regs.rcx);
*reg_len = 8;
- break;
+ return GPR_REGISTER;
case XED_REG_ECX:
*v3_reg = (addr_t)&(info->vm_regs.rcx);
*reg_len = 4;
- break;
+ return GPR_REGISTER;
case XED_REG_CX:
*v3_reg = (addr_t)&(info->vm_regs.rcx);
*reg_len = 2;
- break;
+ return GPR_REGISTER;
case XED_REG_CH:
*v3_reg = (addr_t)(&(info->vm_regs.rcx)) + 1;
*reg_len = 1;
- break;
+ return GPR_REGISTER;
case XED_REG_CL:
*v3_reg = (addr_t)&(info->vm_regs.rcx);
*reg_len = 1;
- break;
+ return GPR_REGISTER;
case XED_REG_RDX:
*v3_reg = (addr_t)&(info->vm_regs.rdx);
*reg_len = 8;
- break;
+ return GPR_REGISTER;
case XED_REG_EDX:
*v3_reg = (addr_t)&(info->vm_regs.rdx);
*reg_len = 4;
- break;
+ return GPR_REGISTER;
case XED_REG_DX:
*v3_reg = (addr_t)&(info->vm_regs.rdx);
*reg_len = 2;
- break;
+ return GPR_REGISTER;
case XED_REG_DH:
*v3_reg = (addr_t)(&(info->vm_regs.rdx)) + 1;
*reg_len = 1;
- break;
+ return GPR_REGISTER;
case XED_REG_DL:
*v3_reg = (addr_t)&(info->vm_regs.rdx);
*reg_len = 1;
- break;
+ return GPR_REGISTER;
case XED_REG_RBX:
*v3_reg = (addr_t)&(info->vm_regs.rbx);
*reg_len = 8;
- break;
+ return GPR_REGISTER;
case XED_REG_EBX:
*v3_reg = (addr_t)&(info->vm_regs.rbx);
*reg_len = 4;
- break;
+ return GPR_REGISTER;
case XED_REG_BX:
*v3_reg = (addr_t)&(info->vm_regs.rbx);
*reg_len = 2;
- break;
+ return GPR_REGISTER;
case XED_REG_BH:
*v3_reg = (addr_t)(&(info->vm_regs.rbx)) + 1;
*reg_len = 1;
- break;
+ return GPR_REGISTER;
case XED_REG_BL:
*v3_reg = (addr_t)&(info->vm_regs.rbx);
*reg_len = 1;
- break;
+ return GPR_REGISTER;
case XED_REG_RSP:
*v3_reg = (addr_t)&(info->vm_regs.rsp);
*reg_len = 8;
- break;
+ return GPR_REGISTER;
case XED_REG_ESP:
*v3_reg = (addr_t)&(info->vm_regs.rsp);
*reg_len = 4;
- break;
+ return GPR_REGISTER;
case XED_REG_SP:
*v3_reg = (addr_t)&(info->vm_regs.rsp);
*reg_len = 2;
- break;
+ return GPR_REGISTER;
case XED_REG_SPL:
*v3_reg = (addr_t)&(info->vm_regs.rsp);
*reg_len = 1;
- break;
+ return GPR_REGISTER;
case XED_REG_RBP:
*v3_reg = (addr_t)&(info->vm_regs.rbp);
*reg_len = 8;
- break;
+ return GPR_REGISTER;
case XED_REG_EBP:
*v3_reg = (addr_t)&(info->vm_regs.rbp);
*reg_len = 4;
- break;
+ return GPR_REGISTER;
case XED_REG_BP:
*v3_reg = (addr_t)&(info->vm_regs.rbp);
*reg_len = 2;
- break;
+ return GPR_REGISTER;
case XED_REG_BPL:
*v3_reg = (addr_t)&(info->vm_regs.rbp);
*reg_len = 1;
- break;
+ return GPR_REGISTER;
case XED_REG_RSI:
*v3_reg = (addr_t)&(info->vm_regs.rsi);
*reg_len = 8;
- break;
+ return GPR_REGISTER;
case XED_REG_ESI:
*v3_reg = (addr_t)&(info->vm_regs.rsi);
*reg_len = 4;
- break;
+ return GPR_REGISTER;
case XED_REG_SI:
*v3_reg = (addr_t)&(info->vm_regs.rsi);
*reg_len = 2;
- break;
+ return GPR_REGISTER;
case XED_REG_SIL:
*v3_reg = (addr_t)&(info->vm_regs.rsi);
*reg_len = 1;
- break;
+ return GPR_REGISTER;
case XED_REG_RDI:
*v3_reg = (addr_t)&(info->vm_regs.rdi);
*reg_len = 8;
- break;
+ return GPR_REGISTER;
case XED_REG_EDI:
*v3_reg = (addr_t)&(info->vm_regs.rdi);
*reg_len = 4;
- break;
+ return GPR_REGISTER;
case XED_REG_DI:
*v3_reg = (addr_t)&(info->vm_regs.rdi);
*reg_len = 2;
- break;
+ return GPR_REGISTER;
case XED_REG_DIL:
*v3_reg = (addr_t)&(info->vm_regs.rdi);
*reg_len = 1;
- break;
+ return GPR_REGISTER;
/*
case XED_REG_RIP:
*v3_reg = (addr_t)&(info->rip);
*reg_len = 8;
- break;
+ return CTRL_REGISTER;
case XED_REG_EIP:
*v3_reg = (addr_t)&(info->rip);
*reg_len = 4;
- break;
+ return CTRL_REGISTER;
case XED_REG_IP:
*v3_reg = (addr_t)&(info->rip);
*reg_len = 2;
- break;
+ return CTRL_REGISTER;
case XED_REG_FLAGS:
*v3_reg = (addr_t)&(info->ctrl_regs.rflags);
*reg_len = 2;
- break;
+ return CTRL_REGISTER;
case XED_REG_EFLAGS:
*v3_reg = (addr_t)&(info->ctrl_regs.rflags);
*reg_len = 4;
- break;
+ return CTRL_REGISTER;
case XED_REG_RFLAGS:
*v3_reg = (addr_t)&(info->ctrl_regs.rflags);
*reg_len = 8;
- break;
+ return CTRL_REGISTER;
case XED_REG_CR0:
*v3_reg = (addr_t)&(info->ctrl_regs.cr0);
*reg_len = 4;
- break;
+ return CTRL_REGISTER;
case XED_REG_CR2:
*v3_reg = (addr_t)&(info->ctrl_regs.cr2);
*reg_len = 4;
- break;
+ return CTRL_REGISTER;
case XED_REG_CR3:
*v3_reg = (addr_t)&(info->ctrl_regs.cr3);
*reg_len = 4;
- break;
+ return CTRL_REGISTER;
case XED_REG_CR4:
*v3_reg = (addr_t)&(info->ctrl_regs.cr4);
*reg_len = 4;
- break;
+ return CTRL_REGISTER;
case XED_REG_CR8:
*v3_reg = (addr_t)&(info->ctrl_regs.cr8);
*reg_len = 4;
- break;
+ return CTRL_REGISTER;
case XED_REG_CR1:
case XED_REG_CR5:
* SEGMENT REGS
*/
case XED_REG_CS:
- *v3_reg = (addr_t)&(info->segments.cs.selector);
- *reg_len = 16;
- break;
+ *v3_reg = (addr_t)&(info->segments.cs);
+ return SEGMENT_REGISTER;
case XED_REG_DS:
- *v3_reg = (addr_t)&(info->segments.ds.selector);
- *reg_len = 16;
- break;
+ *v3_reg = (addr_t)&(info->segments.ds);
+ return SEGMENT_REGISTER;
case XED_REG_ES:
- *v3_reg = (addr_t)&(info->segments.es.selector);
- *reg_len = 16;
- break;
+ *v3_reg = (addr_t)&(info->segments.es);
+ return SEGMENT_REGISTER;
case XED_REG_SS:
- *v3_reg = (addr_t)&(info->segments.ss.selector);
- *reg_len = 16;
- break;
+ *v3_reg = (addr_t)&(info->segments.ss);
+ return SEGMENT_REGISTER;
case XED_REG_FS:
- *v3_reg = (addr_t)&(info->segments.fs.selector);
- *reg_len = 16;
- break;
+ *v3_reg = (addr_t)&(info->segments.fs);
+ return SEGMENT_REGISTER;
case XED_REG_GS:
- *v3_reg = (addr_t)&(info->segments.fs.selector);
- *reg_len = 16;
- break;
+ *v3_reg = (addr_t)&(info->segments.gs);
+ return SEGMENT_REGISTER;
case XED_REG_GDTR: