#ifndef __VNET_CORE_H__
#define __VNET_CORE_H__
-#include <palacios/vmm.h>
#include <palacios/vmm_ethernet.h>
-#include <vnet/vnet_host_interface.h>
+#include <vnet/vnet_host.h>
+#include <vnet/vnet_vmm.h>
#define MAC_NOSET 0
#define MAC_ANY 11
#define VNET_HASH_SIZE 17
-extern int v3_vnet_debug;
-
struct v3_vnet_route {
uint8_t src_mac[ETH_ALEN];
uint8_t dst_mac[ETH_ALEN];
--- /dev/null
+/*
+ Copyright (c) 2002, 2004, Christopher Clark
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the original author; nor the names of any contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/* Modifications made by Lei Xia <lxia@northwestern.edu> */
+
+
+#ifndef __VNET_HASHTABLE_H__
+#define __VNET_HASHTABLE_H__
+
+struct hashtable;
+
+/* Example of use:
+ *
+ * struct hashtable *h;
+ * struct some_key *k;
+ * struct some_value *v;
+ *
+ * static uint_t hash_from_key_fn( addr_t k );
+ * static int keys_equal_fn ( addr_t key1, addr_t key2 );
+ *
+ * h = vnet_create_htable(16, hash_from_key_fn, keys_equal_fn);
+ * k = (struct some_key *) malloc(sizeof(struct some_key));
+ * v = (struct some_value *) malloc(sizeof(struct some_value));
+ *
+ * (initialise k and v to suitable values)
+ *
+ * if (! vnet_htable_insert(h, (addr_t)k, (addr_t)v) )
+ * { exit(-1); }
+ *
+ * if (NULL == (found = vnet_htable_search(h, (addr_t)k) ))
+ * { printf("not found!"); }
+ *
+ * if (NULL == (found = vnet_htable_remove(h, (addr_t)k, 0) ))
+ * { printf("Not found\n"); }
+ *
+ */
+
+/* Macros may be used to define type-safe(r) hashtable access functions, with
+ * methods specialized to take known key and value types as parameters.
+ *
+ * Example:
+ *
+ * Insert this at the start of your file:
+ *
+ * DEFINE_HASHTABLE_INSERT(insert_some, struct some_key, struct some_value);
+ * DEFINE_HASHTABLE_SEARCH(search_some, struct some_key, struct some_value);
+ * DEFINE_HASHTABLE_REMOVE(remove_some, struct some_key, struct some_value);
+ *
+ * This defines the functions 'insert_some', 'search_some' and 'remove_some'.
+ * These operate just like vnet_htable_insert etc., with the same parameters,
+ * but their function signatures have 'struct some_key *' rather than
+ * 'addr_t', and hence can generate compile time errors if your program is
+ * supplying incorrect data as a key (and similarly for value).
+ *
+ * Note that the hash and key equality functions passed to vnet_create_htable
+ * still take 'addr_t' parameters instead of 'some key *'. This shouldn't be
+ * a difficult issue as they're only defined and passed once, and the other
+ * functions will ensure that only valid keys are supplied to them.
+ *
+ * The cost for this checking is increased code size and runtime overhead
+ * - if performance is important, it may be worth switching back to the
+ * unsafe methods once your program has been debugged with the safe methods.
+ * This just requires switching to some simple alternative defines - eg:
+ * #define insert_some vnet_htable_insert
+ *
+ */
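The DEFINE_HASHTABLE_* macros referred to above are not declared in this header. A minimal sketch of one such wrapper, written against the vnet_htable_* API declared below (the macro name and the some_key/some_value types are purely illustrative), could look like this:

    /* Illustrative only: a type-safe(r) insert wrapper over vnet_htable_insert().
     * The key/value structs are whatever the caller actually stores. */
    #define DEFINE_VNET_HTABLE_INSERT(fnname, keytype, valuetype)              \
        static inline int fnname(struct hashtable * h, keytype * k, valuetype * v) { \
            return vnet_htable_insert(h, (addr_t)(k), (addr_t)(v));            \
        }

    /* e.g. DEFINE_VNET_HTABLE_INSERT(insert_some, struct some_key, struct some_value); */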
+
+/* These cannot be inlined because they are referenced as fn ptrs */
+ulong_t vnet_hash_long(ulong_t val, uint_t bits);
+ulong_t vnet_hash_buffer(uchar_t * msg, uint_t length);
+
+struct hashtable * vnet_create_htable(uint_t min_size,
+ uint_t (*hashfunction) (addr_t key),
+ int (*key_eq_fn) (addr_t key1, addr_t key2));
+
+void vnet_free_htable(struct hashtable * htable, int free_values, int free_keys);
+
+/*
+ * returns non-zero for successful insertion
+ *
+ * This function will cause the table to expand if the insertion would take
+ * the ratio of entries to table size over the maximum load factor.
+ *
+ * This function does not check for repeated insertions with a duplicate key.
+ * The value returned when using a duplicate key is undefined -- when
+ * the hashtable changes size, the order of retrieval of duplicate key
+ * entries is reversed.
+ * If in doubt, remove before insert.
+ */
+int vnet_htable_insert(struct hashtable * htable, addr_t key, addr_t value);
+
+// returns the value associated with the key, or NULL if none found
+addr_t vnet_htable_search(struct hashtable * htable, addr_t key);
+
+// returns the value associated with the key, or NULL if none found
+addr_t vnet_htable_remove(struct hashtable * htable, addr_t key, int free_key);
+
+uint_t vnet_htable_count(struct hashtable * htable);
+
+
+#endif
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/*
+ * Copyright (c) 2002, Christopher Clark
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of the original author; nor the names of any contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+ * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
--- /dev/null
+/*
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National
+ * Science Foundation and the Department of Energy.
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico. You can find out more at
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2011, Lei Xia <lxia@northwestern.edu>
+ * Copyright (c) 2011, The V3VEE Project <http://www.v3vee.org>
+ * All rights reserved.
+ *
+ * Author: Lei Xia <lxia@northwestern.edu>
+ *
+ * This is free software. You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
+
+#ifndef __VNET_HOST_H__
+#define __VNET_HOST_H__
+
+#include <vnet/vnet_vmm.h>
+
+struct vnet_thread {
+ void * host_thread;
+};
+
+struct vnet_timer {
+ void * host_timer;
+};
+
+typedef addr_t vnet_lock_t;
+
+
+
+struct vnet_host_hooks {
+ void *(*thread_start)(int (*fn)(void * arg), void * arg, char * thread_name);
+ void (*thread_sleep)(int timeout);
+ void (*thread_wakeup)(void * thread);
+ void (*thread_stop)(void * thread);
+ int (*thread_should_stop)();
+
+ void *(*timer_create)(unsigned long interval, void (* timer_fun)(void * priv_data), void * data);
+ void (*timer_del)(void * timer);
+ int (*timer_start)(void * timer);
+ int (*timer_stop)(void * timer);
+ void (*timer_reset)(void * timer, unsigned long interval);
+
+ void (*udelay)(unsigned long usecs);
+
+ /* duplicate part from os_hooks */
+ void (*yield_cpu)(void);
+ void (*print)(const char * format, ...)
+ __attribute__ ((format (printf, 1, 2)));
+
+ void *(*allocate_pages)(int num_pages, unsigned int alignment);
+ void (*free_pages)(void * page, int num_pages);
+
+ void *(*malloc)(unsigned int size);
+ void (*free)(void * addr);
+
+ void *(*paddr_to_vaddr)(void * addr);
+ void *(*vaddr_to_paddr)(void * addr);
+
+ void *(*mutex_alloc)(void);
+ void (*mutex_free)(void * mutex);
+ void (*mutex_lock)(void * mutex, int must_spin);
+ void (*mutex_unlock)(void * mutex);
+};
+
+
+
+extern struct vnet_host_hooks * host_hooks;
+
+
+/* MEMORY ALLOCATE/DEALLOCATE */
+
+#define PAGE_SIZE_4KB 4096
+
+/* 4KB-aligned */
+static inline void * Vnet_AllocPages(int num_pages){
+ if ((host_hooks) && host_hooks->allocate_pages) {
+ return host_hooks->allocate_pages(num_pages, PAGE_SIZE_4KB);
+ }
+
+ return NULL;
+}
+
+static inline void Vnet_FreePages(void * page, int num_pages){
+ if ((host_hooks) && host_hooks->free_pages) {
+ host_hooks->free_pages(page, num_pages);
+ }
+}
+
+static inline void * Vnet_VAddr(void * addr) {
+ if ((host_hooks) && host_hooks->paddr_to_vaddr){
+ return host_hooks->paddr_to_vaddr(addr);
+ }
+
+ return NULL;
+}
+
+static inline void * Vnet_PAddr(void *addr) {
+ if ((host_hooks) && host_hooks->vaddr_to_paddr) {
+ return host_hooks->vaddr_to_paddr(addr);
+ }
+
+ return NULL;
+}
+
+static inline void * Vnet_Malloc(uint32_t size){
+ if ((host_hooks) && host_hooks->malloc) {
+ return host_hooks->malloc(size);
+ }
+
+ return NULL;
+}
+
+static inline void Vnet_Free(void * addr){
+ if ((host_hooks) && host_hooks->free) {
+ host_hooks->free(addr);
+ }
+}
+
+
+static inline void Vnet_Yield(){
+ if ((host_hooks) && (host_hooks)->yield_cpu) {
+ host_hooks->yield_cpu();
+ }
+}
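A note on the page wrappers above: as in vnet_core.c, Vnet_AllocPages() hands back a physical address, so callers convert it with Vnet_VAddr() before use and convert back with Vnet_PAddr() when freeing. A minimal sketch of that round trip, with illustrative helper names:

    /* Illustrative only: allocate and release a multi-page buffer the way the
     * packet queue code in vnet_core.c does. */
    static inline void * example_alloc_buf(int num_pages) {
        /* physical -> virtual before the buffer is touched */
        return Vnet_VAddr(Vnet_AllocPages(num_pages));
    }

    static inline void example_free_buf(void * buf, int num_pages) {
        if (buf != NULL) {
            /* virtual -> physical again for the free */
            Vnet_FreePages(Vnet_PAddr(buf), num_pages);
        }
    }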
+
+/* THREAD FUNCTIONS */
+struct vnet_thread * vnet_start_thread(int (*func)(void *), void *arg, char * name);
+
+static inline void vnet_thread_sleep(long timeout){
+ if((host_hooks) && host_hooks->thread_sleep){
+ host_hooks->thread_sleep(timeout);
+ }
+}
+
+static inline void vnet_thread_wakeup(struct vnet_thread * thread){
+ if((host_hooks) && host_hooks->thread_wakeup){
+ host_hooks->thread_wakeup(thread->host_thread);
+ }
+}
+
+
+static inline void vnet_thread_stop(struct vnet_thread * thread){
+ if((host_hooks) && host_hooks->thread_stop){
+ host_hooks->thread_stop(thread->host_thread);
+ }
+}
+
+static inline int vnet_thread_should_stop(){
+ if((host_hooks) && host_hooks->thread_should_stop){
+ return host_hooks->thread_should_stop();
+ }
+
+ return 0;
+}
+
+static inline void vnet_udelay(unsigned long usecs){
+ if((host_hooks) && host_hooks->udelay){
+ host_hooks->udelay(usecs);
+ }
+}
+
+/* TIMER FUNCTIONS */
+/* interval, in jiffies */
+struct vnet_timer * vnet_create_timer(unsigned long interval, void (* timer_fun)(void * priv_data), void * priv_data);
+
+static inline void vnet_del_timer(struct vnet_timer * timer){
+ if((host_hooks) && host_hooks->timer_del){
+ host_hooks->timer_del(timer->host_timer);
+ Vnet_Free(timer);
+ }
+}
+
+static inline int vnet_start_timer(struct vnet_timer * timer){
+ if((host_hooks) && host_hooks->timer_start){
+ return host_hooks->timer_start(timer->host_timer);
+ }
+
+ return -1;
+}
+
+static inline int vnet_stop_timer(struct vnet_timer * timer){
+ if((host_hooks) && host_hooks->timer_stop){
+ return host_hooks->timer_stop(timer->host_timer);
+ }
+
+ return -1;
+}
+
+static inline void vnet_reset_timer(struct vnet_timer * timer, unsigned long new_interval){
+ if((host_hooks) && host_hooks->timer_reset){
+ host_hooks->timer_reset(timer->host_timer, new_interval);
+ }
+}
+
+
+
+#define Vnet_Print(level, fmt, args...) \
+ do { \
+ extern int vnet_debug; \
+ if(level <= vnet_debug) { \
+ extern struct vnet_host_hooks * host_hooks; \
+ if ((host_hooks) && (host_hooks)->print) { \
+ (host_hooks)->print((fmt), ##args); \
+ } \
+ } \
+ } while (0)
+
+
+#define Vnet_Debug(fmt, args...) \
+ do { \
+ extern struct vnet_host_hooks * host_hooks; \
+ if ((host_hooks) && (host_hooks)->print) { \
+ (host_hooks)->print((fmt), ##args); \
+ } \
+ } while (0)
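A usage note on the two macros above: Vnet_Print() filters at run time against the global vnet_debug level (defined in vnet_core.c), while Vnet_Debug() is additionally compiled to a no-op in vnet_core.c when V3_CONFIG_DEBUG_VNET is not set. A small sketch of the intended calling pattern, with a hypothetical helper name:

    /* Illustrative only: vnet_debug defaults to 0, so level 0 messages are
     * emitted whenever a print hook exists; higher levels are more verbose. */
    static inline void example_report_drop(int pkt_size) {
        Vnet_Print(0, "VNET/P: dropping packet\n");                /* default level          */
        Vnet_Print(2, "VNET/P: dropped pkt, size %d\n", pkt_size); /* only if vnet_debug >= 2 */
        Vnet_Debug("VNET/P: drop path reached\n");                 /* debug builds only       */
    }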
+
+
+
+
+/* Lock Utilities */
+int vnet_lock_init(vnet_lock_t * lock);
+
+static inline void vnet_lock_deinit(vnet_lock_t * lock) {
+ host_hooks->mutex_free((void *)*lock);
+ *lock = 0;
+}
+
+static inline void vnet_lock(vnet_lock_t lock) {
+ host_hooks->mutex_lock((void *)lock, 0);
+}
+
+static inline void vnet_unlock(vnet_lock_t lock) {
+ host_hooks->mutex_unlock((void *)lock);
+}
+
+static inline addr_t vnet_lock_irqsave(vnet_lock_t lock) {
+ addr_t irq_state = v3_irq_save();
+ host_hooks->mutex_lock((void *)lock, 1);
+ return irq_state;
+}
+
+
+static inline void vnet_unlock_irqrestore(vnet_lock_t lock, addr_t irq_state) {
+ host_hooks->mutex_unlock((void *)lock);
+ v3_irq_restore(irq_state);
+}
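The helpers above follow the v3_lock_* pattern: vnet_lock_init() allocates the host mutex, and vnet_lock_irqsave() returns the saved interrupt state, which must be handed back to vnet_unlock_irqrestore(). A minimal sketch of the pattern used throughout vnet_core.c (the counter structure is illustrative only):

    /* Illustrative only: protect a shared counter the same way vnet_core.c
     * protects its packet queue.  vnet_lock_init() must have been called
     * once on st->lock before example_bump() runs. */
    struct example_state {
        vnet_lock_t lock;
        int counter;
    };

    static inline int example_bump(struct example_state * st) {
        addr_t flags;
        int val;

        flags = vnet_lock_irqsave(st->lock);     /* take the mutex with irqs saved */
        val = ++st->counter;
        vnet_unlock_irqrestore(st->lock, flags); /* release and restore irq state  */

        return val;
    }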
+
+
+
+
+
+void init_vnet(struct vnet_host_hooks * hooks);
+
+
+#endif
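For context on how this header is meant to be consumed: a host embedding fills in a struct vnet_host_hooks with its own primitives and passes it to init_vnet() before any other VNET/P call. A minimal sketch under that assumption; every example_* name is a placeholder for what a real host port would supply:

    #include <vnet/vnet_host.h>
    #include <vnet/vnet.h>   /* v3_init_vnet(), invoked by init_vnet() */

    /* Placeholder host primitives; a real port wires these to its kernel
     * services (e.g. kmalloc/kfree/printk on a Linux host). */
    static void * example_malloc(unsigned int size) { (void)size; return NULL; }
    static void   example_free(void * addr)         { (void)addr; }

    static struct vnet_host_hooks example_hooks = {
        .malloc = example_malloc,
        .free   = example_free,
        /* .print, .mutex_*, .thread_*, .timer_* and the paging hooks must also
         * be filled in; the lock helpers above assume the mutex hooks are
         * non-NULL, so those are effectively mandatory. */
    };

    static void example_vnet_bringup(void) {
        init_vnet(&example_hooks);   /* stores the hook table, then calls v3_init_vnet() */
    }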
+
+
+++ /dev/null
-/*
- * This file is part of the Palacios Virtual Machine Monitor developed
- * by the V3VEE Project with funding from the United States National
- * Science Foundation and the Department of Energy.
- *
- * The V3VEE Project is a joint project between Northwestern University
- * and the University of New Mexico. You can find out more at
- * http://www.v3vee.org
- *
- * Copyright (c) 2011, Lei Xia <lxia@northwestern.edu>
- * Copyright (c) 2011, The V3VEE Project <http://www.v3vee.org>
- * All rights reserved.
- *
- * Author: Lei Xia <lxia@northwestern.edu>
- *
- * This is free software. You are permitted to use,
- * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
- */
-
-#ifndef __VNET_HOST_INTERFACE_H__
-#define __VNET_HOST_INTERFACE_H__
-
-struct vnet_thread {
- void * host_thread;
- void * data;
-};
-
-struct vnet_timer {
- void * host_timer;
- void * data;
-};
-
-struct vnet_thread * vnet_thread_create(int (*func)(void *), void *arg, char * name);
-void vnet_thread_sleep(int cond);
-void vnet_thread_wakeup(struct vnet_thread *);
-void vnet_thread_kill(struct vnet_thread *);
-void vnet_thread_stop(struct vnet_thread *);
-void vnet_thread_continue(struct vnet_thread *);
-
-void vnet_udelay(unsigned long usecs);
-
-// I know there is timer in palacios, but it has to be binded to specific VM, and the granularity is not
-// guaranteed
-// I need a timer that is global, not related to any specific VM, and also fine-granularity
-struct vnet_timer * vnet_create_timer(int interval /*in us*/, void (*timer_fun)(uint64_t eclipsed_cycles, void * priv_data), void * pri_data);
-int vnet_del_timer(struct vnet_timer *);
-int vnet_start_timer(struct vnet_timer *);
-int vnet_stop_timer(struct vnet_timer *);
-
-
-#endif
-
-
--- /dev/null
+/*
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National
+ * Science Foundation and the Department of Energy.
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico. You can find out more at
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2011, Lei Xia <lxia@northwestern.edu>
+ * Copyright (c) 2011, The V3VEE Project <http://www.v3vee.org>
+ * All rights reserved.
+ *
+ * Author: Lei Xia <lxia@northwestern.edu>
+ *
+ * This is free software. You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
+
+#ifndef __VNET_VMM_H__
+#define __VNET_VMM_H__
+
+#include <palacios/vmm.h>
+#include <palacios/vmm_string.h>
+#include <palacios/vmm_sprintf.h>
+#include <palacios/vmm_lowlevel.h>
+
+
+
+#endif
+
+
handle_pkt_tx(&(virtio->vm->cores[0]), virtio);
v3_yield(NULL);
}else {
- vnet_thread_sleep(0);
+ vnet_thread_sleep(-1);
}
}
ops->frontend_data = net_state;
memcpy(ops->fnt_mac, virtio->mac, ETH_ALEN);
- net_state->poll_thread = vnet_thread_create(virtio_tx_flush, (void *)net_state, "Virtio_Poll");
+ net_state->poll_thread = vnet_start_thread(virtio_tx_flush, (void *)net_state, "Virtio_Poll");
return 0;
}
#include <palacios/vmx.h>
#endif
-#ifdef V3_CONFIG_VNET
-#include <vnet/vnet.h>
-#endif
-
v3_cpu_arch_t v3_cpu_types[V3_CONFIG_MAX_CPUS];
struct v3_os_hooks * os_hooks = NULL;
#endif
-#ifdef V3_CONFIG_VNET
- v3_init_vnet();
-#endif
-
#ifdef V3_CONFIG_MULTITHREAD_OS
if ((hooks) && (hooks->call_on_cpu)) {
#endif
-#ifdef V3_CONFIG_VNET
- v3_deinit_vnet();
-#endif
-
#ifdef V3_CONFIG_MULTITHREAD_OS
if ((os_hooks) && (os_hooks->call_on_cpu)) {
for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
-obj-$(V3_CONFIG_VNET) += vnet_core.o
+obj-$(V3_CONFIG_VNET) += vnet_core.o vnet_host.o vnet_hashtable.o
*/
#include <vnet/vnet.h>
-#include <palacios/vm_guest_mem.h>
-#include <palacios/vmm_lock.h>
-#include <palacios/vmm_queue.h>
-#include <palacios/vmm_sprintf.h>
-#include <palacios/vmm_ethernet.h>
+#include <vnet/vnet_hashtable.h>
+#include <vnet/vnet_host.h>
+#include <vnet/vnet_vmm.h>
#ifndef V3_CONFIG_DEBUG_VNET
-#undef PrintDebug
-#define PrintDebug(fmt, args...)
+#undef Vnet_Debug
+#define Vnet_Debug(fmt, args...)
#endif
-int v3_net_debug = 0;
+int vnet_debug = 0;
struct eth_hdr {
uint8_t dst_mac[ETH_ALEN];
struct queue_entry buf[VNET_QUEUE_SIZE];
int head, tail;
int count;
- v3_lock_t lock;
+ vnet_lock_t lock;
};
static struct {
struct vnet_brg_dev * bridge;
- v3_lock_t lock;
+ vnet_lock_t lock;
struct vnet_stat stats;
struct vnet_thread * pkt_flush_thread;
char str[50];
mac_to_string(route->src_mac, str);
- PrintDebug("Src Mac (%s), src_qual (%d)\n",
+ Vnet_Debug("Src Mac (%s), src_qual (%d)\n",
str, route->src_mac_qual);
mac_to_string(route->dst_mac, str);
- PrintDebug("Dst Mac (%s), dst_qual (%d)\n",
+ Vnet_Debug("Dst Mac (%s), dst_qual (%d)\n",
str, route->dst_mac_qual);
- PrintDebug("Src dev id (%d), src type (%d)",
+ Vnet_Debug("Src dev id (%d), src type (%d)",
route->src_id,
route->src_type);
- PrintDebug("Dst dev id (%d), dst type (%d)\n",
+ Vnet_Debug("Dst dev id (%d), dst type (%d)\n",
route->dst_id,
route->dst_type);
}
struct vnet_route_info *route;
int i = 0;
- PrintDebug("\n========Dump routes starts ============\n");
+ Vnet_Debug("\n========Dump routes starts ============\n");
list_for_each_entry(route, &(vnet_state.routes), node) {
- PrintDebug("\nroute %d:\n", i++);
+ Vnet_Debug("\nroute %d:\n", i++);
print_route(&(route->route_def));
if (route->route_def.dst_type == LINK_INTERFACE) {
- PrintDebug("dst_dev (%p), dst_dev_id (%d), dst_dev_ops(%p), dst_dev_data (%p)\n",
+ Vnet_Debug("dst_dev (%p), dst_dev_id (%d), dst_dev_ops(%p), dst_dev_data (%p)\n",
route->dst_dev,
route->dst_dev->dev_id,
(void *)&(route->dst_dev->dev_ops),
}
}
- PrintDebug("\n========Dump routes end ============\n");
+ Vnet_Debug("\n========Dump routes end ============\n");
}
#endif
static inline uint_t hash_fn(addr_t hdr_ptr) {
uint8_t * hdr_buf = (uint8_t *)hdr_ptr;
- return v3_hash_buffer(hdr_buf, VNET_HASH_SIZE);
+ return vnet_hash_buffer(hdr_buf, VNET_HASH_SIZE);
}
static inline int hash_eq(addr_t key1, addr_t key2) {
static int add_route_to_cache(const struct v3_vnet_pkt * pkt, struct route_list * routes) {
memcpy(routes->hash_buf, pkt->hash_buf, VNET_HASH_SIZE);
- if (v3_htable_insert(vnet_state.route_cache, (addr_t)routes->hash_buf, (addr_t)routes) == 0) {
+ if (vnet_htable_insert(vnet_state.route_cache, (addr_t)routes->hash_buf, (addr_t)routes) == 0) {
PrintError("VNET/P Core: Failed to insert new route entry to the cache\n");
return -1;
}
}
static int clear_hash_cache() {
- v3_free_htable(vnet_state.route_cache, 1, 1);
- vnet_state.route_cache = v3_create_htable(0, &hash_fn, &hash_eq);
+ vnet_free_htable(vnet_state.route_cache, 1, 1);
+ vnet_state.route_cache = vnet_create_htable(0, &hash_fn, &hash_eq);
return 0;
}
static int look_into_cache(const struct v3_vnet_pkt * pkt,
struct route_list ** routes) {
- *routes = (struct route_list *)v3_htable_search(vnet_state.route_cache, (addr_t)(pkt->hash_buf));
+ *routes = (struct route_list *)vnet_htable_search(vnet_state.route_cache, (addr_t)(pkt->hash_buf));
return 0;
}
struct vnet_route_info * new_route = NULL;
unsigned long flags;
- new_route = (struct vnet_route_info *)V3_Malloc(sizeof(struct vnet_route_info));
+ new_route = (struct vnet_route_info *)Vnet_Malloc(sizeof(struct vnet_route_info));
memset(new_route, 0, sizeof(struct vnet_route_info));
#ifdef V3_CONFIG_DEBUG_VNET
- PrintDebug("VNET/P Core: add_route_entry:\n");
+ Vnet_Debug("VNET/P Core: add_route_entry:\n");
print_route(&route);
#endif
}
- flags = v3_lock_irqsave(vnet_state.lock);
+ flags = vnet_lock_irqsave(vnet_state.lock);
list_add(&(new_route->node), &(vnet_state.routes));
clear_hash_cache();
- v3_unlock_irqrestore(vnet_state.lock, flags);
+ vnet_unlock_irqrestore(vnet_state.lock, flags);
#ifdef V3_CONFIG_DEBUG_VNET
struct vnet_route_info * route = NULL;
unsigned long flags;
- flags = v3_lock_irqsave(vnet_state.lock);
+ flags = vnet_lock_irqsave(vnet_state.lock);
list_for_each_entry(route, &(vnet_state.routes), node) {
if((route->route_def.dst_type == LINK_INTERFACE &&
list_del(&(route->node));
list_del(&(route->match_node));
- V3_Free(route);
+ Vnet_Free(route);
}
}
- v3_unlock_irqrestore(vnet_state.lock, flags);
+ vnet_unlock_irqrestore(vnet_state.lock, flags);
}
/* At the end allocate a route_list
mac_to_string(hdr->src_mac, src_str);
mac_to_string(hdr->dst_mac, dst_str);
- PrintDebug("VNET/P Core: match_route. pkt: SRC(%s), DEST(%s)\n", src_str, dst_str);
+ Vnet_Debug("VNET/P Core: match_route. pkt: SRC(%s), DEST(%s)\n", src_str, dst_str);
}
#endif
}
}
- PrintDebug("VNET/P Core: match_route: Matches=%d\n", num_matches);
+ Vnet_Debug("VNET/P Core: match_route: Matches=%d\n", num_matches);
if (num_matches == 0) {
return NULL;
}
- matches = (struct route_list *)V3_Malloc(sizeof(struct route_list) +
+ matches = (struct route_list *)Vnet_Malloc(sizeof(struct route_list) +
(sizeof(struct vnet_route_info *) * num_matches));
matches->num_routes = num_matches;
int i;
int cpu = V3_Get_CPU();
- V3_Net_Print(2, "VNET/P Core: cpu %d: pkt (size %d, src_id:%d, src_type: %d, dst_id: %d, dst_type: %d)\n",
+ Vnet_Print(2, "VNET/P Core: cpu %d: pkt (size %d, src_id:%d, src_type: %d, dst_id: %d, dst_type: %d)\n",
cpu, pkt->size, pkt->src_id,
pkt->src_type, pkt->dst_id, pkt->dst_type);
- if(v3_net_debug >= 4){
+ if(vnet_debug >= 4){
v3_hexdump(pkt->data, pkt->size, NULL, 0);
}
- flags = v3_lock_irqsave(vnet_state.lock);
+ flags = vnet_lock_irqsave(vnet_state.lock);
vnet_state.stats.rx_bytes += pkt->size;
vnet_state.stats.rx_pkts++;
look_into_cache(pkt, &matched_routes);
if (matched_routes == NULL) {
- PrintDebug("VNET/P Core: send pkt Looking into routing table\n");
+ Vnet_Debug("VNET/P Core: send pkt Looking into routing table\n");
matched_routes = match_route(pkt);
if (matched_routes) {
add_route_to_cache(pkt, matched_routes);
} else {
- PrintDebug("VNET/P Core: Could not find route for packet... discards packet\n");
- v3_unlock_irqrestore(vnet_state.lock, flags);
+ Vnet_Debug("VNET/P Core: Could not find route for packet... discarding packet\n");
+ vnet_unlock_irqrestore(vnet_state.lock, flags);
return 0; /* do we return -1 here?*/
}
}
- v3_unlock_irqrestore(vnet_state.lock, flags);
+ vnet_unlock_irqrestore(vnet_state.lock, flags);
- PrintDebug("VNET/P Core: send pkt route matches %d\n", matched_routes->num_routes);
+ Vnet_Debug("VNET/P Core: send pkt route matches %d\n", matched_routes->num_routes);
for (i = 0; i < matched_routes->num_routes; i++) {
struct vnet_route_info * route = matched_routes->routes[i];
pkt->dst_id = route->route_def.dst_id;
if (bridge == NULL) {
- V3_Net_Print(2, "VNET/P Core: No active bridge to sent data to\n");
+ Vnet_Print(2, "VNET/P Core: No active bridge to send data to\n");
continue;
}
if(bridge->brg_ops.input(bridge->vm, pkt, bridge->private_data) < 0){
- V3_Net_Print(2, "VNET/P Core: Packet not sent properly to bridge\n");
+ Vnet_Print(2, "VNET/P Core: Packet not sent properly to bridge\n");
continue;
}
vnet_state.stats.tx_bytes += pkt->size;
vnet_state.stats.tx_pkts ++;
} else if (route->route_def.dst_type == LINK_INTERFACE) {
if (route->dst_dev == NULL){
- V3_Net_Print(2, "VNET/P Core: No active device to sent data to\n");
+ Vnet_Print(2, "VNET/P Core: No active device to send data to\n");
continue;
}
if(route->dst_dev->dev_ops.input(route->dst_dev->vm, pkt, route->dst_dev->private_data) < 0) {
- V3_Net_Print(2, "VNET/P Core: Packet not sent properly\n");
+ Vnet_Print(2, "VNET/P Core: Packet not sent properly\n");
continue;
}
vnet_state.stats.tx_bytes += pkt->size;
vnet_state.stats.tx_pkts ++;
} else {
- PrintError("VNET/P Core: Wrong dst type\n");
+ Vnet_Print(0, "VNET/P Core: Wrong dst type\n");
}
}
struct vnet_queue * q = &(vnet_state.pkt_q);
uint16_t num_pages;
- flags = v3_lock_irqsave(q->lock);
+ flags = vnet_lock_irqsave(q->lock);
if (q->count >= VNET_QUEUE_SIZE){
- V3_Net_Print(1, "VNET Queue overflow!\n");
- v3_unlock_irqrestore(q->lock, flags);
+ Vnet_Print(1, "VNET Queue overflow!\n");
+ vnet_unlock_irqrestore(q->lock, flags);
return -1;
}
entry = &(q->buf[q->tail++]);
q->tail %= VNET_QUEUE_SIZE;
- v3_unlock_irqrestore(q->lock, flags);
+ vnet_unlock_irqrestore(q->lock, flags);
/* this is ugly, but should happen very unlikely */
while(entry->use);
if(entry->size_alloc < pkt->size){
if(entry->data != NULL){
- V3_FreePages(V3_PAddr(entry->data), (entry->size_alloc / PAGE_SIZE));
+ Vnet_FreePages(Vnet_PAddr(entry->data), (entry->size_alloc / PAGE_SIZE));
entry->data = NULL;
}
num_pages = 1 + (pkt->size / PAGE_SIZE);
- entry->data = V3_VAddr(V3_AllocPages(num_pages));
+ entry->data = Vnet_VAddr(Vnet_AllocPages(num_pages));
if(entry->data == NULL){
return -1;
}
vnet_tx_one_pkt(pkt, NULL);
}else {
vnet_pkt_enqueue(pkt);
- V3_Net_Print(2, "VNET/P Core: Put pkt into Queue: pkt size %d\n", pkt->size);
+ Vnet_Print(2, "VNET/P Core: Put pkt into Queue: pkt size %d\n", pkt->size);
}
return 0;
struct vnet_dev * new_dev = NULL;
unsigned long flags;
- new_dev = (struct vnet_dev *)V3_Malloc(sizeof(struct vnet_dev));
+ new_dev = (struct vnet_dev *)Vnet_Malloc(sizeof(struct vnet_dev));
if (new_dev == NULL) {
- PrintError("Malloc fails\n");
+ Vnet_Print(0, "Malloc fails\n");
return -1;
}
new_dev->vm = vm;
new_dev->dev_id = 0;
- flags = v3_lock_irqsave(vnet_state.lock);
+ flags = vnet_lock_irqsave(vnet_state.lock);
if (dev_by_mac(mac) == NULL) {
list_add(&(new_dev->node), &(vnet_state.devs));
new_dev->dev_id = ++vnet_state.num_devs;
}
- v3_unlock_irqrestore(vnet_state.lock, flags);
+ vnet_unlock_irqrestore(vnet_state.lock, flags);
/* if the device was found previosly the id should still be 0 */
if (new_dev->dev_id == 0) {
- PrintError("VNET/P Core: Device Already exists\n");
+ Vnet_Print(0, "VNET/P Core: Device Already exists\n");
return -1;
}
- PrintDebug("VNET/P Core: Add Device: dev_id %d\n", new_dev->dev_id);
+ Vnet_Debug("VNET/P Core: Add Device: dev_id %d\n", new_dev->dev_id);
return new_dev->dev_id;
}
struct vnet_dev * dev = NULL;
unsigned long flags;
- flags = v3_lock_irqsave(vnet_state.lock);
+ flags = vnet_lock_irqsave(vnet_state.lock);
dev = dev_by_id(dev_id);
if (dev != NULL){
del_routes_by_dev(dev_id);
}
- v3_unlock_irqrestore(vnet_state.lock, flags);
+ vnet_unlock_irqrestore(vnet_state.lock, flags);
- V3_Free(dev);
+ Vnet_Free(dev);
- PrintDebug("VNET/P Core: Remove Device: dev_id %d\n", dev_id);
+ Vnet_Debug("VNET/P Core: Remove Device: dev_id %d\n", dev_id);
return 0;
}
list_for_each_entry(dev, &(vnet_state.devs), node) {
list_del(&(dev->node));
- V3_Free(dev);
+ Vnet_Free(dev);
}
}
list_for_each_entry(route, &(vnet_state.routes), node) {
list_del(&(route->node));
list_del(&(route->match_node));
- V3_Free(route);
+ Vnet_Free(route);
}
}
int bridge_free = 0;
struct vnet_brg_dev * tmp_bridge = NULL;
- flags = v3_lock_irqsave(vnet_state.lock);
+ flags = vnet_lock_irqsave(vnet_state.lock);
if (vnet_state.bridge == NULL) {
bridge_free = 1;
vnet_state.bridge = (void *)1;
}
- v3_unlock_irqrestore(vnet_state.lock, flags);
+ vnet_unlock_irqrestore(vnet_state.lock, flags);
if (bridge_free == 0) {
PrintError("VNET/P Core: Bridge already set\n");
return -1;
}
- tmp_bridge = (struct vnet_brg_dev *)V3_Malloc(sizeof(struct vnet_brg_dev));
+ tmp_bridge = (struct vnet_brg_dev *)Vnet_Malloc(sizeof(struct vnet_brg_dev));
if (tmp_bridge == NULL) {
PrintError("Malloc Fails\n");
tmp_bridge->type = type;
/* make this atomic to avoid possible race conditions */
- flags = v3_lock_irqsave(vnet_state.lock);
+ flags = vnet_lock_irqsave(vnet_state.lock);
vnet_state.bridge = tmp_bridge;
- v3_unlock_irqrestore(vnet_state.lock, flags);
+ vnet_unlock_irqrestore(vnet_state.lock, flags);
return 0;
}
struct queue_entry * entry;
struct vnet_queue * q = &(vnet_state.pkt_q);
- V3_Print("VNET/P Handing Pkt Thread Starting ....\n");
+ Vnet_Print(0, "VNET/P Handling Pkt Thread Starting ....\n");
- //V3_THREAD_SLEEP();
/* we need thread sleep/wakeup in Palacios */
- while(1){
- flags = v3_lock_irqsave(q->lock);
+ while(!vnet_thread_should_stop()){
+ flags = vnet_lock_irqsave(q->lock);
if (q->count <= 0){
- v3_unlock_irqrestore(q->lock, flags);
- v3_yield(NULL);
- //V3_THREAD_SLEEP();
+ vnet_unlock_irqrestore(q->lock, flags);
+ Vnet_Yield();
}else {
q->count --;
entry = &(q->buf[q->head++]);
q->head %= VNET_QUEUE_SIZE;
- v3_unlock_irqrestore(q->lock, flags);
+ vnet_unlock_irqrestore(q->lock, flags);
/* this is ugly, but should happen very unlikely */
while(!entry->use);
/* asynchronizely release allocated memory for buffer entry here */
entry->use = 0;
- V3_Net_Print(2, "vnet_tx_flush: pkt (size %d)\n", entry->pkt.size);
+ Vnet_Print(2, "vnet_tx_flush: pkt (size %d)\n", entry->pkt.size);
}
}
+
+ return 0;
}
int v3_init_vnet() {
vnet_state.num_devs = 0;
vnet_state.num_routes = 0;
- if (v3_lock_init(&(vnet_state.lock)) == -1){
+ if (vnet_lock_init(&(vnet_state.lock)) == -1){
PrintError("VNET/P Core: Fails to initiate lock\n");
}
- vnet_state.route_cache = v3_create_htable(0, &hash_fn, &hash_eq);
+ vnet_state.route_cache = vnet_create_htable(0, &hash_fn, &hash_eq);
if (vnet_state.route_cache == NULL) {
PrintError("VNET/P Core: Fails to initiate route cache\n");
return -1;
}
- v3_lock_init(&(vnet_state.pkt_q.lock));
+ vnet_lock_init(&(vnet_state.pkt_q.lock));
- vnet_state.pkt_flush_thread = vnet_thread_create(vnet_tx_flush, NULL, "VNET_Pkts");
+ vnet_state.pkt_flush_thread = vnet_start_thread(vnet_tx_flush, NULL, "VNET_Pkts");
- PrintDebug("VNET/P Core is initiated\n");
+ Vnet_Debug("VNET/P Core is initialized\n");
return 0;
}
void v3_deinit_vnet(){
- v3_lock_deinit(&(vnet_state.lock));
+ vnet_lock_deinit(&(vnet_state.lock));
free_devices();
free_routes();
- v3_free_htable(vnet_state.route_cache, 1, 1);
- V3_Free(vnet_state.bridge);
+ vnet_free_htable(vnet_state.route_cache, 1, 1);
+ Vnet_Free(vnet_state.bridge);
}
--- /dev/null
+/*
+ Copyright (c) 2002, 2004, Christopher Clark
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the original author; nor the names of any contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/* Modifications made by Lei Xia <lxia@northwestern.edu> */
+
+#include <vnet/vnet_host.h>
+#include <vnet/vnet_vmm.h>
+#include <vnet/vnet_hashtable.h>
+
+struct hash_entry {
+ addr_t key;
+ addr_t value;
+ uint_t hash;
+ struct hash_entry * next;
+};
+
+struct hashtable {
+ uint_t table_length;
+ struct hash_entry ** table;
+ uint_t entry_count;
+ uint_t load_limit;
+ uint_t prime_index;
+ uint_t (*hash_fn) (addr_t key);
+ int (*eq_fn) (addr_t key1, addr_t key2);
+};
+
+
+
+/* HASH FUNCTIONS */
+static inline uint_t do_hash(struct hashtable * htable, addr_t key) {
+ /* Aim to protect against poor hash functions by adding logic here
+ * - logic taken from java 1.4 hashtable source */
+ uint_t i = htable->hash_fn(key);
+ i += ~(i << 9);
+ i ^= ((i >> 14) | (i << 18)); /* >>> */
+ i += (i << 4);
+ i ^= ((i >> 10) | (i << 22)); /* >>> */
+
+ return i;
+}
+
+
+/* HASH AN UNSIGNED LONG */
+/* LINUX UNSIGNED LONG HASH FUNCTION */
+#ifdef __V3_32BIT__
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define GOLDEN_RATIO_PRIME 0x9e370001UL
+#define BITS_PER_LONG 32
+#elif defined(__V3_64BIT__)
+/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
+#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
+#define BITS_PER_LONG 64
+#else
+#error Define GOLDEN_RATIO_PRIME for your wordsize.
+#endif
+
+ulong_t vnet_hash_long(ulong_t val, uint_t bits) {
+ ulong_t hash = val;
+
+#ifdef __V3_64BIT__
+ /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
+ ulong_t n = hash;
+ n <<= 18;
+ hash -= n;
+ n <<= 33;
+ hash -= n;
+ n <<= 3;
+ hash += n;
+ n <<= 3;
+ hash -= n;
+ n <<= 4;
+ hash += n;
+ n <<= 2;
+ hash += n;
+#else
+ /* On some cpus multiply is faster, on others gcc will do shifts */
+ hash *= GOLDEN_RATIO_PRIME;
+#endif
+
+ /* High bits are more random, so use them. */
+ return hash >> (BITS_PER_LONG - bits);
+}
+
+/* HASH GENERIC MEMORY BUFFER */
+/* ELF HEADER HASH FUNCTION */
+ulong_t vnet_hash_buffer(uchar_t * msg, uint_t length) {
+ ulong_t hash = 0;
+ ulong_t temp = 0;
+ uint_t i;
+
+ for (i = 0; i < length; i++) {
+ hash = (hash << 4) + *(msg + i) + i;
+ if ((temp = (hash & 0xF0000000))) {
+ hash ^= (temp >> 24);
+ }
+ hash &= ~temp;
+ }
+ return hash;
+}
+
+/* indexFor */
+static inline uint_t indexFor(uint_t table_length, uint_t hash_value) {
+ return (hash_value % table_length);
+};
+
+#define freekey(X) Vnet_Free(X)
+
+
+static void * tmp_realloc(void * old_ptr, uint_t old_size, uint_t new_size) {
+ void * new_buf = Vnet_Malloc(new_size);
+
+ if (new_buf == NULL) {
+ return NULL;
+ }
+
+ memcpy(new_buf, old_ptr, old_size);
+ Vnet_Free(old_ptr);
+
+ return new_buf;
+}
+
+
+/*
+ Credit for primes table: Aaron Krowne
+ http://br.endernet.org/~akrowne/
+ http://planetmath.org/encyclopedia/GoodHashTablePrimes.html
+*/
+static const uint_t primes[] = {
+ 53, 97, 193, 389,
+ 769, 1543, 3079, 6151,
+ 12289, 24593, 49157, 98317,
+ 196613, 393241, 786433, 1572869,
+ 3145739, 6291469, 12582917, 25165843,
+ 50331653, 100663319, 201326611, 402653189,
+ 805306457, 1610612741 };
+
+
+// this assumes that the max load factor is .65
+static const uint_t load_factors[] = {
+ 35, 64, 126, 253,
+ 500, 1003, 2002, 3999,
+ 7988, 15986, 31953, 63907,
+ 127799, 255607, 511182, 1022365,
+ 2044731, 4089455, 8178897, 16357798,
+ 32715575, 65431158, 130862298, 261724573,
+ 523449198, 1046898282 };
+
+const uint_t prime_table_length = sizeof(primes) / sizeof(primes[0]);
+
+struct hashtable * vnet_create_htable(uint_t min_size,
+ uint_t (*hash_fn) (addr_t),
+ int (*eq_fn) (addr_t, addr_t)) {
+ struct hashtable * htable;
+ uint_t prime_index;
+ uint_t size = primes[0];
+
+ /* Check requested hashtable isn't too large */
+ if (min_size > (1u << 30)) {
+ return NULL;
+ }
+
+ /* Enforce size as prime */
+ for (prime_index = 0; prime_index < prime_table_length; prime_index++) {
+ if (primes[prime_index] > min_size) {
+ size = primes[prime_index];
+ break;
+ }
+ }
+
+ htable = (struct hashtable *)Vnet_Malloc(sizeof(struct hashtable));
+
+ if (htable == NULL) {
+ return NULL; /*oom*/
+ }
+
+ htable->table = (struct hash_entry **)Vnet_Malloc(sizeof(struct hash_entry*) * size);
+
+ if (htable->table == NULL) {
+ Vnet_Free(htable);
+ return NULL; /*oom*/
+ }
+
+ memset(htable->table, 0, size * sizeof(struct hash_entry *));
+
+ htable->table_length = size;
+ htable->prime_index = prime_index;
+ htable->entry_count = 0;
+ htable->hash_fn = hash_fn;
+ htable->eq_fn = eq_fn;
+ htable->load_limit = load_factors[prime_index];
+
+ return htable;
+}
+
+
+
+static int hashtable_expand(struct hashtable * htable) {
+ /* Double the size of the table to accommodate more entries */
+ struct hash_entry ** new_table;
+ struct hash_entry * tmp_entry;
+ struct hash_entry ** entry_ptr;
+ uint_t new_size;
+ uint_t i;
+ uint_t index;
+
+ /* Check we're not hitting max capacity */
+ if (htable->prime_index == (prime_table_length - 1)) {
+ return 0;
+ }
+
+ new_size = primes[++(htable->prime_index)];
+
+ new_table = (struct hash_entry **)Vnet_Malloc(sizeof(struct hash_entry*) * new_size);
+
+ if (new_table != NULL) {
+ memset(new_table, 0, new_size * sizeof(struct hash_entry *));
+ /* This algorithm is not 'stable'. ie. it reverses the list
+ * when it transfers entries between the tables */
+
+ for (i = 0; i < htable->table_length; i++) {
+
+ while ((tmp_entry = htable->table[i]) != NULL) {
+ htable->table[i] = tmp_entry->next;
+
+ index = indexFor(new_size, tmp_entry->hash);
+
+ tmp_entry->next = new_table[index];
+
+ new_table[index] = tmp_entry;
+ }
+ }
+
+ Vnet_Free(htable->table);
+
+ htable->table = new_table;
+ } else {
+ /* Plan B: realloc instead */
+
+ //new_table = (struct hash_entry **)realloc(htable->table, new_size * sizeof(struct hash_entry *));
+ new_table = (struct hash_entry **)tmp_realloc(htable->table,
+ primes[htable->prime_index - 1] * sizeof(struct hash_entry *),
+ new_size * sizeof(struct hash_entry *));
+
+ if (new_table == NULL) {
+ (htable->prime_index)--;
+ return 0;
+ }
+
+ htable->table = new_table;
+
+ memset(&(new_table[htable->table_length]), 0,
+ (new_size - htable->table_length) * sizeof(struct hash_entry *));
+
+ for (i = 0; i < htable->table_length; i++) {
+
+ for (entry_ptr = &(new_table[i]), tmp_entry = *entry_ptr;
+ tmp_entry != NULL;
+ tmp_entry = *entry_ptr) {
+
+ index = indexFor(new_size, tmp_entry->hash);
+
+ if (i == index) {
+ entry_ptr = &(tmp_entry->next);
+ } else {
+ *entry_ptr = tmp_entry->next;
+ tmp_entry->next = new_table[index];
+ new_table[index] = tmp_entry;
+ }
+ }
+ }
+ }
+
+ htable->table_length = new_size;
+
+ htable->load_limit = load_factors[htable->prime_index];
+
+ return -1;
+}
+
+uint_t vnet_htable_count(struct hashtable * htable) {
+ return htable->entry_count;
+}
+
+int vnet_htable_insert(struct hashtable * htable, addr_t key, addr_t value) {
+ /* This method allows duplicate keys - but they shouldn't be used */
+ uint_t index;
+ struct hash_entry * new_entry;
+
+ if (++(htable->entry_count) > htable->load_limit) {
+ /* Ignore the return value. If expand fails, we should
+ * still try cramming just this value into the existing table
+ * -- we may not have memory for a larger table, but one more
+ * element may be ok. Next time we insert, we'll try expanding again.*/
+ hashtable_expand(htable);
+ }
+
+
+ new_entry = (struct hash_entry *)Vnet_Malloc(sizeof(struct hash_entry));
+
+ if (new_entry == NULL) {
+ (htable->entry_count)--;
+ return 0; /*oom*/
+ }
+
+ new_entry->hash = do_hash(htable, key);
+
+ index = indexFor(htable->table_length, new_entry->hash);
+
+ new_entry->key = key;
+ new_entry->value = value;
+
+ new_entry->next = htable->table[index];
+
+ htable->table[index] = new_entry;
+
+ return -1;
+}
+
+
+/* returns value associated with key */
+addr_t vnet_htable_search(struct hashtable * htable, addr_t key) {
+ struct hash_entry * cursor;
+ uint_t hash_value;
+ uint_t index;
+
+ hash_value = do_hash(htable, key);
+
+ index = indexFor(htable->table_length, hash_value);
+
+ cursor = htable->table[index];
+
+ while (cursor != NULL) {
+ /* Check hash value to short circuit heavier comparison */
+ if ((hash_value == cursor->hash) &&
+ (htable->eq_fn(key, cursor->key))) {
+ return cursor->value;
+ }
+
+ cursor = cursor->next;
+ }
+
+ return (addr_t)NULL;
+}
+
+/* returns value associated with key */
+addr_t vnet_htable_remove(struct hashtable * htable, addr_t key, int free_key) {
+ /* TODO: consider compacting the table when the load factor drops enough,
+ * or provide a 'compact' method. */
+
+ struct hash_entry * cursor;
+ struct hash_entry ** entry_ptr;
+ addr_t value;
+ uint_t hash_value;
+ uint_t index;
+
+ hash_value = do_hash(htable, key);
+
+ index = indexFor(htable->table_length, hash_value);
+
+ entry_ptr = &(htable->table[index]);
+ cursor = *entry_ptr;
+
+ while (cursor != NULL) {
+ /* Check hash value to short circuit heavier comparison */
+ if ((hash_value == cursor->hash) &&
+ (htable->eq_fn(key, cursor->key))) {
+
+ *entry_ptr = cursor->next;
+ htable->entry_count--;
+ value = cursor->value;
+
+ if (free_key) {
+ freekey((void *)(cursor->key));
+ }
+ Vnet_Free(cursor);
+
+ return value;
+ }
+
+ entry_ptr = &(cursor->next);
+ cursor = cursor->next;
+ }
+ return (addr_t)NULL;
+}
+
+/* destroy */
+void vnet_free_htable(struct hashtable * htable, int free_values, int free_keys) {
+ uint_t i;
+ struct hash_entry * cursor;
+ struct hash_entry **table = htable->table;
+
+ if (free_values) {
+ for (i = 0; i < htable->table_length; i++) {
+ cursor = table[i];
+
+ while (cursor != NULL) {
+ struct hash_entry * tmp;
+
+ tmp = cursor;
+ cursor = cursor->next;
+
+ if (free_keys) {
+ freekey((void *)(tmp->key));
+ }
+ Vnet_Free((void *)(tmp->value));
+ Vnet_Free(tmp);
+ }
+ }
+ } else {
+ for (i = 0; i < htable->table_length; i++) {
+ cursor = table[i];
+
+ while (cursor != NULL) {
+ struct hash_entry * tmp;
+
+ tmp = cursor;
+ cursor = cursor->next;
+
+ if (free_keys) {
+ freekey((void *)(tmp->key));
+ }
+ Vnet_Free(tmp);
+ }
+ }
+ }
+
+ Vnet_Free(htable->table);
+ Vnet_Free(htable);
+}
+
+
+/*
+ * Copyright (c) 2002, Christopher Clark
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of the original author; nor the names of any contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+ * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
--- /dev/null
+/*
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National
+ * Science Foundation and the Department of Energy.
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico. You can find out more at
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2011, Lei Xia <lxia@northwestern.edu>
+ * Copyright (c) 2011, The V3VEE Project <http://www.v3vee.org>
+ * All rights reserved.
+ *
+ * Author: Lei Xia <lxia@northwestern.edu>
+ *
+ * This is free software. You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
+
+#include <vnet/vnet_host.h>
+#include <vnet/vnet.h>
+
+struct vnet_host_hooks * host_hooks;
+
+
+int vnet_lock_init(vnet_lock_t * lock) {
+ *lock = (addr_t)(host_hooks->mutex_alloc());
+
+ if (!(*lock)) {
+ return -1;
+ }
+
+ return 0;
+}
+
+
+struct vnet_thread * vnet_start_thread(int (*func)(void *), void *arg, char * name){
+ struct vnet_thread * thread = NULL;
+
+ if ((host_hooks == NULL) || (host_hooks->thread_start == NULL)) {
+ return NULL;
+ }
+
+ thread = Vnet_Malloc(sizeof(struct vnet_thread));
+ if (thread == NULL) {
+ return NULL;
+ }
+
+ thread->host_thread = host_hooks->thread_start(func, arg, name);
+
+ if(thread->host_thread == NULL){
+ Vnet_Free(thread);
+ return NULL;
+ }
+
+ return thread;
+}
+
+
+
+struct vnet_timer * vnet_create_timer(unsigned long interval,
+ void (* timer_fun)(void * priv_data), void * priv_data){
+ struct vnet_timer * timer = NULL;
+
+ if((host_hooks) && host_hooks->timer_create){
+ timer = Vnet_Malloc(sizeof(struct vnet_timer));
+ if (timer == NULL) {
+ return NULL;
+ }
+
+ timer->host_timer = host_hooks->timer_create(interval, timer_fun, priv_data);
+ }
+
+ return timer;
+}
+
+
+void init_vnet(struct vnet_host_hooks * hooks){
+ host_hooks = hooks;
+ v3_init_vnet();
+}
+
+