1 /* Copyright (C) 2004 Christopher Clark <firstname.lastname@cl.cam.ac.uk> */
2 /* Modifications made by Jack Lange <jarusl@cs.northwestern.edu> */
3 /* (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> */
4 /* (c) 2008, The V3VEE Project <http://www.v3vee.org> */
10 #include <palacios/vmm.h>
11 #include <palacios/vmm_hashtable.h>
12 #include <palacios/vmm_string.h>
22 struct hash_entry * next;
27 struct hash_entry ** table;
31 uint_t (*hash_fn) (addr_t key);
32 int (*eq_fn) (addr_t key1, addr_t key2);
41 uint_t do_hash(struct hashtable * htable, addr_t key) {
42 /* Aim to protect against poor hash functions by adding logic here
43 * - logic taken from java 1.4 hashtable source */
44 uint_t i = htable->hash_fn(key);
46 i ^= ((i >> 14) | (i << 18)); /* >>> */
48 i ^= ((i >> 10) | (i << 22)); /* >>> */
54 /* HASH AN UNSIGNED LONG */
55 /* LINUX UNSIGHED LONG HASH FUNCTION */
57 /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
58 #define GOLDEN_RATIO_PRIME 0x9e370001UL
59 #define BITS_PER_LONG 32
60 #elif defined(__V3_64BIT__)
61 /* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
62 #define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
63 #define BITS_PER_LONG 64
65 #error Define GOLDEN_RATIO_PRIME for your wordsize.
68 ulong_t hash_long(ulong_t val, uint_t bits) {
72 /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
87 /* On some cpus multiply is faster, on others gcc will do shifts */
88 hash *= GOLDEN_RATIO_PRIME;
91 /* High bits are more random, so use them. */
92 return hash >> (BITS_PER_LONG - bits);
95 /* HASH GENERIC MEMORY BUFFER */
96 /* ELF HEADER HASH FUNCTION */
97 ulong_t hash_buffer(uchar_t * msg, uint_t length) {
102 for (i = 0; i < length; i++) {
103 hash = (hash << 4) + *(msg + i) + i;
104 if ((temp = (hash & 0xF0000000))) {
105 hash ^= (temp >> 24);
114 /*****************************************************************************/
116 static inline uint_t indexFor(uint_t table_length, uint_t hash_value) {
117 return (hash_value % table_length);
120 /* Only works if table_length == 2^N */
122 static inline uint_t indexFor(uint_t table_length, uint_t hashvalue)
124 return (hashvalue & (table_length - 1u));
128 /*****************************************************************************/
/* freekey() releases a key pointer on remove/destroy. Swap to the
 * commented-out no-op form below if keys are not heap-allocated. */
#define freekey(X) V3_Free(X)
/*define freekey(X) ; */
133 static void * tmp_realloc(void * old_ptr, uint_t old_size, uint_t new_size) {
134 void * new_buf = V3_Malloc(new_size);
136 if (new_buf == NULL) {
140 memcpy(new_buf, old_ptr, old_size);
148 Credit for primes table: Aaron Krowne
149 http://br.endernet.org/~akrowne/
150 http://planetmath.org/encyclopedia/GoodHashTablePrimes.html
152 static const uint_t primes[] = {
154 769, 1543, 3079, 6151,
155 12289, 24593, 49157, 98317,
156 196613, 393241, 786433, 1572869,
157 3145739, 6291469, 12582917, 25165843,
158 50331653, 100663319, 201326611, 402653189,
159 805306457, 1610612741 };
162 const uint_t prime_table_length = sizeof(primes) / sizeof(primes[0]);
164 const float max_load_factor = 0.65;
166 /*****************************************************************************/
167 struct hashtable * create_hashtable(uint_t min_size,
168 uint_t (*hash_fn) (addr_t),
169 int (*eq_fn) (addr_t, addr_t)) {
170 struct hashtable * htable;
172 uint_t size = primes[0];
174 /* Check requested hashtable isn't too large */
175 if (min_size > (1u << 30)) {
179 /* Enforce size as prime */
180 for (prime_index = 0; prime_index < prime_table_length; prime_index++) {
181 if (primes[prime_index] > min_size) {
182 size = primes[prime_index];
187 htable = (struct hashtable *)V3_Malloc(sizeof(struct hashtable));
189 if (htable == NULL) {
193 htable->table = (struct hash_entry **)V3_Malloc(sizeof(struct hash_entry*) * size);
195 if (htable->table == NULL) {
201 memset(htable->table, 0, size * sizeof(struct hash_entry *));
203 htable->table_length = size;
204 htable->prime_index = prime_index;
205 htable->entry_count = 0;
206 htable->hash_fn = hash_fn;
207 htable->eq_fn = eq_fn;
208 htable->load_limit = (uint_t) ceil((double)(size * max_load_factor));
215 /*****************************************************************************/
216 static int hashtable_expand(struct hashtable * htable) {
217 /* Double the size of the table to accomodate more entries */
218 struct hash_entry ** new_table;
219 struct hash_entry * tmp_entry;
220 struct hash_entry ** entry_ptr;
225 /* Check we're not hitting max capacity */
226 if (htable->prime_index == (prime_table_length - 1)) {
230 new_size = primes[++(htable->prime_index)];
232 new_table = (struct hash_entry **)V3_Malloc(sizeof(struct hash_entry*) * new_size);
234 if (new_table != NULL) {
235 memset(new_table, 0, new_size * sizeof(struct hash_entry *));
236 /* This algorithm is not 'stable'. ie. it reverses the list
237 * when it transfers entries between the tables */
239 for (i = 0; i < htable->table_length; i++) {
241 while ((tmp_entry = htable->table[i]) != NULL) {
242 htable->table[i] = tmp_entry->next;
244 index = indexFor(new_size, tmp_entry->hash);
246 tmp_entry->next = new_table[index];
248 new_table[index] = tmp_entry;
252 V3_Free(htable->table);
254 htable->table = new_table;
256 /* Plan B: realloc instead */
258 //new_table = (struct hash_entry **)realloc(htable->table, new_size * sizeof(struct hash_entry *));
259 new_table = (struct hash_entry **)tmp_realloc(htable->table, primes[htable->prime_index - 1],
260 new_size * sizeof(struct hash_entry *));
262 if (new_table == NULL) {
263 (htable->prime_index)--;
267 htable->table = new_table;
269 memset(new_table[htable->table_length], 0, new_size - htable->table_length);
271 for (i = 0; i < htable->table_length; i++) {
273 for (entry_ptr = &(new_table[i]), tmp_entry = *entry_ptr;
275 tmp_entry = *entry_ptr) {
277 index = indexFor(new_size, tmp_entry->hash);
280 entry_ptr = &(tmp_entry->next);
282 *entry_ptr = tmp_entry->next;
283 tmp_entry->next = new_table[index];
284 new_table[index] = tmp_entry;
290 htable->table_length = new_size;
292 htable->load_limit = (uint_t) ceil(new_size * max_load_factor);
297 /*****************************************************************************/
298 uint_t hashtable_count(struct hashtable * htable) {
299 return htable->entry_count;
302 /*****************************************************************************/
303 int hashtable_insert(struct hashtable * htable, addr_t key, addr_t value) {
304 /* This method allows duplicate keys - but they shouldn't be used */
306 struct hash_entry * new_entry;
308 if (++(htable->entry_count) > htable->load_limit) {
309 /* Ignore the return value. If expand fails, we should
310 * still try cramming just this value into the existing table
311 * -- we may not have memory for a larger table, but one more
312 * element may be ok. Next time we insert, we'll try expanding again.*/
313 hashtable_expand(htable);
317 new_entry = (struct hash_entry *)V3_Malloc(sizeof(struct hash_entry));
319 if (new_entry == NULL) {
320 (htable->entry_count)--;
324 new_entry->hash = do_hash(htable, key);
326 index = indexFor(htable->table_length, new_entry->hash);
328 new_entry->key = key;
329 new_entry->value = value;
331 new_entry->next = htable->table[index];
333 htable->table[index] = new_entry;
340 int hashtable_change(struct hashtable * htable, addr_t key, addr_t value, int free_value) {
341 struct hash_entry * tmp_entry;
345 hash_value = do_hash(htable, key);
347 index = indexFor(htable->table_length, hash_value);
349 tmp_entry = htable->table[index];
351 while (tmp_entry != NULL) {
352 /* Check hash value to short circuit heavier comparison */
353 if ((hash_value == tmp_entry->hash) && (htable->eq_fn(key, tmp_entry->key))) {
356 V3_Free((void *)(tmp_entry->value));
359 tmp_entry->value = value;
362 tmp_entry = tmp_entry->next;
370 /*****************************************************************************/
371 /* returns value associated with key */
372 addr_t hashtable_search(struct hashtable * htable, addr_t key) {
373 struct hash_entry * cursor;
377 hash_value = do_hash(htable, key);
379 index = indexFor(htable->table_length, hash_value);
381 cursor = htable->table[index];
383 while (cursor != NULL) {
384 /* Check hash value to short circuit heavier comparison */
385 if ((hash_value == cursor->hash) &&
386 (htable->eq_fn(key, cursor->key))) {
387 return cursor->value;
390 cursor = cursor->next;
396 /*****************************************************************************/
397 /* returns value associated with key */
398 addr_t hashtable_remove(struct hashtable * htable, addr_t key, int free_key) {
399 /* TODO: consider compacting the table when the load factor drops enough,
400 * or provide a 'compact' method. */
402 struct hash_entry * cursor;
403 struct hash_entry ** entry_ptr;
408 hash_value = do_hash(htable, key);
410 index = indexFor(htable->table_length, hash_value);
412 entry_ptr = &(htable->table[index]);
415 while (cursor != NULL) {
416 /* Check hash value to short circuit heavier comparison */
417 if ((hash_value == cursor->hash) &&
418 (htable->eq_fn(key, cursor->key))) {
420 *entry_ptr = cursor->next;
421 htable->entry_count--;
422 value = cursor->value;
425 freekey((void *)(cursor->key));
432 entry_ptr = &(cursor->next);
433 cursor = cursor->next;
438 /*****************************************************************************/
440 void hashtable_destroy(struct hashtable * htable, int free_values, int free_keys) {
442 struct hash_entry * cursor;;
443 struct hash_entry **table = htable->table;
446 for (i = 0; i < htable->table_length; i++) {
449 while (cursor != NULL) {
450 struct hash_entry * tmp;
453 cursor = cursor->next;
456 freekey((void *)(tmp->key));
458 V3_Free((void *)(tmp->value));
463 for (i = 0; i < htable->table_length; i++) {
466 while (cursor != NULL) {
467 struct hash_entry * tmp;
470 cursor = cursor->next;
473 freekey((void *)(tmp->key));
480 V3_Free(htable->table);
487 /* HASH TABLE ITERATORS */
491 struct hashtable_iter * create_hashtable_iterator(struct hashtable * htable) {
495 struct hashtable_iter * iter = (struct hashtable_iter *)V3_Malloc(sizeof(struct hashtable_iter));
501 iter->htable = htable;
504 table_length = htable->table_length;
505 iter->index = table_length;
507 if (htable->entry_count == 0) {
511 for (i = 0; i < table_length; i++) {
512 if (htable->table[i] != NULL) {
513 iter->entry = htable->table[i];
523 addr_t hashtable_get_iter_key(struct hashtable_iter * iter) {
524 return iter->entry->key;
527 addr_t hashtable_get_iter_value(struct hashtable_iter * iter) {
528 return iter->entry->value;
532 /* advance - advance the iterator to the next element
533 * returns zero if advanced to end of table */
534 int hashtable_iterator_advance(struct hashtable_iter * iter) {
537 struct hash_entry ** table;
538 struct hash_entry * next;
540 if (iter->entry == NULL) {
541 return 0; /* stupidity check */
545 next = iter->entry->next;
548 iter->parent = iter->entry;
553 table_length = iter->htable->table_length;
556 if (table_length <= (j = ++(iter->index))) {
561 table = iter->htable->table;
563 while ((next = table[j]) == NULL) {
564 if (++j >= table_length) {
565 iter->index = table_length;
578 /* remove - remove the entry at the current iterator position
579 * and advance the iterator, if there is a successive
581 * If you want the value, read it before you remove:
582 * beware memory leaks if you don't.
583 * Returns zero if end of iteration. */
584 int hashtable_iterator_remove(struct hashtable_iter * iter, int free_key) {
585 struct hash_entry * remember_entry;
586 struct hash_entry * remember_parent;
590 if ((iter->parent) == NULL) {
591 /* element is head of a chain */
592 iter->htable->table[iter->index] = iter->entry->next;
594 /* element is mid-chain */
595 iter->parent->next = iter->entry->next;
599 /* itr->e is now outside the hashtable */
600 remember_entry = iter->entry;
601 iter->htable->entry_count--;
603 freekey((void *)(remember_entry->key));
606 /* Advance the iterator, correcting the parent */
607 remember_parent = iter->parent;
608 ret = hashtable_iterator_advance(iter);
610 if (iter->parent == remember_entry) {
611 iter->parent = remember_parent;
614 V3_Free(remember_entry);
619 /* returns zero if not found */
620 int hashtable_iterator_search(struct hashtable_iter * iter,
621 struct hashtable * htable, addr_t key) {
622 struct hash_entry * entry;
623 struct hash_entry * parent;
627 hash_value = do_hash(htable, key);
628 index = indexFor(htable->table_length, hash_value);
630 entry = htable->table[index];
633 while (entry != NULL) {
634 /* Check hash value to short circuit heavier comparison */
635 if ((hash_value == entry->hash) &&
636 (htable->eq_fn(key, entry->key))) {
639 iter->parent = parent;
640 iter->htable = htable;
664 * Copyright (c) 2002, Christopher Clark
665 * All rights reserved.
667 * Redistribution and use in source and binary forms, with or without
668 * modification, are permitted provided that the following conditions
671 * * Redistributions of source code must retain the above copyright
672 * notice, this list of conditions and the following disclaimer.
674 * * Redistributions in binary form must reproduce the above copyright
675 * notice, this list of conditions and the following disclaimer in the
676 * documentation and/or other materials provided with the distribution.
678 * * Neither the name of the original author; nor the names of any contributors
679 * may be used to endorse or promote products derived from this software
680 * without specific prior written permission.
683 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
684 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
685 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
686 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
687 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
688 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
689 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
690 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
691 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
692 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
693 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.