2 Copyright (c) 2002, 2004, Christopher Clark
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions
9 * Redistributions of source code must retain the above copyright
10 notice, this list of conditions and the following disclaimer.
12 * Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 * Neither the name of the original author; nor the names of any contributors
17 may be used to endorse or promote products derived from this software
18 without specific prior written permission.
21 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
25 CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
26 EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27 PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
28 PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 /* Modifications made by Jack Lange <jarusl@cs.northwestern.edu> */
38 #include <palacios/vmm.h>
39 #include <palacios/vmm_hashtable.h>
40 #include <palacios/vmm_string.h>
50 struct hash_entry * next;
55 struct hash_entry ** table;
59 uint_t (*hash_fn) (addr_t key);
60 int (*eq_fn) (addr_t key1, addr_t key2);
69 uint_t do_hash(struct hashtable * htable, addr_t key) {
70 /* Aim to protect against poor hash functions by adding logic here
71 * - logic taken from java 1.4 hashtable source */
72 uint_t i = htable->hash_fn(key);
74 i ^= ((i >> 14) | (i << 18)); /* >>> */
76 i ^= ((i >> 10) | (i << 22)); /* >>> */
82 /* HASH AN UNSIGNED LONG */
83 /* LINUX UNSIGHED LONG HASH FUNCTION */
85 /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
86 #define GOLDEN_RATIO_PRIME 0x9e370001UL
87 #define BITS_PER_LONG 32
88 #elif defined(__V3_64BIT__)
89 /* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
90 #define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
91 #define BITS_PER_LONG 64
93 #error Define GOLDEN_RATIO_PRIME for your wordsize.
96 ulong_t hash_long(ulong_t val, uint_t bits) {
100 /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
115 /* On some cpus multiply is faster, on others gcc will do shifts */
116 hash *= GOLDEN_RATIO_PRIME;
119 /* High bits are more random, so use them. */
120 return hash >> (BITS_PER_LONG - bits);
123 /* HASH GENERIC MEMORY BUFFER */
124 /* ELF HEADER HASH FUNCTION */
125 ulong_t hash_buffer(uchar_t * msg, uint_t length) {
130 for (i = 0; i < length; i++) {
131 hash = (hash << 4) + *(msg + i) + i;
132 if ((temp = (hash & 0xF0000000))) {
133 hash ^= (temp >> 24);
142 /*****************************************************************************/
144 static inline uint_t indexFor(uint_t table_length, uint_t hash_value) {
145 return (hash_value % table_length);
148 /* Only works if table_length == 2^N */
150 static inline uint_t indexFor(uint_t table_length, uint_t hashvalue)
152 return (hashvalue & (table_length - 1u));
156 /*****************************************************************************/
/* Free a stored key. Keys are addr_t; this assumes the caller stored a
 * heap pointer cast to addr_t. Define as empty to keep keys caller-owned. */
#define freekey(X) V3_Free(X)
/*define freekey(X) ; */
161 static void * tmp_realloc(void * old_ptr, uint_t old_size, uint_t new_size) {
162 void * new_buf = V3_Malloc(new_size);
164 if (new_buf == NULL) {
168 memcpy(new_buf, old_ptr, old_size);
176 Credit for primes table: Aaron Krowne
177 http://br.endernet.org/~akrowne/
178 http://planetmath.org/encyclopedia/GoodHashTablePrimes.html
180 static const uint_t primes[] = {
182 769, 1543, 3079, 6151,
183 12289, 24593, 49157, 98317,
184 196613, 393241, 786433, 1572869,
185 3145739, 6291469, 12582917, 25165843,
186 50331653, 100663319, 201326611, 402653189,
187 805306457, 1610612741 };
190 const uint_t prime_table_length = sizeof(primes) / sizeof(primes[0]);
192 const float max_load_factor = 0.65;
194 /*****************************************************************************/
195 struct hashtable * create_hashtable(uint_t min_size,
196 uint_t (*hash_fn) (addr_t),
197 int (*eq_fn) (addr_t, addr_t)) {
198 struct hashtable * htable;
200 uint_t size = primes[0];
202 /* Check requested hashtable isn't too large */
203 if (min_size > (1u << 30)) {
207 /* Enforce size as prime */
208 for (prime_index = 0; prime_index < prime_table_length; prime_index++) {
209 if (primes[prime_index] > min_size) {
210 size = primes[prime_index];
215 htable = (struct hashtable *)V3_Malloc(sizeof(struct hashtable));
217 if (htable == NULL) {
221 htable->table = (struct hash_entry **)V3_Malloc(sizeof(struct hash_entry*) * size);
223 if (htable->table == NULL) {
229 memset(htable->table, 0, size * sizeof(struct hash_entry *));
231 htable->table_length = size;
232 htable->prime_index = prime_index;
233 htable->entry_count = 0;
234 htable->hash_fn = hash_fn;
235 htable->eq_fn = eq_fn;
236 htable->load_limit = (uint_t) v3_ceil((double)(size * max_load_factor));
243 /*****************************************************************************/
244 static int hashtable_expand(struct hashtable * htable) {
245 /* Double the size of the table to accomodate more entries */
246 struct hash_entry ** new_table;
247 struct hash_entry * tmp_entry;
248 struct hash_entry ** entry_ptr;
253 /* Check we're not hitting max capacity */
254 if (htable->prime_index == (prime_table_length - 1)) {
258 new_size = primes[++(htable->prime_index)];
260 new_table = (struct hash_entry **)V3_Malloc(sizeof(struct hash_entry*) * new_size);
262 if (new_table != NULL) {
263 memset(new_table, 0, new_size * sizeof(struct hash_entry *));
264 /* This algorithm is not 'stable'. ie. it reverses the list
265 * when it transfers entries between the tables */
267 for (i = 0; i < htable->table_length; i++) {
269 while ((tmp_entry = htable->table[i]) != NULL) {
270 htable->table[i] = tmp_entry->next;
272 index = indexFor(new_size, tmp_entry->hash);
274 tmp_entry->next = new_table[index];
276 new_table[index] = tmp_entry;
280 V3_Free(htable->table);
282 htable->table = new_table;
284 /* Plan B: realloc instead */
286 //new_table = (struct hash_entry **)realloc(htable->table, new_size * sizeof(struct hash_entry *));
287 new_table = (struct hash_entry **)tmp_realloc(htable->table, primes[htable->prime_index - 1],
288 new_size * sizeof(struct hash_entry *));
290 if (new_table == NULL) {
291 (htable->prime_index)--;
295 htable->table = new_table;
297 memset(new_table[htable->table_length], 0, new_size - htable->table_length);
299 for (i = 0; i < htable->table_length; i++) {
301 for (entry_ptr = &(new_table[i]), tmp_entry = *entry_ptr;
303 tmp_entry = *entry_ptr) {
305 index = indexFor(new_size, tmp_entry->hash);
308 entry_ptr = &(tmp_entry->next);
310 *entry_ptr = tmp_entry->next;
311 tmp_entry->next = new_table[index];
312 new_table[index] = tmp_entry;
318 htable->table_length = new_size;
320 htable->load_limit = (uint_t) v3_ceil(new_size * max_load_factor);
325 /*****************************************************************************/
326 uint_t hashtable_count(struct hashtable * htable) {
327 return htable->entry_count;
330 /*****************************************************************************/
331 int hashtable_insert(struct hashtable * htable, addr_t key, addr_t value) {
332 /* This method allows duplicate keys - but they shouldn't be used */
334 struct hash_entry * new_entry;
336 if (++(htable->entry_count) > htable->load_limit) {
337 /* Ignore the return value. If expand fails, we should
338 * still try cramming just this value into the existing table
339 * -- we may not have memory for a larger table, but one more
340 * element may be ok. Next time we insert, we'll try expanding again.*/
341 hashtable_expand(htable);
345 new_entry = (struct hash_entry *)V3_Malloc(sizeof(struct hash_entry));
347 if (new_entry == NULL) {
348 (htable->entry_count)--;
352 new_entry->hash = do_hash(htable, key);
354 index = indexFor(htable->table_length, new_entry->hash);
356 new_entry->key = key;
357 new_entry->value = value;
359 new_entry->next = htable->table[index];
361 htable->table[index] = new_entry;
368 int hashtable_change(struct hashtable * htable, addr_t key, addr_t value, int free_value) {
369 struct hash_entry * tmp_entry;
373 hash_value = do_hash(htable, key);
375 index = indexFor(htable->table_length, hash_value);
377 tmp_entry = htable->table[index];
379 while (tmp_entry != NULL) {
380 /* Check hash value to short circuit heavier comparison */
381 if ((hash_value == tmp_entry->hash) && (htable->eq_fn(key, tmp_entry->key))) {
384 V3_Free((void *)(tmp_entry->value));
387 tmp_entry->value = value;
390 tmp_entry = tmp_entry->next;
397 int hashtable_inc(struct hashtable * htable, addr_t key, addr_t value) {
398 struct hash_entry * tmp_entry;
402 hash_value = do_hash(htable, key);
404 index = indexFor(htable->table_length, hash_value);
406 tmp_entry = htable->table[index];
408 while (tmp_entry != NULL) {
409 /* Check hash value to short circuit heavier comparison */
410 if ((hash_value == tmp_entry->hash) && (htable->eq_fn(key, tmp_entry->key))) {
412 tmp_entry->value += value;
415 tmp_entry = tmp_entry->next;
421 int hashtable_dec(struct hashtable * htable, addr_t key, addr_t value) {
422 struct hash_entry * tmp_entry;
426 hash_value = do_hash(htable, key);
428 index = indexFor(htable->table_length, hash_value);
430 tmp_entry = htable->table[index];
432 while (tmp_entry != NULL) {
433 /* Check hash value to short circuit heavier comparison */
434 if ((hash_value == tmp_entry->hash) && (htable->eq_fn(key, tmp_entry->key))) {
436 tmp_entry->value -= value;
439 tmp_entry = tmp_entry->next;
447 /*****************************************************************************/
448 /* returns value associated with key */
449 addr_t hashtable_search(struct hashtable * htable, addr_t key) {
450 struct hash_entry * cursor;
454 hash_value = do_hash(htable, key);
456 index = indexFor(htable->table_length, hash_value);
458 cursor = htable->table[index];
460 while (cursor != NULL) {
461 /* Check hash value to short circuit heavier comparison */
462 if ((hash_value == cursor->hash) &&
463 (htable->eq_fn(key, cursor->key))) {
464 return cursor->value;
467 cursor = cursor->next;
473 /*****************************************************************************/
474 /* returns value associated with key */
475 addr_t hashtable_remove(struct hashtable * htable, addr_t key, int free_key) {
476 /* TODO: consider compacting the table when the load factor drops enough,
477 * or provide a 'compact' method. */
479 struct hash_entry * cursor;
480 struct hash_entry ** entry_ptr;
485 hash_value = do_hash(htable, key);
487 index = indexFor(htable->table_length, hash_value);
489 entry_ptr = &(htable->table[index]);
492 while (cursor != NULL) {
493 /* Check hash value to short circuit heavier comparison */
494 if ((hash_value == cursor->hash) &&
495 (htable->eq_fn(key, cursor->key))) {
497 *entry_ptr = cursor->next;
498 htable->entry_count--;
499 value = cursor->value;
502 freekey((void *)(cursor->key));
509 entry_ptr = &(cursor->next);
510 cursor = cursor->next;
515 /*****************************************************************************/
517 void hashtable_destroy(struct hashtable * htable, int free_values, int free_keys) {
519 struct hash_entry * cursor;;
520 struct hash_entry **table = htable->table;
523 for (i = 0; i < htable->table_length; i++) {
526 while (cursor != NULL) {
527 struct hash_entry * tmp;
530 cursor = cursor->next;
533 freekey((void *)(tmp->key));
535 V3_Free((void *)(tmp->value));
540 for (i = 0; i < htable->table_length; i++) {
543 while (cursor != NULL) {
544 struct hash_entry * tmp;
547 cursor = cursor->next;
550 freekey((void *)(tmp->key));
557 V3_Free(htable->table);
564 /* HASH TABLE ITERATORS */
568 struct hashtable_iter * create_hashtable_iterator(struct hashtable * htable) {
572 struct hashtable_iter * iter = (struct hashtable_iter *)V3_Malloc(sizeof(struct hashtable_iter));
578 iter->htable = htable;
581 table_length = htable->table_length;
582 iter->index = table_length;
584 if (htable->entry_count == 0) {
588 for (i = 0; i < table_length; i++) {
589 if (htable->table[i] != NULL) {
590 iter->entry = htable->table[i];
600 addr_t hashtable_get_iter_key(struct hashtable_iter * iter) {
601 return iter->entry->key;
604 addr_t hashtable_get_iter_value(struct hashtable_iter * iter) {
605 return iter->entry->value;
609 /* advance - advance the iterator to the next element
610 * returns zero if advanced to end of table */
611 int hashtable_iterator_advance(struct hashtable_iter * iter) {
614 struct hash_entry ** table;
615 struct hash_entry * next;
617 if (iter->entry == NULL) {
618 return 0; /* stupidity check */
622 next = iter->entry->next;
625 iter->parent = iter->entry;
630 table_length = iter->htable->table_length;
633 if (table_length <= (j = ++(iter->index))) {
638 table = iter->htable->table;
640 while ((next = table[j]) == NULL) {
641 if (++j >= table_length) {
642 iter->index = table_length;
655 /* remove - remove the entry at the current iterator position
656 * and advance the iterator, if there is a successive
658 * If you want the value, read it before you remove:
659 * beware memory leaks if you don't.
660 * Returns zero if end of iteration. */
661 int hashtable_iterator_remove(struct hashtable_iter * iter, int free_key) {
662 struct hash_entry * remember_entry;
663 struct hash_entry * remember_parent;
667 if ((iter->parent) == NULL) {
668 /* element is head of a chain */
669 iter->htable->table[iter->index] = iter->entry->next;
671 /* element is mid-chain */
672 iter->parent->next = iter->entry->next;
676 /* itr->e is now outside the hashtable */
677 remember_entry = iter->entry;
678 iter->htable->entry_count--;
680 freekey((void *)(remember_entry->key));
683 /* Advance the iterator, correcting the parent */
684 remember_parent = iter->parent;
685 ret = hashtable_iterator_advance(iter);
687 if (iter->parent == remember_entry) {
688 iter->parent = remember_parent;
691 V3_Free(remember_entry);
696 /* returns zero if not found */
697 int hashtable_iterator_search(struct hashtable_iter * iter,
698 struct hashtable * htable, addr_t key) {
699 struct hash_entry * entry;
700 struct hash_entry * parent;
704 hash_value = do_hash(htable, key);
705 index = indexFor(htable->table_length, hash_value);
707 entry = htable->table[index];
710 while (entry != NULL) {
711 /* Check hash value to short circuit heavier comparison */
712 if ((hash_value == entry->hash) &&
713 (htable->eq_fn(key, entry->key))) {
716 iter->parent = parent;
717 iter->htable = htable;
741 * Copyright (c) 2002, Christopher Clark
742 * All rights reserved.
744 * Redistribution and use in source and binary forms, with or without
745 * modification, are permitted provided that the following conditions
748 * * Redistributions of source code must retain the above copyright
749 * notice, this list of conditions and the following disclaimer.
751 * * Redistributions in binary form must reproduce the above copyright
752 * notice, this list of conditions and the following disclaimer in the
753 * documentation and/or other materials provided with the distribution.
755 * * Neither the name of the original author; nor the names of any contributors
756 * may be used to endorse or promote products derived from this software
757 * without specific prior written permission.
760 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
761 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
762 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
763 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
764 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
765 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
766 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
767 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
768 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
769 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
770 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.