2 Copyright (c) 2002, 2004, Christopher Clark
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions
9 * Redistributions of source code must retain the above copyright
10 notice, this list of conditions and the following disclaimer.
12 * Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 * Neither the name of the original author; nor the names of any contributors
17 may be used to endorse or promote products derived from this software
18 without specific prior written permission.
21 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
25 CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
26 EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27 PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
28 PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 /* Modifications made by Jack Lange <jarusl@cs.northwestern.edu> */
38 #include <palacios/vmm.h>
39 #include <palacios/vmm_hashtable.h>
40 #include <palacios/vmm_string.h>
/* Singly-linked chain pointer: entries hashing to the same bucket are chained.
 * NOTE(review): the enclosing struct hash_entry definition (key, value, hash
 * fields) is on lines not visible in this view. */
50 struct hash_entry * next;
/* Bucket array: table[i] heads the chain of entries whose index is i.
 * NOTE(review): enclosing struct hashtable and its other fields
 * (table_length, entry_count, load_limit, prime_index) are partially elided. */
55 struct hash_entry ** table;
/* User-supplied hash function over opaque addr_t keys. */
59 uint_t (*hash_fn) (addr_t key);
/* User-supplied equality predicate; nonzero means the keys are equal. */
60 int (*eq_fn) (addr_t key1, addr_t key2);
/* Compute the table-internal hash for 'key': call the user-supplied hash
 * function, then post-scramble the result so that weak hash functions still
 * spread entries across buckets.
 * NOTE(review): interior/closing lines of this function (including the
 * return) are elided in this view; only visible lines are annotated. */
69 static inline uint_t do_hash(struct hashtable * htable, addr_t key) {
70 /* Aim to protect against poor hash functions by adding logic here
71 * - logic taken from java 1.4 hashtable source */
72 uint_t i = htable->hash_fn(key);
/* Each step is a rotate-and-xor: (i >> n) | (i << 32-n) rotates the 32-bit
 * value, emulating Java's unsigned shift '>>>' noted in the trailing comments. */
74 i ^= ((i >> 14) | (i << 18)); /* >>> */
76 i ^= ((i >> 10) | (i << 22)); /* >>> */
/* Word-size selection for the multiplicative (golden-ratio) hash below.
 * NOTE(review): the opening '#if defined(__V3_32BIT__)' guard and the closing
 * '#endif' are on lines elided in this view; '#elif'/'#error' imply them. */
82 /* HASH AN UNSIGNED LONG */
83 /* LINUX UNSIGNED LONG HASH FUNCTION */
/* 32-bit case: prime close to 2^32 * golden ratio. */
85 /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
86 #define GOLDEN_RATIO_PRIME 0x9e370001UL
87 #define BITS_PER_LONG 32
88 #elif defined(__V3_64BIT__)
/* 64-bit case: prime close to 2^64 * golden ratio. */
89 /* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
90 #define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
91 #define BITS_PER_LONG 64
/* Neither 32- nor 64-bit target macro defined: fail the build loudly. */
93 #error Define GOLDEN_RATIO_PRIME for your wordsize.
/* Multiplicative (golden-ratio / Fibonacci) hash of an unsigned long.
 * Returns the top 'bits' bits of val * GOLDEN_RATIO_PRIME, since the high
 * bits of the product are the best mixed.
 * NOTE(review): the declaration/initialization of 'hash' and the manual
 * 64-bit shift-and-add expansion referenced by the comment below are on
 * lines elided in this view. */
96 ulong_t v3_hash_long(ulong_t val, uint_t bits) {
100 /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
115 /* On some cpus multiply is faster, on others gcc will do shifts */
116 hash *= GOLDEN_RATIO_PRIME;
119 /* High bits are more random, so use them. */
120 return hash >> (BITS_PER_LONG - bits);
/* Hash an arbitrary byte buffer of 'length' bytes using an ELF/PJW-style
 * shift-accumulate hash (with the byte index 'i' also folded in).
 * NOTE(review): the declarations of 'hash'/'temp', the classic ELF step
 * 'hash &= ~temp', and the return are on lines elided in this view. */
123 /* HASH GENERIC MEMORY BUFFER */
124 /* ELF HEADER HASH FUNCTION */
125 ulong_t v3_hash_buffer(uchar_t * msg, uint_t length) {
130 for (i = 0; i < length; i++) {
/* Shift accumulator left a nibble, add next byte (and its index). */
131 hash = (hash << 4) + *(msg + i) + i;
/* If the top nibble became nonzero, fold it back into lower bits. */
132 if ((temp = (hash & 0xF0000000))) {
133 hash ^= (temp >> 24);
142 /*****************************************************************************/
/* Map a hash value to a bucket index by modulo table size.
 * NOTE(review): the '#ifdef' selecting between this variant and the
 * mask-based variant below is on lines elided in this view. */
144 static inline uint_t indexFor(uint_t table_length, uint_t hash_value) {
145 return (hash_value % table_length);
/* Alternative: mask-based index — valid ONLY when table_length is a power
 * of two, as the original comment states. (The table sizes in primes[]
 * are prime, so the modulo variant is the one consistent with them.) */
148 /* Only works if table_length == 2^N */
150 static inline uint_t indexFor(uint_t table_length, uint_t hashvalue)
152 return (hashvalue & (table_length - 1u));
156 /*****************************************************************************/
/* Free a stored key via the VMM allocator; define as empty (second, commented
 * variant) to make key ownership external to the table. */
157 #define freekey(X) V3_Free(X)
158 /*define freekey(X) ; */
/* Minimal realloc substitute (the VMM allocator has no realloc): allocate
 * 'new_size' bytes, copy the first 'old_size' bytes across.
 * NOTE(review): the failure-path return, the free of 'old_ptr', and the
 * final 'return new_buf' are presumably on lines elided in this view —
 * confirm against the full file before relying on ownership semantics. */
161 static void * tmp_realloc(void * old_ptr, uint_t old_size, uint_t new_size) {
162 void * new_buf = V3_Malloc(new_size);
164 if (new_buf == NULL) {
168 memcpy(new_buf, old_ptr, old_size);
/* Table of primes used as successive bucket-array sizes (roughly doubling).
 * NOTE(review): the first row of entries (smallest primes) is on a line
 * elided in this view. */
176 Credit for primes table: Aaron Krowne
177 http://br.endernet.org/~akrowne/
178 http://planetmath.org/encyclopedia/GoodHashTablePrimes.html
180 static const uint_t primes[] = {
182 769, 1543, 3079, 6151,
183 12289, 24593, 49157, 98317,
184 196613, 393241, 786433, 1572869,
185 3145739, 6291469, 12582917, 25165843,
186 50331653, 100663319, 201326611, 402653189,
187 805306457, 1610612741 };
/* Precomputed entry-count thresholds (~0.65 * primes[i]); when entry_count
 * exceeds load_factors[prime_index], the table grows to the next prime.
 * Must stay index-aligned with primes[].
 * NOTE(review): the first row of entries is on a line elided in this view. */
190 // this assumes that the max load factor is .65
191 static const uint_t load_factors[] = {
193 500, 1003, 2002, 3999,
194 7988, 15986, 31953, 63907,
195 127799, 255607, 511182, 1022365,
196 2044731, 4089455, 8178897, 16357798,
197 32715575, 65431158, 130862298, 261724573,
198 523449198, 1046898282 };
/* Number of entries in primes[] (and, by alignment, in load_factors[]). */
200 static const uint_t prime_table_length = sizeof(primes) / sizeof(primes[0]);
204 /*****************************************************************************/
/* Allocate and initialize a hashtable with at least 'min_size' buckets,
 * using the caller's hash and equality functions. The actual size is the
 * smallest prime in primes[] greater than min_size.
 * Returns the new table; NOTE(review): the error-path returns (oversized
 * request, allocation failures), the loop 'break', and the final
 * 'return htable' are on lines elided in this view. */
205 struct hashtable * v3_create_htable(uint_t min_size,
206 uint_t (*hash_fn) (addr_t),
207 int (*eq_fn) (addr_t, addr_t)) {
208 struct hashtable * htable;
210 uint_t size = primes[0];
/* Refuse absurdly large tables (> 2^30 buckets). */
212 /* Check requested hashtable isn't too large */
213 if (min_size > (1u << 30)) {
/* Scan for the first prime exceeding min_size. */
217 /* Enforce size as prime */
218 for (prime_index = 0; prime_index < prime_table_length; prime_index++) {
219 if (primes[prime_index] > min_size) {
220 size = primes[prime_index];
/* Loop ran off the end: no prime large enough — error path (elided). */
225 if (prime_index==prime_table_length) {
229 htable = (struct hashtable *)V3_Malloc(sizeof(struct hashtable));
231 if (htable == NULL) {
235 htable->table = (struct hash_entry **)V3_Malloc(sizeof(struct hash_entry*) * size);
237 if (htable->table == NULL) {
/* All buckets start empty. */
243 memset(htable->table, 0, size * sizeof(struct hash_entry *));
245 htable->table_length = size;
246 htable->prime_index = prime_index;
247 htable->entry_count = 0;
248 htable->hash_fn = hash_fn;
249 htable->eq_fn = eq_fn;
/* Grow when entry_count exceeds this (~0.65 load factor). */
250 htable->load_limit = load_factors[prime_index];
257 /*****************************************************************************/
/* Grow the bucket array to the next prime size and rehash every entry.
 * Plan A: allocate a fresh array and move chains over. Plan B (fallback,
 * apparently under an '#else' elided in this view): tmp_realloc the array
 * in place and rehash by walking each old chain.
 * Returns nonzero on success / 0 at capacity — NOTE(review): the actual
 * return statements are on lines elided in this view. */
258 static int hashtable_expand(struct hashtable * htable) {
259 /* Double the size of the table to accommodate more entries */
260 struct hash_entry ** new_table;
261 struct hash_entry * tmp_entry;
262 struct hash_entry ** entry_ptr;
/* Already at the largest prime: cannot grow further. */
267 /* Check we're not hitting max capacity */
268 if (htable->prime_index == (prime_table_length - 1)) {
272 new_size = primes[++(htable->prime_index)];
274 new_table = (struct hash_entry **)V3_Malloc(sizeof(struct hash_entry*) * new_size);
276 if (new_table != NULL) {
277 memset(new_table, 0, new_size * sizeof(struct hash_entry *));
278 /* This algorithm is not 'stable'. ie. it reverses the list
279 * when it transfers entries between the tables */
/* Pop each entry off its old chain and push onto its new bucket. */
281 for (i = 0; i < htable->table_length; i++) {
283 while ((tmp_entry = htable->table[i]) != NULL) {
284 htable->table[i] = tmp_entry->next;
286 index = indexFor(new_size, tmp_entry->hash);
288 tmp_entry->next = new_table[index];
290 new_table[index] = tmp_entry;
294 V3_Free(htable->table);
296 htable->table = new_table;
298 /* Plan B: realloc instead */
300 //new_table = (struct hash_entry **)realloc(htable->table, new_size * sizeof(struct hash_entry *));
/* NOTE(review): BUG (inherited from the original hashtable library) — the
 * old-size argument is an element COUNT (primes[prime_index - 1]); it should
 * be a byte count: primes[...] * sizeof(struct hash_entry *). As written,
 * tmp_realloc copies too few bytes and drops most bucket pointers. */
301 new_table = (struct hash_entry **)tmp_realloc(htable->table, primes[htable->prime_index - 1],
302 new_size * sizeof(struct hash_entry *));
304 if (new_table == NULL) {
305 (htable->prime_index)--;
309 htable->table = new_table;
/* NOTE(review): BUG (also in the original library) — this dereferences
 * new_table[table_length] instead of taking its address, and the length is
 * not scaled by sizeof(struct hash_entry *). Intended:
 * memset(&new_table[htable->table_length], 0,
 *        (new_size - htable->table_length) * sizeof(struct hash_entry *)); */
311 memset(new_table[htable->table_length], 0, new_size - htable->table_length);
/* Rehash in place: walk each old chain, unlinking entries whose new bucket
 * differs and pushing them onto that bucket's chain. */
313 for (i = 0; i < htable->table_length; i++) {
315 for (entry_ptr = &(new_table[i]), tmp_entry = *entry_ptr;
317 tmp_entry = *entry_ptr) {
319 index = indexFor(new_size, tmp_entry->hash);
/* Entry stays in this bucket: just advance the link pointer. */
322 entry_ptr = &(tmp_entry->next);
/* Entry moves: unlink from the old chain, push onto the new bucket. */
324 *entry_ptr = tmp_entry->next;
325 tmp_entry->next = new_table[index];
326 new_table[index] = tmp_entry;
332 htable->table_length = new_size;
334 htable->load_limit = load_factors[htable->prime_index];
339 /*****************************************************************************/
/* Return the number of key/value entries currently stored in the table. */
340 uint_t v3_htable_count(struct hashtable * htable) {
341 return htable->entry_count;
344 /*****************************************************************************/
/* Insert a key/value pair (duplicates are NOT detected — callers must avoid
 * them). Grows the table first if the load limit is exceeded. The table
 * takes no copy of the key: it stores the addr_t values as given.
 * NOTE(review): the failure return (allocation) and the success return are
 * on lines elided in this view. */
345 int v3_htable_insert(struct hashtable * htable, addr_t key, addr_t value) {
346 /* This method allows duplicate keys - but they shouldn't be used */
348 struct hash_entry * new_entry;
/* Bump count first; expand if over the load limit. */
350 if (++(htable->entry_count) > htable->load_limit) {
351 /* Ignore the return value. If expand fails, we should
352 * still try cramming just this value into the existing table
353 * -- we may not have memory for a larger table, but one more
354 * element may be ok. Next time we insert, we'll try expanding again.*/
355 hashtable_expand(htable);
359 new_entry = (struct hash_entry *)V3_Malloc(sizeof(struct hash_entry));
/* Allocation failed: roll back the optimistic count increment. */
361 if (new_entry == NULL) {
362 (htable->entry_count)--;
/* Cache the scrambled hash so lookups can short-circuit comparisons. */
366 new_entry->hash = do_hash(htable, key);
368 index = indexFor(htable->table_length, new_entry->hash);
370 new_entry->key = key;
371 new_entry->value = value;
/* Push onto the head of the bucket's chain. */
373 new_entry->next = htable->table[index];
375 htable->table[index] = new_entry;
/* Replace the value stored under 'key' with 'value'. If 'free_value' is set,
 * the old value is V3_Free'd first (so it must have been heap-allocated).
 * NOTE(review): the 'if (free_value)' guard around the V3_Free call, the
 * found/not-found return statements, and the closing brace are on lines
 * elided in this view. */
382 int v3_htable_change(struct hashtable * htable, addr_t key, addr_t value, int free_value) {
383 struct hash_entry * tmp_entry;
387 hash_value = do_hash(htable, key);
389 index = indexFor(htable->table_length, hash_value);
391 tmp_entry = htable->table[index];
/* Walk the bucket's chain looking for a matching key. */
393 while (tmp_entry != NULL) {
394 /* Check hash value to short circuit heavier comparison */
395 if ((hash_value == tmp_entry->hash) && (htable->eq_fn(key, tmp_entry->key))) {
/* Release the previous value (guard presumably on an elided line). */
398 V3_Free((void *)(tmp_entry->value));
401 tmp_entry->value = value;
404 tmp_entry = tmp_entry->next;
/* Add 'value' to the value stored under 'key' (treats the stored addr_t as
 * an integer counter). NOTE(review): the found/not-found return statements
 * and closing brace are on lines elided in this view. */
411 int v3_htable_inc(struct hashtable * htable, addr_t key, addr_t value) {
412 struct hash_entry * tmp_entry;
416 hash_value = do_hash(htable, key);
418 index = indexFor(htable->table_length, hash_value);
420 tmp_entry = htable->table[index];
/* Walk the bucket's chain looking for a matching key. */
422 while (tmp_entry != NULL) {
423 /* Check hash value to short circuit heavier comparison */
424 if ((hash_value == tmp_entry->hash) && (htable->eq_fn(key, tmp_entry->key))) {
426 tmp_entry->value += value;
429 tmp_entry = tmp_entry->next;
/* Subtract 'value' from the value stored under 'key' (counter semantics;
 * mirror of v3_htable_inc — no underflow check on the visible lines).
 * NOTE(review): the found/not-found return statements and closing brace are
 * on lines elided in this view. */
435 int v3_htable_dec(struct hashtable * htable, addr_t key, addr_t value) {
436 struct hash_entry * tmp_entry;
440 hash_value = do_hash(htable, key);
442 index = indexFor(htable->table_length, hash_value);
444 tmp_entry = htable->table[index];
/* Walk the bucket's chain looking for a matching key. */
446 while (tmp_entry != NULL) {
447 /* Check hash value to short circuit heavier comparison */
448 if ((hash_value == tmp_entry->hash) && (htable->eq_fn(key, tmp_entry->key))) {
450 tmp_entry->value -= value;
453 tmp_entry = tmp_entry->next;
461 /*****************************************************************************/
/* Look up 'key' and return its stored value. The cached hash is compared
 * before calling the (potentially expensive) eq_fn.
 * NOTE(review): the not-found return value (presumably 0/NULL) is on a line
 * elided in this view — confirm against the full file. */
462 /* returns value associated with key */
463 addr_t v3_htable_search(struct hashtable * htable, addr_t key) {
464 struct hash_entry * cursor;
468 hash_value = do_hash(htable, key);
470 index = indexFor(htable->table_length, hash_value);
472 cursor = htable->table[index];
/* Walk the bucket's chain looking for a matching key. */
474 while (cursor != NULL) {
475 /* Check hash value to short circuit heavier comparison */
476 if ((hash_value == cursor->hash) &&
477 (htable->eq_fn(key, cursor->key))) {
478 return cursor->value;
481 cursor = cursor->next;
487 /*****************************************************************************/
/* Remove the entry stored under 'key', returning its value. If 'free_key'
 * is set, the stored key is released via freekey(). The entry struct itself
 * is freed (on elided lines).
 * NOTE(review): the initialization 'cursor = *entry_ptr', the
 * 'if (free_key)' guard, the V3_Free of the entry, and the return
 * statements are on lines elided in this view. */
488 /* returns value associated with key */
489 addr_t v3_htable_remove(struct hashtable * htable, addr_t key, int free_key) {
490 /* TODO: consider compacting the table when the load factor drops enough,
491 * or provide a 'compact' method. */
493 struct hash_entry * cursor;
494 struct hash_entry ** entry_ptr;
499 hash_value = do_hash(htable, key);
501 index = indexFor(htable->table_length, hash_value);
/* entry_ptr trails cursor so the match can be unlinked in O(1). */
503 entry_ptr = &(htable->table[index]);
506 while (cursor != NULL) {
507 /* Check hash value to short circuit heavier comparison */
508 if ((hash_value == cursor->hash) &&
509 (htable->eq_fn(key, cursor->key))) {
/* Unlink the matched entry from its chain. */
511 *entry_ptr = cursor->next;
512 htable->entry_count--;
513 value = cursor->value;
/* Release the key if the caller handed ownership to the table. */
516 freekey((void *)(cursor->key));
523 entry_ptr = &(cursor->next);
524 cursor = cursor->next;
529 /*****************************************************************************/
/* Destroy the whole table. Two walk loops are visible: one (taken when
 * free_values is set, per the guard presumably on an elided line) frees
 * values and keys; the other frees keys only. Keys are freed only when
 * free_keys is set (guards elided). Finally the bucket array and the table
 * struct itself are released.
 * NOTE(review): stray ';;' on the cursor declaration below is harmless but
 * should be cleaned up in the full file. */
531 void v3_free_htable(struct hashtable * htable, int free_values, int free_keys) {
533 struct hash_entry * cursor;;
534 struct hash_entry **table = htable->table;
/* Pass for the free_values case: release value, key (if owned), and entry. */
537 for (i = 0; i < htable->table_length; i++) {
540 while (cursor != NULL) {
541 struct hash_entry * tmp;
/* Save the entry, advance first, then free — avoids use-after-free. */
544 cursor = cursor->next;
547 freekey((void *)(tmp->key));
549 V3_Free((void *)(tmp->value));
/* Pass for the !free_values case: release key (if owned) and entry only. */
554 for (i = 0; i < htable->table_length; i++) {
557 while (cursor != NULL) {
558 struct hash_entry * tmp;
561 cursor = cursor->next;
564 freekey((void *)(tmp->key));
/* Release the bucket array (the htable struct free follows on elided lines). */
571 V3_Free(htable->table);
578 /* HASH TABLE ITERATORS */
/* Allocate an iterator positioned at the first entry of 'htable'.
 * iter->index is pre-set to table_length (the "end" sentinel) and only
 * lowered if a populated bucket is found.
 * NOTE(review): the NULL-allocation check, the loop 'break'/index
 * assignment, and the return are on lines elided in this view. */
582 struct hashtable_iter * v3_create_htable_iter(struct hashtable * htable) {
586 struct hashtable_iter * iter = (struct hashtable_iter *)V3_Malloc(sizeof(struct hashtable_iter));
592 iter->htable = htable;
/* Default to the end position (index == table_length means exhausted). */
595 table_length = htable->table_length;
596 iter->index = table_length;
/* Empty table: leave the iterator at the end sentinel. */
598 if (htable->entry_count == 0) {
/* Scan forward for the first non-empty bucket. */
602 for (i = 0; i < table_length; i++) {
603 if (htable->table[i] != NULL) {
604 iter->entry = htable->table[i];
/* Return the key at the iterator's current position (caller must ensure the
 * iterator is valid, i.e. entry != NULL). */
614 addr_t v3_htable_get_iter_key(struct hashtable_iter * iter) {
615 return iter->entry->key;
/* Return the value at the iterator's current position (caller must ensure
 * the iterator is valid, i.e. entry != NULL). */
618 addr_t v3_htable_get_iter_value(struct hashtable_iter * iter) {
619 return iter->entry->value;
/* Step the iterator to the next entry: first along the current chain, then
 * forward through the bucket array to the next non-empty bucket.
 * NOTE(review): several lines (the in-chain early path that sets
 * iter->entry = next, the end-of-table returns, and the final assignments)
 * are elided in this view. */
623 /* advance - advance the iterator to the next element
624 * returns zero if advanced to end of table */
625 int v3_htable_iter_advance(struct hashtable_iter * iter) {
628 struct hash_entry ** table;
629 struct hash_entry * next;
631 if (iter->entry == NULL) {
632 return 0; /* stupidity check */
/* Try the next entry on the current chain first. */
636 next = iter->entry->next;
/* Current entry becomes the parent of whatever comes next in this chain. */
639 iter->parent = iter->entry;
/* Chain exhausted: move to the following bucket(s). */
644 table_length = iter->htable->table_length;
647 if (table_length <= (j = ++(iter->index))) {
652 table = iter->htable->table;
/* Skip empty buckets; park at the end sentinel if none remain. */
654 while ((next = table[j]) == NULL) {
655 if (++j >= table_length) {
656 iter->index = table_length;
669 /* remove - remove the entry at the current iterator position
670 * and advance the iterator, if there is a successive
672 * If you want the value, read it before you remove:
673 * beware memory leaks if you don't.
674 * Returns zero if end of iteration. */
/* NOTE(review): the 'if (free_key)' guard around freekey and the final
 * 'return ret' are on lines elided in this view. */
675 int v3_htable_iter_remove(struct hashtable_iter * iter, int free_key) {
676 struct hash_entry * remember_entry;
677 struct hash_entry * remember_parent;
/* Unlink the current entry from its chain. */
681 if ((iter->parent) == NULL) {
682 /* element is head of a chain */
683 iter->htable->table[iter->index] = iter->entry->next;
685 /* element is mid-chain */
686 iter->parent->next = iter->entry->next;
690 /* itr->e is now outside the hashtable */
691 remember_entry = iter->entry;
692 iter->htable->entry_count--;
/* Release the key if the caller handed ownership to the table. */
694 freekey((void *)(remember_entry->key));
697 /* Advance the iterator, correcting the parent */
698 remember_parent = iter->parent;
699 ret = v3_htable_iter_advance(iter);
/* advance() set parent to the removed entry if we stayed in the same chain;
 * restore the true (still-linked) parent. */
701 if (iter->parent == remember_entry) {
702 iter->parent = remember_parent;
/* Safe to free now — the iterator no longer references the entry. */
705 V3_Free(remember_entry);
710 /* returns zero if not found */
/* Position 'iter' on the entry matching 'key' in 'htable'.
 * NOTE(review): the initialization of 'parent' (presumably NULL), its
 * update while walking the chain, the iter->entry/iter->index assignments,
 * and the found/not-found returns are on lines elided in this view. */
711 int v3_htable_iter_search(struct hashtable_iter * iter,
712 struct hashtable * htable, addr_t key) {
713 struct hash_entry * entry;
714 struct hash_entry * parent;
718 hash_value = do_hash(htable, key);
719 index = indexFor(htable->table_length, hash_value);
721 entry = htable->table[index];
/* Walk the bucket's chain, tracking the parent for later removal support. */
724 while (entry != NULL) {
725 /* Check hash value to short circuit heavier comparison */
726 if ((hash_value == entry->hash) &&
727 (htable->eq_fn(key, entry->key))) {
730 iter->parent = parent;
731 iter->htable = htable;
/* Release an iterator allocated by v3_create_htable_iter.
 * NOTE(review): the body is not visible in this view (presumably just
 * V3_Free(iter)) — confirm against the full file. */
740 void v3_destroy_htable_iter(struct hashtable_iter * iter) {
760 * Copyright (c) 2002, Christopher Clark
761 * All rights reserved.
763 * Redistribution and use in source and binary forms, with or without
764 * modification, are permitted provided that the following conditions
767 * * Redistributions of source code must retain the above copyright
768 * notice, this list of conditions and the following disclaimer.
770 * * Redistributions in binary form must reproduce the above copyright
771 * notice, this list of conditions and the following disclaimer in the
772 * documentation and/or other materials provided with the distribution.
774 * * Neither the name of the original author; nor the names of any contributors
775 * may be used to endorse or promote products derived from this software
776 * without specific prior written permission.
779 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
780 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
781 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
782 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
783 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
784 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
785 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
786 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
787 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
788 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
789 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.