1 /* Copyright (C) 2004 Christopher Clark <firstname.lastname@cl.cam.ac.uk> */
2 /* Modifications made by Jack Lange <jarusl@cs.northwestern.edu> */
4 #include <palacios/vmm.h>
5 #include <palacios/vmm_hashtable.h>
6 #include <palacios/vmm_string.h>
16 struct hash_entry * next;
21 struct hash_entry ** table;
25 uint_t (*hash_fn) (addr_t key);
26 int (*eq_fn) (addr_t key1, addr_t key2);
35 uint_t do_hash(struct hashtable * htable, addr_t key) {
36 /* Aim to protect against poor hash functions by adding logic here
37 * - logic taken from java 1.4 hashtable source */
38 uint_t i = htable->hash_fn(key);
40 i ^= ((i >> 14) | (i << 18)); /* >>> */
42 i ^= ((i >> 10) | (i << 22)); /* >>> */
48 /* HASH AN UNSIGNED LONG */
49 /* LINUX UNSIGHED LONG HASH FUNCTION */
51 /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
52 #define GOLDEN_RATIO_PRIME 0x9e370001UL
53 #define BITS_PER_LONG 32
54 #elif defined(__V3_64BIT__)
55 /* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
56 #define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
57 #define BITS_PER_LONG 64
59 #error Define GOLDEN_RATIO_PRIME for your wordsize.
62 ulong_t hash_long(ulong_t val, uint_t bits) {
66 /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
81 /* On some cpus multiply is faster, on others gcc will do shifts */
82 hash *= GOLDEN_RATIO_PRIME;
85 /* High bits are more random, so use them. */
86 return hash >> (BITS_PER_LONG - bits);
89 /* HASH GENERIC MEMORY BUFFER */
90 /* ELF HEADER HASH FUNCTION */
91 ulong_t hash_buffer(uchar_t * msg, uint_t length) {
96 for (i = 0; i < length; i++) {
97 hash = (hash << 4) + *(msg + i) + i;
98 if ((temp = (hash & 0xF0000000))) {
108 /*****************************************************************************/
110 static inline uint_t indexFor(uint_t table_length, uint_t hash_value) {
111 return (hash_value % table_length);
114 /* Only works if table_length == 2^N */
116 static inline uint_t indexFor(uint_t table_length, uint_t hashvalue)
118 return (hashvalue & (table_length - 1u));
122 /*****************************************************************************/
123 #define freekey(X) V3_Free(X)
124 /*define freekey(X) ; */
127 static void * tmp_realloc(void * old_ptr, uint_t old_size, uint_t new_size) {
128 void * new_buf = V3_Malloc(new_size);
130 if (new_buf == NULL) {
134 memcpy(new_buf, old_ptr, old_size);
142 Credit for primes table: Aaron Krowne
143 http://br.endernet.org/~akrowne/
144 http://planetmath.org/encyclopedia/GoodHashTablePrimes.html
146 static const uint_t primes[] = {
148 769, 1543, 3079, 6151,
149 12289, 24593, 49157, 98317,
150 196613, 393241, 786433, 1572869,
151 3145739, 6291469, 12582917, 25165843,
152 50331653, 100663319, 201326611, 402653189,
153 805306457, 1610612741 };
156 const uint_t prime_table_length = sizeof(primes) / sizeof(primes[0]);
158 const float max_load_factor = 0.65;
160 /*****************************************************************************/
161 struct hashtable * create_hashtable(uint_t min_size,
162 uint_t (*hash_fn) (addr_t),
163 int (*eq_fn) (addr_t, addr_t)) {
164 struct hashtable * htable;
166 uint_t size = primes[0];
168 /* Check requested hashtable isn't too large */
169 if (min_size > (1u << 30)) {
173 /* Enforce size as prime */
174 for (prime_index = 0; prime_index < prime_table_length; prime_index++) {
175 if (primes[prime_index] > min_size) {
176 size = primes[prime_index];
181 htable = (struct hashtable *)V3_Malloc(sizeof(struct hashtable));
183 if (htable == NULL) {
187 htable->table = (struct hash_entry **)V3_Malloc(sizeof(struct hash_entry*) * size);
189 if (htable->table == NULL) {
195 memset(htable->table, 0, size * sizeof(struct hash_entry *));
197 htable->table_length = size;
198 htable->prime_index = prime_index;
199 htable->entry_count = 0;
200 htable->hash_fn = hash_fn;
201 htable->eq_fn = eq_fn;
202 htable->load_limit = (uint_t) ceil((double)(size * max_load_factor));
209 /*****************************************************************************/
210 static int hashtable_expand(struct hashtable * htable) {
211 /* Double the size of the table to accomodate more entries */
212 struct hash_entry ** new_table;
213 struct hash_entry * tmp_entry;
214 struct hash_entry ** entry_ptr;
219 /* Check we're not hitting max capacity */
220 if (htable->prime_index == (prime_table_length - 1)) {
224 new_size = primes[++(htable->prime_index)];
226 new_table = (struct hash_entry **)V3_Malloc(sizeof(struct hash_entry*) * new_size);
228 if (new_table != NULL) {
229 memset(new_table, 0, new_size * sizeof(struct hash_entry *));
230 /* This algorithm is not 'stable'. ie. it reverses the list
231 * when it transfers entries between the tables */
233 for (i = 0; i < htable->table_length; i++) {
235 while ((tmp_entry = htable->table[i]) != NULL) {
236 htable->table[i] = tmp_entry->next;
238 index = indexFor(new_size, tmp_entry->hash);
240 tmp_entry->next = new_table[index];
242 new_table[index] = tmp_entry;
246 V3_Free(htable->table);
248 htable->table = new_table;
250 /* Plan B: realloc instead */
252 //new_table = (struct hash_entry **)realloc(htable->table, new_size * sizeof(struct hash_entry *));
253 new_table = (struct hash_entry **)tmp_realloc(htable->table, primes[htable->prime_index - 1],
254 new_size * sizeof(struct hash_entry *));
256 if (new_table == NULL) {
257 (htable->prime_index)--;
261 htable->table = new_table;
263 memset(new_table[htable->table_length], 0, new_size - htable->table_length);
265 for (i = 0; i < htable->table_length; i++) {
267 for (entry_ptr = &(new_table[i]), tmp_entry = *entry_ptr;
269 tmp_entry = *entry_ptr) {
271 index = indexFor(new_size, tmp_entry->hash);
274 entry_ptr = &(tmp_entry->next);
276 *entry_ptr = tmp_entry->next;
277 tmp_entry->next = new_table[index];
278 new_table[index] = tmp_entry;
284 htable->table_length = new_size;
286 htable->load_limit = (uint_t) ceil(new_size * max_load_factor);
291 /*****************************************************************************/
292 uint_t hashtable_count(struct hashtable * htable) {
293 return htable->entry_count;
296 /*****************************************************************************/
297 int hashtable_insert(struct hashtable * htable, addr_t key, addr_t value) {
298 /* This method allows duplicate keys - but they shouldn't be used */
300 struct hash_entry * new_entry;
302 if (++(htable->entry_count) > htable->load_limit) {
303 /* Ignore the return value. If expand fails, we should
304 * still try cramming just this value into the existing table
305 * -- we may not have memory for a larger table, but one more
306 * element may be ok. Next time we insert, we'll try expanding again.*/
307 hashtable_expand(htable);
311 new_entry = (struct hash_entry *)V3_Malloc(sizeof(struct hash_entry));
313 if (new_entry == NULL) {
314 (htable->entry_count)--;
318 new_entry->hash = do_hash(htable, key);
320 index = indexFor(htable->table_length, new_entry->hash);
322 new_entry->key = key;
323 new_entry->value = value;
325 new_entry->next = htable->table[index];
327 htable->table[index] = new_entry;
334 int hashtable_change(struct hashtable * htable, addr_t key, addr_t value, int free_value) {
335 struct hash_entry * tmp_entry;
339 hash_value = do_hash(htable, key);
341 index = indexFor(htable->table_length, hash_value);
343 tmp_entry = htable->table[index];
345 while (tmp_entry != NULL) {
346 /* Check hash value to short circuit heavier comparison */
347 if ((hash_value == tmp_entry->hash) && (htable->eq_fn(key, tmp_entry->key))) {
350 V3_Free((void *)(tmp_entry->value));
353 tmp_entry->value = value;
356 tmp_entry = tmp_entry->next;
364 /*****************************************************************************/
365 /* returns value associated with key */
366 addr_t hashtable_search(struct hashtable * htable, addr_t key) {
367 struct hash_entry * cursor;
371 hash_value = do_hash(htable, key);
373 index = indexFor(htable->table_length, hash_value);
375 cursor = htable->table[index];
377 while (cursor != NULL) {
378 /* Check hash value to short circuit heavier comparison */
379 if ((hash_value == cursor->hash) &&
380 (htable->eq_fn(key, cursor->key))) {
381 return cursor->value;
384 cursor = cursor->next;
390 /*****************************************************************************/
391 /* returns value associated with key */
392 addr_t hashtable_remove(struct hashtable * htable, addr_t key, int free_key) {
393 /* TODO: consider compacting the table when the load factor drops enough,
394 * or provide a 'compact' method. */
396 struct hash_entry * cursor;
397 struct hash_entry ** entry_ptr;
402 hash_value = do_hash(htable, key);
404 index = indexFor(htable->table_length, hash_value);
406 entry_ptr = &(htable->table[index]);
409 while (cursor != NULL) {
410 /* Check hash value to short circuit heavier comparison */
411 if ((hash_value == cursor->hash) &&
412 (htable->eq_fn(key, cursor->key))) {
414 *entry_ptr = cursor->next;
415 htable->entry_count--;
416 value = cursor->value;
419 freekey((void *)(cursor->key));
426 entry_ptr = &(cursor->next);
427 cursor = cursor->next;
432 /*****************************************************************************/
434 void hashtable_destroy(struct hashtable * htable, int free_values, int free_keys) {
436 struct hash_entry * cursor;;
437 struct hash_entry **table = htable->table;
440 for (i = 0; i < htable->table_length; i++) {
443 while (cursor != NULL) {
444 struct hash_entry * tmp;
447 cursor = cursor->next;
450 freekey((void *)(tmp->key));
452 V3_Free((void *)(tmp->value));
457 for (i = 0; i < htable->table_length; i++) {
460 while (cursor != NULL) {
461 struct hash_entry * tmp;
464 cursor = cursor->next;
467 freekey((void *)(tmp->key));
474 V3_Free(htable->table);
481 /* HASH TABLE ITERATORS */
485 struct hashtable_iter * create_hashtable_iterator(struct hashtable * htable) {
489 struct hashtable_iter * iter = (struct hashtable_iter *)V3_Malloc(sizeof(struct hashtable_iter));
495 iter->htable = htable;
498 table_length = htable->table_length;
499 iter->index = table_length;
501 if (htable->entry_count == 0) {
505 for (i = 0; i < table_length; i++) {
506 if (htable->table[i] != NULL) {
507 iter->entry = htable->table[i];
517 addr_t hashtable_get_iter_key(struct hashtable_iter * iter) {
518 return iter->entry->key;
521 addr_t hashtable_get_iter_value(struct hashtable_iter * iter) {
522 return iter->entry->value;
526 /* advance - advance the iterator to the next element
527 * returns zero if advanced to end of table */
528 int hashtable_iterator_advance(struct hashtable_iter * iter) {
531 struct hash_entry ** table;
532 struct hash_entry * next;
534 if (iter->entry == NULL) {
535 return 0; /* stupidity check */
539 next = iter->entry->next;
542 iter->parent = iter->entry;
547 table_length = iter->htable->table_length;
550 if (table_length <= (j = ++(iter->index))) {
555 table = iter->htable->table;
557 while ((next = table[j]) == NULL) {
558 if (++j >= table_length) {
559 iter->index = table_length;
572 /* remove - remove the entry at the current iterator position
573 * and advance the iterator, if there is a successive
575 * If you want the value, read it before you remove:
576 * beware memory leaks if you don't.
577 * Returns zero if end of iteration. */
578 int hashtable_iterator_remove(struct hashtable_iter * iter, int free_key) {
579 struct hash_entry * remember_entry;
580 struct hash_entry * remember_parent;
584 if ((iter->parent) == NULL) {
585 /* element is head of a chain */
586 iter->htable->table[iter->index] = iter->entry->next;
588 /* element is mid-chain */
589 iter->parent->next = iter->entry->next;
593 /* itr->e is now outside the hashtable */
594 remember_entry = iter->entry;
595 iter->htable->entry_count--;
597 freekey((void *)(remember_entry->key));
600 /* Advance the iterator, correcting the parent */
601 remember_parent = iter->parent;
602 ret = hashtable_iterator_advance(iter);
604 if (iter->parent == remember_entry) {
605 iter->parent = remember_parent;
608 V3_Free(remember_entry);
613 /* returns zero if not found */
614 int hashtable_iterator_search(struct hashtable_iter * iter,
615 struct hashtable * htable, addr_t key) {
616 struct hash_entry * entry;
617 struct hash_entry * parent;
621 hash_value = do_hash(htable, key);
622 index = indexFor(htable->table_length, hash_value);
624 entry = htable->table[index];
627 while (entry != NULL) {
628 /* Check hash value to short circuit heavier comparison */
629 if ((hash_value == entry->hash) &&
630 (htable->eq_fn(key, entry->key))) {
633 iter->parent = parent;
634 iter->htable = htable;
/*
 * Copyright (c) 2002, Christopher Clark
659 * All rights reserved.
661 * Redistribution and use in source and binary forms, with or without
662 * modification, are permitted provided that the following conditions
665 * * Redistributions of source code must retain the above copyright
666 * notice, this list of conditions and the following disclaimer.
668 * * Redistributions in binary form must reproduce the above copyright
669 * notice, this list of conditions and the following disclaimer in the
670 * documentation and/or other materials provided with the distribution.
672 * * Neither the name of the original author; nor the names of any contributors
673 * may be used to endorse or promote products derived from this software
674 * without specific prior written permission.
677 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
678 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
679 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
680 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
681 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
682 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
683 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
684 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
685 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
686 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */