/*
 * Copyright (c) 2002, 2004, Christopher Clark
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 *
 * * Neither the name of the original author; nor the names of any contributors
 *   may be used to endorse or promote products derived from this software
 *   without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
34 /* Modifications made by Lei Xia */
36 #include <linux/string.h>
37 #include <linux/errno.h>
38 #include <linux/preempt.h>
39 #include <linux/sched.h>
41 #include "util-hashtable.h"
48 struct hash_entry * next;
53 struct hash_entry ** table;
57 uint_t (*hash_fn) (addr_t key);
58 int (*eq_fn) (addr_t key1, addr_t key2);
64 static inline uint_t do_hash(struct hashtable * htable, addr_t key) {
65 /* Aim to protect against poor hash functions by adding logic here
66 * - logic taken from java 1.4 hashtable source */
67 uint_t i = htable->hash_fn(key);
69 i ^= ((i >> 14) | (i << 18)); /* >>> */
71 i ^= ((i >> 10) | (i << 22)); /* >>> */
77 /* HASH AN UNSIGNED LONG */
78 /* LINUX UNSIGHED LONG HASH FUNCTION */
80 /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
81 #define GOLDEN_RATIO_PRIME 0x9e370001UL
82 //#define BITS_PER_LONG 32
83 #elif defined(__64BIT__)
84 /* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
85 #define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
86 //#define BITS_PER_LONG 64
88 #error Define GOLDEN_RATIO_PRIME for your wordsize.
91 ulong_t palacios_hash_long(ulong_t val, uint_t bits) {
94 #ifdef __PALACIOS_64BIT__
95 /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
110 /* On some cpus multiply is faster, on others gcc will do shifts */
111 hash *= GOLDEN_RATIO_PRIME;
114 /* High bits are more random, so use them. */
115 return hash >> (BITS_PER_LONG - bits);
118 /* HASH GENERIC MEMORY BUFFER */
119 /* ELF HEADER HASH FUNCTION */
120 ulong_t palacios_hash_buffer(uchar_t * msg, uint_t length) {
125 for (i = 0; i < length; i++) {
126 hash = (hash << 4) + *(msg + i) + i;
127 if ((temp = (hash & 0xF0000000))) {
128 hash ^= (temp >> 24);
136 static inline uint_t indexFor(uint_t table_length, uint_t hash_value) {
137 return (hash_value % table_length);
/* Free a key the table was asked to take ownership of (free_key paths). */
#define freekey(X) kfree(X)
143 static void * tmp_realloc(void * old_ptr, uint_t old_size, uint_t new_size) {
144 void * new_buf = kmalloc(new_size, GFP_KERNEL);
146 if (new_buf == NULL) {
150 memcpy(new_buf, old_ptr, old_size);
/*
 * Credit for primes table: Aaron Krowne
 * http://br.endernet.org/~akrowne/
 * http://planetmath.org/encyclopedia/GoodHashTablePrimes.html
 */
162 static const uint_t primes[] = {
164 769, 1543, 3079, 6151,
165 12289, 24593, 49157, 98317,
166 196613, 393241, 786433, 1572869,
167 3145739, 6291469, 12582917, 25165843,
168 50331653, 100663319, 201326611, 402653189,
169 805306457, 1610612741 };
172 // this assumes that the max load factor is .65
173 static const uint_t load_factors[] = {
175 500, 1003, 2002, 3999,
176 7988, 15986, 31953, 63907,
177 127799, 255607, 511182, 1022365,
178 2044731, 4089455, 8178897, 16357798,
179 32715575, 65431158, 130862298, 261724573,
180 523449198, 1046898282 };
182 const uint_t prime_table_len = sizeof(primes) / sizeof(primes[0]);
184 struct hashtable * palacios_create_htable(uint_t min_size,
185 uint_t (*hash_fn) (addr_t),
186 int (*eq_fn) (addr_t, addr_t)) {
187 struct hashtable * htable;
189 uint_t size = primes[0];
191 /* Check requested hashtable isn't too large */
192 if (min_size > (1u << 30)) {
196 /* Enforce size as prime */
197 for (prime_index = 0; prime_index < prime_table_len; prime_index++) {
198 if (primes[prime_index] > min_size) {
199 size = primes[prime_index];
204 htable = (struct hashtable *)kmalloc(sizeof(struct hashtable), GFP_KERNEL);
206 if (htable == NULL) {
210 htable->table = (struct hash_entry **)kmalloc(sizeof(struct hash_entry*) * size, GFP_KERNEL);
212 if (htable->table == NULL) {
217 memset(htable->table, 0, size * sizeof(struct hash_entry *));
219 htable->table_length = size;
220 htable->prime_index = prime_index;
221 htable->entry_count = 0;
222 htable->hash_fn = hash_fn;
223 htable->eq_fn = eq_fn;
224 htable->load_limit = load_factors[prime_index];
230 static int hashtable_expand(struct hashtable * htable) {
231 /* Double the size of the table to accomodate more entries */
232 struct hash_entry ** new_table;
233 struct hash_entry * tmp_entry;
234 struct hash_entry ** entry_ptr;
239 /* Check we're not hitting max capacity */
240 if (htable->prime_index == (prime_table_len - 1)) {
244 new_size = primes[++(htable->prime_index)];
246 new_table = (struct hash_entry **)kmalloc(sizeof(struct hash_entry*) * new_size, GFP_KERNEL);
248 if (new_table != NULL) {
249 memset(new_table, 0, new_size * sizeof(struct hash_entry *));
250 /* This algorithm is not 'stable'. ie. it reverses the list
251 * when it transfers entries between the tables */
253 for (i = 0; i < htable->table_length; i++) {
255 while ((tmp_entry = htable->table[i]) != NULL) {
256 htable->table[i] = tmp_entry->next;
258 index = indexFor(new_size, tmp_entry->hash);
260 tmp_entry->next = new_table[index];
262 new_table[index] = tmp_entry;
266 kfree(htable->table);
268 htable->table = new_table;
270 /* Plan B: realloc instead */
272 //new_table = (struct hash_entry **)realloc(htable->table, new_size * sizeof(struct hash_entry *));
273 new_table = (struct hash_entry **)tmp_realloc(htable->table, primes[htable->prime_index - 1],
274 new_size * sizeof(struct hash_entry *));
276 if (new_table == NULL) {
277 (htable->prime_index)--;
281 htable->table = new_table;
283 memset(new_table[htable->table_length], 0, new_size - htable->table_length);
285 for (i = 0; i < htable->table_length; i++) {
287 for (entry_ptr = &(new_table[i]), tmp_entry = *entry_ptr;
289 tmp_entry = *entry_ptr) {
291 index = indexFor(new_size, tmp_entry->hash);
294 entry_ptr = &(tmp_entry->next);
296 *entry_ptr = tmp_entry->next;
297 tmp_entry->next = new_table[index];
298 new_table[index] = tmp_entry;
304 htable->table_length = new_size;
306 htable->load_limit = load_factors[htable->prime_index];
311 uint_t palacios_htable_count(struct hashtable * htable) {
312 return htable->entry_count;
315 int palacios_htable_insert(struct hashtable * htable, addr_t key, addr_t value) {
316 /* This method allows duplicate keys - but they shouldn't be used */
318 struct hash_entry * new_entry;
320 if (++(htable->entry_count) > htable->load_limit) {
321 /* Ignore the return value. If expand fails, we should
322 * still try cramming just this value into the existing table
323 * -- we may not have memory for a larger table, but one more
324 * element may be ok. Next time we insert, we'll try expanding again.*/
325 hashtable_expand(htable);
328 new_entry = (struct hash_entry *)kmalloc(sizeof(struct hash_entry), GFP_KERNEL);
330 if (new_entry == NULL) {
331 (htable->entry_count)--;
335 new_entry->hash = do_hash(htable, key);
337 index = indexFor(htable->table_length, new_entry->hash);
339 new_entry->key = key;
340 new_entry->value = value;
342 new_entry->next = htable->table[index];
344 htable->table[index] = new_entry;
350 int palacios_htable_change(struct hashtable * htable, addr_t key, addr_t value, int free_value) {
351 struct hash_entry * tmp_entry;
355 hash_value = do_hash(htable, key);
357 index = indexFor(htable->table_length, hash_value);
359 tmp_entry = htable->table[index];
361 while (tmp_entry != NULL) {
362 /* Check hash value to short circuit heavier comparison */
363 if ((hash_value == tmp_entry->hash) && (htable->eq_fn(key, tmp_entry->key))) {
366 kfree((void *)(tmp_entry->value));
369 tmp_entry->value = value;
372 tmp_entry = tmp_entry->next;
379 int palacios_htable_inc(struct hashtable * htable, addr_t key, addr_t value) {
380 struct hash_entry * tmp_entry;
384 hash_value = do_hash(htable, key);
386 index = indexFor(htable->table_length, hash_value);
388 tmp_entry = htable->table[index];
390 while (tmp_entry != NULL) {
391 /* Check hash value to short circuit heavier comparison */
392 if ((hash_value == tmp_entry->hash) && (htable->eq_fn(key, tmp_entry->key))) {
394 tmp_entry->value += value;
397 tmp_entry = tmp_entry->next;
403 int palacios_htable_dec(struct hashtable * htable, addr_t key, addr_t value) {
404 struct hash_entry * tmp_entry;
408 hash_value = do_hash(htable, key);
410 index = indexFor(htable->table_length, hash_value);
412 tmp_entry = htable->table[index];
414 while (tmp_entry != NULL) {
415 /* Check hash value to short circuit heavier comparison */
416 if ((hash_value == tmp_entry->hash) && (htable->eq_fn(key, tmp_entry->key))) {
418 tmp_entry->value -= value;
421 tmp_entry = tmp_entry->next;
427 /* returns value associated with key */
428 addr_t palacios_htable_search(struct hashtable * htable, addr_t key) {
429 struct hash_entry * cursor;
433 hash_value = do_hash(htable, key);
435 index = indexFor(htable->table_length, hash_value);
437 cursor = htable->table[index];
439 while (cursor != NULL) {
440 /* Check hash value to short circuit heavier comparison */
441 if ((hash_value == cursor->hash) &&
442 (htable->eq_fn(key, cursor->key))) {
443 return cursor->value;
446 cursor = cursor->next;
453 /* returns value associated with key */
454 addr_t palacios_htable_remove(struct hashtable * htable, addr_t key, int free_key) {
455 /* TODO: consider compacting the table when the load factor drops enough,
456 * or provide a 'compact' method. */
458 struct hash_entry * cursor;
459 struct hash_entry ** entry_ptr;
464 hash_value = do_hash(htable, key);
466 index = indexFor(htable->table_length, hash_value);
468 entry_ptr = &(htable->table[index]);
471 while (cursor != NULL) {
472 /* Check hash value to short circuit heavier comparison */
473 if ((hash_value == cursor->hash) &&
474 (htable->eq_fn(key, cursor->key))) {
476 *entry_ptr = cursor->next;
477 htable->entry_count--;
478 value = cursor->value;
481 freekey((void *)(cursor->key));
488 entry_ptr = &(cursor->next);
489 cursor = cursor->next;
496 void palacios_free_htable(struct hashtable * htable, int free_values, int free_keys) {
498 struct hash_entry * cursor;
499 struct hash_entry * tmp;
500 struct hash_entry **table = htable->table;
503 for (i = 0; i < htable->table_length; i++) {
506 while (cursor != NULL) {
508 cursor = cursor->next;
511 freekey((void *)(tmp->key));
513 kfree((void *)(tmp->value));
518 for (i = 0; i < htable->table_length; i++) {
521 while (cursor != NULL) {
522 struct hash_entry * tmp;
525 cursor = cursor->next;
528 freekey((void *)(tmp->key));
535 kfree(htable->table);