6 #include <linux/string.h>
7 #include <linux/errno.h>
8 #include <linux/preempt.h>
9 #include <linux/sched.h>
11 #include "palacios-hashtable.h"
/* Chained-hashtable internals (struct bodies are partially elided in this
 * extract): each hash_entry carries a 'next' pointer forming a singly-linked
 * collision chain; the hashtable owns the bucket array 'table' plus the
 * user-supplied hash and key-equality callbacks, both keyed on addr_t. */
18 struct hash_entry * next;
23 struct hash_entry ** table;
/* hash_fn maps a key to a raw hash; eq_fn returns nonzero when two keys
 * are equal. Both are installed by palacios_create_htable(). */
27 uint_t (*hash_fn) (addr_t key);
28 int (*eq_fn) (addr_t key1, addr_t key2);
/* Run the user's hash function on 'key', then scramble the result so that
 * weak user hash functions still spread entries across buckets.
 * NOTE(review): the remaining mixing steps, the return statement, and the
 * closing brace are elided in this extract. */
34 static inline uint_t do_hash(struct hashtable * htable, addr_t key) {
35 /* Aim to protect against poor hash functions by adding logic here
36 * - logic taken from java 1.4 hashtable source */
37 uint_t i = htable->hash_fn(key);
/* The '>>>' markers record that the Java original used an unsigned right
 * shift; the (i >> n) | (i << 32-n) pair emulates a rotate on uint_t. */
39 i ^= ((i >> 14) | (i << 18)); /* >>> */
41 i ^= ((i >> 10) | (i << 22)); /* >>> */
47 /* HASH AN UNSIGNED LONG */
48 /* LINUX UNSIGNED LONG HASH FUNCTION */
/* Word-size-dependent multiplier for the multiplicative hash below, taken
 * from the Linux kernel's hash.h. NOTE(review): the opening #if of this
 * conditional (presumably testing the 32-bit configuration) is elided in
 * this extract; only the #elif/#error arms are visible. */
50 /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
51 #define GOLDEN_RATIO_PRIME 0x9e370001UL
52 //#define BITS_PER_LONG 32
53 #elif defined(__64BIT__)
54 /* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
55 #define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
56 //#define BITS_PER_LONG 64
58 #error Define GOLDEN_RATIO_PRIME for your wordsize.
/* Multiplicative hash of an unsigned long (Linux hash_long()):
 * multiply by GOLDEN_RATIO_PRIME and keep the top 'bits' bits, which are
 * the best mixed. NOTE(review): the declaration of 'hash' and the 64-bit
 * shift/add expansion of the multiply are elided in this extract. */
61 ulong_t palacios_hash_long(ulong_t val, uint_t bits) {
64 #ifdef __PALACIOS_64BIT__
65 /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
80 /* On some cpus multiply is faster, on others gcc will do shifts */
81 hash *= GOLDEN_RATIO_PRIME;
84 /* High bits are more random, so use them. */
85 return hash >> (BITS_PER_LONG - bits);
88 /* HASH GENERIC MEMORY BUFFER */
89 /* ELF HEADER HASH FUNCTION */
/* PJW/ELF-style rolling hash over 'length' bytes of 'msg'.
 * NOTE(review): declarations of 'hash'/'temp'/'i', the fold-back of the
 * overflow nibble (hash ^= temp >> 24 etc.), and the return are elided in
 * this extract. The '+ i' term is a Palacios variation on the classic
 * ELF hash — confirm against the full source. */
90 ulong_t palacios_hash_buffer(uchar_t * msg, uint_t length) {
95 for (i = 0; i < length; i++) {
96 hash = (hash << 4) + *(msg + i) + i;
97 if ((temp = (hash & 0xF0000000))) {
/* Map a hash value to a bucket index; table_length is always one of the
 * primes[] values, so plain modulo distributes well.
 * NOTE(review): closing brace elided in this extract. */
106 static inline uint_t indexFor(uint_t table_length, uint_t hash_value) {
107 return (hash_value % table_length);
/* Release a key allocation when callers requested key ownership
 * (used by the free_key/free_keys paths below). Maps onto kfree since
 * this build runs inside the Linux kernel. */
110 #define freekey(X) kfree(X)
/* Poor-man's realloc for kernel space: allocate 'new_size' bytes, copy the
 * first 'old_size' bytes of the old buffer across, return the new buffer
 * (NULL on allocation failure, leaving old_ptr untouched).
 * NOTE(review): the failure-path return, the kfree of old_ptr on success,
 * and the final return are elided in this extract — confirm the old buffer
 * is freed exactly once. */
113 static void * tmp_realloc(void * old_ptr, uint_t old_size, uint_t new_size) {
114 void * new_buf = kmalloc(new_size, GFP_KERNEL);
116 if (new_buf == NULL) {
120 memcpy(new_buf, old_ptr, old_size);
128 Credit for primes table: Aaron Krowne
129 http://br.endernet.org/~akrowne/
130 http://planetmath.org/encyclopedia/GoodHashTablePrimes.html
/* Table sizes: each entry is a prime roughly double the previous one, so
 * expansion approximately doubles capacity while keeping modulo hashing
 * well-distributed. NOTE(review): the first row of small primes (before
 * 769) is elided in this extract. */
132 static const uint_t primes[] = {
134 769, 1543, 3079, 6151,
135 12289, 24593, 49157, 98317,
136 196613, 393241, 786433, 1572869,
137 3145739, 6291469, 12582917, 25165843,
138 50331653, 100663319, 201326611, 402653189,
139 805306457, 1610612741 };
142 // this assumes that the max load factor is .65
/* Precomputed expansion thresholds: load_factors[i] == primes[i] * 0.65
 * (rounded), indexed in lockstep with primes[] via prime_index. Insertion
 * triggers hashtable_expand() once entry_count exceeds this limit.
 * NOTE(review): the first row (matching the elided small primes) is also
 * elided in this extract. */
143 static const uint_t load_factors[] = {
145 500, 1003, 2002, 3999,
146 7988, 15986, 31953, 63907,
147 127799, 255607, 511182, 1022365,
148 2044731, 4089455, 8178897, 16357798,
149 32715575, 65431158, 130862298, 261724573,
150 523449198, 1046898282 };
/* Number of entries in primes[] (and load_factors[]); expansion stops when
 * prime_index reaches this - 1. */
152 const uint_t prime_table_len = sizeof(primes) / sizeof(primes[0]);
/* Allocate and initialize a hashtable holding at least 'min_size' slots.
 * hash_fn/eq_fn are the caller's key hash and key-equality callbacks.
 * Returns the new table, or NULL on allocation failure or if min_size
 * exceeds 2^30. NOTE(review): the 'prime_index' declaration, the error
 * returns, the loop 'break', and the final 'return htable;' are elided
 * in this extract. */
154 struct hashtable * palacios_create_htable(uint_t min_size,
155 uint_t (*hash_fn) (addr_t),
156 int (*eq_fn) (addr_t, addr_t)) {
157 struct hashtable * htable;
159 uint_t size = primes[0];
161 /* Check requested hashtable isn't too large */
162 if (min_size > (1u << 30)) {
166 /* Enforce size as prime */
167 for (prime_index = 0; prime_index < prime_table_len; prime_index++) {
168 if (primes[prime_index] > min_size) {
169 size = primes[prime_index];
174 htable = (struct hashtable *)kmalloc(sizeof(struct hashtable), GFP_KERNEL);
176 if (htable == NULL) {
180 htable->table = (struct hash_entry **)kmalloc(sizeof(struct hash_entry*) * size, GFP_KERNEL);
/* NOTE(review): the failure branch here must free 'htable' before
 * returning NULL — that cleanup line is elided in this extract; confirm. */
182 if (htable->table == NULL) {
/* All buckets start empty (NULL chain heads). */
187 memset(htable->table, 0, size * sizeof(struct hash_entry *));
189 htable->table_length = size;
190 htable->prime_index = prime_index;
191 htable->entry_count = 0;
192 htable->hash_fn = hash_fn;
193 htable->eq_fn = eq_fn;
/* Expansion threshold matching this size (0.65 load factor). */
194 htable->load_limit = load_factors[prime_index];
/* Grow the bucket array to the next prime in primes[] and rehash every
 * entry. Plan A: kmalloc a fresh zeroed array and move each chain node
 * over (reverses chain order — stated below). Plan B (if that kmalloc
 * fails): tmp_realloc the existing array in place and rehash by walking
 * each old bucket. NOTE(review): the declarations of new_size/i/index,
 * the max-capacity early return, the loop conditions at the elided
 * lines, and the success/failure return values are elided in this
 * extract. */
200 static int hashtable_expand(struct hashtable * htable) {
201 /* Double the size of the table to accommodate more entries */
202 struct hash_entry ** new_table;
203 struct hash_entry * tmp_entry;
204 struct hash_entry ** entry_ptr;
209 /* Check we're not hitting max capacity */
210 if (htable->prime_index == (prime_table_len - 1)) {
/* Note: prime_index is bumped here even before we know allocation will
 * succeed; Plan B's failure path decrements it back. */
214 new_size = primes[++(htable->prime_index)];
216 new_table = (struct hash_entry **)kmalloc(sizeof(struct hash_entry*) * new_size, GFP_KERNEL);
218 if (new_table != NULL) {
219 memset(new_table, 0, new_size * sizeof(struct hash_entry *));
220 /* This algorithm is not 'stable'. ie. it reverses the list
221 * when it transfers entries between the tables */
223 for (i = 0; i < htable->table_length; i++) {
/* Pop each node off the old chain head and push it onto the head of
 * its new bucket. */
225 while ((tmp_entry = htable->table[i]) != NULL) {
226 htable->table[i] = tmp_entry->next;
228 index = indexFor(new_size, tmp_entry->hash);
230 tmp_entry->next = new_table[index];
232 new_table[index] = tmp_entry;
236 kfree(htable->table);
238 htable->table = new_table;
240 /* Plan B: realloc instead */
242 //new_table = (struct hash_entry **)realloc(htable->table, new_size * sizeof(struct hash_entry *));
/* NOTE(review): suspected bug — the old-size argument passed to
 * tmp_realloc is primes[prime_index - 1], an ELEMENT COUNT, not a byte
 * size; it appears to be missing '* sizeof(struct hash_entry *)', so
 * only part of the old bucket array would be copied. Confirm against
 * upstream before relying on this path. */
243 new_table = (struct hash_entry **)tmp_realloc(htable->table, primes[htable->prime_index - 1],
244 new_size * sizeof(struct hash_entry *));
246 if (new_table == NULL) {
/* Roll back the speculative prime_index bump taken above. */
247 (htable->prime_index)--;
251 htable->table = new_table;
/* NOTE(review): suspected bug — this dereferences the (uninitialized)
 * pointer at new_table[table_length] instead of zeroing the tail of the
 * array starting at &new_table[table_length], and the byte count is not
 * scaled by sizeof(struct hash_entry *). Flagged only, since adjacent
 * lines are elided in this extract. */
253 memset(new_table[htable->table_length], 0, new_size - htable->table_length);
255 for (i = 0; i < htable->table_length; i++) {
/* Walk bucket i with a pointer-to-pointer so nodes that rehash to a new
 * bucket can be unlinked in place; nodes that stay keep their order. */
257 for (entry_ptr = &(new_table[i]), tmp_entry = *entry_ptr;
259 tmp_entry = *entry_ptr) {
261 index = indexFor(new_size, tmp_entry->hash);
264 entry_ptr = &(tmp_entry->next);
266 *entry_ptr = tmp_entry->next;
267 tmp_entry->next = new_table[index];
268 new_table[index] = tmp_entry;
274 htable->table_length = new_size;
/* New expansion threshold for the enlarged table. */
276 htable->load_limit = load_factors[htable->prime_index];
/* Return the number of entries currently stored in the table.
 * NOTE(review): closing brace elided in this extract. */
281 uint_t palacios_htable_count(struct hashtable * htable) {
282 return htable->entry_count;
/* Insert (key, value) at the head of its bucket chain. Duplicate keys are
 * not rejected (first match wins on lookup). Triggers a best-effort table
 * expansion when the load limit is exceeded. NOTE(review): the 'index'
 * declaration, the failure return after the alloc check, and the success
 * return are elided in this extract. */
285 int palacios_htable_insert(struct hashtable * htable, addr_t key, addr_t value) {
286 /* This method allows duplicate keys - but they shouldn't be used */
288 struct hash_entry * new_entry;
/* Count is bumped optimistically; the allocation-failure path below
 * undoes it. */
290 if (++(htable->entry_count) > htable->load_limit) {
291 /* Ignore the return value. If expand fails, we should
292 * still try cramming just this value into the existing table
293 * -- we may not have memory for a larger table, but one more
294 * element may be ok. Next time we insert, we'll try expanding again.*/
295 hashtable_expand(htable);
298 new_entry = (struct hash_entry *)kmalloc(sizeof(struct hash_entry), GFP_KERNEL);
300 if (new_entry == NULL) {
301 (htable->entry_count)--;
/* Cache the mixed hash so expansion can rehash without re-running the
 * user hash function. */
305 new_entry->hash = do_hash(htable, key);
307 index = indexFor(htable->table_length, new_entry->hash);
309 new_entry->key = key;
310 new_entry->value = value;
/* Push onto the bucket's chain head. */
312 new_entry->next = htable->table[index];
314 htable->table[index] = new_entry;
/* Replace the value stored under 'key'. If free_value is nonzero the old
 * value is kfree'd (i.e. it was a pointer the table owned). NOTE(review):
 * the hash_value/index declarations, the free_value conditional line, and
 * the found/not-found return values are elided in this extract. */
320 int palacios_htable_change(struct hashtable * htable, addr_t key, addr_t value, int free_value) {
321 struct hash_entry * tmp_entry;
325 hash_value = do_hash(htable, key);
327 index = indexFor(htable->table_length, hash_value);
329 tmp_entry = htable->table[index];
331 while (tmp_entry != NULL) {
332 /* Check hash value to short circuit heavier comparison */
333 if ((hash_value == tmp_entry->hash) && (htable->eq_fn(key, tmp_entry->key))) {
336 kfree((void *)(tmp_entry->value));
339 tmp_entry->value = value;
342 tmp_entry = tmp_entry->next;
/* Add 'value' to the value stored under 'key' (treats the stored addr_t as
 * an integer counter). NOTE(review): local declarations and the
 * found/not-found return values are elided in this extract. */
349 int palacios_htable_inc(struct hashtable * htable, addr_t key, addr_t value) {
350 struct hash_entry * tmp_entry;
354 hash_value = do_hash(htable, key);
356 index = indexFor(htable->table_length, hash_value);
358 tmp_entry = htable->table[index];
360 while (tmp_entry != NULL) {
361 /* Check hash value to short circuit heavier comparison */
362 if ((hash_value == tmp_entry->hash) && (htable->eq_fn(key, tmp_entry->key))) {
364 tmp_entry->value += value;
367 tmp_entry = tmp_entry->next;
/* Subtract 'value' from the value stored under 'key' — mirror of
 * palacios_htable_inc. NOTE(review): no visible underflow guard; local
 * declarations and return values are elided in this extract. */
373 int palacios_htable_dec(struct hashtable * htable, addr_t key, addr_t value) {
374 struct hash_entry * tmp_entry;
378 hash_value = do_hash(htable, key);
380 index = indexFor(htable->table_length, hash_value);
382 tmp_entry = htable->table[index];
384 while (tmp_entry != NULL) {
385 /* Check hash value to short circuit heavier comparison */
386 if ((hash_value == tmp_entry->hash) && (htable->eq_fn(key, tmp_entry->key))) {
388 tmp_entry->value -= value;
391 tmp_entry = tmp_entry->next;
397 /* returns value associated with key */
/* Look up 'key' and return its value. NOTE(review): the not-found return
 * (presumably (addr_t)NULL/0 — ambiguous if 0 is a legal value) and local
 * declarations are elided in this extract. */
398 addr_t palacios_htable_search(struct hashtable * htable, addr_t key) {
399 struct hash_entry * cursor;
403 hash_value = do_hash(htable, key);
405 index = indexFor(htable->table_length, hash_value);
407 cursor = htable->table[index];
409 while (cursor != NULL) {
410 /* Check hash value to short circuit heavier comparison */
411 if ((hash_value == cursor->hash) &&
412 (htable->eq_fn(key, cursor->key))) {
413 return cursor->value;
416 cursor = cursor->next;
423 /* returns value associated with key */
/* Unlink the entry for 'key', free the entry node (and, if free_key, the
 * key), and return the removed value. NOTE(review): the initialization of
 * 'cursor' from *entry_ptr, the kfree of the entry itself, the returns,
 * and local declarations are elided in this extract. */
424 addr_t palacios_htable_remove(struct hashtable * htable, addr_t key, int free_key) {
425 /* TODO: consider compacting the table when the load factor drops enough,
426 * or provide a 'compact' method. */
428 struct hash_entry * cursor;
429 struct hash_entry ** entry_ptr;
434 hash_value = do_hash(htable, key);
436 index = indexFor(htable->table_length, hash_value);
/* entry_ptr trails cursor so the match can be spliced out of the chain
 * without a separate 'previous' node. */
438 entry_ptr = &(htable->table[index]);
441 while (cursor != NULL) {
442 /* Check hash value to short circuit heavier comparison */
443 if ((hash_value == cursor->hash) &&
444 (htable->eq_fn(key, cursor->key))) {
446 *entry_ptr = cursor->next;
447 htable->entry_count--;
448 value = cursor->value;
451 freekey((void *)(cursor->key));
458 entry_ptr = &(cursor->next);
459 cursor = cursor->next;
/* Destroy the table: walk every bucket chain freeing each entry node,
 * optionally kfree'ing keys (free_keys) and values (free_values), then
 * free the bucket array and the table struct. Two structurally-identical
 * loops exist — one for the free_values case, one without — selected by a
 * conditional elided in this extract. NOTE(review): the per-iteration
 * 'tmp = cursor' saves, the kfree(tmp) entry frees, the branch structure,
 * and the final kfree(htable) are elided here. */
466 void palacios_free_htable(struct hashtable * htable, int free_values, int free_keys) {
468 struct hash_entry * cursor;
469 struct hash_entry * tmp;
470 struct hash_entry **table = htable->table;
473 for (i = 0; i < htable->table_length; i++) {
476 while (cursor != NULL) {
/* Advance before freeing: 'tmp' holds the node being released. */
478 cursor = cursor->next;
481 freekey((void *)(tmp->key));
483 kfree((void *)(tmp->value));
488 for (i = 0; i < htable->table_length; i++) {
491 while (cursor != NULL) {
492 struct hash_entry * tmp;
495 cursor = cursor->next;
498 freekey((void *)(tmp->key));
505 kfree(htable->table);