2 * Resizable, Scalable, Concurrent Hash Table
4 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
5 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
7 * Based on the following paper:
8 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
10 * Code partially derived from nft_hash
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
17 #include <linux/kernel.h>
18 #include <linux/init.h>
19 #include <linux/log2.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
23 #include <linux/hash.h>
24 #include <linux/random.h>
25 #include <linux/rhashtable.h>
28 #define HASH_DEFAULT_SIZE 64UL
29 #define HASH_MIN_SIZE 4UL
31 #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
33 #ifdef CONFIG_PROVE_LOCKING
34 int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
36 return ht->p.mutex_is_held();
38 EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
42 * rht_obj - cast hash head to outer object
46 void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
48 return (void *) he - ht->p.head_offset;
50 EXPORT_SYMBOL_GPL(rht_obj);
52 static u32 __hashfn(const struct rhashtable *ht, const void *key,
57 h = ht->p.hashfn(key, len, ht->p.hash_rnd);
59 return h & (hsize - 1);
63 * rhashtable_hashfn - compute hash for key of given length
 * @ht: hash table to compute the hash for
65 * @key: pointer to key
68 * Computes the hash value using the hash function provided in the 'hashfn'
69 * of struct rhashtable_params. The returned value is guaranteed to be
70 * smaller than the number of buckets in the hash table.
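 *
 * Example (illustrative sketch, not from the original documentation):
 *
 *	u32 key = 42;
 *	u32 bkt = rhashtable_hashfn(ht, &key, sizeof(key));
 *
 * bkt indexes a valid bucket of the table currently published in ht->tbl.
 */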
72 u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len)
74 struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
76 return __hashfn(ht, key, len, tbl->size);
78 EXPORT_SYMBOL_GPL(rhashtable_hashfn);
80 static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
82 if (unlikely(!ht->p.key_len)) {
85 h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
87 return h & (hsize - 1);
90 return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize);
94 * rhashtable_obj_hashfn - compute hash for hashed object
 * @ht: hash table to compute the hash for
96 * @ptr: pointer to hashed object
 * Computes the hash value using either the 'hashfn' or the 'obj_hashfn'
 * callback, depending on whether the hash table is set up to work with
100 * a fixed length key. The returned value is guaranteed to be smaller than
101 * the number of buckets in the hash table.
103 u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr)
105 struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
107 return obj_hashfn(ht, ptr, tbl->size);
109 EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn);
111 static u32 head_hashfn(const struct rhashtable *ht,
112 const struct rhash_head *he, u32 hsize)
114 return obj_hashfn(ht, rht_obj(ht, he), hsize);
117 static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags)
119 struct bucket_table *tbl;
122 size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
123 tbl = kzalloc(size, flags);
130 tbl->size = nbuckets;
135 static void bucket_table_free(const struct bucket_table *tbl)
141 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
143 * @new_size: new table size
145 bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
147 /* Expand table when exceeding 75% load */
148 return ht->nelems > (new_size / 4 * 3);
150 EXPORT_SYMBOL_GPL(rht_grow_above_75);
153 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
155 * @new_size: new table size
157 bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
159 /* Shrink table beneath 30% load */
160 return ht->nelems < (new_size * 3 / 10);
162 EXPORT_SYMBOL_GPL(rht_shrink_below_30);
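/* Worked example (illustrative): for a table of new_size == 64 buckets,
 * rht_grow_above_75() requests growth once nelems > 48 (64 / 4 * 3), and
 * rht_shrink_below_30() requests shrinking once nelems < 19 (64 * 3 / 10).
 */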
164 static void hashtable_chain_unzip(const struct rhashtable *ht,
165 const struct bucket_table *new_tbl,
166 struct bucket_table *old_tbl, size_t n)
168 struct rhash_head *he, *p, *next;
171 /* Old bucket empty, no work needed. */
172 p = rht_dereference(old_tbl->buckets[n], ht);
	/* Advance p along the old bucket chain until it points to the last
	 * node that still hashes to the same new bucket as the chain head;
	 * the node after p (if any) hashes to a different new bucket.
	 */
180 h = head_hashfn(ht, p, new_tbl->size);
181 rht_for_each(he, p->next, ht) {
182 if (head_hashfn(ht, he, new_tbl->size) != h)
186 RCU_INIT_POINTER(old_tbl->buckets[n], p->next);
188 /* Find the subsequent node which does hash to the same
189 * bucket as node P, or NULL if no such node exists.
193 rht_for_each(he, he->next, ht) {
194 if (head_hashfn(ht, he, new_tbl->size) == h) {
201 /* Set p's next pointer to that subsequent node pointer,
202 * bypassing the nodes which do not hash to p's bucket
204 RCU_INIT_POINTER(p->next, next);
208 * rhashtable_expand - Expand hash table while allowing concurrent lookups
209 * @ht: the hash table to expand
210 * @flags: allocation flags
212 * A secondary bucket array is allocated and the hash entries are migrated
213 * while keeping them on both lists until the end of the RCU grace period.
215 * This function may only be called in a context where it is safe to call
216 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
218 * The caller must ensure that no concurrent table mutations take place.
219 * It is however valid to have concurrent lookups if they are RCU protected.
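 *
 * Example (sketch; my_table_mutex is an illustrative lock corresponding to
 * the mutex_is_held callback passed to rhashtable_init()):
 *
 *	mutex_lock(&my_table_mutex);
 *	err = rhashtable_expand(&my_ht, GFP_KERNEL);
 *	mutex_unlock(&my_table_mutex);
 */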
221 int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
223 struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
224 struct rhash_head *he;
228 ASSERT_RHT_MUTEX(ht);
230 if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
233 new_tbl = bucket_table_alloc(old_tbl->size * 2, flags);
239 /* For each new bucket, search the corresponding old bucket
240 * for the first entry that hashes to the new bucket, and
241 * link the new bucket to that entry. Since all the entries
242 * which will end up in the new bucket appear in the same
243 * old bucket, this constructs an entirely valid new hash
244 * table, but with multiple buckets "zipped" together into a
245 * single imprecise chain.
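	 *
	 * Illustration (not in the original source): with old_tbl->size == 4
	 * and new_tbl->size == 8, new buckets 1 and 5 both start out pointing
	 * into old bucket 1 (1 & 3 == 5 & 3 == 1), so that single chain
	 * temporarily serves two new buckets until it is unzipped below.
	 */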
247 for (i = 0; i < new_tbl->size; i++) {
248 h = i & (old_tbl->size - 1);
249 rht_for_each(he, old_tbl->buckets[h], ht) {
250 if (head_hashfn(ht, he, new_tbl->size) == i) {
251 RCU_INIT_POINTER(new_tbl->buckets[i], he);
257 /* Publish the new table pointer. Lookups may now traverse
258 * the new table, but they will not benefit from any
259 * additional efficiency until later steps unzip the buckets.
261 rcu_assign_pointer(ht->tbl, new_tbl);
263 /* Unzip interleaved hash chains */
	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	/* For each bucket in the old table (each of which
	 * contains items from multiple buckets of the new
	 * table), unzip its chain one step further.
	 */
276 for (i = 0; i < old_tbl->size; i++) {
277 hashtable_chain_unzip(ht, new_tbl, old_tbl, i);
278 if (old_tbl->buckets[i] != NULL)
283 bucket_table_free(old_tbl);
286 EXPORT_SYMBOL_GPL(rhashtable_expand);
289 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
290 * @ht: the hash table to shrink
291 * @flags: allocation flags
293 * This function may only be called in a context where it is safe to call
294 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
296 * The caller must ensure that no concurrent table mutations take place.
297 * It is however valid to have concurrent lookups if they are RCU protected.
299 int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
301 struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
302 struct rhash_head __rcu **pprev;
305 ASSERT_RHT_MUTEX(ht);
307 if (tbl->size <= HASH_MIN_SIZE)
310 ntbl = bucket_table_alloc(tbl->size / 2, flags);
	/* Link each bucket in the new table to the first bucket
	 * in the old table that contains entries which will hash
	 * to the new bucket.
	 */
320 for (i = 0; i < ntbl->size; i++) {
321 ntbl->buckets[i] = tbl->buckets[i];
		/* Walk to the end of that chain, then append the bucket
		 * in the upper half of the old table whose entries also
		 * hash to this new bucket.
		 */
		for (pprev = &ntbl->buckets[i]; *pprev != NULL;
		     pprev = &rht_dereference(*pprev, ht)->next)
			;

		RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
333 /* Publish the new, valid hash table */
334 rcu_assign_pointer(ht->tbl, ntbl);
	/* Wait for readers. No new readers will have references to the
	 * old table.
	 */
341 bucket_table_free(tbl);
345 EXPORT_SYMBOL_GPL(rhashtable_shrink);
 * rhashtable_insert - insert object into hash table
350 * @obj: pointer to hash head inside object
351 * @flags: allocation flags (table expansion)
 * Will automatically grow the table via rhashtable_expand() if the
354 * grow_decision function specified at rhashtable_init() returns true.
356 * The caller must ensure that no concurrent table mutations occur. It is
357 * however valid to have concurrent lookups if they are RCU protected.
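 *
 * Example (sketch, reusing the struct test_obj layout from the
 * rhashtable_init() examples below):
 *
 *	struct test_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 23;
 *	rhashtable_insert(ht, &obj->node, GFP_KERNEL);
 */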
359 void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
362 struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
365 ASSERT_RHT_MUTEX(ht);
367 hash = head_hashfn(ht, obj, tbl->size);
368 RCU_INIT_POINTER(obj->next, tbl->buckets[hash]);
369 rcu_assign_pointer(tbl->buckets[hash], obj);
372 if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
373 rhashtable_expand(ht, flags);
375 EXPORT_SYMBOL_GPL(rhashtable_insert);
378 * rhashtable_remove_pprev - remove object from hash table given previous element
380 * @obj: pointer to hash head inside object
381 * @pprev: pointer to previous element
382 * @flags: allocation flags (table expansion)
 * Identical to rhashtable_remove() but the caller is already aware of the
 * element in front of the element to be deleted. This is particularly useful for
386 * deletion when combined with walking or lookup.
388 void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
389 struct rhash_head **pprev, gfp_t flags)
391 struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
393 ASSERT_RHT_MUTEX(ht);
395 RCU_INIT_POINTER(*pprev, obj->next);
398 if (ht->p.shrink_decision &&
399 ht->p.shrink_decision(ht, tbl->size))
400 rhashtable_shrink(ht, flags);
402 EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
405 * rhashtable_remove - remove object from hash table
407 * @obj: pointer to hash head inside object
408 * @flags: allocation flags (table expansion)
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
417 * The caller must ensure that no concurrent table mutations occur. It is
418 * however valid to have concurrent lookups if they are RCU protected.
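 *
 * Example (sketch; assumes the object embeds a struct rcu_head named 'rcu'
 * so it can be freed only after concurrent RCU readers are done):
 *
 *	if (rhashtable_remove(ht, &obj->node, GFP_KERNEL))
 *		kfree_rcu(obj, rcu);
 */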
420 bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
423 struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
424 struct rhash_head __rcu **pprev;
425 struct rhash_head *he;
428 ASSERT_RHT_MUTEX(ht);
430 h = head_hashfn(ht, obj, tbl->size);
432 pprev = &tbl->buckets[h];
433 rht_for_each(he, tbl->buckets[h], ht) {
439 rhashtable_remove_pprev(ht, he, pprev, flags);
445 EXPORT_SYMBOL_GPL(rhashtable_remove);
448 * rhashtable_lookup - lookup key in hash table
450 * @key: pointer to key
452 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
458 * Lookups may occur in parallel with hash mutations as long as the lookup is
459 * guarded by rcu_read_lock(). The caller must take care of this.
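 *
 * Example (sketch, fixed length integer key):
 *
 *	u32 key = 23;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(ht, &key);
 *	if (obj)
 *		[... use obj while still under rcu_read_lock() ...]
 *	rcu_read_unlock();
 */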
461 void *rhashtable_lookup(const struct rhashtable *ht, const void *key)
463 const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
464 struct rhash_head *he;
467 BUG_ON(!ht->p.key_len);
469 h = __hashfn(ht, key, ht->p.key_len, tbl->size);
470 rht_for_each_rcu(he, tbl->buckets[h], ht) {
471 if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key,
474 return (void *) he - ht->p.head_offset;
479 EXPORT_SYMBOL_GPL(rhashtable_lookup);
482 * rhashtable_lookup_compare - search hash table with compare function
484 * @hash: hash value of desired entry
485 * @compare: compare function, must return true on match
486 * @arg: argument passed on to compare function
488 * Traverses the bucket chain behind the provided hash value and calls the
489 * specified compare function for each entry.
491 * Lookups may occur in parallel with hash mutations as long as the lookup is
492 * guarded by rcu_read_lock(). The caller must take care of this.
494 * Returns the first entry on which the compare function returned true.
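 *
 * Example (sketch; my_cmp and the integer key layout are illustrative only):
 *
 *	static bool my_cmp(void *obj, void *arg)
 *	{
 *		return ((struct test_obj *)obj)->key == *(u32 *)arg;
 *	}
 *
 *	hash = rhashtable_hashfn(ht, &key, sizeof(key));
 *	obj = rhashtable_lookup_compare(ht, hash, my_cmp, &key);
 */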
496 void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
497 bool (*compare)(void *, void *), void *arg)
499 const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
500 struct rhash_head *he;
502 if (unlikely(hash >= tbl->size))
505 rht_for_each_rcu(he, tbl->buckets[hash], ht) {
506 if (!compare(rht_obj(ht, he), arg))
508 return (void *) he - ht->p.head_offset;
513 EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
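/* Worked example (illustrative): an nelem_hint of 100 yields
 * roundup_pow_of_two(100 * 4 / 3) = roundup_pow_of_two(133) = 256 buckets,
 * which keeps the initial load factor well below the 75% grow threshold.
 */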
515 static size_t rounded_hashtable_size(unsigned int nelem)
517 return max(roundup_pow_of_two(nelem * 4 / 3), HASH_MIN_SIZE);
521 * rhashtable_init - initialize a new hash table
522 * @ht: hash table to be initialized
523 * @params: configuration parameters
525 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	struct rhash_head	node;
 * };
536 * struct rhashtable_params params = {
537 * .head_offset = offsetof(struct test_obj, node),
538 * .key_offset = offsetof(struct test_obj, key),
539 * .key_len = sizeof(int),
540 * .hashfn = arch_fast_hash,
 * .mutex_is_held = &my_mutex_is_held,
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
550 * u32 my_hash_fn(const void *data, u32 seed)
552 * struct test_obj *obj = data;
554 * return [... hash ...];
557 * struct rhashtable_params params = {
558 * .head_offset = offsetof(struct test_obj, node),
559 * .hashfn = arch_fast_hash,
560 * .obj_hashfn = my_hash_fn,
 * .mutex_is_held = &my_mutex_is_held,
 * };
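 *
 * Either configuration is then activated with (sketch; my_ht is illustrative):
 *
 *	err = rhashtable_init(&my_ht, &params);
 *	if (err)
 *		[... handle error ...]
 */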
564 int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
566 struct bucket_table *tbl;
569 size = HASH_DEFAULT_SIZE;
571 if ((params->key_len && !params->hashfn) ||
572 (!params->key_len && !params->obj_hashfn))
575 if (params->nelem_hint)
576 size = rounded_hashtable_size(params->nelem_hint);
578 tbl = bucket_table_alloc(size, GFP_KERNEL);
582 memset(ht, 0, sizeof(*ht));
583 ht->shift = ilog2(tbl->size);
584 memcpy(&ht->p, params, sizeof(*params));
585 RCU_INIT_POINTER(ht->tbl, tbl);
588 get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
592 EXPORT_SYMBOL_GPL(rhashtable_init);
595 * rhashtable_destroy - destroy hash table
596 * @ht: the hash table to destroy
598 * Frees the bucket array.
600 void rhashtable_destroy(const struct rhashtable *ht)
602 const struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
604 bucket_table_free(tbl);
606 EXPORT_SYMBOL_GPL(rhashtable_destroy);
608 /**************************************************************************
610 **************************************************************************/
612 #ifdef CONFIG_TEST_RHASHTABLE
614 #define TEST_HT_SIZE 8
615 #define TEST_ENTRIES 2048
616 #define TEST_PTR ((void *) 0xdeadbeef)
617 #define TEST_NEXPANDS 4
619 static int test_mutex_is_held(void)
627 struct rhash_head node;
630 static int __init test_rht_lookup(struct rhashtable *ht)
634 for (i = 0; i < TEST_ENTRIES * 2; i++) {
635 struct test_obj *obj;
636 bool expected = !(i % 2);
639 obj = rhashtable_lookup(ht, &key);
641 if (expected && !obj) {
642 pr_warn("Test failed: Could not find key %u\n", key);
644 } else if (!expected && obj) {
645 pr_warn("Test failed: Unexpected entry found for key %u\n",
648 } else if (expected && obj) {
649 if (obj->ptr != TEST_PTR || obj->value != i) {
650 pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
651 obj->ptr, TEST_PTR, obj->value, i);
660 static void test_bucket_stats(struct rhashtable *ht,
661 struct bucket_table *tbl,
664 unsigned int cnt, i, total = 0;
665 struct test_obj *obj;
667 for (i = 0; i < tbl->size; i++) {
671 pr_info(" [%#4x/%zu]", i, tbl->size);
673 rht_for_each_entry_rcu(obj, tbl->buckets[i], node) {
677 pr_cont(" [%p],", obj);
681 pr_cont("\n [%#x] first element: %p, chain length: %u\n",
682 i, tbl->buckets[i], cnt);
685 pr_info(" Traversal complete: counted=%u, nelems=%zu, entries=%d\n",
686 total, ht->nelems, TEST_ENTRIES);
689 static int __init test_rhashtable(struct rhashtable *ht)
691 struct bucket_table *tbl;
692 struct test_obj *obj, *next;
 * Insert TEST_ENTRIES into table with all keys being even numbers
700 pr_info(" Adding %d keys\n", TEST_ENTRIES);
701 for (i = 0; i < TEST_ENTRIES; i++) {
702 struct test_obj *obj;
704 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
713 rhashtable_insert(ht, &obj->node, GFP_KERNEL);
717 tbl = rht_dereference_rcu(ht->tbl, ht);
718 test_bucket_stats(ht, tbl, true);
722 for (i = 0; i < TEST_NEXPANDS; i++) {
723 pr_info(" Table expansion iteration %u...\n", i);
724 rhashtable_expand(ht, GFP_KERNEL);
727 pr_info(" Verifying lookups...\n");
732 for (i = 0; i < TEST_NEXPANDS; i++) {
733 pr_info(" Table shrinkage iteration %u...\n", i);
734 rhashtable_shrink(ht, GFP_KERNEL);
737 pr_info(" Verifying lookups...\n");
742 pr_info(" Deleting %d keys\n", TEST_ENTRIES);
743 for (i = 0; i < TEST_ENTRIES; i++) {
746 obj = rhashtable_lookup(ht, &key);
749 rhashtable_remove(ht, &obj->node, GFP_KERNEL);
756 tbl = rht_dereference_rcu(ht->tbl, ht);
757 for (i = 0; i < tbl->size; i++)
758 rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node)
764 static int __init test_rht_init(void)
766 struct rhashtable ht;
767 struct rhashtable_params params = {
768 .nelem_hint = TEST_HT_SIZE,
769 .head_offset = offsetof(struct test_obj, node),
770 .key_offset = offsetof(struct test_obj, value),
771 .key_len = sizeof(int),
772 .hashfn = arch_fast_hash,
773 .mutex_is_held = &test_mutex_is_held,
774 .grow_decision = rht_grow_above_75,
775 .shrink_decision = rht_shrink_below_30,
779 pr_info("Running resizable hashtable tests...\n");
	err = rhashtable_init(&ht, &params);
783 pr_warn("Test failed: Unable to initialize hashtable: %d\n",
788 err = test_rhashtable(&ht);
790 rhashtable_destroy(&ht);
795 subsys_initcall(test_rht_init);
797 #endif /* CONFIG_TEST_RHASHTABLE */