/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL
#define BUCKET_LOCKS_PER_CPU	128UL

/* Base bits plus 1 bit for nulls marker */
#define HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

enum {
	RHT_LOCK_NORMAL,
	RHT_LOCK_NESTED,
};

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This allows the locking to be simplified, as taking the bucket lock in
 * both tables during a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}

static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}

static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
{
	return hash & (tbl->size - 1);
}

static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
{
	u32 hash;

	if (unlikely(!ht->p.key_len))
		hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
	else
		hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
				    ht->p.hash_rnd);

	return hash >> HASH_RESERVED_SPACE;
}

static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
{
	return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE;
}

static u32 head_hashfn(const struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
}

#ifdef CONFIG_PROVE_LOCKING
static void debug_dump_buckets(const struct rhashtable *ht,
			       const struct bucket_table *tbl)
{
	struct rhash_head *he;
	unsigned int i, hash;

	for (i = 0; i < tbl->size; i++) {
		pr_warn(" [Bucket %d] ", i);
		rht_for_each_rcu(he, tbl, i) {
			hash = head_hashfn(ht, tbl, he);
			pr_cont("[hash = %#x, lock = %p] ",
				hash, bucket_lock(tbl, hash));
		}
		pr_cont("\n");
	}
}

static void debug_dump_table(struct rhashtable *ht,
			     const struct bucket_table *tbl,
			     unsigned int hash)
{
	struct bucket_table *old_tbl, *future_tbl;

	pr_emerg("BUG: lock for hash %#x in table %p not held\n",
		 hash, tbl);

	rcu_read_lock();
	future_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	if (future_tbl != old_tbl) {
		pr_warn("Future table %p (size: %zd)\n",
			future_tbl, future_tbl->size);
		debug_dump_buckets(ht, future_tbl);
	}

	pr_warn("Table %p (size: %zd)\n", old_tbl, old_tbl->size);
	debug_dump_buckets(ht, old_tbl);
	rcu_read_unlock();
}

#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)				\
	do {								\
		if (unlikely(!lockdep_rht_bucket_is_held(TBL, HASH))) {	\
			debug_dump_table(HT, TBL, HASH);		\
			BUG();						\
		}							\
	} while (0)

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)
#endif

static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
{
	struct rhash_head __rcu **pprev;

	for (pprev = &tbl->buckets[n];
	     !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
	     pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
		;

	return pprev;
}

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   GFP_KERNEL);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets)
{
	struct bucket_table *tbl;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (tbl == NULL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
	       (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
	       (atomic_read(&ht->shift) > ht->p.min_shift);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);
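
/* Worked example (illustrative, not part of the original source): with a
 * 64-bucket table, rht_grow_above_75() requests an expansion once nelems
 * exceeds 64 / 4 * 3 = 48, and rht_shrink_below_30() requests a shrink
 * once nelems drops below 64 * 3 / 10 = 19, i.e. at 18 elements or fewer,
 * in both cases subject to the min_shift/max_shift bounds checked above.
 */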

static void lock_buckets(struct bucket_table *new_tbl,
			 struct bucket_table *old_tbl, unsigned int hash)
	__acquires(old_bucket_lock)
{
	spin_lock_bh(bucket_lock(old_tbl, hash));
	if (new_tbl != old_tbl)
		spin_lock_bh_nested(bucket_lock(new_tbl, hash),
				    RHT_LOCK_NESTED);
}

static void unlock_buckets(struct bucket_table *new_tbl,
			   struct bucket_table *old_tbl, unsigned int hash)
	__releases(old_bucket_lock)
{
	if (new_tbl != old_tbl)
		spin_unlock_bh(bucket_lock(new_tbl, hash));
	spin_unlock_bh(bucket_lock(old_tbl, hash));
}

/* Unlink entries from an old bucket which hash to a different bucket in
 * the new table.
 *
 * Returns true if no more work needs to be performed on the bucket.
 */
static bool hashtable_chain_unzip(struct rhashtable *ht,
				  const struct bucket_table *new_tbl,
				  struct bucket_table *old_tbl,
				  unsigned int old_hash)
{
	struct rhash_head *he, *p, *next;
	unsigned int new_hash, new_hash2;

	ASSERT_BUCKET_LOCK(ht, old_tbl, old_hash);

	/* Old bucket empty, no work needed. */
	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);
	if (rht_is_a_nulls(p))
		return false;

	new_hash = head_hashfn(ht, new_tbl, p);
	ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);

	/* Advance p one or more times until it reaches the last node of
	 * the leading run, i.e. until the next node hashes to a different
	 * bucket of the new table.
	 */
	rht_for_each_continue(he, p->next, old_tbl, old_hash) {
		new_hash2 = head_hashfn(ht, new_tbl, he);
		ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash2);

		if (new_hash != new_hash2)
			break;
		p = he;
	}
	rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);

	/* Find the subsequent node which does hash to the same bucket as
	 * node p, or the nulls marker if no such node exists.
	 */
	INIT_RHT_NULLS_HEAD(next, ht, old_hash);
	if (!rht_is_a_nulls(he)) {
		rht_for_each_continue(he, he->next, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				next = he;
				break;
			}
		}
	}

	/* Set p's next pointer to that subsequent node pointer,
	 * bypassing the nodes which do not hash to p's bucket.
	 */
	rcu_assign_pointer(p->next, next);

	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);

	return !rht_is_a_nulls(p);
}

static void link_old_to_new(struct rhashtable *ht, struct bucket_table *new_tbl,
			    unsigned int new_hash, struct rhash_head *entry)
{
	ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);

	rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head *he;
	unsigned int new_hash, old_hash;
	bool complete = false;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	atomic_inc(&ht->shift);

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * The synchronize_rcu() guarantees that the new table has been picked
	 * up by all readers, so no new additions go into the old table while
	 * we relink.
	 */
	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* For each new bucket, search the corresponding old bucket for the
	 * first entry that hashes to the new bucket, and link the end of
	 * the newly formed bucket chain (containing entries added to the
	 * future table) to that entry. Since all the entries which will end
	 * up in the new bucket appear in the same old bucket, this constructs
	 * an entirely valid new hash table, but with multiple buckets
	 * "zipped" together into a single imprecise chain.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		old_hash = rht_bucket_index(old_tbl, new_hash);
		lock_buckets(new_tbl, old_tbl, new_hash);
		rht_for_each(he, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				link_old_to_new(ht, new_tbl, new_hash, he);
				break;
			}
		}
		unlock_buckets(new_tbl, old_tbl, new_hash);
	}

	/* Unzip interleaved hash chains */
	while (!complete && !ht->being_destroyed) {
		/* Wait for readers. All new readers will see the new
		 * table, and thus no references to the old table will
		 * remain.
		 */
		synchronize_rcu();

		/* For each bucket in the old table (each of which
		 * contains items from multiple buckets of the new
		 * table), perform one unzip step.
		 */
		complete = true;
		for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
			lock_buckets(new_tbl, old_tbl, old_hash);

			if (hashtable_chain_unzip(ht, new_tbl, old_tbl,
						  old_hash))
				complete = false;

			unlock_buckets(new_tbl, old_tbl, old_hash);
		}
	}

	rcu_assign_pointer(ht->tbl, new_tbl);
	synchronize_rcu();

	bucket_table_free(old_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
	unsigned int new_hash;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, tbl->size / 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* Link the first entry in the old bucket to the end of the
	 * bucket in the new table. As entries are concurrently being
	 * added to the new table, lock down the new bucket. As we
	 * always divide the size in half when shrinking, each bucket
	 * in the new table maps to exactly two buckets in the old
	 * table.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		lock_buckets(new_tbl, tbl, new_hash);

		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash]);
		ASSERT_BUCKET_LOCK(ht, tbl, new_hash + new_tbl->size);
		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash + new_tbl->size]);

		unlock_buckets(new_tbl, tbl, new_hash);
	}

	/* Publish the new, valid hash table */
	rcu_assign_pointer(ht->tbl, new_tbl);
	atomic_dec(&ht->shift);

	/* Wait for readers. No new readers will have references to the
	 * old table.
	 */
	synchronize_rcu();

	bucket_table_free(tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	struct rhashtable_walker *walker;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);
	if (ht->being_destroyed)
		goto unlock;

	tbl = rht_dereference(ht->tbl, ht);

	list_for_each_entry(walker, &ht->walkers, list)
		walker->resize = true;

	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
		rhashtable_expand(ht);
	else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
		rhashtable_shrink(ht);

unlock:
	mutex_unlock(&ht->mutex);
}

static void rhashtable_wakeup_worker(struct rhashtable *ht)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	size_t size = tbl->size;

	/* Only adjust the table if no resizing is currently in progress. */
	if (tbl == new_tbl &&
	    ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
	     (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
		schedule_work(&ht->run_work);
}

static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
				struct bucket_table *tbl, u32 hash)
{
	struct rhash_head *head;

	hash = rht_bucket_index(tbl, hash);
	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	ASSERT_BUCKET_LOCK(ht, tbl, hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
	else
		RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

	rhashtable_wakeup_worker(ht);
}

/**
 * rhashtable_insert - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl, *old_tbl;
	unsigned int hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

	lock_buckets(tbl, old_tbl, hash);
	__rhashtable_insert(ht, obj, tbl, hash);
	unlock_buckets(tbl, old_tbl, hash);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_insert);
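
/* Example usage (an illustrative sketch only, reusing the hypothetical
 * struct test_obj and a table 'ht' from the fixed length key example in
 * the rhashtable_init() kernel-doc below):
 *
 *	struct test_obj *obj;
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 42;
 *	rhashtable_insert(&ht, &obj->node);
 */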

/**
 * rhashtable_remove - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl, *new_tbl, *old_tbl;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he, *he2;
	unsigned int hash, new_hash;
	bool ret = false;

	rcu_read_lock();
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

	lock_buckets(new_tbl, old_tbl, new_hash);
restart:
	hash = rht_bucket_index(tbl, new_hash);
	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		ASSERT_BUCKET_LOCK(ht, tbl, hash);

		if (old_tbl->size > new_tbl->size && tbl == old_tbl &&
		    !rht_is_a_nulls(obj->next) &&
		    head_hashfn(ht, tbl, obj->next) != hash) {
			rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
		} else if (unlikely(old_tbl->size < new_tbl->size && tbl == new_tbl)) {
			rht_for_each_continue(he2, obj->next, tbl, hash) {
				if (head_hashfn(ht, tbl, he2) == hash) {
					rcu_assign_pointer(*pprev, he2);
					goto found;
				}
			}

			rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
		} else {
			rcu_assign_pointer(*pprev, obj->next);
		}

found:
		ret = true;
		break;
	}

	/* The entry may be linked in either 'tbl', 'future_tbl', or both.
	 * 'future_tbl' only exists for a short period of time during
	 * resizing. Thus traversing both is fine and the added cost is
	 * negligible.
	 */
	if (tbl != old_tbl) {
		tbl = old_tbl;
		goto restart;
	}

	unlock_buckets(new_tbl, old_tbl, new_hash);

	if (ret) {
		atomic_dec(&ht->nelems);
		rhashtable_wakeup_worker(ht);
	}

	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);
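
/* Example usage (an illustrative sketch; 'rcu' is a struct rcu_head member
 * assumed to exist in the hypothetical struct test_obj): since concurrent
 * RCU readers may still be traversing the chain, the object must not be
 * freed before a grace period has elapsed:
 *
 *	if (rhashtable_remove(&ht, &obj->node))
 *		kfree_rcu(obj, rcu);
 */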

struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

static bool rhashtable_compare(void *ptr, void *arg)
{
	struct rhashtable_compare_arg *x = arg;
	struct rhashtable *ht = x->ht;

	return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
}

/**
 * rhashtable_lookup - lookup key in hash table
 * @ht:		hash table
 * @key:	pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 */
void *rhashtable_lookup(struct rhashtable *ht, const void *key)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);
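
/* Example usage (an illustrative sketch, fixed length key as configured in
 * the rhashtable_init() kernel-doc below). The returned object is only
 * guaranteed to stay valid while the RCU read-side critical section is held:
 *
 *	int key = 42;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&ht, &key);
 *	if (obj)
 *		[... use obj ...]
 *	rcu_read_unlock();
 */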

/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht:		hash table
 * @key:	the pointer to the key
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl, *old_tbl;
	struct rhash_head *he;
	u32 hash;

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	hash = key_hashfn(ht, key, ht->p.key_len);
restart:
	rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	if (unlikely(tbl != old_tbl)) {
		tbl = old_tbl;
		goto restart;
	}
	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
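
/* Example of a caller-supplied compare function (an illustrative sketch,
 * matching on the 'key' member of the hypothetical struct test_obj used
 * throughout these examples):
 *
 *	static bool test_obj_compare(void *ptr, void *arg)
 *	{
 *		const struct test_obj *obj = ptr;
 *		const int *key = arg;
 *
 *		return obj->key == *key;
 *	}
 *
 *	obj = rhashtable_lookup_compare(&ht, &key, test_obj_compare, &key);
 */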

/**
 * rhashtable_lookup_insert - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = rht_obj(ht, obj) + ht->p.key_offset,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
						&arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
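
/* Example usage (an illustrative sketch): atomically reject duplicate keys,
 * which a plain rhashtable_insert() would not detect:
 *
 *	if (!rhashtable_lookup_insert(&ht, &obj->node)) {
 *		kfree(obj);
 *		return -EEXIST;
 *	}
 */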

/**
 * rhashtable_lookup_compare_insert - search and insert object to hash table
 *                                    with compare function
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
				      struct rhash_head *obj,
				      bool (*compare)(void *, void *),
				      void *arg)
{
	struct bucket_table *new_tbl, *old_tbl;
	u32 new_hash;
	bool success = true;

	BUG_ON(!ht->p.key_len);

	rcu_read_lock();
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

	lock_buckets(new_tbl, old_tbl, new_hash);

	if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
				      compare, arg)) {
		success = false;
		goto exit;
	}

	__rhashtable_insert(ht, obj, new_tbl, new_hash);

exit:
	unlock_buckets(new_tbl, old_tbl, new_hash);
	rcu_read_unlock();

	return success;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	mutex_lock(&ht->mutex);
	list_add(&iter->walker->list, &ht->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk. Note that we take the RCU lock in all
 * cases including when we return an error. So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
{
	rcu_read_lock();

	if (iter->walker->resize) {
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns ERR_PTR(-EAGAIN) if a resize event occurred. Note that the
 * iterator will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	const struct bucket_table *tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	tbl = rht_dereference_rcu(ht->tbl, ht);

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	iter->p = NULL;

out:
	if (iter->walker->resize) {
		iter->p = NULL;
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return ERR_PTR(-EAGAIN);
	}

	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
{
	rcu_read_unlock();
	iter->p = NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
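
/* Example of a complete walk (an illustrative sketch; the ERR_PTR(-EAGAIN)
 * case simply continues, since the iterator has already rewound itself):
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(&ht, &iter);
 *	if (err)
 *		return err;
 *
 *	rhashtable_walk_start(&iter);
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		[... process obj ...]
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */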

static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   1UL << params->min_shift);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	params->min_shift = max_t(size_t, params->min_shift,
				  ilog2(HASH_MIN_SIZE));

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));
	INIT_LIST_HEAD(&ht->walkers);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	tbl = bucket_table_alloc(ht, size);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);
	atomic_set(&ht->shift, ilog2(tbl->size));
	RCU_INIT_POINTER(ht->tbl, tbl);
	RCU_INIT_POINTER(ht->future_tbl, tbl);

	if (!ht->p.hash_rnd)
		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

	if (ht->p.grow_decision || ht->p.shrink_decision)
		INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
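
/* Putting it together (an illustrative sketch, using the params from
 * Configuration Example 1 above with the default grow/shrink watermarks
 * enabled):
 *
 *	static struct rhashtable ht;
 *
 *	params.grow_decision = rht_grow_above_75;
 *	params.shrink_decision = rht_shrink_below_30;
 *
 *	err = rhashtable_init(&ht, &params);
 *	if (err)
 *		return err;
 */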

/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not RCU safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	if (ht->p.grow_decision || ht->p.shrink_decision)
		cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);