lib/rhashtable.c
1 /*
2  * Resizable, Scalable, Concurrent Hash Table
3  *
4  * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
5  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
6  *
7  * Based on the following paper:
8  * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
9  *
10  * Code partially derived from nft_hash
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License version 2 as
14  * published by the Free Software Foundation.
15  */
16
17 #include <linux/kernel.h>
18 #include <linux/init.h>
19 #include <linux/log2.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/mm.h>
23 #include <linux/jhash.h>
24 #include <linux/random.h>
25 #include <linux/rhashtable.h>
26
27 #define HASH_DEFAULT_SIZE       64UL
28 #define HASH_MIN_SIZE           4UL
29 #define BUCKET_LOCKS_PER_CPU   128UL
30
31 /* Base bits plus 1 bit for nulls marker */
32 #define HASH_RESERVED_SPACE     (RHT_BASE_BITS + 1)
33
34 enum {
35         RHT_LOCK_NORMAL,
36         RHT_LOCK_NESTED,
37         RHT_LOCK_NESTED2,
38 };
39
40 /* The bucket lock is selected based on the hash and protects mutations
41  * on a group of hash buckets.
42  *
43  * IMPORTANT: When holding the bucket lock of both the old and new table
44  * during expansions and shrinking, the old bucket lock must always be
45  * acquired first.
46  */
47 static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
48 {
49         return &tbl->locks[hash & tbl->locks_mask];
50 }
51
52 #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
53 #define ASSERT_BUCKET_LOCK(TBL, HASH) \
54         BUG_ON(!lockdep_rht_bucket_is_held(TBL, HASH))
55
56 #ifdef CONFIG_PROVE_LOCKING
57 int lockdep_rht_mutex_is_held(struct rhashtable *ht)
58 {
59         return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
60 }
61 EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
62
63 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
64 {
65         spinlock_t *lock = bucket_lock(tbl, hash);
66
67         return (debug_locks) ? lockdep_is_held(lock) : 1;
68 }
69 EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
70 #endif
71
72 static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
73 {
74         return (void *) he - ht->p.head_offset;
75 }
76
77 static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
78 {
79         return hash & (tbl->size - 1);
80 }
81
82 static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
83 {
84         u32 hash;
85
86         if (unlikely(!ht->p.key_len))
87                 hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
88         else
89                 hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
90                                     ht->p.hash_rnd);
91
92         return hash >> HASH_RESERVED_SPACE;
93 }
94
95 static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
96 {
97         struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
98         u32 hash;
99
100         hash = ht->p.hashfn(key, len, ht->p.hash_rnd);
101         hash >>= HASH_RESERVED_SPACE;
102
103         return rht_bucket_index(tbl, hash);
104 }
105
106 static u32 head_hashfn(const struct rhashtable *ht,
107                        const struct bucket_table *tbl,
108                        const struct rhash_head *he)
109 {
110         return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
111 }
112
113 static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
114 {
115         struct rhash_head __rcu **pprev;
116
117         for (pprev = &tbl->buckets[n];
118              !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
119              pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
120                 ;
121
122         return pprev;
123 }
124
125 static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
126 {
127         unsigned int i, size;
128 #if defined(CONFIG_PROVE_LOCKING)
129         unsigned int nr_pcpus = 2;
130 #else
131         unsigned int nr_pcpus = num_possible_cpus();
132 #endif
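        /* The lower value under CONFIG_PROVE_LOCKING is assumed to keep the
         * number of bucket lock instances small enough for lockdep to track
         * comfortably; it is not a functional requirement.
         */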
133
134         nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
135         size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
136
137         /* Never allocate more than one lock per bucket */
138         size = min_t(unsigned int, size, tbl->size);
139
140         if (sizeof(spinlock_t) != 0) {
141 #ifdef CONFIG_NUMA
142                 if (size * sizeof(spinlock_t) > PAGE_SIZE)
143                         tbl->locks = vmalloc(size * sizeof(spinlock_t));
144                 else
145 #endif
146                 tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
147                                            GFP_KERNEL);
148                 if (!tbl->locks)
149                         return -ENOMEM;
150                 for (i = 0; i < size; i++)
151                         spin_lock_init(&tbl->locks[i]);
152         }
153         tbl->locks_mask = size - 1;
154
155         return 0;
156 }
157
158 static void bucket_table_free(const struct bucket_table *tbl)
159 {
160         if (tbl)
161                 kvfree(tbl->locks);
162
163         kvfree(tbl);
164 }
165
166 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
167                                                size_t nbuckets)
168 {
169         struct bucket_table *tbl;
170         size_t size;
171         int i;
172
173         size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
174         tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
175         if (tbl == NULL)
176                 tbl = vzalloc(size);
177
178         if (tbl == NULL)
179                 return NULL;
180
181         tbl->size = nbuckets;
182
183         if (alloc_bucket_locks(ht, tbl) < 0) {
184                 bucket_table_free(tbl);
185                 return NULL;
186         }
187
188         for (i = 0; i < nbuckets; i++)
189                 INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
190
191         return tbl;
192 }
193
194 /**
195  * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
196  * @ht:         hash table
197  * @new_size:   new table size
198  */
199 bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
200 {
201         /* Expand table when exceeding 75% load */
202         return atomic_read(&ht->nelems) > (new_size / 4 * 3);
203 }
204 EXPORT_SYMBOL_GPL(rht_grow_above_75);
205
206 /**
207  * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
208  * @ht:         hash table
209  * @new_size:   new table size
210  */
211 bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
212 {
213         /* Shrink table beneath 30% load */
214         return atomic_read(&ht->nelems) < (new_size * 3 / 10);
215 }
216 EXPORT_SYMBOL_GPL(rht_shrink_below_30);
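/* Worked example of the watermarks above (illustrative): with a 64 bucket
 * table, rht_grow_above_75() returns true once nelems exceeds
 * 64 / 4 * 3 = 48, and rht_shrink_below_30() returns true once nelems
 * drops below 64 * 3 / 10 = 19.
 */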
217
218 static void hashtable_chain_unzip(const struct rhashtable *ht,
219                                   const struct bucket_table *new_tbl,
220                                   struct bucket_table *old_tbl,
221                                   size_t old_hash)
222 {
223         struct rhash_head *he, *p, *next;
224         spinlock_t *new_bucket_lock, *new_bucket_lock2 = NULL;
225         unsigned int new_hash, new_hash2;
226
227         ASSERT_BUCKET_LOCK(old_tbl, old_hash);
228
229         /* Old bucket empty, no work needed. */
230         p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
231                                    old_hash);
232         if (rht_is_a_nulls(p))
233                 return;
234
235         new_hash = new_hash2 = head_hashfn(ht, new_tbl, p);
236         new_bucket_lock = bucket_lock(new_tbl, new_hash);
237
238         /* Advance the old bucket pointer one or more times until it
239          * reaches a node that hashes to a different bucket in the new
240          * table; p is left pointing at the last node of the current run.
241          */
242         rht_for_each_continue(he, p->next, old_tbl, old_hash) {
243                 new_hash2 = head_hashfn(ht, new_tbl, he);
244                 if (new_hash != new_hash2)
245                         break;
246                 p = he;
247         }
248         rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);
249
250         spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
251
252         /* If we have encountered an entry that maps to a different bucket in
253          * the new table, lock down that bucket as well as we might cut off
254          * the end of the chain.
255          */
256         new_bucket_lock2 = bucket_lock(new_tbl, new_hash2);
257         if (new_bucket_lock != new_bucket_lock2)
258                 spin_lock_bh_nested(new_bucket_lock2, RHT_LOCK_NESTED2);
259
260         /* Find the subsequent node which does hash to the same
261          * bucket as node P, or NULL if no such node exists.
262          */
263         INIT_RHT_NULLS_HEAD(next, ht, old_hash);
264         if (!rht_is_a_nulls(he)) {
265                 rht_for_each_continue(he, he->next, old_tbl, old_hash) {
266                         if (head_hashfn(ht, new_tbl, he) == new_hash) {
267                                 next = he;
268                                 break;
269                         }
270                 }
271         }
272
273         /* Set p's next pointer to that subsequent node pointer,
274          * bypassing the nodes which do not hash to p's bucket
275          */
276         rcu_assign_pointer(p->next, next);
277
278         if (new_bucket_lock != new_bucket_lock2)
279                 spin_unlock_bh(new_bucket_lock2);
280         spin_unlock_bh(new_bucket_lock);
281 }
282
283 static void link_old_to_new(struct bucket_table *new_tbl,
284                             unsigned int new_hash, struct rhash_head *entry)
285 {
286         spinlock_t *new_bucket_lock;
287
288         new_bucket_lock = bucket_lock(new_tbl, new_hash);
289
290         spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
291         rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
292         spin_unlock_bh(new_bucket_lock);
293 }
294
295 /**
296  * rhashtable_expand - Expand hash table while allowing concurrent lookups
297  * @ht:         the hash table to expand
298  *
299  * A secondary bucket array is allocated and the hash entries are migrated
300  * while keeping them on both lists until the end of the RCU grace period.
301  *
302  * This function may only be called in a context where it is safe to call
303  * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
304  *
305  * The caller must ensure that no concurrent resizing occurs by holding
306  * ht->mutex.
307  *
308  * It is valid to have concurrent insertions and deletions protected by per
309  * bucket locks or concurrent RCU protected lookups and traversals.
310  */
311 int rhashtable_expand(struct rhashtable *ht)
312 {
313         struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
314         struct rhash_head *he;
315         spinlock_t *old_bucket_lock;
316         unsigned int new_hash, old_hash;
317         bool complete = false;
318
319         ASSERT_RHT_MUTEX(ht);
320
321         if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
322                 return 0;
323
324         new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
325         if (new_tbl == NULL)
326                 return -ENOMEM;
327
328         ht->shift++;
329
330         /* Make insertions go into the new, empty table right away. Deletions
331          * and lookups will be attempted in both tables until we synchronize.
332          * The synchronize_rcu() guarantees that the new table is visible to
333          * all readers, so no new additions go into the old table while we relink.
334          */
335         rcu_assign_pointer(ht->future_tbl, new_tbl);
336         synchronize_rcu();
337
338         /* For each new bucket, search the corresponding old bucket for the
339          * first entry that hashes to the new bucket, and link the end of
340          * the newly formed bucket chain (containing entries added to the future
341          * table) to that entry. Since all the entries which will end up in
342          * the new bucket appear in the same old bucket, this constructs an
343          * entirely valid new hash table, but with multiple buckets
344          * "zipped" together into a single imprecise chain.
345          */
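        /* Worked example (illustrative): when doubling from 4 to 8 buckets,
         * old bucket 1 holds the entries whose new-table hashes are 1 or 5,
         * so new buckets 1 and 5 both start out pointing into that single
         * old chain until the unzip pass further below separates them.
         */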
346         for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
347                 old_hash = rht_bucket_index(old_tbl, new_hash);
348                 old_bucket_lock = bucket_lock(old_tbl, old_hash);
349
350                 spin_lock_bh(old_bucket_lock);
351                 rht_for_each(he, old_tbl, old_hash) {
352                         if (head_hashfn(ht, new_tbl, he) == new_hash) {
353                                 link_old_to_new(new_tbl, new_hash, he);
354                                 break;
355                         }
356                 }
357                 spin_unlock_bh(old_bucket_lock);
358         }
359
360         /* Publish the new table pointer. Lookups may now traverse
361          * the new table, but they will not benefit from any
362          * additional efficiency until later steps unzip the buckets.
363          */
364         rcu_assign_pointer(ht->tbl, new_tbl);
365
366         /* Unzip interleaved hash chains */
367         while (!complete && !ht->being_destroyed) {
368                 /* Wait for readers. All new readers will see the new
369                  * table, and thus no references to the old table will
370                  * remain.
371                  */
372                 synchronize_rcu();
373
374                 /* For each bucket in the old table (each of which
375                  * contains items from multiple buckets of the new
376                  * table): ...
377                  */
378                 complete = true;
379                 for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
380                         struct rhash_head *head;
381
382                         old_bucket_lock = bucket_lock(old_tbl, old_hash);
383                         spin_lock_bh(old_bucket_lock);
384
385                         hashtable_chain_unzip(ht, new_tbl, old_tbl, old_hash);
386                         head = rht_dereference_bucket(old_tbl->buckets[old_hash],
387                                                       old_tbl, old_hash);
388                         if (!rht_is_a_nulls(head))
389                                 complete = false;
390
391                         spin_unlock_bh(old_bucket_lock);
392                 }
393         }
394
395         bucket_table_free(old_tbl);
396         return 0;
397 }
398 EXPORT_SYMBOL_GPL(rhashtable_expand);
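/* Usage sketch (illustrative, not part of this file): an explicit expansion
 * must be serialized against other resizes via ht->mutex, e.g.
 *
 *      mutex_lock(&ht->mutex);
 *      err = rhashtable_expand(ht);
 *      mutex_unlock(&ht->mutex);
 *
 * which is the same pattern rht_deferred_worker() uses when grow_decision()
 * fires. The same holds for rhashtable_shrink() below.
 */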
399
400 /**
401  * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
402  * @ht:         the hash table to shrink
403  *
404  * This function may only be called in a context where it is safe to call
405  * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
406  *
407  * The caller must ensure that no concurrent resizing occurs by holding
408  * ht->mutex.
409  *
413  * It is valid to have concurrent insertions and deletions protected by per
414  * bucket locks or concurrent RCU protected lookups and traversals.
415  */
416 int rhashtable_shrink(struct rhashtable *ht)
417 {
418         struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
419         spinlock_t *new_bucket_lock, *old_bucket_lock1, *old_bucket_lock2;
420         unsigned int new_hash;
421
422         ASSERT_RHT_MUTEX(ht);
423
424         if (ht->shift <= ht->p.min_shift)
425                 return 0;
426
427         new_tbl = bucket_table_alloc(ht, tbl->size / 2);
428         if (new_tbl == NULL)
429                 return -ENOMEM;
430
431         rcu_assign_pointer(ht->future_tbl, new_tbl);
432         synchronize_rcu();
433
434         /* Link the first entry in the old bucket to the end of the
435          * bucket in the new table. As entries are concurrently being
436          * added to the new table, lock down the new bucket. As we
437          * always divide the size in half when shrinking, each bucket
438          * in the new table maps to exactly two buckets in the old
439          * table.
440          *
441          * As removals can occur concurrently on the old table, we need
442          * to lock down both matching buckets in the old table.
443          */
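        /* Worked example (illustrative): when halving from 8 to 4 buckets,
         * new bucket 1 collects old buckets 1 and 1 + 4 = 5, because
         * rht_bucket_index() masks the hash with (size - 1).
         */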
444         for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
445                 old_bucket_lock1 = bucket_lock(tbl, new_hash);
446                 old_bucket_lock2 = bucket_lock(tbl, new_hash + new_tbl->size);
447                 new_bucket_lock = bucket_lock(new_tbl, new_hash);
448
449                 spin_lock_bh(old_bucket_lock1);
450                 spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED);
451                 spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2);
452
453                 rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
454                                    tbl->buckets[new_hash]);
455                 rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
456                                    tbl->buckets[new_hash + new_tbl->size]);
457
458                 spin_unlock_bh(new_bucket_lock);
459                 spin_unlock_bh(old_bucket_lock2);
460                 spin_unlock_bh(old_bucket_lock1);
461         }
462
463         /* Publish the new, valid hash table */
464         rcu_assign_pointer(ht->tbl, new_tbl);
465         ht->shift--;
466
467         /* Wait for readers. No new readers will have references to the
468          * old hash table.
469          */
470         synchronize_rcu();
471
472         bucket_table_free(tbl);
473
474         return 0;
475 }
476 EXPORT_SYMBOL_GPL(rhashtable_shrink);
477
478 static void rht_deferred_worker(struct work_struct *work)
479 {
480         struct rhashtable *ht;
481         struct bucket_table *tbl;
482
483         ht = container_of(work, struct rhashtable, run_work.work);
484         mutex_lock(&ht->mutex);
485         tbl = rht_dereference(ht->tbl, ht);
486
487         if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
488                 rhashtable_expand(ht);
489         else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
490                 rhashtable_shrink(ht);
491
492         mutex_unlock(&ht->mutex);
493 }
494
495 /**
496  * rhashtable_insert - insert object into hash table
497  * @ht:         hash table
498  * @obj:        pointer to hash head inside object
499  *
500  * Will take a per bucket spinlock to protect against mutual mutations
501  * on the same bucket. Multiple insertions may occur in parallel unless
502  * they map to the same bucket lock.
503  *
504  * It is safe to call this function from atomic context.
505  *
506  * Will trigger an automatic deferred table resizing if the size grows
507  * beyond the watermark indicated by grow_decision() which can be passed
508  * to rhashtable_init().
509  */
510 void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
511 {
512         struct bucket_table *tbl;
513         struct rhash_head *head;
514         spinlock_t *lock;
515         unsigned hash;
516
517         rcu_read_lock();
518
519         tbl = rht_dereference_rcu(ht->future_tbl, ht);
520         hash = head_hashfn(ht, tbl, obj);
521         lock = bucket_lock(tbl, hash);
522
523         spin_lock_bh(lock);
524         head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
525         if (rht_is_a_nulls(head))
526                 INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
527         else
528                 RCU_INIT_POINTER(obj->next, head);
529
530         rcu_assign_pointer(tbl->buckets[hash], obj);
531         spin_unlock_bh(lock);
532
533         atomic_inc(&ht->nelems);
534
535         /* Only grow the table if no resizing is currently in progress. */
536         if (ht->tbl != ht->future_tbl &&
537             ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
538                 schedule_delayed_work(&ht->run_work, 0);
539
540         rcu_read_unlock();
541 }
542 EXPORT_SYMBOL_GPL(rhashtable_insert);
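/* Usage sketch (illustrative): with the fixed length key test_obj layout
 * shown in the rhashtable_init() documentation below,
 *
 *      struct test_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *      if (obj) {
 *              obj->key = 42;
 *              rhashtable_insert(ht, &obj->node);
 *      }
 *
 * no caller-side locking is needed; the per bucket lock is taken internally.
 */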
543
544 /**
545  * rhashtable_remove - remove object from hash table
546  * @ht:         hash table
547  * @obj:        pointer to hash head inside object
548  *
549  * Since the hash chain is singly linked, the removal operation needs to
550  * walk the bucket chain upon removal. The removal operation is thus
551  * considerably slower if the hash table is not correctly sized.
552  *
553  * Will automatically shrink the table via rhashtable_shrink() if the
554  * shrink_decision function specified at rhashtable_init() returns true.
555  *
556  * The caller must ensure that no concurrent table mutations occur. It is
557  * however valid to have concurrent lookups if they are RCU protected.
558  */
559 bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
560 {
561         struct bucket_table *tbl;
562         struct rhash_head __rcu **pprev;
563         struct rhash_head *he;
564         spinlock_t *lock;
565         unsigned int hash;
566
567         rcu_read_lock();
568         tbl = rht_dereference_rcu(ht->tbl, ht);
569         hash = head_hashfn(ht, tbl, obj);
570
571         lock = bucket_lock(tbl, hash);
572         spin_lock_bh(lock);
573
574 restart:
575         pprev = &tbl->buckets[hash];
576         rht_for_each(he, tbl, hash) {
577                 if (he != obj) {
578                         pprev = &he->next;
579                         continue;
580                 }
581
582                 rcu_assign_pointer(*pprev, obj->next);
583                 atomic_dec(&ht->nelems);
584
585                 spin_unlock_bh(lock);
586
587                 if (ht->tbl != ht->future_tbl &&
588                     ht->p.shrink_decision &&
589                     ht->p.shrink_decision(ht, tbl->size))
590                         schedule_delayed_work(&ht->run_work, 0);
591
592                 rcu_read_unlock();
593
594                 return true;
595         }
596
597         if (tbl != rht_dereference_rcu(ht->tbl, ht)) {
598                 spin_unlock_bh(lock);
599
600                 tbl = rht_dereference_rcu(ht->tbl, ht);
601                 hash = head_hashfn(ht, tbl, obj);
602
603                 lock = bucket_lock(tbl, hash);
604                 spin_lock_bh(lock);
605                 goto restart;
606         }
607
608         spin_unlock_bh(lock);
609         rcu_read_unlock();
610
611         return false;
612 }
613 EXPORT_SYMBOL_GPL(rhashtable_remove);
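/* Note (assumption based on general RCU practice, not stated here): callers
 * that allow concurrent RCU protected lookups typically defer freeing a
 * removed object, e.g. with kfree_rcu(), so that readers still traversing
 * the old chain never touch freed memory.
 */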
614
615 /**
616  * rhashtable_lookup - lookup key in hash table
617  * @ht:         hash table
618  * @key:        pointer to key
619  *
620  * Computes the hash value for the key and traverses the bucket chain looking
621  * for an entry with an identical key. The first matching entry is returned.
622  *
623  * This lookup function may only be used for fixed key hash tables (key_len
624  * parameter set). It will BUG() if used inappropriately.
625  *
626  * Lookups may occur in parallel with hashtable mutations and resizing.
627  */
628 void *rhashtable_lookup(struct rhashtable *ht, const void *key)
629 {
630         const struct bucket_table *tbl, *old_tbl;
631         struct rhash_head *he;
632         u32 hash;
633
634         BUG_ON(!ht->p.key_len);
635
636         rcu_read_lock();
637         old_tbl = rht_dereference_rcu(ht->tbl, ht);
638         tbl = rht_dereference_rcu(ht->future_tbl, ht);
639         hash = key_hashfn(ht, key, ht->p.key_len);
640 restart:
641         rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
642                 if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key,
643                            ht->p.key_len))
644                         continue;
645                 rcu_read_unlock();
646                 return rht_obj(ht, he);
647         }
648
649         if (unlikely(tbl != old_tbl)) {
650                 tbl = old_tbl;
651                 goto restart;
652         }
653
654         rcu_read_unlock();
655         return NULL;
656 }
657 EXPORT_SYMBOL_GPL(rhashtable_lookup);
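/* Usage sketch (illustrative): the returned pointer is to the containing
 * object and is only guaranteed to stay valid while the caller remains in
 * an RCU read-side critical section, e.g.
 *
 *      u32 key = 42;
 *      struct test_obj *obj;
 *
 *      rcu_read_lock();
 *      obj = rhashtable_lookup(ht, &key);
 *      if (obj)
 *              ... use obj ...
 *      rcu_read_unlock();
 */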
658
659 /**
660  * rhashtable_lookup_compare - search hash table with compare function
661  * @ht:         hash table
662  * @key:        the pointer to the key
663  * @compare:    compare function, must return true on match
664  * @arg:        argument passed on to compare function
665  *
666  * Traverses the bucket chain behind the provided hash value and calls the
667  * specified compare function for each entry.
668  *
669  * Lookups may occur in parallel with hashtable mutations and resizing.
670  *
671  * Returns the first entry on which the compare function returned true.
672  */
673 void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
674                                 bool (*compare)(void *, void *), void *arg)
675 {
676         const struct bucket_table *tbl, *old_tbl;
677         struct rhash_head *he;
678         u32 hash;
679
680         rcu_read_lock();
681
682         old_tbl = rht_dereference_rcu(ht->tbl, ht);
683         tbl = rht_dereference_rcu(ht->future_tbl, ht);
684         hash = key_hashfn(ht, key, ht->p.key_len);
685 restart:
686         rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
687                 if (!compare(rht_obj(ht, he), arg))
688                         continue;
689                 rcu_read_unlock();
690                 return rht_obj(ht, he);
691         }
692
693         if (unlikely(tbl != old_tbl)) {
694                 tbl = old_tbl;
695                 goto restart;
696         }
697         rcu_read_unlock();
698
699         return NULL;
700 }
701 EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
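/* Usage sketch (illustrative, my_cmp is a made-up name): the compare
 * callback receives the candidate object and the opaque @arg, while @key is
 * only used to pick the bucket:
 *
 *      static bool my_cmp(void *ptr, void *arg)
 *      {
 *              return ((struct test_obj *)ptr)->key == *(u32 *)arg;
 *      }
 *
 *      obj = rhashtable_lookup_compare(ht, &key, my_cmp, &key);
 */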
702
703 static size_t rounded_hashtable_size(struct rhashtable_params *params)
704 {
705         return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
706                    1UL << params->min_shift);
707 }
708
709 /**
710  * rhashtable_init - initialize a new hash table
711  * @ht:         hash table to be initialized
712  * @params:     configuration parameters
713  *
714  * Initializes a new hash table based on the provided configuration
715  * parameters. A table can be configured either with a variable or
716  * fixed length key:
717  *
718  * Configuration Example 1: Fixed length keys
719  * struct test_obj {
720  *      int                     key;
721  *      void *                  my_member;
722  *      struct rhash_head       node;
723  * };
724  *
725  * struct rhashtable_params params = {
726  *      .head_offset = offsetof(struct test_obj, node),
727  *      .key_offset = offsetof(struct test_obj, key),
728  *      .key_len = sizeof(int),
729  *      .hashfn = jhash,
730  *      .nulls_base = (1U << RHT_BASE_SHIFT),
731  * };
732  *
733  * Configuration Example 2: Variable length keys
734  * struct test_obj {
735  *      [...]
736  *      struct rhash_head       node;
737  * };
738  *
739  * u32 my_hash_fn(const void *data, u32 seed)
740  * {
741  *      struct test_obj *obj = data;
742  *
743  *      return [... hash ...];
744  * }
745  *
746  * struct rhashtable_params params = {
747  *      .head_offset = offsetof(struct test_obj, node),
748  *      .hashfn = jhash,
749  *      .obj_hashfn = my_hash_fn,
750  * };
751  */
752 int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
753 {
754         struct bucket_table *tbl;
755         size_t size;
756
757         size = HASH_DEFAULT_SIZE;
758
759         if ((params->key_len && !params->hashfn) ||
760             (!params->key_len && !params->obj_hashfn))
761                 return -EINVAL;
762
763         if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
764                 return -EINVAL;
765
766         params->min_shift = max_t(size_t, params->min_shift,
767                                   ilog2(HASH_MIN_SIZE));
768
769         if (params->nelem_hint)
770                 size = rounded_hashtable_size(params);
771
772         memset(ht, 0, sizeof(*ht));
773         mutex_init(&ht->mutex);
774         memcpy(&ht->p, params, sizeof(*params));
775
776         if (params->locks_mul)
777                 ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
778         else
779                 ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
780
781         tbl = bucket_table_alloc(ht, size);
782         if (tbl == NULL)
783                 return -ENOMEM;
784
785         ht->shift = ilog2(tbl->size);
786         RCU_INIT_POINTER(ht->tbl, tbl);
787         RCU_INIT_POINTER(ht->future_tbl, tbl);
788
789         if (!ht->p.hash_rnd)
790                 get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
791
792         if (ht->p.grow_decision || ht->p.shrink_decision)
793                 INIT_DEFERRABLE_WORK(&ht->run_work, rht_deferred_worker);
794
795         return 0;
796 }
797 EXPORT_SYMBOL_GPL(rhashtable_init);
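/* A complete init/insert/lookup/remove/destroy sequence can be found in the
 * self test below (test_rht_init() and friends) when CONFIG_TEST_RHASHTABLE
 * is enabled.
 */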
798
799 /**
800  * rhashtable_destroy - destroy hash table
801  * @ht:         the hash table to destroy
802  *
803  * Frees the bucket array. This function is not rcu safe, therefore the caller
804  * has to make sure that no resizing may happen by unpublishing the hashtable
805  * and waiting for the quiescent cycle before releasing the bucket array.
806  */
807 void rhashtable_destroy(struct rhashtable *ht)
808 {
809         ht->being_destroyed = true;
810
811         mutex_lock(&ht->mutex);
812
813         cancel_delayed_work(&ht->run_work);
814         bucket_table_free(rht_dereference(ht->tbl, ht));
815
816         mutex_unlock(&ht->mutex);
817 }
818 EXPORT_SYMBOL_GPL(rhashtable_destroy);
819
820 /**************************************************************************
821  * Self Test
822  **************************************************************************/
823
824 #ifdef CONFIG_TEST_RHASHTABLE
825
826 #define TEST_HT_SIZE    8
827 #define TEST_ENTRIES    2048
828 #define TEST_PTR        ((void *) 0xdeadbeef)
829 #define TEST_NEXPANDS   4
830
831 struct test_obj {
832         void                    *ptr;
833         int                     value;
834         struct rhash_head       node;
835 };
836
837 static int __init test_rht_lookup(struct rhashtable *ht)
838 {
839         unsigned int i;
840
841         for (i = 0; i < TEST_ENTRIES * 2; i++) {
842                 struct test_obj *obj;
843                 bool expected = !(i % 2);
844                 u32 key = i;
845
846                 obj = rhashtable_lookup(ht, &key);
847
848                 if (expected && !obj) {
849                         pr_warn("Test failed: Could not find key %u\n", key);
850                         return -ENOENT;
851                 } else if (!expected && obj) {
852                         pr_warn("Test failed: Unexpected entry found for key %u\n",
853                                 key);
854                         return -EEXIST;
855                 } else if (expected && obj) {
856                         if (obj->ptr != TEST_PTR || obj->value != i) {
857                                 pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
858                                         obj->ptr, TEST_PTR, obj->value, i);
859                                 return -EINVAL;
860                         }
861                 }
862         }
863
864         return 0;
865 }
866
867 static void test_bucket_stats(struct rhashtable *ht, bool quiet)
868 {
869         unsigned int cnt, rcu_cnt, i, total = 0;
870         struct rhash_head *pos;
871         struct test_obj *obj;
872         struct bucket_table *tbl;
873
874         tbl = rht_dereference_rcu(ht->tbl, ht);
875         for (i = 0; i < tbl->size; i++) {
876                 rcu_cnt = cnt = 0;
877
878                 if (!quiet)
879                         pr_info(" [%#4x/%zu]", i, tbl->size);
880
881                 rht_for_each_entry_rcu(obj, pos, tbl, i, node) {
882                         cnt++;
883                         total++;
884                         if (!quiet)
885                                 pr_cont(" [%p],", obj);
886                 }
887
888                 rht_for_each_entry_rcu(obj, pos, tbl, i, node)
889                         rcu_cnt++;
890
891                 if (rcu_cnt != cnt)
892                         pr_warn("Test failed: Chain count mismatch %d != %d\n",
893                                 cnt, rcu_cnt);
894
895                 if (!quiet)
896                         pr_cont("\n  [%#x] first element: %p, chain length: %u\n",
897                                 i, tbl->buckets[i], cnt);
898         }
899
900         pr_info("  Traversal complete: counted=%u, nelems=%u, entries=%d\n",
901                 total, atomic_read(&ht->nelems), TEST_ENTRIES);
902
903         if (total != atomic_read(&ht->nelems) || total != TEST_ENTRIES)
904                 pr_warn("Test failed: Total count mismatch ^^^\n");
905 }
906
907 static int __init test_rhashtable(struct rhashtable *ht)
908 {
909         struct bucket_table *tbl;
910         struct test_obj *obj;
911         struct rhash_head *pos, *next;
912         int err;
913         unsigned int i;
914
915         /*
916          * Insertion Test:
917          * Insert TEST_ENTRIES into table with all keys even numbers
918          */
919         pr_info("  Adding %d keys\n", TEST_ENTRIES);
920         for (i = 0; i < TEST_ENTRIES; i++) {
921                 struct test_obj *obj;
922
923                 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
924                 if (!obj) {
925                         err = -ENOMEM;
926                         goto error;
927                 }
928
929                 obj->ptr = TEST_PTR;
930                 obj->value = i * 2;
931
932                 rhashtable_insert(ht, &obj->node);
933         }
934
935         rcu_read_lock();
936         test_bucket_stats(ht, true);
937         test_rht_lookup(ht);
938         rcu_read_unlock();
939
940         for (i = 0; i < TEST_NEXPANDS; i++) {
941                 pr_info("  Table expansion iteration %u...\n", i);
942                 mutex_lock(&ht->mutex);
943                 rhashtable_expand(ht);
944                 mutex_unlock(&ht->mutex);
945
946                 rcu_read_lock();
947                 pr_info("  Verifying lookups...\n");
948                 test_rht_lookup(ht);
949                 rcu_read_unlock();
950         }
951
952         for (i = 0; i < TEST_NEXPANDS; i++) {
953                 pr_info("  Table shrinkage iteration %u...\n", i);
954                 mutex_lock(&ht->mutex);
955                 rhashtable_shrink(ht);
956                 mutex_unlock(&ht->mutex);
957
958                 rcu_read_lock();
959                 pr_info("  Verifying lookups...\n");
960                 test_rht_lookup(ht);
961                 rcu_read_unlock();
962         }
963
964         rcu_read_lock();
965         test_bucket_stats(ht, true);
966         rcu_read_unlock();
967
968         pr_info("  Deleting %d keys\n", TEST_ENTRIES);
969         for (i = 0; i < TEST_ENTRIES; i++) {
970                 u32 key = i * 2;
971
972                 obj = rhashtable_lookup(ht, &key);
973                 BUG_ON(!obj);
974
975                 rhashtable_remove(ht, &obj->node);
976                 kfree(obj);
977         }
978
979         return 0;
980
981 error:
982         tbl = rht_dereference_rcu(ht->tbl, ht);
983         for (i = 0; i < tbl->size; i++)
984                 rht_for_each_entry_safe(obj, pos, next, tbl, i, node)
985                         kfree(obj);
986
987         return err;
988 }
989
990 static int __init test_rht_init(void)
991 {
992         struct rhashtable ht;
993         struct rhashtable_params params = {
994                 .nelem_hint = TEST_HT_SIZE,
995                 .head_offset = offsetof(struct test_obj, node),
996                 .key_offset = offsetof(struct test_obj, value),
997                 .key_len = sizeof(int),
998                 .hashfn = jhash,
999                 .nulls_base = (3U << RHT_BASE_SHIFT),
1000                 .grow_decision = rht_grow_above_75,
1001                 .shrink_decision = rht_shrink_below_30,
1002         };
1003         int err;
1004
1005         pr_info("Running resizable hashtable tests...\n");
1006
1007         err = rhashtable_init(&ht, &params);
1008         if (err < 0) {
1009                 pr_warn("Test failed: Unable to initialize hashtable: %d\n",
1010                         err);
1011                 return err;
1012         }
1013
1014         err = test_rhashtable(&ht);
1015
1016         rhashtable_destroy(&ht);
1017
1018         return err;
1019 }
1020
1021 subsys_initcall(test_rht_init);
1022
1023 #endif /* CONFIG_TEST_RHASHTABLE */