rhashtable: remove indirection for grow/shrink decision functions
lib/rhashtable.c
1 /*
2  * Resizable, Scalable, Concurrent Hash Table
3  *
4  * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
5  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
6  *
7  * Based on the following paper:
8  * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
9  *
10  * Code partially derived from nft_hash
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License version 2 as
14  * published by the Free Software Foundation.
15  */
16
17 #include <linux/kernel.h>
18 #include <linux/init.h>
19 #include <linux/log2.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/mm.h>
23 #include <linux/jhash.h>
24 #include <linux/random.h>
25 #include <linux/rhashtable.h>
26 #include <linux/err.h>
27
28 #define HASH_DEFAULT_SIZE       64UL
29 #define HASH_MIN_SIZE           4UL
30 #define BUCKET_LOCKS_PER_CPU   128UL
31
32 /* Base bits plus 1 bit for nulls marker */
33 #define HASH_RESERVED_SPACE     (RHT_BASE_BITS + 1)
34
35 enum {
36         RHT_LOCK_NORMAL,
37         RHT_LOCK_NESTED,
38 };
39
40 /* The bucket lock is selected based on the hash and protects mutations
41  * on a group of hash buckets.
42  *
43  * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
44  * a single lock always covers both buckets, which may both contain
45  * entries that link to the same bucket of the old table during resizing.
46  * This simplifies the locking, as locking the bucket in both tables
47  * during a resize always guarantees protection.
48  *
49  * IMPORTANT: When holding the bucket lock of both the old and new table
50  * during expansions and shrinking, the old bucket lock must always be
51  * acquired first.
52  */
53 static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
54 {
55         return &tbl->locks[hash & tbl->locks_mask];
56 }
57
58 static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
59 {
60         return (void *) he - ht->p.head_offset;
61 }
62
63 static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
64 {
65         return hash & (tbl->size - 1);
66 }
67
68 static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
69 {
70         u32 hash;
71
72         if (unlikely(!ht->p.key_len))
73                 hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
74         else
75                 hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
76                                     ht->p.hash_rnd);
77
78         return hash >> HASH_RESERVED_SPACE;
79 }
80
81 static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
82 {
83         return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE;
84 }
85
86 static u32 head_hashfn(const struct rhashtable *ht,
87                        const struct bucket_table *tbl,
88                        const struct rhash_head *he)
89 {
90         return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
91 }
92
93 #ifdef CONFIG_PROVE_LOCKING
94 static void debug_dump_buckets(const struct rhashtable *ht,
95                                const struct bucket_table *tbl)
96 {
97         struct rhash_head *he;
98         unsigned int i, hash;
99
100         for (i = 0; i < tbl->size; i++) {
101                 pr_warn(" [Bucket %d] ", i);
102                 rht_for_each_rcu(he, tbl, i) {
103                         hash = head_hashfn(ht, tbl, he);
104                         pr_cont("[hash = %#x, lock = %p] ",
105                                 hash, bucket_lock(tbl, hash));
106                 }
107                 pr_cont("\n");
108         }
109
110 }
111
112 static void debug_dump_table(struct rhashtable *ht,
113                              const struct bucket_table *tbl,
114                              unsigned int hash)
115 {
116         struct bucket_table *old_tbl, *future_tbl;
117
118         pr_emerg("BUG: lock for hash %#x in table %p not held\n",
119                  hash, tbl);
120
121         rcu_read_lock();
122         future_tbl = rht_dereference_rcu(ht->future_tbl, ht);
123         old_tbl = rht_dereference_rcu(ht->tbl, ht);
124         if (future_tbl != old_tbl) {
125                 pr_warn("Future table %p (size: %zd)\n",
126                         future_tbl, future_tbl->size);
127                 debug_dump_buckets(ht, future_tbl);
128         }
129
130         pr_warn("Table %p (size: %zd)\n", old_tbl, old_tbl->size);
131         debug_dump_buckets(ht, old_tbl);
132
133         rcu_read_unlock();
134 }
135
136 #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
137 #define ASSERT_BUCKET_LOCK(HT, TBL, HASH)                               \
138         do {                                                            \
139                 if (unlikely(!lockdep_rht_bucket_is_held(TBL, HASH))) { \
140                         debug_dump_table(HT, TBL, HASH);                \
141                         BUG();                                          \
142                 }                                                       \
143         } while (0)
144
145 int lockdep_rht_mutex_is_held(struct rhashtable *ht)
146 {
147         return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
148 }
149 EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
150
151 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
152 {
153         spinlock_t *lock = bucket_lock(tbl, hash);
154
155         return (debug_locks) ? lockdep_is_held(lock) : 1;
156 }
157 EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
158 #else
159 #define ASSERT_RHT_MUTEX(HT)
160 #define ASSERT_BUCKET_LOCK(HT, TBL, HASH)
161 #endif
162
163
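/* Walk a bucket chain to its nulls marker and return the address of the
 * final 'next' pointer, so that callers can append entries to the tail
 * of the chain (used when zipping chains together during a resize).
 */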
164 static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
165 {
166         struct rhash_head __rcu **pprev;
167
168         for (pprev = &tbl->buckets[n];
169              !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
170              pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
171                 ;
172
173         return pprev;
174 }
175
176 static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
177 {
178         unsigned int i, size;
179 #if defined(CONFIG_PROVE_LOCKING)
180         unsigned int nr_pcpus = 2;
181 #else
182         unsigned int nr_pcpus = num_possible_cpus();
183 #endif
184
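        /* Scale the number of bucket locks with the number of possible CPUs
         * (capped at 32) and round up to a power of two, so that locks_mask
         * below can be used as a simple bit mask.
         */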
185         nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
186         size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
187
188         /* Never allocate more than 0.5 locks per bucket */
189         size = min_t(unsigned int, size, tbl->size >> 1);
190
191         if (sizeof(spinlock_t) != 0) {
192 #ifdef CONFIG_NUMA
193                 if (size * sizeof(spinlock_t) > PAGE_SIZE)
194                         tbl->locks = vmalloc(size * sizeof(spinlock_t));
195                 else
196 #endif
197                 tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
198                                            GFP_KERNEL);
199                 if (!tbl->locks)
200                         return -ENOMEM;
201                 for (i = 0; i < size; i++)
202                         spin_lock_init(&tbl->locks[i]);
203         }
204         tbl->locks_mask = size - 1;
205
206         return 0;
207 }
208
209 static void bucket_table_free(const struct bucket_table *tbl)
210 {
211         if (tbl)
212                 kvfree(tbl->locks);
213
214         kvfree(tbl);
215 }
216
217 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
218                                                size_t nbuckets)
219 {
220         struct bucket_table *tbl = NULL;
221         size_t size;
222         int i;
223
224         size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
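        /* Try a physically contiguous allocation for small tables, but fall
         * back to vmalloc space instead of failing for larger ones.
         */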
225         if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
226                 tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
227         if (tbl == NULL)
228                 tbl = vzalloc(size);
229         if (tbl == NULL)
230                 return NULL;
231
232         tbl->size = nbuckets;
233
234         if (alloc_bucket_locks(ht, tbl) < 0) {
235                 bucket_table_free(tbl);
236                 return NULL;
237         }
238
239         for (i = 0; i < nbuckets; i++)
240                 INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
241
242         return tbl;
243 }
244
245 /**
246  * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
247  * @ht:         hash table
248  * @new_size:   new table size
249  */
250 static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
251 {
252         /* Expand table when exceeding 75% load */
253         return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
254                (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift);
255 }
256
257 /**
258  * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
259  * @ht:         hash table
260  * @new_size:   new table size
261  */
262 static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
263 {
264         /* Shrink table beneath 30% load */
265         return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
266                (atomic_read(&ht->shift) > ht->p.min_shift);
267 }
268
269 static void lock_buckets(struct bucket_table *new_tbl,
270                          struct bucket_table *old_tbl, unsigned int hash)
271         __acquires(old_bucket_lock)
272 {
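        /* Take the old table's lock first (see the locking rules above); the
         * nested annotation tells lockdep that two locks of the same class
         * are intentionally held at once during a resize.
         */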
273         spin_lock_bh(bucket_lock(old_tbl, hash));
274         if (new_tbl != old_tbl)
275                 spin_lock_bh_nested(bucket_lock(new_tbl, hash),
276                                     RHT_LOCK_NESTED);
277 }
278
279 static void unlock_buckets(struct bucket_table *new_tbl,
280                            struct bucket_table *old_tbl, unsigned int hash)
281         __releases(old_bucket_lock)
282 {
283         if (new_tbl != old_tbl)
284                 spin_unlock_bh(bucket_lock(new_tbl, hash));
285         spin_unlock_bh(bucket_lock(old_tbl, hash));
286 }
287
288 /**
289  * Unlink entries on the bucket which hash to a different bucket.
290  *
291  * Returns true if no more work needs to be performed on the bucket.
292  */
293 static bool hashtable_chain_unzip(struct rhashtable *ht,
294                                   const struct bucket_table *new_tbl,
295                                   struct bucket_table *old_tbl,
296                                   size_t old_hash)
297 {
298         struct rhash_head *he, *p, *next;
299         unsigned int new_hash, new_hash2;
300
301         ASSERT_BUCKET_LOCK(ht, old_tbl, old_hash);
302
303         /* Old bucket empty, no work needed. */
304         p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
305                                    old_hash);
306         if (rht_is_a_nulls(p))
307                 return false;
308
309         new_hash = head_hashfn(ht, new_tbl, p);
310         ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);
311
312         /* Advance the old bucket pointer one or more times until it
313          * reaches a node that doesn't hash to the same bucket as the
314          * previous node p.
315          */
316         rht_for_each_continue(he, p->next, old_tbl, old_hash) {
317                 new_hash2 = head_hashfn(ht, new_tbl, he);
318                 ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash2);
319
320                 if (new_hash != new_hash2)
321                         break;
322                 p = he;
323         }
324         rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);
325
326         /* Find the subsequent node which does hash to the same
327          * bucket as node p, or the bucket's nulls marker if no such node exists.
328          */
329         INIT_RHT_NULLS_HEAD(next, ht, old_hash);
330         if (!rht_is_a_nulls(he)) {
331                 rht_for_each_continue(he, he->next, old_tbl, old_hash) {
332                         if (head_hashfn(ht, new_tbl, he) == new_hash) {
333                                 next = he;
334                                 break;
335                         }
336                 }
337         }
338
339         /* Set p's next pointer to that subsequent node pointer,
340          * bypassing the nodes which do not hash to p's bucket
341          */
342         rcu_assign_pointer(p->next, next);
343
344         p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
345                                    old_hash);
346
347         return !rht_is_a_nulls(p);
348 }
349
350 static void link_old_to_new(struct rhashtable *ht, struct bucket_table *new_tbl,
351                             unsigned int new_hash, struct rhash_head *entry)
352 {
353         ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);
354
355         rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
356 }
357
358 /**
359  * rhashtable_expand - Expand hash table while allowing concurrent lookups
360  * @ht:         the hash table to expand
361  *
362  * A secondary bucket array is allocated and the hash entries are migrated
363  * while keeping them on both lists until the end of the RCU grace period.
364  *
365  * This function may only be called in a context where it is safe to call
366  * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
367  *
368  * The caller must ensure that no concurrent resizing occurs by holding
369  * ht->mutex.
370  *
371  * It is valid to have concurrent insertions and deletions protected by per
372  * bucket locks or concurrent RCU protected lookups and traversals.
373  */
374 int rhashtable_expand(struct rhashtable *ht)
375 {
376         struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
377         struct rhash_head *he;
378         unsigned int new_hash, old_hash;
379         bool complete = false;
380
381         ASSERT_RHT_MUTEX(ht);
382
383         new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
384         if (new_tbl == NULL)
385                 return -ENOMEM;
386
387         atomic_inc(&ht->shift);
388
389         /* Make insertions go into the new, empty table right away. Deletions
390          * and lookups will be attempted in both tables until we synchronize.
391          * The synchronize_rcu() guarantees that the new table is picked up
392          * by every CPU, so no new additions go into the old table while we relink.
393          */
394         rcu_assign_pointer(ht->future_tbl, new_tbl);
395         synchronize_rcu();
396
397         /* For each new bucket, search the corresponding old bucket for the
398          * first entry that hashes to the new bucket, and link the end of
399          * the newly formed bucket chain (containing entries added to the future
400          * table) to that entry. Since all the entries which will end up in
401          * the new bucket appear in the same old bucket, this constructs an
402          * entirely valid new hash table, but with multiple buckets
403          * "zipped" together into a single imprecise chain.
404          */
405         for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
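                /* The table doubled in size, so each new bucket draws its
                 * entries from exactly one old bucket: the one given by the
                 * new bucket index masked with the old (halved) table size.
                 */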
406                 old_hash = rht_bucket_index(old_tbl, new_hash);
407                 lock_buckets(new_tbl, old_tbl, new_hash);
408                 rht_for_each(he, old_tbl, old_hash) {
409                         if (head_hashfn(ht, new_tbl, he) == new_hash) {
410                                 link_old_to_new(ht, new_tbl, new_hash, he);
411                                 break;
412                         }
413                 }
414                 unlock_buckets(new_tbl, old_tbl, new_hash);
415         }
416
417         /* Unzip interleaved hash chains */
418         while (!complete && !ht->being_destroyed) {
419                 /* Wait for readers. All new readers will see the new
420                  * table, and thus no references to the old table will
421                  * remain.
422                  */
423                 synchronize_rcu();
424
425                 /* For each bucket in the old table (each of which
426                  * contains items from multiple buckets of the new
427                  * table): ...
428                  */
429                 complete = true;
430                 for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
431                         lock_buckets(new_tbl, old_tbl, old_hash);
432
433                         if (hashtable_chain_unzip(ht, new_tbl, old_tbl,
434                                                   old_hash))
435                                 complete = false;
436
437                         unlock_buckets(new_tbl, old_tbl, old_hash);
438                 }
439         }
440
441         rcu_assign_pointer(ht->tbl, new_tbl);
442         synchronize_rcu();
443
444         bucket_table_free(old_tbl);
445         return 0;
446 }
447 EXPORT_SYMBOL_GPL(rhashtable_expand);
448
449 /**
450  * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
451  * @ht:         the hash table to shrink
452  *
453  * This function may only be called in a context where it is safe to call
454  * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
455  *
456  * The caller must ensure that no concurrent resizing occurs by holding
457  * ht->mutex.
458  *
459  * The caller must ensure that no concurrent table mutations take place.
460  * It is however valid to have concurrent lookups if they are RCU protected.
461  *
462  * It is valid to have concurrent insertions and deletions protected by per
463  * bucket locks or concurrent RCU protected lookups and traversals.
464  */
465 int rhashtable_shrink(struct rhashtable *ht)
466 {
467         struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
468         unsigned int new_hash;
469
470         ASSERT_RHT_MUTEX(ht);
471
472         new_tbl = bucket_table_alloc(ht, tbl->size / 2);
473         if (new_tbl == NULL)
474                 return -ENOMEM;
475
476         rcu_assign_pointer(ht->future_tbl, new_tbl);
477         synchronize_rcu();
478
479         /* Link the first entry in the old bucket to the end of the
480          * bucket in the new table. As entries are concurrently being
481          * added to the new table, lock down the new bucket. As we
482          * always divide the size in half when shrinking, each bucket
483          * in the new table maps to exactly two buckets in the old
484          * table.
485          */
486         for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
487                 lock_buckets(new_tbl, tbl, new_hash);
488
489                 rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
490                                    tbl->buckets[new_hash]);
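                /* The two source buckets (new_hash and new_hash + new_tbl->size)
                 * are covered by the same bucket lock, since at most
                 * tbl->size / 2 locks are ever allocated.
                 */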
491                 ASSERT_BUCKET_LOCK(ht, tbl, new_hash + new_tbl->size);
492                 rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
493                                    tbl->buckets[new_hash + new_tbl->size]);
494
495                 unlock_buckets(new_tbl, tbl, new_hash);
496         }
497
498         /* Publish the new, valid hash table */
499         rcu_assign_pointer(ht->tbl, new_tbl);
500         atomic_dec(&ht->shift);
501
502         /* Wait for readers. No new readers will have references to the
503          * old hash table.
504          */
505         synchronize_rcu();
506
507         bucket_table_free(tbl);
508
509         return 0;
510 }
511 EXPORT_SYMBOL_GPL(rhashtable_shrink);
512
513 static void rht_deferred_worker(struct work_struct *work)
514 {
515         struct rhashtable *ht;
516         struct bucket_table *tbl;
517         struct rhashtable_walker *walker;
518
519         ht = container_of(work, struct rhashtable, run_work);
520         mutex_lock(&ht->mutex);
521         if (ht->being_destroyed)
522                 goto unlock;
523
524         tbl = rht_dereference(ht->tbl, ht);
525
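        /* Notify all pending walkers that the table is about to be resized
         * so that their next walk_start/walk_next returns -EAGAIN and the
         * walk restarts against the new table.
         */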
526         list_for_each_entry(walker, &ht->walkers, list)
527                 walker->resize = true;
528
529         if (rht_grow_above_75(ht, tbl->size))
530                 rhashtable_expand(ht);
531         else if (rht_shrink_below_30(ht, tbl->size))
532                 rhashtable_shrink(ht);
533 unlock:
534         mutex_unlock(&ht->mutex);
535 }
536
537 static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
538                                 struct bucket_table *tbl,
539                                 const struct bucket_table *old_tbl, u32 hash)
540 {
541         bool no_resize_running = tbl == old_tbl;
542         struct rhash_head *head;
543
544         hash = rht_bucket_index(tbl, hash);
545         head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
546
547         ASSERT_BUCKET_LOCK(ht, tbl, hash);
548
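        /* An empty bucket is terminated by a nulls marker rather than NULL,
         * so start a fresh chain with the marker; otherwise link in at the
         * head of the existing chain.
         */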
549         if (rht_is_a_nulls(head))
550                 INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
551         else
552                 RCU_INIT_POINTER(obj->next, head);
553
554         rcu_assign_pointer(tbl->buckets[hash], obj);
555
556         atomic_inc(&ht->nelems);
557         if (no_resize_running && rht_grow_above_75(ht, tbl->size))
558                 schedule_work(&ht->run_work);
559 }
560
561 /**
562  * rhashtable_insert - insert object into hash table
563  * @ht:         hash table
564  * @obj:        pointer to hash head inside object
565  *
566  * Will take a per bucket spinlock to protect against mutual mutations
567  * on the same bucket. Multiple insertions may occur in parallel unless
568  * they map to the same bucket lock.
569  *
570  * It is safe to call this function from atomic context.
571  *
572  * Will trigger an automatic deferred table resizing if the size grows
573  * beyond the watermark checked by rht_grow_above_75().
575  */
576 void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
577 {
578         struct bucket_table *tbl, *old_tbl;
579         unsigned hash;
580
581         rcu_read_lock();
582
583         tbl = rht_dereference_rcu(ht->future_tbl, ht);
584         old_tbl = rht_dereference_rcu(ht->tbl, ht);
585         hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
586
587         lock_buckets(tbl, old_tbl, hash);
588         __rhashtable_insert(ht, obj, tbl, old_tbl, hash);
589         unlock_buckets(tbl, old_tbl, hash);
590
591         rcu_read_unlock();
592 }
593 EXPORT_SYMBOL_GPL(rhashtable_insert);
594
595 /**
596  * rhashtable_remove - remove object from hash table
597  * @ht:         hash table
598  * @obj:        pointer to hash head inside object
599  *
600  * Since the hash chain is singly linked, the removal operation needs to
601  * walk the bucket chain upon removal. The removal operation is thus
602  * considerably slow if the hash table is not correctly sized.
603  *
604  * Will automatically shrink the table if the number of elements falls
605  * below the watermark checked by rht_shrink_below_30().
606  *
607  * The caller must ensure that no concurrent table mutations occur. It is
608  * however valid to have concurrent lookups if they are RCU protected.
609  */
610 bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
611 {
612         struct bucket_table *tbl, *new_tbl, *old_tbl;
613         struct rhash_head __rcu **pprev;
614         struct rhash_head *he, *he2;
615         unsigned int hash, new_hash;
616         bool ret = false;
617
618         rcu_read_lock();
619         old_tbl = rht_dereference_rcu(ht->tbl, ht);
620         tbl = new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
621         new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
622
623         lock_buckets(new_tbl, old_tbl, new_hash);
624 restart:
625         hash = rht_bucket_index(tbl, new_hash);
626         pprev = &tbl->buckets[hash];
627         rht_for_each(he, tbl, hash) {
628                 if (he != obj) {
629                         pprev = &he->next;
630                         continue;
631                 }
632
633                 ASSERT_BUCKET_LOCK(ht, tbl, hash);
634
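                /* During a resize the old and new chains are zipped together,
                 * so unlinking must not splice in entries that belong to a
                 * different bucket: terminate the chain with this bucket's
                 * nulls marker, or skip ahead to the next entry hashing to
                 * the same bucket, as appropriate.
                 */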
635                 if (old_tbl->size > new_tbl->size && tbl == old_tbl &&
636                     !rht_is_a_nulls(obj->next) &&
637                     head_hashfn(ht, tbl, obj->next) != hash) {
638                         rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
639                 } else if (unlikely(old_tbl->size < new_tbl->size && tbl == new_tbl)) {
640                         rht_for_each_continue(he2, obj->next, tbl, hash) {
641                                 if (head_hashfn(ht, tbl, he2) == hash) {
642                                         rcu_assign_pointer(*pprev, he2);
643                                         goto found;
644                                 }
645                         }
646
647                         rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
648                 } else {
649                         rcu_assign_pointer(*pprev, obj->next);
650                 }
651
652 found:
653                 ret = true;
654                 break;
655         }
656
657         /* The entry may be linked in either 'tbl', 'future_tbl', or both.
658          * 'future_tbl' only exists for a short period of time during
659          * resizing. Thus traversing both is fine and the added cost is
660          * very rare.
661          */
662         if (tbl != old_tbl) {
663                 tbl = old_tbl;
664                 goto restart;
665         }
666
667         unlock_buckets(new_tbl, old_tbl, new_hash);
668
669         if (ret) {
670                 bool no_resize_running = new_tbl == old_tbl;
671
672                 atomic_dec(&ht->nelems);
673                 if (no_resize_running && rht_shrink_below_30(ht, new_tbl->size))
674                         schedule_work(&ht->run_work);
675         }
676
677         rcu_read_unlock();
678
679         return ret;
680 }
681 EXPORT_SYMBOL_GPL(rhashtable_remove);
682
683 struct rhashtable_compare_arg {
684         struct rhashtable *ht;
685         const void *key;
686 };
687
688 static bool rhashtable_compare(void *ptr, void *arg)
689 {
690         struct rhashtable_compare_arg *x = arg;
691         struct rhashtable *ht = x->ht;
692
693         return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
694 }
695
696 /**
697  * rhashtable_lookup - lookup key in hash table
698  * @ht:         hash table
699  * @key:        pointer to key
700  *
701  * Computes the hash value for the key and traverses the bucket chain looking
702  * for an entry with an identical key. The first matching entry is returned.
703  *
704  * This lookup function may only be used for fixed-key hash tables (key_len
705  * parameter set). It will BUG() if used inappropriately.
706  *
707  * Lookups may occur in parallel with hashtable mutations and resizing.
708  */
709 void *rhashtable_lookup(struct rhashtable *ht, const void *key)
710 {
711         struct rhashtable_compare_arg arg = {
712                 .ht = ht,
713                 .key = key,
714         };
715
716         BUG_ON(!ht->p.key_len);
717
718         return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
719 }
720 EXPORT_SYMBOL_GPL(rhashtable_lookup);
721
722 /**
723  * rhashtable_lookup_compare - search hash table with compare function
724  * @ht:         hash table
725  * @key:        the pointer to the key
726  * @compare:    compare function, must return true on match
727  * @arg:        argument passed on to compare function
728  *
729  * Traverses the bucket chain behind the provided hash value and calls the
730  * specified compare function for each entry.
731  *
732  * Lookups may occur in parallel with hashtable mutations and resizing.
733  *
734  * Returns the first entry on which the compare function returned true.
735  */
736 void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
737                                 bool (*compare)(void *, void *), void *arg)
738 {
739         const struct bucket_table *tbl, *old_tbl;
740         struct rhash_head *he;
741         u32 hash;
742
743         rcu_read_lock();
744
745         old_tbl = rht_dereference_rcu(ht->tbl, ht);
746         tbl = rht_dereference_rcu(ht->future_tbl, ht);
747         hash = key_hashfn(ht, key, ht->p.key_len);
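        /* New entries are only added to the future table during a resize,
         * so search it first and fall back to the old table afterwards.
         */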
748 restart:
749         rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
750                 if (!compare(rht_obj(ht, he), arg))
751                         continue;
752                 rcu_read_unlock();
753                 return rht_obj(ht, he);
754         }
755
756         if (unlikely(tbl != old_tbl)) {
757                 tbl = old_tbl;
758                 goto restart;
759         }
760         rcu_read_unlock();
761
762         return NULL;
763 }
764 EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
765
766 /**
767  * rhashtable_lookup_insert - lookup and insert object into hash table
768  * @ht:         hash table
769  * @obj:        pointer to hash head inside object
770  *
771  * Locks down the bucket chain in both the old and new table if a resize
772  * is in progress to ensure that writers can't remove from the old table
773  * and can't insert to the new table during the atomic operation of search
774  * and insertion. Searches for duplicates in both the old and new table if
775  * a resize is in progress.
776  *
777  * This lookup function may only be used for fixed-key hash tables (key_len
778  * parameter set). It will BUG() if used inappropriately.
779  *
780  * It is safe to call this function from atomic context.
781  *
782  * Will trigger an automatic deferred table resizing if the size grows
783  * beyond the watermark checked by rht_grow_above_75().
785  */
786 bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
787 {
788         struct rhashtable_compare_arg arg = {
789                 .ht = ht,
790                 .key = rht_obj(ht, obj) + ht->p.key_offset,
791         };
792
793         BUG_ON(!ht->p.key_len);
794
795         return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
796                                                 &arg);
797 }
798 EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
799
800 /**
801  * rhashtable_lookup_compare_insert - search and insert object to hash table
802  *                                    with compare function
803  * @ht:         hash table
804  * @obj:        pointer to hash head inside object
805  * @compare:    compare function, must return true on match
806  * @arg:        argument passed on to compare function
807  *
808  * Locks down the bucket chain in both the old and new table if a resize
809  * is in progress to ensure that writers can't remove from the old table
810  * and can't insert to the new table during the atomic operation of search
811  * and insertion. Searches for duplicates in both the old and new table if
812  * a resize is in progress.
813  *
814  * Lookups may occur in parallel with hashtable mutations and resizing.
815  *
816  * Will trigger an automatic deferred table resizing if the size grows
817  * beyond the watermark checked by rht_grow_above_75().
819  */
820 bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
821                                       struct rhash_head *obj,
822                                       bool (*compare)(void *, void *),
823                                       void *arg)
824 {
825         struct bucket_table *new_tbl, *old_tbl;
826         u32 new_hash;
827         bool success = true;
828
829         BUG_ON(!ht->p.key_len);
830
831         rcu_read_lock();
832         old_tbl = rht_dereference_rcu(ht->tbl, ht);
833         new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
834         new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
835
836         lock_buckets(new_tbl, old_tbl, new_hash);
837
838         if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
839                                       compare, arg)) {
840                 success = false;
841                 goto exit;
842         }
843
844         __rhashtable_insert(ht, obj, new_tbl, old_tbl, new_hash);
845
846 exit:
847         unlock_buckets(new_tbl, old_tbl, new_hash);
848         rcu_read_unlock();
849
850         return success;
851 }
852 EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);
853
854 /**
855  * rhashtable_walk_init - Initialise an iterator
856  * @ht:         Table to walk over
857  * @iter:       Hash table Iterator
858  *
859  * This function prepares a hash table walk.
860  *
861  * Note that if you restart a walk after rhashtable_walk_stop you
862  * may see the same object twice.  Also, you may miss objects if
863  * there are removals in between rhashtable_walk_stop and the next
864  * call to rhashtable_walk_start.
865  *
866  * For a completely stable walk you should construct your own data
867  * structure outside the hash table.
868  *
869  * This function may sleep so you must not call it from interrupt
870  * context or with spin locks held.
871  *
872  * You must call rhashtable_walk_exit if this function returns
873  * successfully.
874  */
875 int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
876 {
877         iter->ht = ht;
878         iter->p = NULL;
879         iter->slot = 0;
880         iter->skip = 0;
881
882         iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
883         if (!iter->walker)
884                 return -ENOMEM;
885
886         INIT_LIST_HEAD(&iter->walker->list);
887         iter->walker->resize = false;
888
889         mutex_lock(&ht->mutex);
890         list_add(&iter->walker->list, &ht->walkers);
891         mutex_unlock(&ht->mutex);
892
893         return 0;
894 }
895 EXPORT_SYMBOL_GPL(rhashtable_walk_init);
896
897 /**
898  * rhashtable_walk_exit - Free an iterator
899  * @iter:       Hash table Iterator
900  *
901  * This function frees resources allocated by rhashtable_walk_init.
902  */
903 void rhashtable_walk_exit(struct rhashtable_iter *iter)
904 {
905         mutex_lock(&iter->ht->mutex);
906         list_del(&iter->walker->list);
907         mutex_unlock(&iter->ht->mutex);
908         kfree(iter->walker);
909 }
910 EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
911
912 /**
913  * rhashtable_walk_start - Start a hash table walk
914  * @iter:       Hash table iterator
915  *
916  * Start a hash table walk.  Note that we take the RCU lock in all
917  * cases including when we return an error.  So you must always call
918  * rhashtable_walk_stop to clean up.
919  *
920  * Returns zero if successful.
921  *
922  * Returns -EAGAIN if a resize event occurred.  Note that the iterator
923  * will rewind back to the beginning and you may use it immediately
924  * by calling rhashtable_walk_next.
925  */
926 int rhashtable_walk_start(struct rhashtable_iter *iter)
927 {
928         rcu_read_lock();
929
930         if (iter->walker->resize) {
931                 iter->slot = 0;
932                 iter->skip = 0;
933                 iter->walker->resize = false;
934                 return -EAGAIN;
935         }
936
937         return 0;
938 }
939 EXPORT_SYMBOL_GPL(rhashtable_walk_start);
940
941 /**
942  * rhashtable_walk_next - Return the next object and advance the iterator
943  * @iter:       Hash table iterator
944  *
945  * Note that you must call rhashtable_walk_stop when you are finished
946  * with the walk.
947  *
948  * Returns the next object or NULL when the end of the table is reached.
949  *
950  * Returns -EAGAIN if a resize event occurred.  Note that the iterator
951  * will rewind back to the beginning and you may continue to use it.
952  */
953 void *rhashtable_walk_next(struct rhashtable_iter *iter)
954 {
955         const struct bucket_table *tbl;
956         struct rhashtable *ht = iter->ht;
957         struct rhash_head *p = iter->p;
958         void *obj = NULL;
959
960         tbl = rht_dereference_rcu(ht->tbl, ht);
961
962         if (p) {
963                 p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
964                 goto next;
965         }
966
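        /* iter->skip counts the entries of the current bucket that have
         * already been returned, so that a stopped walk can resume at the
         * right position within the chain.
         */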
967         for (; iter->slot < tbl->size; iter->slot++) {
968                 int skip = iter->skip;
969
970                 rht_for_each_rcu(p, tbl, iter->slot) {
971                         if (!skip)
972                                 break;
973                         skip--;
974                 }
975
976 next:
977                 if (!rht_is_a_nulls(p)) {
978                         iter->skip++;
979                         iter->p = p;
980                         obj = rht_obj(ht, p);
981                         goto out;
982                 }
983
984                 iter->skip = 0;
985         }
986
987         iter->p = NULL;
988
989 out:
990         if (iter->walker->resize) {
991                 iter->p = NULL;
992                 iter->slot = 0;
993                 iter->skip = 0;
994                 iter->walker->resize = false;
995                 return ERR_PTR(-EAGAIN);
996         }
997
998         return obj;
999 }
1000 EXPORT_SYMBOL_GPL(rhashtable_walk_next);
1001
1002 /**
1003  * rhashtable_walk_stop - Finish a hash table walk
1004  * @iter:       Hash table iterator
1005  *
1006  * Finish a hash table walk.
1007  */
1008 void rhashtable_walk_stop(struct rhashtable_iter *iter)
1009 {
1010         rcu_read_unlock();
1011         iter->p = NULL;
1012 }
1013 EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
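
/* A minimal usage sketch of the walker API above (error handling trimmed;
 * "struct test_obj" is a hypothetical object type embedding a rhash_head):
 *
 *      struct rhashtable_iter iter;
 *      struct test_obj *obj;
 *
 *      if (rhashtable_walk_init(ht, &iter))
 *              return;
 *      rhashtable_walk_start(&iter);
 *      while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *              if (IS_ERR(obj))
 *                      continue;       // -EAGAIN: walk rewound after a resize
 *              ...
 *      }
 *      rhashtable_walk_stop(&iter);
 *      rhashtable_walk_exit(&iter);
 */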
1014
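/* Size the initial table so that the hinted number of elements stays below
 * the 75% grow watermark, but never below the configured minimum size.
 */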
1015 static size_t rounded_hashtable_size(struct rhashtable_params *params)
1016 {
1017         return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
1018                    1UL << params->min_shift);
1019 }
1020
1021 /**
1022  * rhashtable_init - initialize a new hash table
1023  * @ht:         hash table to be initialized
1024  * @params:     configuration parameters
1025  *
1026  * Initializes a new hash table based on the provided configuration
1027  * parameters. A table can be configured either with a variable or
1028  * fixed length key:
1029  *
1030  * Configuration Example 1: Fixed length keys
1031  * struct test_obj {
1032  *      int                     key;
1033  *      void *                  my_member;
1034  *      struct rhash_head       node;
1035  * };
1036  *
1037  * struct rhashtable_params params = {
1038  *      .head_offset = offsetof(struct test_obj, node),
1039  *      .key_offset = offsetof(struct test_obj, key),
1040  *      .key_len = sizeof(int),
1041  *      .hashfn = jhash,
1042  *      .nulls_base = (1U << RHT_BASE_SHIFT),
1043  * };
1044  *
1045  * Configuration Example 2: Variable length keys
1046  * struct test_obj {
1047  *      [...]
1048  *      struct rhash_head       node;
1049  * };
1050  *
1051  * u32 my_hash_fn(const void *data, u32 seed)
1052  * {
1053  *      struct test_obj *obj = data;
1054  *
1055  *      return [... hash ...];
1056  * }
1057  *
1058  * struct rhashtable_params params = {
1059  *      .head_offset = offsetof(struct test_obj, node),
1060  *      .hashfn = jhash,
1061  *      .obj_hashfn = my_hash_fn,
1062  * };
1063  */
1064 int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
1065 {
1066         struct bucket_table *tbl;
1067         size_t size;
1068
1069         size = HASH_DEFAULT_SIZE;
1070
1071         if ((params->key_len && !params->hashfn) ||
1072             (!params->key_len && !params->obj_hashfn))
1073                 return -EINVAL;
1074
1075         if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
1076                 return -EINVAL;
1077
1078         params->min_shift = max_t(size_t, params->min_shift,
1079                                   ilog2(HASH_MIN_SIZE));
1080
1081         if (params->nelem_hint)
1082                 size = rounded_hashtable_size(params);
1083
1084         memset(ht, 0, sizeof(*ht));
1085         mutex_init(&ht->mutex);
1086         memcpy(&ht->p, params, sizeof(*params));
1087         INIT_LIST_HEAD(&ht->walkers);
1088
1089         if (params->locks_mul)
1090                 ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
1091         else
1092                 ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
1093
1094         tbl = bucket_table_alloc(ht, size);
1095         if (tbl == NULL)
1096                 return -ENOMEM;
1097
1098         atomic_set(&ht->nelems, 0);
1099         atomic_set(&ht->shift, ilog2(tbl->size));
1100         RCU_INIT_POINTER(ht->tbl, tbl);
1101         RCU_INIT_POINTER(ht->future_tbl, tbl);
1102
1103         if (!ht->p.hash_rnd)
1104                 get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
1105
1106         INIT_WORK(&ht->run_work, rht_deferred_worker);
1107
1108         return 0;
1109 }
1110 EXPORT_SYMBOL_GPL(rhashtable_init);
1111
1112 /**
1113  * rhashtable_destroy - destroy hash table
1114  * @ht:         the hash table to destroy
1115  *
1116  * Frees the bucket array. This function is not rcu safe, therefore the caller
1117  * has to make sure that no resizing may happen by unpublishing the hashtable
1118  * and waiting for the quiescent cycle before releasing the bucket array.
1119  */
1120 void rhashtable_destroy(struct rhashtable *ht)
1121 {
1122         ht->being_destroyed = true;
1123
1124         cancel_work_sync(&ht->run_work);
1125
1126         mutex_lock(&ht->mutex);
1127         bucket_table_free(rht_dereference(ht->tbl, ht));
1128         mutex_unlock(&ht->mutex);
1129 }
1130 EXPORT_SYMBOL_GPL(rhashtable_destroy);