rhashtable: better high order allocation attempts
[linux-2.6-block.git] / lib/rhashtable.c
1 /*
2  * Resizable, Scalable, Concurrent Hash Table
3  *
4  * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
5  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
6  *
7  * Based on the following paper:
8  * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
9  *
10  * Code partially derived from nft_hash
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License version 2 as
14  * published by the Free Software Foundation.
15  */
16
17 #include <linux/kernel.h>
18 #include <linux/init.h>
19 #include <linux/log2.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/mm.h>
23 #include <linux/jhash.h>
24 #include <linux/random.h>
25 #include <linux/rhashtable.h>
26 #include <linux/err.h>
27
28 #define HASH_DEFAULT_SIZE       64UL
29 #define HASH_MIN_SIZE           4UL
30 #define BUCKET_LOCKS_PER_CPU   128UL
31
32 /* Base bits plus 1 bit for nulls marker */
33 #define HASH_RESERVED_SPACE     (RHT_BASE_BITS + 1)
34
35 enum {
36         RHT_LOCK_NORMAL,
37         RHT_LOCK_NESTED,
38 };
39
40 /* The bucket lock is selected based on the hash and protects mutations
41  * on a group of hash buckets.
42  *
43  * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
44  * a single lock always covers both buckets which may both contain
45  * entries which link to the same bucket of the old table during resizing.
46  * This simplifies the locking as locking the bucket in both
47  * tables during resize always guarantees protection.
48  *
49  * IMPORTANT: When holding the bucket lock of both the old and new table
50  * during expansions and shrinking, the old bucket lock must always be
51  * acquired first.
52  */
53 static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
54 {
55         return &tbl->locks[hash & tbl->locks_mask];
56 }
57
58 static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
59 {
60         return (void *) he - ht->p.head_offset;
61 }
62
63 static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
64 {
65         return hash & (tbl->size - 1);
66 }
67
68 static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
69 {
70         u32 hash;
71
72         if (unlikely(!ht->p.key_len))
73                 hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
74         else
75                 hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
76                                     ht->p.hash_rnd);
77
78         return hash >> HASH_RESERVED_SPACE;
79 }
80
81 static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
82 {
83         return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE;
84 }
85
86 static u32 head_hashfn(const struct rhashtable *ht,
87                        const struct bucket_table *tbl,
88                        const struct rhash_head *he)
89 {
90         return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
91 }
92
93 #ifdef CONFIG_PROVE_LOCKING
94 static void debug_dump_buckets(const struct rhashtable *ht,
95                                const struct bucket_table *tbl)
96 {
97         struct rhash_head *he;
98         unsigned int i, hash;
99
100         for (i = 0; i < tbl->size; i++) {
101                 pr_warn(" [Bucket %d] ", i);
102                 rht_for_each_rcu(he, tbl, i) {
103                         hash = head_hashfn(ht, tbl, he);
104                         pr_cont("[hash = %#x, lock = %p] ",
105                                 hash, bucket_lock(tbl, hash));
106                 }
107                 pr_cont("\n");
108         }
109
110 }
111
112 static void debug_dump_table(struct rhashtable *ht,
113                              const struct bucket_table *tbl,
114                              unsigned int hash)
115 {
116         struct bucket_table *old_tbl, *future_tbl;
117
118         pr_emerg("BUG: lock for hash %#x in table %p not held\n",
119                  hash, tbl);
120
121         rcu_read_lock();
122         future_tbl = rht_dereference_rcu(ht->future_tbl, ht);
123         old_tbl = rht_dereference_rcu(ht->tbl, ht);
124         if (future_tbl != old_tbl) {
125                 pr_warn("Future table %p (size: %zd)\n",
126                         future_tbl, future_tbl->size);
127                 debug_dump_buckets(ht, future_tbl);
128         }
129
130         pr_warn("Table %p (size: %zd)\n", old_tbl, old_tbl->size);
131         debug_dump_buckets(ht, old_tbl);
132
133         rcu_read_unlock();
134 }
135
136 #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
137 #define ASSERT_BUCKET_LOCK(HT, TBL, HASH)                               \
138         do {                                                            \
139                 if (unlikely(!lockdep_rht_bucket_is_held(TBL, HASH))) { \
140                         debug_dump_table(HT, TBL, HASH);                \
141                         BUG();                                          \
142                 }                                                       \
143         } while (0)
144
145 int lockdep_rht_mutex_is_held(struct rhashtable *ht)
146 {
147         return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
148 }
149 EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
150
151 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
152 {
153         spinlock_t *lock = bucket_lock(tbl, hash);
154
155         return (debug_locks) ? lockdep_is_held(lock) : 1;
156 }
157 EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
158 #else
159 #define ASSERT_RHT_MUTEX(HT)
160 #define ASSERT_BUCKET_LOCK(HT, TBL, HASH)
161 #endif
162
163
164 static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
165 {
166         struct rhash_head __rcu **pprev;
167
168         for (pprev = &tbl->buckets[n];
169              !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
170              pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
171                 ;
172
173         return pprev;
174 }
175
176 static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
177 {
178         unsigned int i, size;
179 #if defined(CONFIG_PROVE_LOCKING)
180         unsigned int nr_pcpus = 2;
181 #else
182         unsigned int nr_pcpus = num_possible_cpus();
183 #endif
184
185         nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
186         size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
187
188         /* Never allocate more than 0.5 locks per bucket */
189         size = min_t(unsigned int, size, tbl->size >> 1);
190
191         if (sizeof(spinlock_t) != 0) {
192 #ifdef CONFIG_NUMA
193                 if (size * sizeof(spinlock_t) > PAGE_SIZE)
194                         tbl->locks = vmalloc(size * sizeof(spinlock_t));
195                 else
196 #endif
197                 tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
198                                            GFP_KERNEL);
199                 if (!tbl->locks)
200                         return -ENOMEM;
201                 for (i = 0; i < size; i++)
202                         spin_lock_init(&tbl->locks[i]);
203         }
204         tbl->locks_mask = size - 1;
205
206         return 0;
207 }
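
/* A worked sizing example for the function above (illustrative only,
 * assuming 8 possible CPUs and the default locks_mul of
 * BUCKET_LOCKS_PER_CPU == 128): nr_pcpus is capped at min(8, 32) = 8,
 * size becomes roundup_pow_of_two(8 * 128) = 1024, and for a 64 bucket
 * table it is then clamped to tbl->size >> 1 = 32 bucket locks.
 */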
208
209 static void bucket_table_free(const struct bucket_table *tbl)
210 {
211         if (tbl)
212                 kvfree(tbl->locks);
213
214         kvfree(tbl);
215 }
216
217 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
218                                                size_t nbuckets)
219 {
220         struct bucket_table *tbl = NULL;
221         size_t size;
222         int i;
223
224         size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
225         if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
226                 tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
227         if (tbl == NULL)
228                 tbl = vzalloc(size);
229         if (tbl == NULL)
230                 return NULL;
231
232         tbl->size = nbuckets;
233
234         if (alloc_bucket_locks(ht, tbl) < 0) {
235                 bucket_table_free(tbl);
236                 return NULL;
237         }
238
239         for (i = 0; i < nbuckets; i++)
240                 INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
241
242         return tbl;
243 }
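
/* Allocation strategy note (values illustrative, assuming 4 KiB pages and
 * 8 byte bucket pointers): with PAGE_ALLOC_COSTLY_ORDER == 3 the kzalloc()
 * attempt above covers tables up to 32 KiB, i.e. roughly 4000 buckets. The
 * attempt is made with __GFP_NOWARN | __GFP_NORETRY so a failed high order
 * request gives up quickly and quietly, falling back to vzalloc().
 */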
244
245 /**
246  * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
247  * @ht:         hash table
248  * @new_size:   new table size
249  */
250 bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
251 {
252         /* Expand table when exceeding 75% load */
253         return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
254                (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
255 }
256 EXPORT_SYMBOL_GPL(rht_grow_above_75);
257
258 /**
259  * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
260  * @ht:         hash table
261  * @new_size:   new table size
262  */
263 bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
264 {
265         /* Shrink table beneath 30% load */
266         return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
267                (atomic_read(&ht->shift) > ht->p.min_shift);
268 }
269 EXPORT_SYMBOL_GPL(rht_shrink_below_30);
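
/* A worked example of the default watermarks above (illustrative only):
 * when growing towards a 128 bucket table, rht_grow_above_75() returns
 * true once nelems exceeds 128 / 4 * 3 = 96; when shrinking towards a
 * 64 bucket table, rht_shrink_below_30() returns true once nelems drops
 * below 64 * 3 / 10 = 19, provided max_shift/min_shift permit the resize.
 */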
270
271 static void lock_buckets(struct bucket_table *new_tbl,
272                          struct bucket_table *old_tbl, unsigned int hash)
273         __acquires(old_bucket_lock)
274 {
275         spin_lock_bh(bucket_lock(old_tbl, hash));
276         if (new_tbl != old_tbl)
277                 spin_lock_bh_nested(bucket_lock(new_tbl, hash),
278                                     RHT_LOCK_NESTED);
279 }
280
281 static void unlock_buckets(struct bucket_table *new_tbl,
282                            struct bucket_table *old_tbl, unsigned int hash)
283         __releases(old_bucket_lock)
284 {
285         if (new_tbl != old_tbl)
286                 spin_unlock_bh(bucket_lock(new_tbl, hash));
287         spin_unlock_bh(bucket_lock(old_tbl, hash));
288 }
289
290 /**
291  * Unlink entries on the given bucket which hash to a different bucket.
292  *
293  * Returns true if more work needs to be performed on the bucket.
294  */
295 static bool hashtable_chain_unzip(struct rhashtable *ht,
296                                   const struct bucket_table *new_tbl,
297                                   struct bucket_table *old_tbl,
298                                   size_t old_hash)
299 {
300         struct rhash_head *he, *p, *next;
301         unsigned int new_hash, new_hash2;
302
303         ASSERT_BUCKET_LOCK(ht, old_tbl, old_hash);
304
305         /* Old bucket empty, no work needed. */
306         p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
307                                    old_hash);
308         if (rht_is_a_nulls(p))
309                 return false;
310
311         new_hash = head_hashfn(ht, new_tbl, p);
312         ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);
313
314         /* Advance p along the old bucket chain while the next node still
315          * hashes to the same new bucket, so that p ends up as the last
316          * node of that leading run.
317          */
318         rht_for_each_continue(he, p->next, old_tbl, old_hash) {
319                 new_hash2 = head_hashfn(ht, new_tbl, he);
320                 ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash2);
321
322                 if (new_hash != new_hash2)
323                         break;
324                 p = he;
325         }
326         rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);
327
328         /* Find the subsequent node which does hash to the same new
329          * bucket as node p, or the nulls marker if no such node exists.
330          */
331         INIT_RHT_NULLS_HEAD(next, ht, old_hash);
332         if (!rht_is_a_nulls(he)) {
333                 rht_for_each_continue(he, he->next, old_tbl, old_hash) {
334                         if (head_hashfn(ht, new_tbl, he) == new_hash) {
335                                 next = he;
336                                 break;
337                         }
338                 }
339         }
340
341         /* Set p's next pointer to that subsequent node pointer,
342          * bypassing the nodes which do not hash to p's new bucket.
343          */
344         rcu_assign_pointer(p->next, next);
345
346         p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
347                                    old_hash);
348
349         return !rht_is_a_nulls(p);
350 }
351
352 static void link_old_to_new(struct rhashtable *ht, struct bucket_table *new_tbl,
353                             unsigned int new_hash, struct rhash_head *entry)
354 {
355         ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);
356
357         rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
358 }
359
360 /**
361  * rhashtable_expand - Expand hash table while allowing concurrent lookups
362  * @ht:         the hash table to expand
363  *
364  * A secondary bucket array is allocated and the hash entries are migrated
365  * while keeping them on both lists until the end of the RCU grace period.
366  *
367  * This function may only be called in a context where it is safe to call
368  * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
369  *
370  * The caller must ensure that no concurrent resizing occurs by holding
371  * ht->mutex.
372  *
373  * It is valid to have concurrent insertions and deletions protected by per
374  * bucket locks or concurrent RCU protected lookups and traversals.
375  */
376 int rhashtable_expand(struct rhashtable *ht)
377 {
378         struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
379         struct rhash_head *he;
380         unsigned int new_hash, old_hash;
381         bool complete = false;
382
383         ASSERT_RHT_MUTEX(ht);
384
385         new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
386         if (new_tbl == NULL)
387                 return -ENOMEM;
388
389         atomic_inc(&ht->shift);
390
391         /* Make insertions go into the new, empty table right away. Deletions
392          * and lookups will be attempted in both tables until we synchronize.
393          * The synchronize_rcu() guarantees that the new table is picked up
394          * so no new additions go into the old table while we relink.
395          */
396         rcu_assign_pointer(ht->future_tbl, new_tbl);
397         synchronize_rcu();
398
399         /* For each new bucket, search the corresponding old bucket for the
400          * first entry that hashes to the new bucket, and link the end of the
401          * newly formed bucket chain (containing entries added to the future
402          * table) to that entry. Since all the entries which will end up in
403          * the new bucket appear in the same old bucket, this constructs an
404          * entirely valid new hash table, but with multiple buckets
405          * "zipped" together into a single imprecise chain.
406          */
407         for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
408                 old_hash = rht_bucket_index(old_tbl, new_hash);
409                 lock_buckets(new_tbl, old_tbl, new_hash);
410                 rht_for_each(he, old_tbl, old_hash) {
411                         if (head_hashfn(ht, new_tbl, he) == new_hash) {
412                                 link_old_to_new(ht, new_tbl, new_hash, he);
413                                 break;
414                         }
415                 }
416                 unlock_buckets(new_tbl, old_tbl, new_hash);
417         }
418
419         /* Unzip interleaved hash chains */
420         while (!complete && !ht->being_destroyed) {
421                 /* Wait for readers. All new readers will see the new
422                  * table, and thus no references to the old table will
423                  * remain.
424                  */
425                 synchronize_rcu();
426
427                 /* For each bucket in the old table (each of which
428                  * contains items from multiple buckets of the new
429                  * table): ...
430                  */
431                 complete = true;
432                 for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
433                         lock_buckets(new_tbl, old_tbl, old_hash);
434
435                         if (hashtable_chain_unzip(ht, new_tbl, old_tbl,
436                                                   old_hash))
437                                 complete = false;
438
439                         unlock_buckets(new_tbl, old_tbl, old_hash);
440                 }
441         }
442
443         rcu_assign_pointer(ht->tbl, new_tbl);
444         synchronize_rcu();
445
446         bucket_table_free(old_tbl);
447         return 0;
448 }
449 EXPORT_SYMBOL_GPL(rhashtable_expand);
450
451 /**
452  * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
453  * @ht:         the hash table to shrink
454  *
455  * This function may only be called in a context where it is safe to call
456  * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
457  *
458  * The caller must ensure that no concurrent resizing occurs by holding
459  * ht->mutex.
460  *
463  *
464  * It is valid to have concurrent insertions and deletions protected by per
465  * bucket locks or concurrent RCU protected lookups and traversals.
466  */
467 int rhashtable_shrink(struct rhashtable *ht)
468 {
469         struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
470         unsigned int new_hash;
471
472         ASSERT_RHT_MUTEX(ht);
473
474         new_tbl = bucket_table_alloc(ht, tbl->size / 2);
475         if (new_tbl == NULL)
476                 return -ENOMEM;
477
478         rcu_assign_pointer(ht->future_tbl, new_tbl);
479         synchronize_rcu();
480
481         /* Link the first entry in the old bucket to the end of the
482          * bucket in the new table. As entries are concurrently being
483          * added to the new table, lock down the new bucket. As we
484          * always divide the size in half when shrinking, each bucket
485          * in the new table maps to exactly two buckets in the old
486          * table.
487          */
488         for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
489                 lock_buckets(new_tbl, tbl, new_hash);
490
491                 rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
492                                    tbl->buckets[new_hash]);
493                 ASSERT_BUCKET_LOCK(ht, tbl, new_hash + new_tbl->size);
494                 rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
495                                    tbl->buckets[new_hash + new_tbl->size]);
496
497                 unlock_buckets(new_tbl, tbl, new_hash);
498         }
499
500         /* Publish the new, valid hash table */
501         rcu_assign_pointer(ht->tbl, new_tbl);
502         atomic_dec(&ht->shift);
503
504         /* Wait for readers. No new readers will have references to the
505          * old hash table.
506          */
507         synchronize_rcu();
508
509         bucket_table_free(tbl);
510
511         return 0;
512 }
513 EXPORT_SYMBOL_GPL(rhashtable_shrink);
514
515 static void rht_deferred_worker(struct work_struct *work)
516 {
517         struct rhashtable *ht;
518         struct bucket_table *tbl;
519         struct rhashtable_walker *walker;
520
521         ht = container_of(work, struct rhashtable, run_work);
522         mutex_lock(&ht->mutex);
523         if (ht->being_destroyed)
524                 goto unlock;
525
526         tbl = rht_dereference(ht->tbl, ht);
527
528         list_for_each_entry(walker, &ht->walkers, list)
529                 walker->resize = true;
530
531         if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
532                 rhashtable_expand(ht);
533         else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
534                 rhashtable_shrink(ht);
535
536 unlock:
537         mutex_unlock(&ht->mutex);
538 }
539
540 static void rhashtable_probe_expand(struct rhashtable *ht)
541 {
542         const struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
543         const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
544
545         /* Only adjust the table if no resizing is currently in progress. */
546         if (tbl == new_tbl && ht->p.grow_decision &&
547             ht->p.grow_decision(ht, tbl->size))
548                 schedule_work(&ht->run_work);
549 }
550
551 static void rhashtable_probe_shrink(struct rhashtable *ht)
552 {
553         const struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
554         const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
555
556         /* Only adjust the table if no resizing is currently in progress. */
557         if (tbl == new_tbl && ht->p.shrink_decision &&
558             ht->p.shrink_decision(ht, tbl->size))
559                 schedule_work(&ht->run_work);
560 }
561
562 static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
563                                 struct bucket_table *tbl, u32 hash)
564 {
565         struct rhash_head *head;
566
567         hash = rht_bucket_index(tbl, hash);
568         head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
569
570         ASSERT_BUCKET_LOCK(ht, tbl, hash);
571
572         if (rht_is_a_nulls(head))
573                 INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
574         else
575                 RCU_INIT_POINTER(obj->next, head);
576
577         rcu_assign_pointer(tbl->buckets[hash], obj);
578
579         atomic_inc(&ht->nelems);
580
581         rhashtable_probe_expand(ht);
582 }
583
584 /**
585  * rhashtable_insert - insert object into hash table
586  * @ht:         hash table
587  * @obj:        pointer to hash head inside object
588  *
589  * Will take a per bucket spinlock to protect against concurrent mutations
590  * on the same bucket. Multiple insertions may occur in parallel unless
591  * they map to the same bucket lock.
592  *
593  * It is safe to call this function from atomic context.
594  *
595  * Will trigger an automatic deferred table resizing if the size grows
596  * beyond the watermark indicated by grow_decision() which can be passed
597  * to rhashtable_init().
598  */
599 void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
600 {
601         struct bucket_table *tbl, *old_tbl;
602         unsigned hash;
603
604         rcu_read_lock();
605
606         tbl = rht_dereference_rcu(ht->future_tbl, ht);
607         old_tbl = rht_dereference_rcu(ht->tbl, ht);
608         hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
609
610         lock_buckets(tbl, old_tbl, hash);
611         __rhashtable_insert(ht, obj, tbl, hash);
612         unlock_buckets(tbl, old_tbl, hash);
613
614         rcu_read_unlock();
615 }
616 EXPORT_SYMBOL_GPL(rhashtable_insert);
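
/* A minimal usage sketch (illustrative only, not part of this file), using
 * the struct test_obj layout from the rhashtable_init() example below and a
 * hypothetical table my_ht:
 *
 *	struct test_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 42;
 *	rhashtable_insert(&my_ht, &obj->node);
 */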
617
618 /**
619  * rhashtable_remove - remove object from hash table
620  * @ht:         hash table
621  * @obj:        pointer to hash head inside object
622  *
623  * Since the hash chain is singly linked, the removal operation needs to
624  * walk the bucket chain upon removal. The removal operation is thus
625  * considerably slower if the hash table is not correctly sized.
626  *
627  * Will automatically shrink the table via rhashtable_shrink() if the
628  * shrink_decision function specified at rhashtable_init() returns true.
629  *
630  * The caller must ensure that no concurrent table mutations occur. It is
631  * however valid to have concurrent lookups if they are RCU protected.
632  */
633 bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
634 {
635         struct bucket_table *tbl, *new_tbl, *old_tbl;
636         struct rhash_head __rcu **pprev;
637         struct rhash_head *he, *he2;
638         unsigned int hash, new_hash;
639         bool ret = false;
640
641         rcu_read_lock();
642         old_tbl = rht_dereference_rcu(ht->tbl, ht);
643         tbl = new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
644         new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
645
646         lock_buckets(new_tbl, old_tbl, new_hash);
647 restart:
648         hash = rht_bucket_index(tbl, new_hash);
649         pprev = &tbl->buckets[hash];
650         rht_for_each(he, tbl, hash) {
651                 if (he != obj) {
652                         pprev = &he->next;
653                         continue;
654                 }
655
656                 ASSERT_BUCKET_LOCK(ht, tbl, hash);
657
658                 if (old_tbl->size > new_tbl->size && tbl == old_tbl &&
659                     !rht_is_a_nulls(obj->next) &&
660                     head_hashfn(ht, tbl, obj->next) != hash) {
661                         rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
662                 } else if (unlikely(old_tbl->size < new_tbl->size && tbl == new_tbl)) {
663                         rht_for_each_continue(he2, obj->next, tbl, hash) {
664                                 if (head_hashfn(ht, tbl, he2) == hash) {
665                                         rcu_assign_pointer(*pprev, he2);
666                                         goto found;
667                                 }
668                         }
669
670                         rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
671                 } else {
672                         rcu_assign_pointer(*pprev, obj->next);
673                 }
674
675 found:
676                 ret = true;
677                 break;
678         }
679
680         /* The entry may be linked in either 'tbl', 'future_tbl', or both.
681          * 'future_tbl' only exists for a short period of time during
682          * resizing. Thus traversing both is fine and the added cost is
683          * resizing. Thus traversing both is fine and the added cost is
684          * only rarely incurred.
685         if (tbl != old_tbl) {
686                 tbl = old_tbl;
687                 goto restart;
688         }
689
690         unlock_buckets(new_tbl, old_tbl, new_hash);
691
692         if (ret) {
693                 atomic_dec(&ht->nelems);
694                 rhashtable_probe_shrink(ht);
695         }
696
697         rcu_read_unlock();
698
699         return ret;
700 }
701 EXPORT_SYMBOL_GPL(rhashtable_remove);
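
/* A minimal usage sketch (illustrative only): removing the object inserted
 * in the sketch above. Because concurrent RCU readers may still hold a
 * reference, the object is only freed after readers are done:
 *
 *	if (rhashtable_remove(&my_ht, &obj->node)) {
 *		synchronize_rcu();
 *		kfree(obj);
 *	}
 */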
702
703 struct rhashtable_compare_arg {
704         struct rhashtable *ht;
705         const void *key;
706 };
707
708 static bool rhashtable_compare(void *ptr, void *arg)
709 {
710         struct rhashtable_compare_arg *x = arg;
711         struct rhashtable *ht = x->ht;
712
713         return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
714 }
715
716 /**
717  * rhashtable_lookup - lookup key in hash table
718  * @ht:         hash table
719  * @key:        pointer to key
720  *
721  * Computes the hash value for the key and traverses the bucket chain looking
722  * for an entry with an identical key. The first matching entry is returned.
723  *
724  * This lookup function may only be used for fixed key hash tables (key_len
725  * parameter set). It will BUG() if used inappropriately.
726  *
727  * Lookups may occur in parallel with hashtable mutations and resizing.
728  */
729 void *rhashtable_lookup(struct rhashtable *ht, const void *key)
730 {
731         struct rhashtable_compare_arg arg = {
732                 .ht = ht,
733                 .key = key,
734         };
735
736         BUG_ON(!ht->p.key_len);
737
738         return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
739 }
740 EXPORT_SYMBOL_GPL(rhashtable_lookup);
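
/* A minimal usage sketch (illustrative only): a fixed length key lookup on
 * the hypothetical table my_ht. The returned object is only guaranteed to
 * stay valid while the RCU read lock is held:
 *
 *	int key = 42;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&my_ht, &key);
 *	if (obj)
 *		pr_info("found key %d\n", obj->key);
 *	rcu_read_unlock();
 */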
741
742 /**
743  * rhashtable_lookup_compare - search hash table with compare function
744  * @ht:         hash table
745  * @key:        the pointer to the key
746  * @compare:    compare function, must return true on match
747  * @arg:        argument passed on to compare function
748  *
749  * Traverses the bucket chain behind the provided hash value and calls the
750  * specified compare function for each entry.
751  *
752  * Lookups may occur in parallel with hashtable mutations and resizing.
753  *
754  * Returns the first entry on which the compare function returned true.
755  */
756 void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
757                                 bool (*compare)(void *, void *), void *arg)
758 {
759         const struct bucket_table *tbl, *old_tbl;
760         struct rhash_head *he;
761         u32 hash;
762
763         rcu_read_lock();
764
765         old_tbl = rht_dereference_rcu(ht->tbl, ht);
766         tbl = rht_dereference_rcu(ht->future_tbl, ht);
767         hash = key_hashfn(ht, key, ht->p.key_len);
768 restart:
769         rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
770                 if (!compare(rht_obj(ht, he), arg))
771                         continue;
772                 rcu_read_unlock();
773                 return rht_obj(ht, he);
774         }
775
776         if (unlikely(tbl != old_tbl)) {
777                 tbl = old_tbl;
778                 goto restart;
779         }
780         rcu_read_unlock();
781
782         return NULL;
783 }
784 EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
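
/* A minimal sketch of a caller-supplied compare function (illustrative only,
 * names are hypothetical); the key pointer is still hashed with the
 * configured hashfn and key_len, only the match decision is delegated:
 *
 *	static bool my_cmp(void *ptr, void *arg)
 *	{
 *		return ((struct test_obj *)ptr)->key == *(int *)arg;
 *	}
 *
 *	int wanted = 42;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup_compare(&my_ht, &wanted, my_cmp, &wanted);
 *	rcu_read_unlock();
 */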
785
786 /**
787  * rhashtable_lookup_insert - lookup and insert object into hash table
788  * @ht:         hash table
789  * @obj:        pointer to hash head inside object
790  *
791  * Locks down the bucket chain in both the old and new table if a resize
792  * is in progress to ensure that writers can't remove from the old table
793  * and can't insert to the new table during the atomic operation of search
794  * and insertion. Searches for duplicates in both the old and new table if
795  * a resize is in progress.
796  *
797  * This lookup function may only be used for fixed key hash tables (key_len
798  * parameter set). It will BUG() if used inappropriately.
799  *
800  * It is safe to call this function from atomic context.
801  *
802  * Will trigger an automatic deferred table resizing if the size grows
803  * beyond the watermark indicated by grow_decision() which can be passed
804  * to rhashtable_init().
805  */
806 bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
807 {
808         struct rhashtable_compare_arg arg = {
809                 .ht = ht,
810                 .key = rht_obj(ht, obj) + ht->p.key_offset,
811         };
812
813         BUG_ON(!ht->p.key_len);
814
815         return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
816                                                 &arg);
817 }
818 EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
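
/* A minimal usage sketch (illustrative only): insert only if the key is not
 * already present. On a duplicate the table is left unchanged and false is
 * returned, so the caller still owns the object:
 *
 *	if (!rhashtable_lookup_insert(&my_ht, &obj->node)) {
 *		kfree(obj);
 *		return -EEXIST;
 *	}
 */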
819
820 /**
821  * rhashtable_lookup_compare_insert - search and insert object to hash table
822  *                                    with compare function
823  * @ht:         hash table
824  * @obj:        pointer to hash head inside object
825  * @compare:    compare function, must return true on match
826  * @arg:        argument passed on to compare function
827  *
828  * Locks down the bucket chain in both the old and new table if a resize
829  * is in progress to ensure that writers can't remove from the old table
830  * and can't insert to the new table during the atomic operation of search
831  * and insertion. Searches for duplicates in both the old and new table if
832  * a resize is in progress.
833  *
834  * Lookups may occur in parallel with hashtable mutations and resizing.
835  *
836  * Will trigger an automatic deferred table resizing if the size grows
837  * beyond the watermark indicated by grow_decision() which can be passed
838  * to rhashtable_init().
839  */
840 bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
841                                       struct rhash_head *obj,
842                                       bool (*compare)(void *, void *),
843                                       void *arg)
844 {
845         struct bucket_table *new_tbl, *old_tbl;
846         u32 new_hash;
847         bool success = true;
848
849         BUG_ON(!ht->p.key_len);
850
851         rcu_read_lock();
852         old_tbl = rht_dereference_rcu(ht->tbl, ht);
853         new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
854         new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
855
856         lock_buckets(new_tbl, old_tbl, new_hash);
857
858         if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
859                                       compare, arg)) {
860                 success = false;
861                 goto exit;
862         }
863
864         __rhashtable_insert(ht, obj, new_tbl, new_hash);
865
866 exit:
867         unlock_buckets(new_tbl, old_tbl, new_hash);
868         rcu_read_unlock();
869
870         return success;
871 }
872 EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);
873
874 /**
875  * rhashtable_walk_init - Initialise an iterator
876  * @ht:         Table to walk over
877  * @iter:       Hash table Iterator
878  *
879  * This function prepares a hash table walk.
880  *
881  * Note that if you restart a walk after rhashtable_walk_stop you
882  * may see the same object twice.  Also, you may miss objects if
883  * there are removals in between rhashtable_walk_stop and the next
884  * call to rhashtable_walk_start.
885  *
886  * For a completely stable walk you should construct your own data
887  * structure outside the hash table.
888  *
889  * This function may sleep so you must not call it from interrupt
890  * context or with spin locks held.
891  *
892  * You must call rhashtable_walk_exit if this function returns
893  * successfully.
894  */
895 int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
896 {
897         iter->ht = ht;
898         iter->p = NULL;
899         iter->slot = 0;
900         iter->skip = 0;
901
902         iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
903         if (!iter->walker)
904                 return -ENOMEM;
905
906         mutex_lock(&ht->mutex);
907         list_add(&iter->walker->list, &ht->walkers);
908         mutex_unlock(&ht->mutex);
909
910         return 0;
911 }
912 EXPORT_SYMBOL_GPL(rhashtable_walk_init);
913
914 /**
915  * rhashtable_walk_exit - Free an iterator
916  * @iter:       Hash table Iterator
917  *
918  * This function frees resources allocated by rhashtable_walk_init.
919  */
920 void rhashtable_walk_exit(struct rhashtable_iter *iter)
921 {
922         mutex_lock(&iter->ht->mutex);
923         list_del(&iter->walker->list);
924         mutex_unlock(&iter->ht->mutex);
925         kfree(iter->walker);
926 }
927 EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
928
929 /**
930  * rhashtable_walk_start - Start a hash table walk
931  * @iter:       Hash table iterator
932  *
933  * Start a hash table walk.  Note that we take the RCU lock in all
934  * cases including when we return an error.  So you must always call
935  * rhashtable_walk_stop to clean up.
936  *
937  * Returns zero if successful.
938  *
939  * Returns -EAGAIN if a resize event occurred.  Note that the iterator
940  * will rewind back to the beginning and you may use it immediately
941  * by calling rhashtable_walk_next.
942  */
943 int rhashtable_walk_start(struct rhashtable_iter *iter)
944 {
945         rcu_read_lock();
946
947         if (iter->walker->resize) {
948                 iter->slot = 0;
949                 iter->skip = 0;
950                 iter->walker->resize = false;
951                 return -EAGAIN;
952         }
953
954         return 0;
955 }
956 EXPORT_SYMBOL_GPL(rhashtable_walk_start);
957
958 /**
959  * rhashtable_walk_next - Return the next object and advance the iterator
960  * @iter:       Hash table iterator
961  *
962  * Note that you must call rhashtable_walk_stop when you are finished
963  * with the walk.
964  *
965  * Returns the next object or NULL when the end of the table is reached.
966  *
967  * Returns -EAGAIN if a resize event occurred.  Note that the iterator
968  * will rewind back to the beginning and you may continue to use it.
969  */
970 void *rhashtable_walk_next(struct rhashtable_iter *iter)
971 {
972         const struct bucket_table *tbl;
973         struct rhashtable *ht = iter->ht;
974         struct rhash_head *p = iter->p;
975         void *obj = NULL;
976
977         tbl = rht_dereference_rcu(ht->tbl, ht);
978
979         if (p) {
980                 p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
981                 goto next;
982         }
983
984         for (; iter->slot < tbl->size; iter->slot++) {
985                 int skip = iter->skip;
986
987                 rht_for_each_rcu(p, tbl, iter->slot) {
988                         if (!skip)
989                                 break;
990                         skip--;
991                 }
992
993 next:
994                 if (!rht_is_a_nulls(p)) {
995                         iter->skip++;
996                         iter->p = p;
997                         obj = rht_obj(ht, p);
998                         goto out;
999                 }
1000
1001                 iter->skip = 0;
1002         }
1003
1004         iter->p = NULL;
1005
1006 out:
1007         if (iter->walker->resize) {
1008                 iter->p = NULL;
1009                 iter->slot = 0;
1010                 iter->skip = 0;
1011                 iter->walker->resize = false;
1012                 return ERR_PTR(-EAGAIN);
1013         }
1014
1015         return obj;
1016 }
1017 EXPORT_SYMBOL_GPL(rhashtable_walk_next);
1018
1019 /**
1020  * rhashtable_walk_stop - Finish a hash table walk
1021  * @iter:       Hash table iterator
1022  *
1023  * Finish a hash table walk.
1024  */
1025 void rhashtable_walk_stop(struct rhashtable_iter *iter)
1026 {
1027         rcu_read_unlock();
1028         iter->p = NULL;
1029 }
1030 EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
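
/* A minimal walker sketch (illustrative only): iterating over all entries of
 * the hypothetical table my_ht and simply re-walking when a resize is
 * signalled through ERR_PTR(-EAGAIN). The return values of
 * rhashtable_walk_init() and rhashtable_walk_start() are ignored here for
 * brevity; -EAGAIN from the latter only means the iterator was rewound:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_init(&my_ht, &iter);
 *	rhashtable_walk_start(&iter);
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj))
 *			continue;
 *		pr_info("key %d\n", obj->key);
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */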
1031
1032 static size_t rounded_hashtable_size(struct rhashtable_params *params)
1033 {
1034         return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
1035                    1UL << params->min_shift);
1036 }
1037
1038 /**
1039  * rhashtable_init - initialize a new hash table
1040  * @ht:         hash table to be initialized
1041  * @params:     configuration parameters
1042  *
1043  * Initializes a new hash table based on the provided configuration
1044  * parameters. A table can be configured either with a variable or
1045  * fixed length key:
1046  *
1047  * Configuration Example 1: Fixed length keys
1048  * struct test_obj {
1049  *      int                     key;
1050  *      void *                  my_member;
1051  *      struct rhash_head       node;
1052  * };
1053  *
1054  * struct rhashtable_params params = {
1055  *      .head_offset = offsetof(struct test_obj, node),
1056  *      .key_offset = offsetof(struct test_obj, key),
1057  *      .key_len = sizeof(int),
1058  *      .hashfn = jhash,
1059  *      .nulls_base = (1U << RHT_BASE_SHIFT),
1060  * };
1061  *
1062  * Configuration Example 2: Variable length keys
1063  * struct test_obj {
1064  *      [...]
1065  *      struct rhash_head       node;
1066  * };
1067  *
1068  * u32 my_hash_fn(const void *data, u32 seed)
1069  * {
1070  *      struct test_obj *obj = data;
1071  *
1072  *      return [... hash ...];
1073  * }
1074  *
1075  * struct rhashtable_params params = {
1076  *      .head_offset = offsetof(struct test_obj, node),
1077  *      .hashfn = jhash,
1078  *      .obj_hashfn = my_hash_fn,
1079  * };
1080  */
1081 int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
1082 {
1083         struct bucket_table *tbl;
1084         size_t size;
1085
1086         size = HASH_DEFAULT_SIZE;
1087
1088         if ((params->key_len && !params->hashfn) ||
1089             (!params->key_len && !params->obj_hashfn))
1090                 return -EINVAL;
1091
1092         if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
1093                 return -EINVAL;
1094
1095         params->min_shift = max_t(size_t, params->min_shift,
1096                                   ilog2(HASH_MIN_SIZE));
1097
1098         if (params->nelem_hint)
1099                 size = rounded_hashtable_size(params);
1100
1101         memset(ht, 0, sizeof(*ht));
1102         mutex_init(&ht->mutex);
1103         memcpy(&ht->p, params, sizeof(*params));
1104         INIT_LIST_HEAD(&ht->walkers);
1105
1106         if (params->locks_mul)
1107                 ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
1108         else
1109                 ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
1110
1111         tbl = bucket_table_alloc(ht, size);
1112         if (tbl == NULL)
1113                 return -ENOMEM;
1114
1115         atomic_set(&ht->nelems, 0);
1116         atomic_set(&ht->shift, ilog2(tbl->size));
1117         RCU_INIT_POINTER(ht->tbl, tbl);
1118         RCU_INIT_POINTER(ht->future_tbl, tbl);
1119
1120         if (!ht->p.hash_rnd)
1121                 get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
1122
1123         if (ht->p.grow_decision || ht->p.shrink_decision)
1124                 INIT_WORK(&ht->run_work, rht_deferred_worker);
1125
1126         return 0;
1127 }
1128 EXPORT_SYMBOL_GPL(rhashtable_init);
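
/* A minimal initialisation sketch (illustrative only) that also enables the
 * automatic deferred resizing, using the watermark helpers exported above
 * and the struct test_obj from Configuration Example 1; my_ht is a
 * hypothetical table:
 *
 *	struct rhashtable my_ht;
 *
 *	struct rhashtable_params params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset = offsetof(struct test_obj, key),
 *		.key_len = sizeof(int),
 *		.hashfn = jhash,
 *		.nulls_base = (1U << RHT_BASE_SHIFT),
 *		.grow_decision = rht_grow_above_75,
 *		.shrink_decision = rht_shrink_below_30,
 *	};
 *
 *	int err = rhashtable_init(&my_ht, &params);
 */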
1129
1130 /**
1131  * rhashtable_destroy - destroy hash table
1132  * @ht:         the hash table to destroy
1133  *
1134  * Frees the bucket array. This function is not RCU safe, therefore the caller
1135  * has to make sure that no resizing may happen by unpublishing the hashtable
1136  * and waiting for an RCU grace period before releasing the bucket array.
1137  */
1138 void rhashtable_destroy(struct rhashtable *ht)
1139 {
1140         ht->being_destroyed = true;
1141
1142         if (ht->p.grow_decision || ht->p.shrink_decision)
1143                 cancel_work_sync(&ht->run_work);
1144
1145         mutex_lock(&ht->mutex);
1146         bucket_table_free(rht_dereference(ht->tbl, ht));
1147         mutex_unlock(&ht->mutex);
1148 }
1149 EXPORT_SYMBOL_GPL(rhashtable_destroy);