/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U

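/*
 * When a bucket array is too large to allocate in one piece (e.g. under
 * GFP_ATOMIC), it is built instead as a tree of page-sized arrays: each
 * page holds pointers that either descend to another level (.table) or
 * hold an actual bucket head (.bucket).
 */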
union nested_table {
	union nested_table __rcu *table;
	struct rhash_lock_head __rcu *bucket;
};

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	if (!debug_locks)
		return 1;
	if (unlikely(tbl->nest))
		return 1;
	return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;
	unsigned int i;

	ntbl = rcu_dereference_raw(ntbl->table);
	if (!ntbl)
		return;

	if (size > len) {
		size >>= shift;
		for (i = 0; i < len; i++)
			nested_table_free(ntbl + i, size);
	}

	kfree(ntbl);
}

static void nested_bucket_table_free(const struct bucket_table *tbl)
{
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int len = 1 << tbl->nest;
	union nested_table *ntbl;
	unsigned int i;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

	kfree(ntbl);
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl->nest)
		nested_bucket_table_free(tbl);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      bool leaf)
{
	union nested_table *ntbl;
	int i;

	ntbl = rcu_dereference(*prev);
	if (ntbl)
		return ntbl;

	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

	if (ntbl && leaf) {
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
	}

	if (cmpxchg(prev, NULL, ntbl) == NULL)
		return ntbl;
	/* Raced with another thread. */
	kfree(ntbl);
	return rcu_dereference(*prev);
}

static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
						      size_t nbuckets,
						      gfp_t gfp)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	struct bucket_table *tbl;
	size_t size;

	if (nbuckets < (1 << (shift + 1)))
		return NULL;

	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

	tbl = kzalloc(size, gfp);
	if (!tbl)
		return NULL;

	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
				false)) {
		kfree(tbl);
		return NULL;
	}

	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

	return tbl;
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;
	static struct lock_class_key __key;

	tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);

	size = nbuckets;

	if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
		nbuckets = 0;
	}

	if (tbl == NULL)
		return NULL;

	lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);

	tbl->size = size;

	rcu_head_init(&tbl->rcu);
	INIT_LIST_HEAD(&tbl->walkers);

	tbl->hash_rnd = get_random_u32();

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i]);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht,
				 struct rhash_lock_head __rcu **bkt,
				 unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
	int err = -EAGAIN;
	struct rhash_head *head, *next, *entry;
	struct rhash_head __rcu **pprev = NULL;
	unsigned int new_hash;

	if (new_tbl->nest)
		goto out;

	err = -ENOENT;

	rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
			  old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING);

	head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry);

	if (pprev)
		rcu_assign_pointer(*pprev, next);
	else
		/* Need to preserve the bit lock. */
		rht_assign_locked(bkt, next);

out:
	return err;
}

static int rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
	int err;

	if (!bkt)
		return 0;
	rht_lock(old_tbl, bkt);

	while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
		;

	if (err == -ENOENT)
		err = 0;
	rht_unlock(old_tbl, bkt);

	return err;
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * As cmpxchg() provides strong barriers, we do not need
	 * rcu_assign_pointer().
	 */

	if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL)
		return -EEXIST;

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;
	int err;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		err = rhashtable_rehash_chain(ht, old_hash);
		if (err)
			return err;
		cond_resched();
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 * We do this inside the locked region so that
	 * rhashtable_walk_stop() can use rcu_head_after_call_rcu()
	 * to detect that the table is already being freed and must
	 * not be re-linked.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
	spin_unlock(&ht->lock);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{
	struct bucket_table *new_tbl;
	int err;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to the smallest size that would
 * not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
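 *
 * As a worked example of the sizing rule below: with nelems = 100 the
 * target is roundup_pow_of_two(100 * 3 / 2) = 256 buckets, which is
 * then clamped to at least ht->p.min_size.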
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	return rhashtable_rehash_alloc(ht, old_tbl, size);
}

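/*
 * Deferred resize worker: grow by doubling when the table is more than
 * 75% full, shrink when automatic_shrinking is set and occupancy drops
 * below 30%, and rehash a nested table at the same size to flatten it
 * back into a plain bucket array.
 */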
static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		err = rhashtable_shrink(ht);
	else if (tbl->nest)
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

	if (!err || err == -EEXIST) {
		int nerr;

		nerr = rhashtable_rehash_table(ht);
		err = err ?: nerr;
	}

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

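/*
 * Slow-path rehash triggered from an insert that could not make
 * progress: try to allocate and attach a new table atomically; on
 * -ENOMEM, defer the allocation to the worker in process context.
 */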
static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_access_pointer(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}

static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct rhash_lock_head __rcu **bkt,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev = NULL;
	struct rhash_head *head;
	int elasticity;

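	/* Bound the chain walk: if more than RHT_ELASTICITY entries hash
	 * to the same bucket, give up and report -EAGAIN below so the
	 * caller forces a rehash.
	 */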
	elasticity = RHT_ELASTICITY;
	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		if (pprev)
			rcu_assign_pointer(*pprev, obj);
		else
			/* Need to preserve the bit lock */
			rht_assign_locked(bkt, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}

static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct rhash_lock_head __rcu **bkt,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	head = rht_ptr(bkt, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	/* bkt is always the head of the list, so it holds
	 * the lock, which we need to preserve
	 */
	rht_assign_locked(bkt, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}

static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	struct rhash_lock_head __rcu **bkt;
	unsigned int hash;
	void *data;

	new_tbl = rcu_dereference(ht->tbl);

	do {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		if (rcu_access_pointer(tbl->future_tbl))
			/* Failure is OK */
			bkt = rht_bucket_var(tbl, hash);
		else
			bkt = rht_bucket_insert(ht, tbl, hash);
		if (bkt == NULL) {
			new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
			data = ERR_PTR(-EAGAIN);
		} else {
			rht_lock(tbl, bkt);
			data = rhashtable_lookup_one(ht, bkt, tbl,
						     hash, key, obj);
			new_tbl = rhashtable_insert_one(ht, bkt, tbl,
							hash, obj, data);
			if (PTR_ERR(new_tbl) != -EEXIST)
				data = ERR_CAST(new_tbl);

			rht_unlock(tbl, bkt);
		}
	} while (!IS_ERR_OR_NULL(new_tbl));

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptable context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
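 *
 * A minimal walk loop sketch (handle() stands in for whatever the
 * caller does with each object; error handling trimmed):
 *
 *	struct rhashtable_iter iter;
 *	void *obj;
 *
 *	rhashtable_walk_enter(ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		handle(obj);
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);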
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;
	iter->end_of_table = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start_check - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk at the current iterator position.  Note that we take
 * the RCU lock in all cases including when we return an error.  So you must
 * always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 *
 * rhashtable_walk_start is defined as an inline variant that returns
 * void. This is preferred in cases where the caller would ignore
 * resize events and always continue.
 */
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;
	bool rhlist = ht->rhlist;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (iter->end_of_table)
		return 0;
	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		iter->slot = 0;
		iter->skip = 0;
		return -EAGAIN;
	}

	if (iter->p && !rhlist) {
		/*
		 * We need to validate that 'p' is still in the table, and
		 * if so, update 'skip'
		 */
		struct rhash_head *p;
		int skip = 0;
		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			skip++;
			if (p == iter->p) {
				iter->skip = skip;
				goto found;
			}
		}
		iter->p = NULL;
	} else if (iter->p && rhlist) {
		/* Need to validate that 'list' is still in the table, and
		 * if so, update 'skip' and 'p'.
		 */
		struct rhash_head *p;
		struct rhlist_head *list;
		int skip = 0;
		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			for (list = container_of(p, struct rhlist_head, rhead);
			     list;
			     list = rcu_dereference(list->next)) {
				skip++;
				if (list == iter->list) {
					iter->p = p;
					iter->skip = skip;
					goto found;
				}
			}
		}
		iter->p = NULL;
	}
found:
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);

/**
 * __rhashtable_walk_find_next - Find the next element in a table (or the first
 * one in case of a new walk).
 *
 * @iter:	Hash table iterator
 *
 * Returns the found object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.
 */
static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (!tbl)
		return NULL;

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	} else {
		iter->end_of_table = true;
	}

	return NULL;
}

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		/* At the end of this slot, switch to next one and then find
		 * next entry from that point.
		 */
		iter->skip = 0;
		iter->slot++;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_peek - Return the next object but don't advance the iterator
 * @iter:	Hash table iterator
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_peek(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p)
		return rht_obj(ht, ht->rhlist ? &list->rhead : p);

	/* No object found in current iter, find next one in the table. */

	if (iter->skip) {
		/* A nonzero skip value points to the next entry in the table
		 * beyond the last one that was found. Decrement skip so
		 * we find the current value. __rhashtable_walk_find_next
		 * will restore the original value of skip assuming that
		 * the table hasn't changed.
		 */
		iter->skip--;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_peek);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.  Does not reset the iterator to the start of the
 * hash table.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
		/* This bucket table is being freed, don't re-link it. */
		iter->walker.tbl = NULL;
	else
		list_add(&iter->walker.list, &tbl->walkers);
	spin_unlock(&ht->lock);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);

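/*
 * Pick the initial table size for a ~75% load factor: for example,
 * nelem_hint = 100 gives roundup_pow_of_two(100 * 4 / 3) = 256 buckets
 * (133 rounded up to the next power of two), clamped below by
 * min_size; without a hint, HASH_DEFAULT_SIZE is used instead.
 */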
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	size_t retsize;

	if (params->nelem_hint)
		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
			      (unsigned long)params->min_size);
	else
		retsize = max(HASH_DEFAULT_SIZE,
			      (unsigned long)params->min_size);

	return retsize;
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
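 *
 * With either parameter block, the table is then set up and used along
 * these lines (a sketch with a struct test_obj entry as above; error
 * handling elided):
 *
 *	struct rhashtable ht;
 *
 *	rhashtable_init(&ht, &params);
 *	rhashtable_insert_fast(&ht, &obj->node, params);
 *	rhashtable_destroy(&ht);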
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	/* Cap total entries at 2^31 to avoid nelems overflow. */
	ht->max_elems = 1u << 31;

	if (params->max_size) {
		ht->p.max_size = rounddown_pow_of_two(params->max_size);
		if (ht->p.max_size < ht->max_elems / 2)
			ht->max_elems = ht->p.max_size * 2;
	}

	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);

	size = rounded_hashtable_size(&ht->p);

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	/*
	 * This is API initialization and thus we need to guarantee the
	 * initial rhashtable allocation. Upon failure, retry with the
	 * smallest possible size with __GFP_NOFAIL semantics.
	 */
	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (unlikely(tbl == NULL)) {
		size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
		tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
	}

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);

/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);

static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops any pending async resize. If free_fn is defined, it is invoked
 * for each element to release its resources. Please note that RCU
 * protected readers may still be accessing the elements; resources must
 * be released in a compatible manner. The bucket array is then freed.
 *
 * This function may sleep to wait for an async resize to complete. The
 * caller is responsible for ensuring that no further write operations
 * occur in parallel.
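 *
 * A typical teardown sketch, where free_entry() is a caller-supplied
 * callback that simply kfree()s each object:
 *
 *	static void free_entry(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&ht, free_entry, NULL);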
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl, *next_tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
restart:
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			cond_resched();
			for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	next_tbl = rht_dereference(tbl->future_tbl, ht);
	bucket_table_free(tbl);
	if (next_tbl) {
		tbl = next_tbl;
		goto restart;
	}
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

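/*
 * Walk a nested bucket table: the top level consumes tbl->nest hash
 * bits, and each further level consumes another 'shift' bits (one
 * page's worth of pointers), until a leaf page of buckets is reached.
 */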
struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
						   unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;

	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
						  tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return NULL;

	return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(__rht_bucket_nested);

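/*
 * Lookup variant: when the nested bucket has never been allocated,
 * return a pointer to a static NULLS marker so read paths simply see
 * an empty bucket.
 */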
struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
						 unsigned int hash)
{
	static struct rhash_lock_head __rcu *rhnull;

	if (!rhnull)
		INIT_RHT_NULLS_HEAD(rhnull);
	return __rht_bucket_nested(tbl, hash) ?: &rhnull;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);

struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
							struct bucket_table *tbl,
							unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	hash >>= tbl->nest;
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift));

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift));
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);