/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	32UL

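/*
 * When a bucket array cannot be allocated as one contiguous chunk
 * (e.g. under GFP_ATOMIC), the buckets are instead spread over a
 * small tree of page-sized arrays.  Each slot in such a "nested"
 * table either points at the next level of the tree or, at the
 * leaves, holds the bucket head itself.
 */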
union nested_table {
	union nested_table __rcu *table;
	struct rhash_head __rcu *bucket;
};

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;
	unsigned int i;

	ntbl = rcu_dereference_raw(ntbl->table);
	if (!ntbl)
		return;

	if (size > len) {
		size >>= shift;
		for (i = 0; i < len; i++)
			nested_table_free(ntbl + i, size);
	}

	kfree(ntbl);
}

static void nested_bucket_table_free(const struct bucket_table *tbl)
{
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int len = 1 << tbl->nest;
	union nested_table *ntbl;
	unsigned int i;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

	kfree(ntbl);
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl->nest)
		nested_bucket_table_free(tbl);

	free_bucket_spinlocks(tbl->locks);
	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      unsigned int shifted,
					      unsigned int nhash)
{
	union nested_table *ntbl;
	int i;

	ntbl = rcu_dereference(*prev);
	if (ntbl)
		return ntbl;

	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

	if (ntbl && shifted) {
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
					    (i << shifted) | nhash);
	}

	rcu_assign_pointer(*prev, ntbl);

	return ntbl;
}

static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
						      size_t nbuckets,
						      gfp_t gfp)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	struct bucket_table *tbl;
	size_t size;

	if (nbuckets < (1 << (shift + 1)))
		return NULL;

	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

	tbl = kzalloc(size, gfp);
	if (!tbl)
		return NULL;

	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
				0, 0)) {
		kfree(tbl);
		return NULL;
	}

	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

	return tbl;
}

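/*
 * Allocation strategy: try a flat bucket array first (plain kzalloc
 * without warning or retry when the context is atomic, kvzalloc
 * otherwise), and fall back to a nested table built from page-sized
 * chunks when a large flat allocation is not possible.
 */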
static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size, max_locks;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	else
		tbl = kvzalloc(size, gfp);

	size = nbuckets;

	if (tbl == NULL && gfp != GFP_KERNEL) {
		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
		nbuckets = 0;
	}
	if (tbl == NULL)
		return NULL;

	tbl->size = size;

	max_locks = size >> 1;
	if (tbl->nest)
		max_locks = min_t(size_t, max_locks, 1U << tbl->nest);

	if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
				   ht->p.locks_mul, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	tbl->hash_rnd = get_random_u32();

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

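/*
 * Move one entry from the old bucket chain to the table being rehashed
 * into.  The chain is consumed from its tail, so relative ordering
 * within a bucket is preserved.  Returns -ENOENT once the old chain is
 * empty, and -EAGAIN when the target table is nested, in which case
 * the rehash is finished later from the worker.
 */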
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
	int err = -EAGAIN;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	if (new_tbl->nest)
		goto out;

	err = -ENOENT;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static int rhashtable_rehash_chain(struct rhashtable *ht,
				   unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;
	int err;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!(err = rhashtable_rehash_one(ht, old_hash)))
		;

	if (err == -ENOENT) {
		old_tbl->rehash++;
		err = 0;
	}
	spin_unlock_bh(old_bucket_lock);

	return err;
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;
	int err;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		err = rhashtable_rehash_chain(ht, old_hash);
		if (err)
			return err;
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{
	struct bucket_table *new_tbl;
	int err;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	return rhashtable_rehash_alloc(ht, old_tbl, size);
}

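/*
 * Deferred resize worker: doubles the table once it is more than 75%
 * full, shrinks it (if automatic_shrinking is set) once it falls below
 * 30%, and replaces a nested table with a flat one of the same size.
 * If a rehash could not be completed, the work is rescheduled.
 */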
static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		err = rhashtable_shrink(ht);
	else if (tbl->nest)
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

	if (!err)
		err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_dereference_raw(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}

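/*
 * Bucket-chain lookup bounded by RHT_ELASTICITY: if that many entries
 * are traversed without finding the key, -EAGAIN is returned so that
 * the caller forces a rehash, keeping chains short even under heavy
 * collisions.
 */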
static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct rhash_head *head;
	int elasticity;

	elasticity = RHT_ELASTICITY;
	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(head, *pprev, tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}

static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct rhash_head __rcu **pprev;
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rcu_dereference(tbl->future_tbl);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	pprev = rht_bucket_insert(ht, tbl, hash);
	if (!pprev)
		return ERR_PTR(-ENOMEM);

	head = rht_dereference_bucket(*pprev, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(*pprev, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}

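/*
 * Try the lookup and insert first in the oldest table that still owns
 * the bucket, then walk any newer tables under nested bucket locks.
 * This way a concurrent rehash can neither lose the new entry nor
 * allow a duplicate to slip in.
 */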
static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int hash;
	spinlock_t *lock;
	void *data;

	tbl = rcu_dereference(ht->tbl);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rcu_dereference(tbl->future_tbl);
	}

	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
	new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
	if (PTR_ERR(new_tbl) != -EEXIST)
		data = ERR_CAST(new_tbl);

	while (!IS_ERR_OR_NULL(new_tbl)) {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		spin_lock_nested(rht_bucket_lock(tbl, hash),
				 SINGLE_DEPTH_NESTING);

		data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
		new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
		if (PTR_ERR(new_tbl) != -EEXIST)
			data = ERR_CAST(new_tbl);

		spin_unlock(rht_bucket_lock(tbl, hash));
	}

	spin_unlock_bh(lock);

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

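/*
 * Note: this is the insertion slow path.  The inline fast paths in
 * <linux/rhashtable.h> are expected to fall back here when an ongoing
 * resize forces the insertion onto a newer table (an assumption about
 * the header, which is not part of this file).
 */
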
/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;
	iter->end_of_table = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start_check - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk at the current iterator position.  Note that we take
 * the RCU lock in all cases including when we return an error.  So you must
 * always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 *
 * rhashtable_walk_start is defined as an inline variant that returns
 * void.  This is preferred in cases where the caller would ignore
 * resize events and always continue.
 */
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (!iter->walker.tbl && !iter->end_of_table) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);

/**
 * __rhashtable_walk_find_next - Find the next element in a table (or the first
 * one in case of a new walk).
 *
 * @iter:	Hash table iterator
 *
 * Returns the found object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.
 */
static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (!tbl)
		return NULL;

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	} else {
		iter->end_of_table = true;
	}

	return NULL;
}

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		/* At the end of this slot, switch to next one and then find
		 * next entry from that point.
		 */
		iter->skip = 0;
		iter->slot++;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_peek - Return the next object but don't advance the iterator
 * @iter:	Hash table iterator
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_peek(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p)
		return rht_obj(ht, ht->rhlist ? &list->rhead : p);

	/* No object found in current iter, find next one in the table. */

	if (iter->skip) {
		/* A nonzero skip value points to the next entry in the table
		 * beyond that last one that was found. Decrement skip so
		 * we find the current value. __rhashtable_walk_find_next
		 * will restore the original value of skip assuming that
		 * the table hasn't changed.
		 */
		iter->skip--;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_peek);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.  Does not reset the iterator to the start of the
 * hash table.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker.list, &tbl->walkers);
	else
		iter->walker.tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);

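/*
 * A minimal walk sketch (illustrative only; "ht" and "struct test_obj"
 * are placeholders):
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		[... use obj, possibly seeing duplicates after -EAGAIN ...]
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */
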
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}

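/* For example, a nelem_hint of 100 gives 100 * 4 / 3 = 133, which is
 * rounded up to 256 buckets (and never less than params->min_size).
 */
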
static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	/* Cap total entries at 2^31 to avoid nelems overflow. */
	ht->max_elems = 1u << 31;

	if (params->max_size) {
		ht->p.max_size = rounddown_pow_of_two(params->max_size);
		if (ht->p.max_size < ht->max_elems / 2)
			ht->max_elems = ht->p.max_size * 2;
	}

	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);

	if (params->nelem_hint)
		size = rounded_hashtable_size(&ht->p);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);

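/*
 * A minimal end-to-end sketch (illustrative only; it assumes the
 * fast-path helpers rhashtable_insert_fast() and
 * rhashtable_lookup_fast() from <linux/rhashtable.h> and reuses
 * struct test_obj from Example 1 above):
 *
 *	static const struct rhashtable_params test_params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset = offsetof(struct test_obj, key),
 *		.key_len = sizeof(int),
 *	};
 *	struct rhashtable ht;
 *	struct test_obj *obj = [... allocate and fill ...];
 *	int key = 1, err;
 *
 *	err = rhashtable_init(&ht, &test_params);
 *	if (!err)
 *		err = rhashtable_insert_fast(&ht, &obj->node, test_params);
 *	obj = rhashtable_lookup_fast(&ht, &key, test_params);
 */
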
/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	/* No rhlist NULLs marking for now. */
	if (params->nulls_base)
		return -EINVAL;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);

static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible that no further write operations
 * occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

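/*
 * A typical teardown sketch (illustrative only; free_obj is a
 * hypothetical callback):
 *
 *	static void free_obj(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&ht, free_obj, NULL);
 */
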
void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

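/*
 * For nested tables the hash is consumed in slices: the low tbl->nest
 * bits index the top-level page, and each further level consumes
 * another PAGE_SHIFT - ilog2(sizeof(void *)) bits until a leaf bucket
 * is reached.
 */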
struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
					    unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	static struct rhash_head __rcu *rhnull =
		(struct rhash_head __rcu *)NULLS_MARKER(0);
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;

	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
						  tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return &rhnull;

	return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);

struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
						   struct bucket_table *tbl,
						   unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;
	unsigned int shifted;
	unsigned int nhash;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	hash >>= tbl->nest;
	nhash = index;
	shifted = tbl->nest;
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift) ? shifted : 0, nhash);

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		nhash |= index << shifted;
		shifted += shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift) ? shifted : 0,
					  nhash);
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);