/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	128UL

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif


static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
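
/* A bucket maps onto its striped lock by masking the hash with
 * locks_mask; a minimal sketch of the mapping performed by
 * rht_bucket_lock() in <linux/rhashtable.h> (shown here for
 * illustration only):
 *
 *	static inline spinlock_t *bucket_lock(const struct bucket_table *tbl,
 *					      u32 hash)
 *	{
 *		return &tbl->locks[hash & tbl->locks_mask];
 *	}
 *
 * Because size is rounded up to a power of two above, the mask spreads
 * buckets evenly across the lock array.
 */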

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
	else
		RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	/* Ensure the new table is visible to readers. */
	smp_wmb();

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}
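
/* The resize machinery above runs in three phases; a rough sketch of
 * the control flow (function names as in this file):
 *
 *	rhashtable_rehash_attach()	link new_tbl as old_tbl->future_tbl
 *					so insertions go to the new table
 *					right away
 *	rhashtable_rehash_chain()	for each old bucket, repeatedly call
 *	    rhashtable_rehash_one()	to relink the chain head into new_tbl
 *	rhashtable_rehash_table()	publish ht->tbl = new_tbl, detach any
 *					walkers, and free old_tbl after a
 *					grace period via call_rcu()
 */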

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht: the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by
 * per-bucket locks, or concurrent RCU-protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht: the hash table to shrink
 *
 * This function shrinks the hash table to the smallest size that will
 * not cause it to expand again right away.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by
 * per-bucket locks, or concurrent RCU-protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int size;
	int err;

	ASSERT_RHT_MUTEX(ht);

	size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static bool rhashtable_check_elasticity(struct rhashtable *ht,
					struct bucket_table *tbl,
					unsigned int hash)
{
	unsigned int elasticity = ht->elasticity;
	struct rhash_head *head;

	rht_for_each(head, tbl, hash)
		if (!--elasticity)
			return true;

	return false;
}

int rhashtable_insert_rehash(struct rhashtable *ht)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, old_tbl);

	size = tbl->size;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* More than two rehashes (not resizes) detected. */
	else if (WARN_ON(old_tbl != tbl && old_tbl->size == size))
		return -EBUSY;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);

int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			   struct rhash_head *obj,
			   struct bucket_table *tbl)
{
	struct rhash_head *head;
	unsigned int hash;
	int err;

	tbl = rhashtable_last_table(ht, tbl);
	hash = head_hashfn(ht, tbl, obj);
	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

	err = -EEXIST;
	if (key && rhashtable_lookup_fast(ht, key, ht->p))
		goto exit;

	err = -EAGAIN;
	if (rhashtable_check_elasticity(ht, tbl, hash) ||
	    rht_grow_above_100(ht, tbl))
		goto exit;

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

exit:
	spin_unlock(rht_bucket_lock(tbl, hash));

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht: Table to walk over
 * @iter: Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	mutex_lock(&ht->mutex);
	iter->walker->tbl = rht_dereference(ht->tbl, ht);
	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter: Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter: Hash table iterator
 *
 * Start a hash table walk. Note that we take the RCU lock in all
 * cases including when we return an error. So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	mutex_lock(&ht->mutex);

	if (iter->walker->tbl)
		list_del(&iter->walker->list);

	rcu_read_lock();

	mutex_unlock(&ht->mutex);

	if (!iter->walker->tbl) {
		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter: Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker->tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker->tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	iter->p = NULL;

out:

	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter: Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker->tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker->list, &tbl->walkers);
	else
		iter->walker->tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
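
/* Putting the walk API together: a minimal sketch of a full walk,
 * assuming a hypothetical struct test_obj with an int key as in the
 * rhashtable_init() examples below. An ERR_PTR(-EAGAIN) from
 * rhashtable_walk_next() only means a resize happened and the
 * iterator was rewound, so the walk simply continues from the start.
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(ht, &iter);	(may sleep)
 *	if (err)
 *		return err;
 *
 *	rhashtable_walk_start(&iter);		(takes the RCU lock)
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj))		(-EAGAIN: rewound by resize)
 *			continue;
 *		pr_info("visited key %d\n", obj->key);
 *	}
 *
 *	rhashtable_walk_stop(&iter);		(drops the RCU lock)
 *	rhashtable_walk_exit(&iter);
 */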

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}
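
/* For example (values illustrative only): with nelem_hint = 100 the
 * initial size is roundup_pow_of_two(100 * 4 / 3) =
 * roundup_pow_of_two(133) = 256 buckets, which keeps the starting
 * load factor safely below the 75% growth threshold checked by
 * rht_grow_above_75().
 */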

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht: hash table to be initialized
 * @params: configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void			*my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on the system.
	 */
	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
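
/* End-to-end usage: a minimal sketch of initialising a table and doing
 * an insert and a lookup with the fast-path helpers from
 * <linux/rhashtable.h>. struct test_obj and test_params are
 * hypothetical names, reusing the fixed-length-key layout from the
 * kernel-doc examples above.
 *
 *	static const struct rhashtable_params test_params = {
 *		.head_offset	= offsetof(struct test_obj, node),
 *		.key_offset	= offsetof(struct test_obj, key),
 *		.key_len	= sizeof(int),
 *	};
 *
 *	struct rhashtable ht;
 *	struct test_obj *obj, *found;
 *	int key = 42, err;
 *
 *	err = rhashtable_init(&ht, &test_params);
 *	...
 *	err = rhashtable_insert_fast(&ht, &obj->node, test_params);
 *	...
 *	rcu_read_lock();
 *	found = rhashtable_lookup_fast(&ht, &key, test_params);
 *	rcu_read_unlock();
 */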

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht: the hash table to destroy
 * @free_fn: callback to release resources of element
 * @arg: pointer passed to free_fn
 *
 * Stops any pending async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	const struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(tbl->buckets[i], ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				free_fn(rht_obj(ht, pos), arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
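
/* Example teardown with per-element cleanup, a sketch assuming the
 * objects were kmalloc'ed and that no RCU readers can still reach them
 * by the time this runs (otherwise defer the free past a grace period):
 *
 *	static void free_obj(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&ht, free_obj, NULL);
 */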

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);