rhashtable: Fix sleeping inside RCU critical section in walk_stop
lib/rhashtable.c
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	128UL

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
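	/* Worked example (illustrative numbers): with four possible CPUs
	 * and the default locks_mul of 128, the line above gives
	 * roundup_pow_of_two(4 * 128) = 512; the cap below then limits a
	 * 64-bucket table to 64 / 2 = 32 locks.
	 */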

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
	else
		RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht, unsigned old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	/* Ensure the new table is visible to readers. */
	smp_wmb();

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to the smallest size that will
 * not cause it to expand again right away.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
	int err;

	ASSERT_RHT_MUTEX(ht);

	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);
	if (ht->being_destroyed)
		goto unlock;

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

unlock:
	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static bool rhashtable_check_elasticity(struct rhashtable *ht,
					struct bucket_table *tbl,
					unsigned hash)
{
	unsigned elasticity = ht->elasticity;
	struct rhash_head *head;

	rht_for_each(head, tbl, hash)
		if (!--elasticity)
			return true;

	return false;
}

int rhashtable_insert_rehash(struct rhashtable *ht)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, old_tbl);

	size = tbl->size;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* More than two rehashes (not resizes) detected. */
	else if (WARN_ON(old_tbl != tbl && old_tbl->size == size))
		return -EBUSY;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);

int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			   struct rhash_head *obj,
			   struct bucket_table *tbl)
{
	struct rhash_head *head;
	unsigned hash;
	int err;

	tbl = rhashtable_last_table(ht, tbl);
	hash = head_hashfn(ht, tbl, obj);
	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

	err = -EEXIST;
	if (key && rhashtable_lookup_fast(ht, key, ht->p))
		goto exit;

	err = -EAGAIN;
	if (rhashtable_check_elasticity(ht, tbl, hash) ||
	    rht_grow_above_100(ht, tbl))
		goto exit;

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

exit:
	spin_unlock(rht_bucket_lock(tbl, hash));

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	mutex_lock(&ht->mutex);
	iter->walker->tbl = rht_dereference(ht->tbl, ht);
	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	mutex_lock(&ht->mutex);

	if (iter->walker->tbl)
		list_del(&iter->walker->list);

	rcu_read_lock();

	mutex_unlock(&ht->mutex);

	if (!iter->walker->tbl) {
		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker->tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker->tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	iter->p = NULL;

out:
	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker->tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker->list, &tbl->walkers);
	else
		iter->walker->tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
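
/* Usage sketch (illustrative only): a typical walk over a table of
 * hypothetical "struct test_obj" entries, showing the init/start/next/stop
 * sequence and the -EAGAIN handling documented above.  The names my_ht and
 * test_obj are assumptions for illustration:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(&my_ht, &iter);
 *	if (err)
 *		return err;
 *
 *	rhashtable_walk_start(&iter);	(0 or -EAGAIN; RCU held either way)
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;	(a resize rewound the walk)
 *			break;
 *		}
 *		(use obj; the walk may revisit objects after a rewind)
 *	}
 *
 *	rhashtable_walk_stop(&iter);	(drops the RCU read lock)
 *	rhashtable_walk_exit(&iter);
 */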

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
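	/* Worked example (illustrative numbers): nelem_hint = 600 gives
	 * 600 * 4 / 3 = 800, which rounds up to 1024 buckets, unless
	 * min_size is larger.
	 */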
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 *	.obj_cmpfn = my_cmp_fn,
 * };
 *
 * Note that an obj_cmpfn (here the hypothetical my_cmp_fn) is required
 * whenever obj_hashfn is set, as enforced by the -EINVAL check below.
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
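
/* Usage sketch (illustrative only), tying Configuration Example 1 above to
 * the inline fast-path helpers declared in <linux/rhashtable.h>; the names
 * my_ht, key and obj are assumptions, and error handling is abbreviated:
 *
 *	struct rhashtable my_ht;
 *	struct test_obj *obj;	(allocated and initialised elsewhere)
 *	int key = 42;
 *	int err;
 *
 *	err = rhashtable_init(&my_ht, &params);
 *	if (err)
 *		return err;
 *
 *	err = rhashtable_insert_fast(&my_ht, &obj->node, params);
 *	if (err)
 *		goto out_destroy;
 *
 *	obj = rhashtable_lookup_fast(&my_ht, &key, params);
 *
 * out_destroy:
 *	rhashtable_destroy(&my_ht);
 */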

/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);