/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/compiler.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

/*
 * The end of the chain is marked with a special nulls marker which has
 * the following format:
 *
 * +-------+-----------------------------------------------------+-+
 * | Base  |                      Hash                           |1|
 * +-------+-----------------------------------------------------+-+
 *
 * Base (4 bits) : Reserved to distinguish between multiple tables.
 *                 Specified via &struct rhashtable_params.nulls_base.
 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
 * 1 (1 bit)     : Nulls marker (always set)
 *
 * The remaining bits of the next pointer remain unused for now.
 */
#define RHT_BASE_BITS		4
#define RHT_HASH_BITS		27
#define RHT_BASE_SHIFT		RHT_HASH_BITS

/* Base bits plus 1 bit for nulls marker */
#define RHT_HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

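/* Illustrative arithmetic (a sketch, not part of the API): with nulls_base 0
 * and hash 5, NULLS_MARKER() from <linux/list_nulls.h> yields
 * (5 << 1) | 1 = 0xb, i.e. bit 0 set marks the end of the chain.
 * rht_is_a_nulls() below tests that bit, and rht_get_nulls_value()
 * recovers the value 5 by shifting it back out.
 */
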
struct rhash_head {
	struct rhash_head __rcu		*next;
};

/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @rehash: Current bucket being rehashed
 * @hash_rnd: Random seed to fold into hash
 * @locks_mask: Mask to apply before accessing locks[]
 * @locks: Array of spinlocks protecting individual buckets
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @buckets: size * hash buckets
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		rehash;
	u32			hash_rnd;
	unsigned int		locks_mask;
	spinlock_t		*locks;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct rhash_head __rcu	*buckets[] ____cacheline_aligned_in_smp;
};

/**
 * struct rhashtable_compare_arg - Key for the function rhashtable_compare
 * @ht: Hash table
 * @key: Key to compare against
 */
struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 seed);
typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
			       const void *obj);

struct rhashtable;

/**
 * struct rhashtable_params - Hash table construction parameters
 * @nelem_hint: Hint on number of elements, should be 75% of desired size
 * @key_len: Length of key
 * @key_offset: Offset of key in struct to be hashed
 * @head_offset: Offset of rhash_head in struct to be hashed
 * @max_size: Maximum size while expanding
 * @min_size: Minimum size while shrinking
 * @nulls_base: Base value to generate nulls marker
 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
 * @hashfn: Function to hash key
 * @obj_hashfn: Function to hash object
 * @obj_cmpfn: Function to compare key with object
 */
struct rhashtable_params {
	size_t			nelem_hint;
	size_t			key_len;
	size_t			key_offset;
	size_t			head_offset;
	unsigned int		max_size;
	unsigned int		min_size;
	u32			nulls_base;
	size_t			locks_mul;
	rht_hashfn_t		hashfn;
	rht_obj_hashfn_t	obj_hashfn;
	rht_obj_cmpfn_t		obj_cmpfn;
};

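/* Illustrative parameter setup (a sketch modelled on typical users; the
 * object type and field names below are hypothetical, not part of this
 * header):
 *
 *	struct test_obj {
 *		u32			key;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset = offsetof(struct test_obj, key),
 *		.key_len = sizeof(u32),
 *		.hashfn = jhash,	(from <linux/jhash.h>)
 *		.nulls_base = (1U << RHT_BASE_SHIFT),
 *	};
 */
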
/**
 * struct rhashtable - Hash table handle
 * @tbl: Bucket table
 * @nelems: Number of elements in table
 * @being_destroyed: True if table is set up for destruction
 * @p: Configuration parameters
 * @run_work: Deferred worker to expand/shrink asynchronously
 * @mutex: Mutex to protect current/future table swapping
 */
struct rhashtable {
	struct bucket_table __rcu	*tbl;
	atomic_t			nelems;
	bool				being_destroyed;
	struct rhashtable_params	p;
	struct work_struct		run_work;
	struct mutex			mutex;
};

/**
 * struct rhashtable_walker - Hash table walker
 * @list: List entry on list of walkers
 * @tbl: The table that we were walking over
 */
struct rhashtable_walker {
	struct list_head list;
	struct bucket_table *tbl;
};

/**
 * struct rhashtable_iter - Hash table iterator, fits into netlink cb
 * @ht: Table to iterate through
 * @p: Current pointer
 * @walker: Associated rhashtable walker
 * @slot: Current slot
 * @skip: Number of entries to skip in slot
 */
struct rhashtable_iter {
	struct rhashtable *ht;
	struct rhash_head *p;
	struct rhashtable_walker *walker;
	unsigned int slot;
	unsigned int skip;
};

static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
{
	return NULLS_MARKER(ht->p.nulls_base + hash);
}

#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
	((ptr) = (typeof(ptr)) rht_marker(ht, hash))

static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr) >> 1;
}

static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}

static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
}

static inline unsigned int rht_key_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const void *key, const struct rhashtable_params params)
{
	return rht_bucket_index(tbl, params.hashfn(key, params.key_len ?:
						   ht->p.key_len,
						   tbl->hash_rnd));
}

static inline unsigned int rht_head_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const struct rhash_head *he, const struct rhashtable_params params)
{
	const char *ptr = rht_obj(ht, he);

	return likely(params.obj_hashfn) ?
	       rht_bucket_index(tbl, params.obj_hashfn(ptr, tbl->hash_rnd)) :
	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht: hash table
 * @tbl: current table
 *
 * Expansion is suppressed once @max_size from the table parameters
 * has been reached.
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
				     const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht: hash table
 * @tbl: current table
 *
 * Shrinking stops once the table has reached @min_size.
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
				       const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > ht->p.min_size;
}

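/* Worked example: for tbl->size == 1024 the table grows once nelems exceeds
 * 768 (1024 / 4 * 3, subject to max_size) and shrinks once nelems falls
 * below 307 (1024 * 3 / 10, subject to min_size).
 */
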
/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking, as taking the bucket lock in both tables
 * during a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
					  unsigned int hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */

int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params);

int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			   struct rhash_head *obj,
			   struct bucket_table *old_tbl);

int rhashtable_expand(struct rhashtable *ht);
int rhashtable_shrink(struct rhashtable *ht);

int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);

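/* Illustrative walk (a sketch with error handling trimmed; 'struct test_obj'
 * is a hypothetical entry type embedding a struct rhash_head):
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_init(ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj))
 *			continue;	(-EAGAIN: a resize interrupted us)
 *		...
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */
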
void rhashtable_destroy(struct rhashtable *ht);

#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })

/**
 * rht_for_each_continue - continue iterating over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each_continue(pos, head, tbl, hash) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)

/**
 * rht_for_each_entry_continue - continue iterating over hash chain
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
	rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \
				    tbl, hash, member)

/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @next: the &struct rhash_head to use as next in loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
	for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
	     next = !rht_is_a_nulls(pos) ? \
		    rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = next, \
	     next = !rht_is_a_nulls(pos) ? \
		    rht_dereference_bucket(pos->next, tbl, hash) : NULL)

/**
 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_continue(pos, head, tbl, hash) \
	for (({barrier(); }), \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash) \
	rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)

/**
 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }), \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash); \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
	rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
					tbl, hash, member)

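/* Illustrative bucket scan under rcu_read_lock() (a sketch; 'struct test_obj',
 * its 'node' member and 'key' are hypothetical):
 *
 *	struct test_obj *obj;
 *	struct rhash_head *pos;
 *	const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
 *	unsigned int hash = rht_key_hashfn(ht, tbl, &key, params);
 *
 *	rht_for_each_entry_rcu(obj, pos, tbl, hash, node)
 *		...
 */
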
static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}

/**
 * rhashtable_lookup_fast - search hash table, inlined version
 * @ht: hash table
 * @key: the pointer to the key
 * @params: hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Returns the first entry whose key compares equal, or NULL if none is found.
 */
static inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	const struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	rht_for_each_rcu(he, tbl, hash) {
		if (params.obj_cmpfn ?
		    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
		    rhashtable_compare(&arg, rht_obj(ht, he)))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;
	rcu_read_unlock();

	return NULL;
}

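/* Illustrative lookup (a sketch; 'test_params' must match the parameters
 * used at insertion time, and 'struct test_obj' is hypothetical):
 *
 *	u32 key = 42;
 *	struct test_obj *obj;
 *
 *	obj = rhashtable_lookup_fast(ht, &key, test_params);
 *	if (obj)
 *		...
 */
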
static inline int __rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	int err = -EEXIST;
	struct bucket_table *tbl, *new_tbl;
	struct rhash_head *head;
	spinlock_t *lock;
	unsigned int hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	/* Because we have already taken the bucket lock in tbl,
	 * if we find that future_tbl is not yet visible then
	 * that guarantees all other insertions of the same entry
	 * will also grab the bucket lock in tbl because until
	 * the rehash completes ht->tbl won't be changed.
	 */
	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(new_tbl)) {
		err = rhashtable_insert_slow(ht, key, obj, new_tbl);
		goto out;
	}

	if (!key)
		goto skip_lookup;

	rht_for_each(head, tbl, hash) {
		if (unlikely(!(params.obj_cmpfn ?
			       params.obj_cmpfn(&arg, rht_obj(ht, head)) :
			       rhashtable_compare(&arg, rht_obj(ht, head)))))
			goto out;
	}

skip_lookup:
	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

out:
	spin_unlock_bh(lock);
	rcu_read_unlock();

	return err;
}

/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark (75% load, see rht_grow_above_75()).
 */
static inline int rhashtable_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	return __rhashtable_insert_fast(ht, NULL, obj, params);
}

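/* Illustrative insertion (a sketch; 'obj' and 'test_params' are the
 * hypothetical names used above):
 *
 *	int err = rhashtable_insert_fast(ht, &obj->node, test_params);
 *
 * A non-zero return leaves the object outside the table.
 */
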
/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for a fixed-key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark (75% load, see rht_grow_above_75()).
 */
static inline int rhashtable_lookup_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);

	BUG_ON(ht->p.obj_hashfn);

	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
					params);
}

/**
 * rhashtable_lookup_insert_key - search and insert object into hash table
 *				  with explicit key
 * @ht: hash table
 * @key: key
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark (75% load, see rht_grow_above_75()).
 *
 * Returns zero on success.
 */
static inline int rhashtable_lookup_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	BUG_ON(!ht->p.obj_hashfn || !key);

	return __rhashtable_insert_fast(ht, key, obj, params);
}

static inline int __rhashtable_remove_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj, const struct rhashtable_params params)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(*pprev, obj->next);
		err = 0;
		break;
	}

	spin_unlock_bh(lock);

	return err;
}

/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if the number of elements falls
 * below the 30% watermark (see rht_shrink_below_30()).
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	if (err)
		goto out;

	atomic_dec(&ht->nelems);
	if (rht_shrink_below_30(ht, tbl))
		schedule_work(&ht->run_work);

out:
	rcu_read_unlock();

	return err;
}

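/* Illustrative removal (a sketch; names as above). Readers may still hold
 * RCU references, so the caller must wait for a grace period before
 * freeing the object:
 *
 *	if (rhashtable_remove_fast(ht, &obj->node, test_params) == 0)
 *		kfree_rcu(obj, rcu_head);
 *
 * where 'rcu_head' is a hypothetical struct rcu_head member of test_obj.
 */
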
#endif /* _LINUX_RHASHTABLE_H */