/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>

#include <linux/rhashtable-types.h>
/*
 * The end of the chain is marked with a special nulls marker which has
 * the least significant bit set.
 */
/* Maximum chain length before rehash
 *
 * The maximum (not average) chain length grows with the size of the hash
 * table, at a rate of (log N)/(log log N).
 *
 * The value of 16 is selected so that even if the hash table grew to
 * 2^32 you would not expect the maximum chain length to exceed it
 * unless we are under attack (or extremely unlucky).
 *
 * As this limit is only to detect attacks, we don't need to set it to a
 * lower value as you'd need the chain length to vastly exceed 16 to have
 * any real effect on the system.
 */
#define RHT_ELASTICITY	16u
/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @nest: Number of bits of first-level nested table.
 * @rehash: Current bucket being rehashed
 * @hash_rnd: Random seed to fold into hash
 * @locks_mask: Mask to apply before accessing locks[]
 * @locks: Array of spinlocks protecting individual buckets
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @ntbl: Nested table used when out of memory.
 * @buckets: size * hash buckets
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		nest;
	unsigned int		rehash;
	u32			hash_rnd;
	unsigned int		locks_mask;
	spinlock_t		*locks;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
/*
 * NULLS_MARKER() expects a hash value with the low
 * bits most likely to be significant, and it discards
 * the msb.
 * We give it an address, in which the bottom 2 bits are
 * always 0, and the msb might be significant.
 * So we shift the address down one bit to align with
 * expectations and avoid losing a significant bit.
 */
#define	RHT_NULLS_MARKER(ptr)	\
	((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1))
#define INIT_RHT_NULLS_HEAD(ptr)	\
	((ptr) = RHT_NULLS_MARKER(&(ptr)))
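
/*
 * Illustrative note (not part of the API): because the bottom two bits
 * of a bucket address are zero, RHT_NULLS_MARKER(&bucket) works out to
 * the bucket address with its least significant bit set.  Each bucket
 * therefore gets a distinct end-of-chain marker, which is what lets
 * __rhashtable_lookup() below compare the marker it reached against
 * RHT_NULLS_MARKER(head) to detect that a traversal wandered onto a
 * different chain during a concurrent rehash.
 */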
static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}
static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}
static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return hash & (tbl->size - 1);
}
static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
	const void *key, const struct rhashtable_params params,
	unsigned int hash_rnd)
{
	unsigned int hash;

	/* params must be equal to ht->p if it isn't constant. */
	if (!__builtin_constant_p(params.key_len))
		hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
	else if (params.key_len) {
		unsigned int key_len = params.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, hash_rnd);
		else if (key_len & (sizeof(u32) - 1))
			hash = jhash(key, key_len, hash_rnd);
		else
			hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
	} else {
		unsigned int key_len = ht->p.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, hash_rnd);
		else
			hash = jhash(key, key_len, hash_rnd);
	}

	return hash;
}
static inline unsigned int rht_key_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const void *key, const struct rhashtable_params params)
{
	unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);

	return rht_bucket_index(tbl, hash);
}
static inline unsigned int rht_head_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const struct rhash_head *he, const struct rhashtable_params params)
{
	const char *ptr = rht_obj(ht, he);

	return likely(params.obj_hashfn) ?
	       rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
							    ht->p.key_len,
						       tbl->hash_rnd)) :
	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}
/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
				     const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}
/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
				       const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > ht->p.min_size;
}
/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) > tbl->size &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}
/**
 * rht_grow_above_max - returns true if table is above maximum
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) >= ht->max_elems;
}
/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking as locking the bucket in both
 * tables during resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
					  unsigned int hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */
void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj);

void rhashtable_walk_enter(struct rhashtable *ht,
			   struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);

static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
{
	(void)rhashtable_walk_start_check(iter);
}

void *rhashtable_walk_next(struct rhashtable_iter *iter);
void *rhashtable_walk_peek(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
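
/*
 * Walk API usage sketch (illustrative only; "struct test_obj" is a
 * hypothetical entry type).  rhashtable_walk_next() hands back objects,
 * or ERR_PTR(-EAGAIN) when a resize forced the iteration to restart:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		... use obj; do not sleep until rhashtable_walk_stop() ...
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */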
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg);
void rhashtable_destroy(struct rhashtable *ht);

struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
					    unsigned int hash);
struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
						   struct bucket_table *tbl,
						   unsigned int hash);
#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })
static inline struct rhash_head __rcu *const *rht_bucket(
	const struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_head __rcu **rht_bucket_var(
	struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_head __rcu **rht_bucket_insert(
	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
				     &tbl->buckets[hash];
}
/**
 * rht_for_each_from - iterate over hash chain from given head
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the &struct rhash_head to start from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each_from(pos, head, tbl, hash) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))
/**
 * rht_for_each - iterate over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_from(pos, *rht_bucket(tbl, hash), tbl, hash)
/**
 * rht_for_each_entry_from - iterate over hash chain from given head
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the &struct rhash_head to start from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_from(tpos, pos, head, tbl, hash, member)	\
	for (pos = rht_dereference_bucket(head, tbl, hash);		\
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	\
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))
/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_from(tpos, pos, *rht_bucket(tbl, hash),	\
				tbl, hash, member)
/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @next:	the &struct rhash_head to use as next in loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)	      \
	for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \
	     next = !rht_is_a_nulls(pos) ?				      \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL;   \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	      \
	     pos = next,						      \
	     next = !rht_is_a_nulls(pos) ?				      \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)
/**
 * rht_for_each_rcu_from - iterate over rcu hash chain from given head
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the &struct rhash_head to start from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_from(pos, head, tbl, hash)			\
	for (({barrier(); }),						\
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		\
	     !rht_is_a_nulls(pos);					\
	     pos = rcu_dereference_raw(pos->next))
/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash) \
	rht_for_each_rcu_from(pos, *rht_bucket(tbl, hash), tbl, hash)
/**
 * rht_for_each_entry_rcu_from - iterate over rcu hash chain from given head
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the &struct rhash_head to start from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_from(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }),						    \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		    \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	    \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)		   \
	rht_for_each_entry_rcu_from(tpos, pos, *rht_bucket(tbl, hash),	   \
				    tbl, hash, member)
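
/*
 * Iteration sketch (illustrative; "struct test_obj" with an embedded
 * member "struct rhash_head node" is hypothetical): dumping one bucket
 * chain under the RCU read lock:
 *
 *	struct test_obj *obj;
 *	struct rhash_head *pos;
 *
 *	rcu_read_lock();
 *	tbl = rht_dereference_rcu(ht->tbl, ht);
 *	rht_for_each_entry_rcu(obj, pos, tbl, hash, node)
 *		pr_info("key %d\n", obj->key);
 *	rcu_read_unlock();
 */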
/**
 * rhl_for_each_rcu - iterate over rcu hash table list
 * @pos:	the &struct rlist_head to use as a loop cursor.
 * @list:	the head of the list
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_rcu(pos, list)					\
	for (pos = list; pos; pos = rcu_dereference_raw(pos->next))
/**
 * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rlist_head to use as a loop cursor.
 * @list:	the head of the list
 * @member:	name of the &struct rlist_head within the hashable struct.
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_entry_rcu(tpos, pos, list, member)			\
	for (pos = list; pos && rht_entry(tpos, pos, member);		\
	     pos = rcu_dereference_raw(pos->next))
static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}
/* Internal function, do not use. */
static inline struct rhash_head *__rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu * const *head;
	struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	head = rht_bucket(tbl, hash);
	do {
		rht_for_each_rcu_from(he, *head, tbl, hash) {
			if (params.obj_cmpfn ?
			    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
			    rhashtable_compare(&arg, rht_obj(ht, he)))
				continue;
			return he;
		}
		/* An object might have been moved to a different hash chain,
		 * while we walk along it - better check and retry.
		 */
	} while (he != RHT_NULLS_MARKER(head));

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;

	return NULL;
}
/**
 * rhashtable_lookup - search hash table
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(ht, key, params);

	return he ? rht_obj(ht, he) : NULL;
}
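
/*
 * Lookup sketch (illustrative; "test_params" and "struct test_obj" are
 * hypothetical and assume a fixed u32 key):
 *
 *	u32 key = 42;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&ht, &key, test_params);
 *	if (obj)
 *		... use obj; it is only guaranteed to stay alive while
 *		    the RCU read-side critical section is held ...
 *	rcu_read_unlock();
 */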
/**
 * rhashtable_lookup_fast - search hash table, without RCU read lock
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Only use this function when you have other mechanisms guaranteeing
 * that the object won't go away after the RCU read lock is released.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	void *obj;

	rcu_read_lock();
	obj = rhashtable_lookup(ht, key, params);
	rcu_read_unlock();

	return obj;
}
/**
 * rhltable_lookup - search hash list table
 * @hlt:	hash list table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key.  All matching entries are returned
 * in a list.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the list of entries that match the given key.
 */
static inline struct rhlist_head *rhltable_lookup(
	struct rhltable *hlt, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);

	return he ? container_of(he, struct rhlist_head, rhead) : NULL;
}
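
/*
 * Duplicate-key lookup sketch (illustrative; "struct test_obj" and its
 * "struct rhlist_head list_node" member are hypothetical): the returned
 * rhlist_head is walked with rhl_for_each_entry_rcu() inside the same
 * RCU read-side critical section:
 *
 *	struct rhlist_head *head, *pos;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	head = rhltable_lookup(&hlt, &key, test_params);
 *	rhl_for_each_entry_rcu(obj, pos, head, list_node)
 *		... every obj here carries the same key ...
 *	rcu_read_unlock();
 */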
/* Internal function, please use rhashtable_insert_fast() instead. This
 * function returns the existing element already in the hash table if there
 * is a clash, otherwise it returns an error via ERR_PTR().
 */
static inline void *__rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct bucket_table *tbl;
	struct rhash_head *head;
	spinlock_t *lock;
	unsigned int hash;
	int elasticity;
	void *data;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);
	spin_lock_bh(lock);

	if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
slow_path:
		spin_unlock_bh(lock);
		rcu_read_unlock();
		return rhashtable_insert_slow(ht, key, obj);
	}

	elasticity = RHT_ELASTICITY;
	pprev = rht_bucket_insert(ht, tbl, hash);
	data = ERR_PTR(-ENOMEM);
	if (!pprev)
		goto out;

	rht_for_each_from(head, *pprev, tbl, hash) {
		struct rhlist_head *plist;
		struct rhlist_head *list;

		elasticity--;
		if (!key ||
		    (params.obj_cmpfn ?
		     params.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		data = rht_obj(ht, head);

		if (!rhlist)
			goto out;

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		goto good;
	}

	if (elasticity <= 0)
		goto slow_path;

	data = ERR_PTR(-E2BIG);
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto out;

	if (unlikely(rht_grow_above_100(ht, tbl)))
		goto slow_path;

	head = rht_dereference_bucket(*pprev, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(*pprev, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

good:
	data = NULL;

out:
	spin_unlock_bh(lock);
	rcu_read_unlock();

	return data;
}
/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 70%.
 */
static inline int rhashtable_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}
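
/*
 * Setup and insertion sketch (illustrative; every name below is
 * hypothetical).  rhashtable_init() and struct rhashtable_params come
 * from <linux/rhashtable-types.h>:
 *
 *	struct test_obj {
 *		u32			key;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.key_len	= sizeof(u32),
 *		.key_offset	= offsetof(struct test_obj, key),
 *		.head_offset	= offsetof(struct test_obj, node),
 *		.automatic_shrinking = true,
 *	};
 *
 *	err = rhashtable_init(&ht, &test_params);
 *	...
 *	err = rhashtable_insert_fast(&ht, &obj->node, test_params);
 *
 * Note that rhashtable_insert_fast() is called with a NULL key and so
 * does not check for an existing entry with the same key; use
 * rhashtable_lookup_insert_fast() when keys must stay unique.
 */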
/**
 * rhltable_insert_key - insert object into hash list table
 * @hlt:	hash list table
 * @key:	the pointer to the key
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 70%.
 */
static inline int rhltable_insert_key(
	struct rhltable *hlt, const void *key, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
						params, true));
}
/**
 * rhltable_insert - insert object into hash list table
 * @hlt:	hash list table
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 70%.
 */
static inline int rhltable_insert(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(&hlt->ht, &list->rhead);

	key += params.key_offset;

	return rhltable_insert_key(hlt, key, list, params);
}
/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * This lookup function may only be used for fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 70%.
 */
static inline int rhashtable_lookup_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);
	void *ret;

	BUG_ON(ht->p.obj_hashfn);

	ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
				       false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}
/**
 * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Just like rhashtable_lookup_insert_fast(), but this function returns the
 * object if it exists, NULL if it did not and the insertion was successful,
 * and an ERR_PTR otherwise.
 */
static inline void *rhashtable_lookup_get_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);

	BUG_ON(ht->p.obj_hashfn);

	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
					false);
}
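
/*
 * Return-value handling sketch (illustrative; "obj" and "test_params"
 * are hypothetical):
 *
 *	struct test_obj *old;
 *
 *	old = rhashtable_lookup_get_insert_fast(&ht, &obj->node,
 *						test_params);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 *	if (old)
 *		... an entry with the same key already existed and
 *		    obj was not inserted ...
 *	else
 *		... obj is now in the table ...
 */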
/**
 * rhashtable_lookup_insert_key - search and insert object to hash table
 *				  with explicit key
 * @ht:		hash table
 * @key:	key
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 70%.
 *
 * Returns zero on success.
 */
static inline int rhashtable_lookup_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	BUG_ON(!ht->p.obj_hashfn || !key);

	ret = __rhashtable_insert_fast(ht, key, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}
/**
 * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
 * @ht:		hash table
 * @key:	key
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 * @data:	pointer to element data already in hashes
 *
 * Just like rhashtable_lookup_insert_key(), but this function returns the
 * object if it exists, NULL if it does not and the insertion was successful,
 * and an ERR_PTR otherwise.
 */
static inline void *rhashtable_lookup_get_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	BUG_ON(!ht->p.obj_hashfn || !key);

	return __rhashtable_insert_fast(ht, key, obj, params, false);
}
/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast_one(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj, const struct rhashtable_params params,
	bool rhlist)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_from(he, *pprev, tbl, hash) {
		struct rhlist_head *list;

		list = container_of(he, struct rhlist_head, rhead);

		if (he != obj) {
			struct rhlist_head __rcu **lpprev;

			pprev = &he->next;

			if (!rhlist)
				continue;

			do {
				lpprev = &list->next;
				list = rht_dereference_bucket(list->next,
							      tbl, hash);
			} while (list && obj != &list->rhead);

			if (!list)
				continue;

			list = rht_dereference_bucket(list->next, tbl, hash);
			RCU_INIT_POINTER(*lpprev, list);
			err = 0;
			break;
		}

		obj = rht_dereference_bucket(obj->next, tbl, hash);
		err = 1;

		if (rhlist) {
			list = rht_dereference_bucket(list->next, tbl, hash);
			if (list) {
				RCU_INIT_POINTER(list->rhead.next, obj);
				obj = &list->rhead;
				err = 0;
			}
		}

		rcu_assign_pointer(*pprev, obj);
		break;
	}

	spin_unlock_bh(lock);

	if (err > 0) {
		atomic_dec(&ht->nelems);
		if (unlikely(ht->p.automatic_shrinking &&
			     rht_shrink_below_30(ht, tbl)))
			schedule_work(&ht->run_work);
		err = 0;
	}

	return err;
}
/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
						   rhlist)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}
/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(ht, obj, params, false);
}
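
/*
 * Removal sketch (illustrative; assumes a hypothetical "struct test_obj"
 * with an embedded "struct rcu_head rcu").  Concurrent readers may still
 * be traversing the object, so free it only after a grace period:
 *
 *	if (rhashtable_remove_fast(&ht, &obj->node, test_params) == 0)
 *		kfree_rcu(obj, rcu);
 */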
/**
 * rhltable_remove - remove object from hash list table
 * @hlt:	hash list table
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhltable_remove(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
}
/* Internal function, please use rhashtable_replace_fast() instead */
static inline int __rhashtable_replace_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj_old, struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	/* Minimally, the old and new objects must have same hash
	 * (which should mean identifiers are the same).
	 */
	hash = rht_head_hashfn(ht, tbl, obj_old, params);
	if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
		return -EINVAL;

	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_from(he, *pprev, tbl, hash) {
		if (he != obj_old) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(obj_new->next, obj_old->next);
		rcu_assign_pointer(*pprev, obj_new);
		err = 0;
		break;
	}

	spin_unlock_bh(lock);

	return err;
}
/**
 * rhashtable_replace_fast - replace an object in hash table
 * @ht:		hash table
 * @obj_old:	pointer to hash head inside object being replaced
 * @obj_new:	pointer to hash head inside object which is new
 * @params:	hash table parameters
 *
 * Replacing an object doesn't affect the number of elements in the hash table
 * or bucket, so we don't need to worry about shrinking or expanding the
 * table.
 *
 * Returns zero on success, -ENOENT if the entry could not be found,
 * -EINVAL if hash is not the same for the old and new objects.
 */
static inline int rhashtable_replace_fast(
	struct rhashtable *ht, struct rhash_head *obj_old,
	struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
						obj_new, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}
/**
 * rhltable_walk_enter - Initialise an iterator
 * @hlt:	Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptable context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
static inline void rhltable_walk_enter(struct rhltable *hlt,
				       struct rhashtable_iter *iter)
{
	return rhashtable_walk_enter(&hlt->ht, iter);
}
/**
 * rhltable_free_and_destroy - free elements and destroy hash list table
 * @hlt:	the hash list table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * See documentation for rhashtable_free_and_destroy.
 */
static inline void rhltable_free_and_destroy(struct rhltable *hlt,
					     void (*free_fn)(void *ptr,
							     void *arg),
					     void *arg)
{
	return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
}
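
/*
 * Teardown sketch (illustrative; "free_obj" is a hypothetical callback):
 * release every element still in the table while destroying it:
 *
 *	static void free_obj(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhltable_free_and_destroy(&hlt, free_obj, NULL);
 */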
static inline void rhltable_destroy(struct rhltable *hlt)
{
	return rhltable_free_and_destroy(hlt, NULL, NULL);
}
#endif /* _LINUX_RHASHTABLE_H */