/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2005 SGI, Christoph Lameter
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 * Copyright (C) 2016 Intel, Matthew Wilcox
 * Copyright (C) 2016 Intel, Ross Zwisler
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/radix-tree.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/rcupdate.h>
#include <linux/preempt.h>		/* in_interrupt() */
/*
 * Radix tree node cache.
 */
static struct kmem_cache *radix_tree_node_cachep;

/*
 * The radix tree is variable-height, so an insert operation not only has
 * to build the branch to its corresponding item, it also has to build the
 * branch to existing items if the size has to be increased (by
 * radix_tree_extend).
 *
 * The worst case is a zero height tree with just a single item at index 0,
 * and then inserting an item at index ULONG_MAX. This requires 2 new branches
 * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
 * Hence:
 */
#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
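
/*
 * Worked example (illustrative, assuming a typical 64-bit configuration):
 * with RADIX_TREE_MAP_SHIFT == 6, RADIX_TREE_MAX_PATH is
 * DIV_ROUND_UP(64, 6) == 11 levels, so each CPU preloads up to
 * 11 * 2 - 1 == 21 nodes.
 */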
/*
 * Per-cpu pool of preloaded nodes
 */
struct radix_tree_preload {
	unsigned nr;
	/* nodes->private_data points to next preallocated node */
	struct radix_tree_node *nodes;
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };

static inline void *node_to_entry(void *ptr)
{
	return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
}

#define RADIX_TREE_RETRY	node_to_entry(NULL)
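
/*
 * Illustrative sketch (not part of this file): node_to_entry() pairs with
 * entry_to_node() and radix_tree_is_internal_node() (from radix-tree.h) to
 * round-trip a node pointer through the internal-entry encoding:
 *
 *	struct radix_tree_node *node = ...;
 *	void *entry = node_to_entry(node);
 *
 *	BUG_ON(!radix_tree_is_internal_node(entry));
 *	BUG_ON(entry_to_node(entry) != node);
 *
 * RADIX_TREE_RETRY is the internal encoding of NULL, which remains
 * distinguishable from an empty slot (a plain NULL).
 */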
#ifdef CONFIG_RADIX_TREE_MULTIORDER
/* Sibling slots point directly to another slot in the same node */
static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node)
{
	void **ptr = node;
	return (parent->slots <= ptr) &&
			(ptr < parent->slots + RADIX_TREE_MAP_SIZE);
}
#else
static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node)
{
	return false;
}
#endif

static inline unsigned long get_slot_offset(struct radix_tree_node *parent,
						 void **slot)
{
	return slot - parent->slots;
}
static unsigned radix_tree_descend(struct radix_tree_node *parent,
			struct radix_tree_node **nodep, unsigned offset)
{
	void **entry = rcu_dereference_raw(parent->slots[offset]);

#ifdef CONFIG_RADIX_TREE_MULTIORDER
	if (radix_tree_is_internal_node(entry)) {
		unsigned long siboff = get_slot_offset(parent, entry);
		if (siboff < RADIX_TREE_MAP_SIZE) {
			offset = siboff;
			entry = rcu_dereference_raw(parent->slots[offset]);
		}
	}
#endif

	*nodep = (void *)entry;
	return offset;
}
static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
{
	return root->gfp_mask & __GFP_BITS_MASK;
}

static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__set_bit(offset, node->tags[tag]);
}

static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__clear_bit(offset, node->tags[tag]);
}

static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	return test_bit(offset, node->tags[tag]);
}

static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
{
	root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
}

static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{
	root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
}

static inline void root_tag_clear_all(struct radix_tree_root *root)
{
	root->gfp_mask &= __GFP_BITS_MASK;
}

static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
{
	return (__force int)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
}

static inline unsigned root_tags_get(struct radix_tree_root *root)
{
	return (__force unsigned)root->gfp_mask >> __GFP_BITS_SHIFT;
}
/*
 * Returns 1 if any slot in the node has this tag set.
 * Otherwise returns 0.
 */
static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
{
	unsigned idx;
	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
		if (node->tags[tag][idx])
			return 1;
	}
	return 0;
}
/**
 * radix_tree_find_next_bit - find the next set bit in a memory region
 *
 * @addr: The address to base the search on
 * @size: The bitmap size in bits
 * @offset: The bitnumber to start searching at
 *
 * Unrollable variant of find_next_bit() for constant size arrays.
 * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
 * Returns next bit offset, or size if nothing found.
 */
static __always_inline unsigned long
radix_tree_find_next_bit(const unsigned long *addr,
			 unsigned long size, unsigned long offset)
{
	if (!__builtin_constant_p(size))
		return find_next_bit(addr, size, offset);

	if (offset < size) {
		unsigned long tmp;

		addr += offset / BITS_PER_LONG;
		tmp = *addr >> (offset % BITS_PER_LONG);
		if (tmp)
			return __ffs(tmp) + offset;
		offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
		while (offset < size) {
			tmp = *++addr;
			if (tmp)
				return __ffs(tmp) + offset;
			offset += BITS_PER_LONG;
		}
	}
	return size;
}
#ifndef __KERNEL__
static void dump_node(struct radix_tree_node *node, unsigned long index)
{
	unsigned long i;

	pr_debug("radix node: %p offset %d tags %lx %lx %lx shift %d count %d parent %p\n",
		node, node->offset,
		node->tags[0][0], node->tags[1][0], node->tags[2][0],
		node->shift, node->count, node->parent);

	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
		unsigned long first = index | (i << node->shift);
		unsigned long last = first | ((1UL << node->shift) - 1);
		void *entry = node->slots[i];
		if (!entry)
			continue;
		if (is_sibling_entry(node, entry)) {
			pr_debug("radix sblng %p offset %ld val %p indices %ld-%ld\n",
					entry, i,
					*(void **)entry_to_node(entry),
					first, last);
		} else if (!radix_tree_is_internal_node(entry)) {
			pr_debug("radix entry %p offset %ld indices %ld-%ld\n",
					entry, i, first, last);
		} else {
			dump_node(entry_to_node(entry), first);
		}
	}
}

/* For debug */
static void radix_tree_dump(struct radix_tree_root *root)
{
	pr_debug("radix root: %p rnode %p tags %x\n",
			root, root->rnode,
			root->gfp_mask >> __GFP_BITS_SHIFT);
	if (!radix_tree_is_internal_node(root->rnode))
		return;
	dump_node(entry_to_node(root->rnode), 0);
}
#endif
/*
 * This assumes that the caller has performed appropriate preallocation, and
 * that the caller has pinned this thread of control to the current CPU.
 */
static struct radix_tree_node *
radix_tree_node_alloc(struct radix_tree_root *root)
{
	struct radix_tree_node *ret = NULL;
	gfp_t gfp_mask = root_gfp_mask(root);

	/*
	 * Preload code isn't irq safe and it doesn't make sense to use
	 * preloading during an interrupt anyway as all the allocations have
	 * to be atomic. So just do normal allocation when in interrupt.
	 */
	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
		struct radix_tree_preload *rtp;

		/*
		 * Even if the caller has preloaded, try to allocate from the
		 * cache first for the new node to get accounted.
		 */
		ret = kmem_cache_alloc(radix_tree_node_cachep,
				       gfp_mask | __GFP_ACCOUNT | __GFP_NOWARN);
		if (ret)
			goto out;

		/*
		 * Provided the caller has preloaded here, we will always
		 * succeed in getting a node here (and never reach
		 * kmem_cache_alloc)
		 */
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr) {
			ret = rtp->nodes;
			rtp->nodes = ret->private_data;
			ret->private_data = NULL;
			rtp->nr--;
		}
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(ret);
		goto out;
	}
	ret = kmem_cache_alloc(radix_tree_node_cachep,
			       gfp_mask | __GFP_ACCOUNT);
out:
	BUG_ON(radix_tree_is_internal_node(ret));
	return ret;
}
static void radix_tree_node_rcu_free(struct rcu_head *head)
{
	struct radix_tree_node *node =
			container_of(head, struct radix_tree_node, rcu_head);
	int i;

	/*
	 * must only free zeroed nodes into the slab. radix_tree_shrink
	 * can leave us with a non-NULL entry in the first slot, so clear
	 * that here to make sure.
	 */
	for (i = 0; i < RADIX_TREE_MAX_TAGS; i++)
		tag_clear(node, i, 0);

	node->slots[0] = NULL;
	node->count = 0;

	kmem_cache_free(radix_tree_node_cachep, node);
}

static inline void
radix_tree_node_free(struct radix_tree_node *node)
{
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}
/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
static int __radix_tree_preload(gfp_t gfp_mask)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;
	int ret = -ENOMEM;

	preempt_disable();
	rtp = this_cpu_ptr(&radix_tree_preloads);
	while (rtp->nr < RADIX_TREE_PRELOAD_SIZE) {
		preempt_enable();
		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
		if (node == NULL)
			goto out;
		preempt_disable();
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr < RADIX_TREE_PRELOAD_SIZE) {
			node->private_data = rtp->nodes;
			rtp->nodes = node;
			rtp->nr++;
		} else {
			kmem_cache_free(radix_tree_node_cachep, node);
		}
	}
	ret = 0;
out:
	return ret;
}
/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
int radix_tree_preload(gfp_t gfp_mask)
{
	/* Warn on non-sensical use... */
	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
	return __radix_tree_preload(gfp_mask);
}
EXPORT_SYMBOL(radix_tree_preload);
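
/*
 * Example of the preload protocol (an illustrative sketch of a typical
 * caller, not taken from this file; "mytree" and "mylock" are
 * hypothetical):
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&mylock);
 *	error = radix_tree_insert(&mytree, index, item);
 *	spin_unlock(&mylock);
 *	radix_tree_preload_end();
 *
 * radix_tree_preload_end() re-enables the preemption that a successful
 * radix_tree_preload() left disabled.
 */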
/*
 * The same as above function, except we don't guarantee preloading happens.
 * We do it, if we decide it helps. On success, return zero with preemption
 * disabled. On error, return -ENOMEM with preemption not disabled.
 */
int radix_tree_maybe_preload(gfp_t gfp_mask)
{
	if (gfpflags_allow_blocking(gfp_mask))
		return __radix_tree_preload(gfp_mask);
	/* Preloading doesn't help anything with this gfp mask, skip it */
	preempt_disable();
	return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);
/*
 * The maximum index which can be stored in a radix tree
 */
static inline unsigned long shift_maxindex(unsigned int shift)
{
	return (RADIX_TREE_MAP_SIZE << shift) - 1;
}

static inline unsigned long node_maxindex(struct radix_tree_node *node)
{
	return shift_maxindex(node->shift);
}
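
/*
 * Worked example (illustrative, assuming RADIX_TREE_MAP_SHIFT == 6, i.e.
 * RADIX_TREE_MAP_SIZE == 64): a leaf node has shift 0 and spans indices
 * 0-63 (shift_maxindex(0) == 63); its parent has shift 6 and spans 0-4095
 * (shift_maxindex(6) == 4095); and so on, one level per
 * RADIX_TREE_MAP_SHIFT of index bits.
 */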
static unsigned radix_tree_load_root(struct radix_tree_root *root,
		struct radix_tree_node **nodep, unsigned long *maxindex)
{
	struct radix_tree_node *node = rcu_dereference_raw(root->rnode);

	*nodep = node;

	if (likely(radix_tree_is_internal_node(node))) {
		node = entry_to_node(node);
		*maxindex = node_maxindex(node);
		return node->shift + RADIX_TREE_MAP_SHIFT;
	}

	*maxindex = 0;
	return 0;
}
/*
 *	Extend a radix tree so it can store key @index.
 */
static int radix_tree_extend(struct radix_tree_root *root,
				unsigned long index, unsigned int shift)
{
	struct radix_tree_node *slot;
	unsigned int maxshift;
	int tag;

	/* Figure out what the shift should be.  */
	maxshift = shift;
	while (index > shift_maxindex(maxshift))
		maxshift += RADIX_TREE_MAP_SHIFT;

	slot = root->rnode;
	if (!slot)
		goto out;

	do {
		struct radix_tree_node *node = radix_tree_node_alloc(root);

		if (!node)
			return -ENOMEM;

		/* Propagate the aggregated tag info into the new root */
		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
			if (root_tag_get(root, tag))
				tag_set(node, tag, 0);
		}

		BUG_ON(shift > BITS_PER_LONG);
		node->shift = shift;
		node->offset = 0;
		node->count = 1;
		node->parent = NULL;
		if (radix_tree_is_internal_node(slot))
			entry_to_node(slot)->parent = node;
		node->slots[0] = slot;
		slot = node_to_entry(node);
		rcu_assign_pointer(root->rnode, slot);
		shift += RADIX_TREE_MAP_SHIFT;
	} while (shift <= maxshift);
out:
	return maxshift + RADIX_TREE_MAP_SHIFT;
}
/**
 *	__radix_tree_create	-	create a slot in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@order:		index occupies 2^order aligned slots
 *	@nodep:		returns node
 *	@slotp:		returns slot
 *
 *	Create, if necessary, and return the node and slot for an item
 *	at position @index in the radix tree @root.
 *
 *	Until there is more than one item in the tree, no nodes are
 *	allocated and @root->rnode is used as a direct slot instead of
 *	pointing to a node, in which case *@nodep will be NULL.
 *
 *	Returns -ENOMEM, or 0 for success.
 */
int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
			unsigned order, struct radix_tree_node **nodep,
			void ***slotp)
{
	struct radix_tree_node *node = NULL, *slot;
	unsigned long maxindex;
	unsigned int shift, offset;
	unsigned long max = index | ((1UL << order) - 1);

	shift = radix_tree_load_root(root, &slot, &maxindex);

	/* Make sure the tree is high enough.  */
	if (max > maxindex) {
		int error = radix_tree_extend(root, max, shift);
		if (error < 0)
			return error;
		shift = error;
		slot = root->rnode;
		if (order == shift)
			shift += RADIX_TREE_MAP_SHIFT;
	}

	offset = 0;			/* uninitialised var warning */
	while (shift > order) {
		shift -= RADIX_TREE_MAP_SHIFT;
		if (slot == NULL) {
			/* Have to add a child node.  */
			slot = radix_tree_node_alloc(root);
			if (!slot)
				return -ENOMEM;
			slot->shift = shift;
			slot->offset = offset;
			slot->parent = node;
			if (node) {
				rcu_assign_pointer(node->slots[offset],
							node_to_entry(slot));
				node->count++;
			} else
				rcu_assign_pointer(root->rnode,
							node_to_entry(slot));
		} else if (!radix_tree_is_internal_node(slot))
			break;

		/* Go a level down */
		node = entry_to_node(slot);
		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
		offset = radix_tree_descend(node, &slot, offset);
	}

#ifdef CONFIG_RADIX_TREE_MULTIORDER
	/* Insert pointers to the canonical entry */
	if (order > shift) {
		int i, n = 1 << (order - shift);
		offset = offset & ~(n - 1);
		slot = node_to_entry(&node->slots[offset]);
		for (i = 0; i < n; i++) {
			if (node->slots[offset + i])
				return -EEXIST;
		}

		for (i = 1; i < n; i++) {
			rcu_assign_pointer(node->slots[offset + i], slot);
			node->count++;
		}
	}
#endif

	if (nodep)
		*nodep = node;
	if (slotp)
		*slotp = node ? node->slots + offset : (void **)&root->rnode;
	return 0;
}
/**
 *	__radix_tree_insert    -    insert into a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@order:		key covers the 2^order indices around index
 *	@item:		item to insert
 *
 *	Insert an item into the radix tree at position @index.
 */
int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
			unsigned order, void *item)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	BUG_ON(radix_tree_is_internal_node(item));

	error = __radix_tree_create(root, index, order, &node, &slot);
	if (error)
		return error;
	if (*slot != NULL)
		return -EEXIST;
	rcu_assign_pointer(*slot, item);

	if (node) {
		unsigned offset = get_slot_offset(node, slot);
		node->count++;
		BUG_ON(tag_get(node, 0, offset));
		BUG_ON(tag_get(node, 1, offset));
		BUG_ON(tag_get(node, 2, offset));
	} else {
		BUG_ON(root_tags_get(root));
	}

	return 0;
}
EXPORT_SYMBOL(__radix_tree_insert);
/**
 *	__radix_tree_lookup	-	lookup an item in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@nodep:		returns node
 *	@slotp:		returns slot
 *
 *	Lookup and return the item at position @index in the radix
 *	tree @root.
 *
 *	Until there is more than one item in the tree, no nodes are
 *	allocated and @root->rnode is used as a direct slot instead of
 *	pointing to a node, in which case *@nodep will be NULL.
 */
void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
			  struct radix_tree_node **nodep, void ***slotp)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	unsigned int shift;
	void **slot;

 restart:
	parent = NULL;
	slot = (void **)&root->rnode;
	shift = radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		if (node == RADIX_TREE_RETRY)
			goto restart;
		parent = entry_to_node(node);
		shift -= RADIX_TREE_MAP_SHIFT;
		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
		offset = radix_tree_descend(parent, &node, offset);
		slot = parent->slots + offset;
	}

	if (nodep)
		*nodep = parent;
	if (slotp)
		*slotp = slot;
	return node;
}
/**
 *	radix_tree_lookup_slot    -    lookup a slot in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *
 *	Returns:  the slot corresponding to the position @index in the
 *	radix tree @root. This is useful for update-if-exists operations.
 *
 *	This function can be called under rcu_read_lock iff the slot is not
 *	modified by radix_tree_replace_slot, otherwise it must be called
 *	exclusive from other writers. Any dereference of the slot must be done
 *	using radix_tree_deref_slot.
 */
void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
{
	void **slot;

	if (!__radix_tree_lookup(root, index, NULL, &slot))
		return NULL;
	return slot;
}
EXPORT_SYMBOL(radix_tree_lookup_slot);
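
/*
 * Update-if-exists sketch (illustrative, not taken from this file):
 * replace an existing item without allocating, while excluding other
 * writers:
 *
 *	void **slot = radix_tree_lookup_slot(&mytree, index);
 *
 *	if (slot)
 *		radix_tree_replace_slot(slot, new_item);
 */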
/**
 *	radix_tree_lookup    -    perform lookup operation on a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *
 *	Lookup the item at the position @index in the radix tree @root.
 *
 *	This function can be called under rcu_read_lock, however the caller
 *	must manage lifetimes of leaf nodes (eg. RCU may also be used to free
 *	them safely). No RCU barriers are required to access or modify the
 *	returned item, however.
 */
void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
{
	return __radix_tree_lookup(root, index, NULL, NULL);
}
EXPORT_SYMBOL(radix_tree_lookup);
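
/*
 * RCU read-side sketch (illustrative): a plain lookup needs only
 * rcu_read_lock(), provided the caller revalidates the item before
 * relying on it:
 *
 *	rcu_read_lock();
 *	item = radix_tree_lookup(&mytree, index);
 *	if (item)
 *		item = try_to_take_reference(item);
 *	rcu_read_unlock();
 *
 * where try_to_take_reference() stands for whatever lifetime rule the
 * caller's items obey (e.g. page_cache_get_speculative() for pages).
 */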
/**
 *	radix_tree_tag_set - set a tag on a radix tree node
 *	@root:		radix tree root
 *	@index:		index key
 *	@tag:		tag index
 *
 *	Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
 *	corresponding to @index in the radix tree.  From
 *	the root all the way down to the leaf node.
 *
 *	Returns the address of the tagged item.  Setting a tag on a not-present
 *	item is a bug.
 */
void *radix_tree_tag_set(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	unsigned int shift;

	shift = radix_tree_load_root(root, &node, &maxindex);
	BUG_ON(index > maxindex);

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		shift -= RADIX_TREE_MAP_SHIFT;
		offset = (index >> shift) & RADIX_TREE_MAP_MASK;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, offset);
		BUG_ON(!node);

		if (!tag_get(parent, tag, offset))
			tag_set(parent, tag, offset);
	}

	/* set the root's tag bit */
	if (!root_tag_get(root, tag))
		root_tag_set(root, tag);

	return node;
}
EXPORT_SYMBOL(radix_tree_tag_set);
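
/*
 * Tag usage sketch (illustrative; for the page cache, tags 0-2 are
 * PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_WRITEBACK and PAGECACHE_TAG_TOWRITE):
 *
 *	radix_tree_tag_set(&mytree, index, 0);
 *	WARN_ON(!radix_tree_tag_get(&mytree, index, 0));
 *	radix_tree_tag_clear(&mytree, index, 0);
 */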
/**
 *	radix_tree_tag_clear - clear a tag on a radix tree node
 *	@root:		radix tree root
 *	@index:		index key
 *	@tag:		tag index
 *
 *	Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
 *	corresponding to @index in the radix tree.  If this causes
 *	the leaf node to have no tags set then clear the tag in the
 *	next-to-leaf node, etc.
 *
 *	Returns the address of the tagged item on success, else NULL.  ie:
 *	has the same return value and semantics as radix_tree_lookup().
 */
void *radix_tree_tag_clear(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	unsigned int shift;
	int uninitialized_var(offset);

	shift = radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	parent = NULL;

	while (radix_tree_is_internal_node(node)) {
		shift -= RADIX_TREE_MAP_SHIFT;
		offset = (index >> shift) & RADIX_TREE_MAP_MASK;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, offset);
	}

	if (node == NULL)
		goto out;

	index >>= shift;

	while (parent) {
		if (!tag_get(parent, tag, offset))
			goto out;
		tag_clear(parent, tag, offset);
		if (any_tag_set(parent, tag))
			goto out;

		index >>= RADIX_TREE_MAP_SHIFT;
		offset = index & RADIX_TREE_MAP_MASK;
		parent = parent->parent;
	}

	/* clear the root's tag bit */
	if (root_tag_get(root, tag))
		root_tag_clear(root, tag);

out:
	return node;
}
EXPORT_SYMBOL(radix_tree_tag_clear);
/**
 * radix_tree_tag_get - get a tag on a radix tree node
 * @root:		radix tree root
 * @index:		index key
 * @tag:		tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Return values:
 *
 *  0: tag not present or not set
 *  1: tag set
 *
 * Note that the return value of this function may not be relied on, even if
 * the RCU lock is held, unless tag modification and node deletion are excluded
 * from concurrency.
 */
int radix_tree_tag_get(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	unsigned int shift;

	if (!root_tag_get(root, tag))
		return 0;

	shift = radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return 0;
	if (node == NULL)
		return 0;

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		shift -= RADIX_TREE_MAP_SHIFT;
		offset = (index >> shift) & RADIX_TREE_MAP_MASK;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, offset);

		if (!node)
			return 0;
		if (!tag_get(parent, tag, offset))
			return 0;
		if (node == RADIX_TREE_RETRY)
			break;
	}

	return 1;
}
EXPORT_SYMBOL(radix_tree_tag_get);
static inline void __set_iter_shift(struct radix_tree_iter *iter,
					unsigned int shift)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	iter->shift = shift;
#endif
}
/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to chunk first slot, or NULL if iteration is over
 */
void **radix_tree_next_chunk(struct radix_tree_root *root,
			     struct radix_tree_iter *iter, unsigned flags)
{
	unsigned shift, tag = flags & RADIX_TREE_ITER_TAG_MASK;
	struct radix_tree_node *node, *child;
	unsigned long index, offset, maxindex;

	if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
		return NULL;

	/*
	 * Catch next_index overflow after ~0UL. iter->index never overflows
	 * during iterating; it can be zero only at the beginning.
	 * And we cannot overflow iter->next_index in a single step,
	 * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
	 *
	 * This condition is also used by radix_tree_next_slot() to stop
	 * contiguous iterating, and forbid switching to the next chunk.
	 */
	index = iter->next_index;
	if (!index && iter->index)
		return NULL;

 restart:
	shift = radix_tree_load_root(root, &child, &maxindex);
	if (index > maxindex)
		return NULL;
	if (!child)
		return NULL;

	if (!radix_tree_is_internal_node(child)) {
		/* Single-slot tree */
		iter->index = index;
		iter->next_index = maxindex + 1;
		iter->tags = 1;
		__set_iter_shift(iter, 0);
		return (void **)&root->rnode;
	}

	do {
		node = entry_to_node(child);
		shift -= RADIX_TREE_MAP_SHIFT;
		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
		offset = radix_tree_descend(node, &child, offset);

		if ((flags & RADIX_TREE_ITER_TAGGED) ?
				!tag_get(node, tag, offset) : !child) {
			/* Hole detected */
			if (flags & RADIX_TREE_ITER_CONTIG)
				return NULL;

			if (flags & RADIX_TREE_ITER_TAGGED)
				offset = radix_tree_find_next_bit(
						node->tags[tag],
						RADIX_TREE_MAP_SIZE,
						offset + 1);
			else
				while (++offset < RADIX_TREE_MAP_SIZE) {
					void *slot = node->slots[offset];
					if (is_sibling_entry(node, slot))
						continue;
					if (slot)
						break;
				}
			index &= ~node_maxindex(node);
			index += offset << shift;
			/* Overflow after ~0UL */
			if (!index)
				return NULL;
			if (offset == RADIX_TREE_MAP_SIZE)
				goto restart;
			child = rcu_dereference_raw(node->slots[offset]);
		}

		if ((child == NULL) || (child == RADIX_TREE_RETRY))
			goto restart;
	} while (radix_tree_is_internal_node(child));

	/* Update the iterator state */
	iter->index = (index &~ node_maxindex(node)) | (offset << node->shift);
	iter->next_index = (index | node_maxindex(node)) + 1;
	__set_iter_shift(iter, shift);

	/* Construct iter->tags bit-mask from node->tags[tag] array */
	if (flags & RADIX_TREE_ITER_TAGGED) {
		unsigned tag_long, tag_bit;

		tag_long = offset / BITS_PER_LONG;
		tag_bit  = offset % BITS_PER_LONG;
		iter->tags = node->tags[tag][tag_long] >> tag_bit;
		/* This never happens if RADIX_TREE_TAG_LONGS == 1 */
		if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
			/* Pick tags from next element */
			if (tag_bit)
				iter->tags |= node->tags[tag][tag_long + 1] <<
						(BITS_PER_LONG - tag_bit);
			/* Clip chunk size, here only BITS_PER_LONG tags */
			iter->next_index = index + BITS_PER_LONG;
		}
	}

	return node->slots + offset;
}
EXPORT_SYMBOL(radix_tree_next_chunk);
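
/*
 * Callers normally reach this function through the iterator macros in
 * radix-tree.h rather than calling it directly. An illustrative sketch:
 *
 *	struct radix_tree_iter iter;
 *	void **slot;
 *
 *	rcu_read_lock();
 *	radix_tree_for_each_slot(slot, &mytree, &iter, start) {
 *		void *item = radix_tree_deref_slot(slot);
 *
 *		if (radix_tree_deref_retry(item)) {
 *			slot = radix_tree_iter_retry(&iter);
 *			continue;
 *		}
 *		... use iter.index and item ...
 *	}
 *	rcu_read_unlock();
 */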
/**
 * radix_tree_range_tag_if_tagged - for each item in given range set given
 *				    tag if item has another tag set
 * @root:		radix tree root
 * @first_indexp:	pointer to a starting index of a range to scan
 * @last_index:		last index of a range to scan
 * @nr_to_tag:		maximum number of items to tag
 * @iftag:		tag index to test
 * @settag:		tag index to set if tested tag is set
 *
 * This function scans the range of the radix tree from first_index to
 * last_index (inclusive).  For each item in the range, if iftag is set, the
 * function also sets settag. The function stops either after tagging
 * nr_to_tag items or after reaching last_index.
 *
 * The tags must be set from the leaf level only and propagated back up the
 * path to the root. We must do this so that we resolve the full path before
 * setting any tags on intermediate nodes. If we set tags as we descend, then
 * we can get to the leaf node and find that the index that has the iftag
 * set is outside the range we are scanning. This results in dangling tags and
 * can lead to problems with later tag operations (e.g. livelocks on lookups).
 *
 * The function returns the number of leaves where the tag was set and sets
 * *first_indexp to the first unscanned index.
 * WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must
 * be prepared to handle that.
 */
unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
		unsigned long *first_indexp, unsigned long last_index,
		unsigned long nr_to_tag,
		unsigned int iftag, unsigned int settag)
{
	struct radix_tree_node *slot, *node = NULL;
	unsigned long maxindex;
	unsigned int shift = radix_tree_load_root(root, &slot, &maxindex);
	unsigned long tagged = 0;
	unsigned long index = *first_indexp;

	last_index = min(last_index, maxindex);
	if (index > last_index)
		return 0;
	if (!nr_to_tag)
		return 0;
	if (!root_tag_get(root, iftag)) {
		*first_indexp = last_index + 1;
		return 0;
	}
	if (!radix_tree_is_internal_node(slot)) {
		*first_indexp = last_index + 1;
		root_tag_set(root, settag);
		return 1;
	}

	node = entry_to_node(slot);
	shift -= RADIX_TREE_MAP_SHIFT;

	for (;;) {
		unsigned long upindex;
		unsigned offset;

		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
		offset = radix_tree_descend(node, &slot, offset);
		if (!slot)
			goto next;
		if (!tag_get(node, iftag, offset))
			goto next;
		/* Sibling slots never have tags set on them */
		if (radix_tree_is_internal_node(slot)) {
			node = entry_to_node(slot);
			shift -= RADIX_TREE_MAP_SHIFT;
			continue;
		}

		/* tag the leaf */
		tagged++;
		tag_set(node, settag, offset);

		slot = node->parent;
		/* walk back up the path tagging interior nodes */
		upindex = index >> shift;
		while (slot) {
			upindex >>= RADIX_TREE_MAP_SHIFT;
			offset = upindex & RADIX_TREE_MAP_MASK;

			/* stop if we find a node with the tag already set */
			if (tag_get(slot, settag, offset))
				break;
			tag_set(slot, settag, offset);
			slot = slot->parent;
		}

 next:
		/* Go to next item at level determined by 'shift' */
		index = ((index >> shift) + 1) << shift;
		/* Overflow can happen when last_index is ~0UL... */
		if (index > last_index || !index)
			break;
		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
		while (offset == 0) {
			/*
			 * We've fully scanned this node. Go up. Because
			 * last_index is guaranteed to be in the tree, what
			 * we do below cannot wander astray.
			 */
			node = node->parent;
			shift += RADIX_TREE_MAP_SHIFT;
			offset = (index >> shift) & RADIX_TREE_MAP_MASK;
		}
		if (is_sibling_entry(node, node->slots[offset]))
			goto next;
		if (tagged >= nr_to_tag)
			break;
	}
	/*
	 * We need not tag the root if there is no tag which is set with
	 * settag within the range from *first_indexp to last_index.
	 */
	if (tagged > 0)
		root_tag_set(root, settag);
	*first_indexp = index;

	return tagged;
}
EXPORT_SYMBOL(radix_tree_range_tag_if_tagged);
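
/*
 * Batched-tagging sketch (illustrative; it mirrors how
 * tag_pages_for_writeback() drives this function, retagging dirty pages
 * as TOWRITE a batch at a time so the tree lock is not held for too long):
 *
 *	unsigned long start = first, tagged;
 *
 *	do {
 *		spin_lock_irq(&mapping->tree_lock);
 *		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
 *				&start, end, 1024,
 *				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
 *		spin_unlock_irq(&mapping->tree_lock);
 *		cond_resched();
 *	} while (tagged >= 1024 && start);
 *
 * The "&& start" term handles *first_indexp wrapping to 0 when end is
 * ULONG_MAX, as warned above.
 */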
/**
 *	radix_tree_gang_lookup - perform multiple lookup on a radix tree
 *	@root:		radix tree root
 *	@results:	where the results of the lookup are placed
 *	@first_index:	start the lookup from this key
 *	@max_items:	place up to this many items at *results
 *
 *	Performs an index-ascending scan of the tree for present items.  Places
 *	them at *@results and returns the number of items which were placed at
 *	*@results.
 *
 *	The implementation is naive.
 *
 *	Like radix_tree_lookup, radix_tree_gang_lookup may be called under
 *	rcu_read_lock. In this case, rather than the returned results being
 *	an atomic snapshot of the tree at a single point in time, the
 *	semantics of an RCU protected gang lookup are as though multiple
 *	radix_tree_lookups have been issued in individual locks, and results
 *	stored in 'results'.
 */
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
			unsigned long first_index, unsigned int max_items)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_slot(slot, root, &iter, first_index) {
		results[ret] = rcu_dereference_raw(*slot);
		if (!results[ret])
			continue;
		if (radix_tree_is_internal_node(results[ret])) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup);
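
/*
 * Pagination sketch (illustrative): scan a whole tree in batches. Only the
 * slot variants report indices, so the items themselves must record their
 * index (as pages do in page->index); my_item_index() below is a
 * hypothetical accessor for that:
 *
 *	void *items[16];
 *	unsigned long index = 0;
 *	unsigned int i, nr;
 *
 *	do {
 *		nr = radix_tree_gang_lookup(&mytree, items, index, 16);
 *		for (i = 0; i < nr; i++)
 *			index = my_item_index(items[i]) + 1;
 *	} while (nr == 16);
 */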
/**
 *	radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
 *	@root:		radix tree root
 *	@results:	where the results of the lookup are placed
 *	@indices:	where their indices should be placed (but usually NULL)
 *	@first_index:	start the lookup from this key
 *	@max_items:	place up to this many items at *results
 *
 *	Performs an index-ascending scan of the tree for present items.  Places
 *	their slots at *@results and returns the number of items which were
 *	placed at *@results.
 *
 *	The implementation is naive.
 *
 *	Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must
 *	be dereferenced with radix_tree_deref_slot, and if using only RCU
 *	protection, radix_tree_deref_slot may fail requiring a retry.
 */
unsigned int
radix_tree_gang_lookup_slot(struct radix_tree_root *root,
			void ***results, unsigned long *indices,
			unsigned long first_index, unsigned int max_items)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_slot(slot, root, &iter, first_index) {
		results[ret] = slot;
		if (indices)
			indices[ret] = iter.index;
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_slot);
/**
 *	radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
 *				     based on a tag
 *	@root:		radix tree root
 *	@results:	where the results of the lookup are placed
 *	@first_index:	start the lookup from this key
 *	@max_items:	place up to this many items at *results
 *	@tag:		the tag index (< RADIX_TREE_MAX_TAGS)
 *
 *	Performs an index-ascending scan of the tree for present items which
 *	have the tag indexed by @tag set.  Places the items at *@results and
 *	returns the number of items which were placed at *@results.
 */
unsigned int
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
		results[ret] = rcu_dereference_raw(*slot);
		if (!results[ret])
			continue;
		if (radix_tree_is_internal_node(results[ret])) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
/**
 *	radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
 *					  radix tree based on a tag
 *	@root:		radix tree root
 *	@results:	where the results of the lookup are placed
 *	@first_index:	start the lookup from this key
 *	@max_items:	place up to this many items at *results
 *	@tag:		the tag index (< RADIX_TREE_MAX_TAGS)
 *
 *	Performs an index-ascending scan of the tree for present items which
 *	have the tag indexed by @tag set.  Places the slots at *@results and
 *	returns the number of slots which were placed at *@results.
 */
unsigned int
radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
		results[ret] = slot;
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
#if defined(CONFIG_SHMEM) && defined(CONFIG_SWAP)
#include <linux/sched.h> /* for cond_resched() */

struct locate_info {
	unsigned long found_index;
	bool stop;
};

/*
 * This linear search is at present only useful to shmem_unuse_inode().
 */
static unsigned long __locate(struct radix_tree_node *slot, void *item,
			      unsigned long index, struct locate_info *info)
{
	unsigned long i;
	unsigned int shift;

	shift = slot->shift + RADIX_TREE_MAP_SHIFT;

	do {
		shift -= RADIX_TREE_MAP_SHIFT;

		for (i = (index >> shift) & RADIX_TREE_MAP_MASK;
		     i < RADIX_TREE_MAP_SIZE;
		     i++, index += (1UL << shift)) {
			struct radix_tree_node *node =
					rcu_dereference_raw(slot->slots[i]);
			if (node == RADIX_TREE_RETRY)
				goto out;
			if (!radix_tree_is_internal_node(node)) {
				if (node == item) {
					info->found_index = index;
					info->stop = true;
					goto out;
				}
				continue;
			}
			node = entry_to_node(node);
			if (is_sibling_entry(slot, node))
				continue;
			slot = node;
			break;
		}
		if (i == RADIX_TREE_MAP_SIZE)
			break;
	} while (shift);

out:
	if ((index == 0) && (i == RADIX_TREE_MAP_SIZE))
		info->stop = true;
	return index;
}

/**
 *	radix_tree_locate_item - search through radix tree for item
 *	@root:		radix tree root
 *	@item:		item to be found
 *
 *	Returns index where item was found, or -1 if not found.
 *	Caller must hold no lock (since this time-consuming function needs
 *	to be preemptible), and must check afterwards if item is still there.
 */
unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
{
	struct radix_tree_node *node;
	unsigned long max_index;
	unsigned long cur_index = 0;
	struct locate_info info = {
		.found_index = -1,
		.stop = false,
	};

	do {
		rcu_read_lock();
		node = rcu_dereference_raw(root->rnode);
		if (!radix_tree_is_internal_node(node)) {
			rcu_read_unlock();
			if (node == item)
				info.found_index = 0;
			break;
		}

		node = entry_to_node(node);

		max_index = node_maxindex(node);
		if (cur_index > max_index) {
			rcu_read_unlock();
			break;
		}

		cur_index = __locate(node, item, cur_index, &info);
		rcu_read_unlock();
		cond_resched();
	} while (!info.stop && cur_index <= max_index);

	return info.found_index;
}
#else
unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
{
	return -1;
}
#endif /* CONFIG_SHMEM && CONFIG_SWAP */
/**
 *	radix_tree_shrink    -    shrink radix tree to minimum height
 *	@root		radix tree root
 */
static inline bool radix_tree_shrink(struct radix_tree_root *root)
{
	bool shrunk = false;

	for (;;) {
		struct radix_tree_node *node = root->rnode;
		struct radix_tree_node *child;

		if (!radix_tree_is_internal_node(node))
			break;
		node = entry_to_node(node);

		/*
		 * If the candidate node has more than one child, or its
		 * child is not at the leftmost slot, or the child is a
		 * multiorder entry, we cannot shrink.
		 */
		if (node->count != 1)
			break;
		child = node->slots[0];
		if (!child)
			break;
		if (!radix_tree_is_internal_node(child) && node->shift)
			break;

		if (radix_tree_is_internal_node(child))
			entry_to_node(child)->parent = NULL;

		/*
		 * We don't need rcu_assign_pointer(), since we are simply
		 * moving the node from one part of the tree to another: if it
		 * was safe to dereference the old pointer to it
		 * (node->slots[0]), it will be safe to dereference the new
		 * one (root->rnode) as far as dependent read barriers go.
		 */
		root->rnode = child;

		/*
		 * We have a dilemma here. The node's slot[0] must not be
		 * NULLed in case there are concurrent lookups expecting to
		 * find the item. However if this was a bottom-level node,
		 * then it may be subject to the slot pointer being visible
		 * to callers dereferencing it. If item corresponding to
		 * slot[0] is subsequently deleted, these callers would expect
		 * their slot to become empty sooner or later.
		 *
		 * For example, lockless pagecache will look up a slot, deref
		 * the page pointer, and if the page has 0 refcount it means it
		 * was concurrently deleted from pagecache so try the deref
		 * again. Fortunately there is already a requirement for logic
		 * to retry the entire slot lookup -- the indirect pointer
		 * problem (replacing direct root node with an indirect pointer
		 * also results in a stale slot). So tag the slot as indirect
		 * to force callers to retry.
		 */
		if (!radix_tree_is_internal_node(child))
			node->slots[0] = RADIX_TREE_RETRY;

		radix_tree_node_free(node);
		shrunk = true;
	}

	return shrunk;
}
/**
 *	__radix_tree_delete_node    -    try to free node after clearing a slot
 *	@root:		radix tree root
 *	@node:		node containing @index
 *
 *	After clearing the slot at @index in @node from radix tree
 *	rooted at @root, call this function to attempt freeing the
 *	node and shrinking the tree.
 *
 *	Returns %true if @node was freed, %false otherwise.
 */
bool __radix_tree_delete_node(struct radix_tree_root *root,
			      struct radix_tree_node *node)
{
	bool deleted = false;

	do {
		struct radix_tree_node *parent;

		if (node->count) {
			if (node == entry_to_node(root->rnode))
				deleted |= radix_tree_shrink(root);
			return deleted;
		}

		parent = node->parent;
		if (parent) {
			parent->slots[node->offset] = NULL;
			parent->count--;
		} else {
			root_tag_clear_all(root);
			root->rnode = NULL;
		}

		radix_tree_node_free(node);
		deleted = true;

		node = parent;
	} while (node);

	return deleted;
}
static inline void delete_sibling_entries(struct radix_tree_node *node,
					void *ptr, unsigned offset)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	int i;
	for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) {
		if (node->slots[offset + i] != ptr)
			break;
		node->slots[offset + i] = NULL;
		node->count--;
	}
#endif
}
/**
 *	radix_tree_delete_item    -    delete an item from a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@item:		expected item
 *
 *	Remove @item at @index from the radix tree rooted at @root.
 *
 *	Returns the address of the deleted item, or NULL if it was not present
 *	or the entry at the given @index was not @item.
 */
void *radix_tree_delete_item(struct radix_tree_root *root,
			     unsigned long index, void *item)
{
	struct radix_tree_node *node;
	unsigned int offset;
	void **slot;
	void *entry;
	int tag;

	entry = __radix_tree_lookup(root, index, &node, &slot);
	if (!entry)
		return NULL;

	if (item && entry != item)
		return NULL;

	if (!node) {
		root_tag_clear_all(root);
		root->rnode = NULL;
		return entry;
	}

	offset = get_slot_offset(node, slot);

	/*
	 * Clear all tags associated with the item to be deleted.
	 * This way of doing it would be inefficient, but seldom is any set.
	 */
	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
		if (tag_get(node, tag, offset))
			radix_tree_tag_clear(root, index, tag);
	}

	delete_sibling_entries(node, node_to_entry(slot), offset);
	node->slots[offset] = NULL;
	node->count--;

	__radix_tree_delete_node(root, node);

	return entry;
}
EXPORT_SYMBOL(radix_tree_delete_item);
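
/*
 * Conditional-delete sketch (illustrative): the @item argument lets a
 * caller remove an entry only if it still holds the expected value, which
 * is useful when deletion races with replacement:
 *
 *	void *old = radix_tree_delete_item(&mytree, index, expected);
 *
 *	if (!old)
 *		... the entry was already removed or replaced ...
 */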
/**
 *	radix_tree_delete    -    delete an item from a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *
 *	Remove the item at @index from the radix tree rooted at @root.
 *
 *	Returns the address of the deleted item, or NULL if it was not present.
 */
void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
{
	return radix_tree_delete_item(root, index, NULL);
}
EXPORT_SYMBOL(radix_tree_delete);

/**
 *	radix_tree_tagged - test whether any items in the tree are tagged
 *	@root:		radix tree root
 *	@tag:		tag to test
 */
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
{
	return root_tag_get(root, tag);
}
EXPORT_SYMBOL(radix_tree_tagged);
static void
radix_tree_node_ctor(void *arg)
{
	struct radix_tree_node *node = arg;

	memset(node, 0, sizeof(*node));
	INIT_LIST_HEAD(&node->private_list);
}

static int radix_tree_callback(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;

	/* Free per-cpu pool of preloaded nodes */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		rtp = &per_cpu(radix_tree_preloads, cpu);
		while (rtp->nr) {
			node = rtp->nodes;
			rtp->nodes = node->private_data;
			kmem_cache_free(radix_tree_node_cachep, node);
			rtp->nr--;
		}
	}
	return NOTIFY_OK;
}

void __init radix_tree_init(void)
{
	radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
			sizeof(struct radix_tree_node), 0,
			SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
			radix_tree_node_ctor);
	hotcpu_notifier(radix_tree_callback, 0);
}