// SPDX-License-Identifier: GPL-2.0+
/*
 * Maple Tree implementation
 * Copyright (c) 2018-2022 Oracle Corporation
 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
 *	    Matthew Wilcox <willy@infradead.org>
 */

/*
 * DOC: Interesting implementation details of the Maple Tree
 *
 * Each node type has a number of slots for entries and a number of slots for
 * pivots.  In the case of dense nodes, the pivots are implied by the position
 * and are simply the slot index + the minimum of the node.
 *
 * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
 * indicate that the tree is specifying ranges.  Pivots may appear in the
 * subtree with an entry attached to the value, whereas keys are unique to a
 * specific position of a B-tree.  Pivot values are inclusive of the slot with
 * the same index.
 *
 * The following illustrates the layout of a range64 node's slots and pivots.
 *
 *  Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
 *           ┬   ┬   ┬   ┬     ┬    ┬    ┬    ┬    ┬
 *           │   │   │   │     │    │    │    │    └─ Implied maximum
 *           │   │   │   │     │    │    │    └─ Pivot 14
 *           │   │   │   │     │    │    └─ Pivot 13
 *           │   │   │   │     │    └─ Pivot 12
 *           │   │   │   │     └─ Pivot 11
 *           │   │   │   └─ Pivot 2
 *           │   │   └─ Pivot 1
 *           │   └─ Pivot 0
 *           └─  Implied minimum
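 *
 * As a sketch of the above (values invented for illustration): a leaf node
 * with an implied minimum of 100 and pivots {104, 109, 149, ...} stores the
 * range [100, 104] in slot 0, [105, 109] in slot 1 and [110, 149] in slot 2;
 * each pivot is the inclusive upper bound of its own slot.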
 *
 * Internal (non-leaf) nodes contain pointers to other nodes.
 *
 * Leaf nodes contain entries.
 *
 * The location of interest is often referred to as an offset.  All offsets have
 * a slot, but the last offset has an implied pivot from the node above (or
 * ULONG_MAX for the root node).
 *
 * Ranges complicate certain write activities.  When modifying any of
 * the B-tree variants, it is known that one entry will either be added or
 * deleted.  When modifying the Maple Tree, one store operation may overwrite
 * the entire data set, or one half of the tree, or the middle half of the tree.
 */
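
/*
 * A sketch of the above: a single store can replace several pre-existing
 * ranges at once.  Assuming a tree that already holds entries for [0, 9],
 * [10, 19] and [20, 29], a single call such as
 *
 *	mtree_store_range(&tree, 5, 24, new_entry, GFP_KERNEL);
 *
 * truncates the first range to [0, 4], replaces everything in [5, 24] with
 * new_entry, and truncates the last range to [25, 29].
 */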

#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <asm/barrier.h>

#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>

#define MA_ROOT_PARENT 1

/*
 * Maple state flags
 * * MA_STATE_BULK		- Bulk insert mode
 * * MA_STATE_REBALANCE		- Indicate a rebalance during bulk insert
 * * MA_STATE_PREALLOC		- Preallocated nodes, WARN_ON allocation
 */
#define MA_STATE_BULK		1
#define MA_STATE_REBALANCE	2
#define MA_STATE_PREALLOC	4

#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
static struct kmem_cache *maple_node_cache;

#ifdef CONFIG_DEBUG_MAPLE_TREE
static const unsigned long mt_max[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= ULONG_MAX,
	[maple_range_64]	= ULONG_MAX,
	[maple_arange_64]	= ULONG_MAX,
};
#define mt_node_max(x) mt_max[mte_node_type(x)]
#endif

static const unsigned char mt_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS,
};
#define mt_slot_count(x) mt_slots[mte_node_type(x)]

static const unsigned char mt_pivots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS - 1,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS - 1,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS - 1,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS - 1,
};
#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]

static const unsigned char mt_min_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS / 2,
	[maple_leaf_64]		= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_range_64]	= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_arange_64]	= (MAPLE_ARANGE64_SLOTS / 2) - 1,
};
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]

#define MAPLE_BIG_NODE_SLOTS	(MAPLE_RANGE64_SLOTS * 2 + 2)
#define MAPLE_BIG_NODE_GAPS	(MAPLE_ARANGE64_SLOTS * 2 + 1)

struct maple_big_node {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
	union {
		struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
		struct {
			unsigned long padding[MAPLE_BIG_NODE_GAPS];
			unsigned long gap[MAPLE_BIG_NODE_GAPS];
		};
	};
	unsigned char b_end;
	enum maple_type type;
};

/*
 * The maple_subtree_state is used to build a tree to replace a segment of an
 * existing tree in a more atomic way.  Any walkers of the older tree will hit a
 * dead node and restart on updates.
 */
struct maple_subtree_state {
	struct ma_state *orig_l;	/* Original left side of subtree */
	struct ma_state *orig_r;	/* Original right side of subtree */
	struct ma_state *l;		/* New left side of subtree */
	struct ma_state *m;		/* New middle of subtree (rare) */
	struct ma_state *r;		/* New right side of subtree */
	struct ma_topiary *free;	/* nodes to be freed */
	struct ma_topiary *destroy;	/* Nodes to be destroyed (walked and freed) */
	struct maple_big_node *bn;
};

#ifdef CONFIG_KASAN_STACK
/* Prevent mas_wr_bnode() from exceeding the stack frame limit */
#define noinline_for_kasan noinline_for_stack
#else
#define noinline_for_kasan inline
#endif

static inline struct maple_node *mt_alloc_one(gfp_t gfp)
{
	return kmem_cache_alloc(maple_node_cache, gfp);
}

static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
{
	return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
}

static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
	kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
}

static void mt_free_rcu(struct rcu_head *head)
{
	struct maple_node *node = container_of(head, struct maple_node, rcu);

	kmem_cache_free(maple_node_cache, node);
}

/*
 * ma_free_rcu() - Use rcu callback to free a maple node
 * @node: The node to free
 *
 * The maple tree uses the parent pointer to indicate this node is no longer in
 * use and will be freed.
 */
static void ma_free_rcu(struct maple_node *node)
{
	WARN_ON(node->parent != ma_parent_ptr(node));
	call_rcu(&node->rcu, mt_free_rcu);
}

static void mas_set_height(struct ma_state *mas)
{
	unsigned int new_flags = mas->tree->ma_flags;

	new_flags &= ~MT_FLAGS_HEIGHT_MASK;
	BUG_ON(mas->depth > MAPLE_HEIGHT_MAX);
	new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
	mas->tree->ma_flags = new_flags;
}

static unsigned int mas_mt_height(struct ma_state *mas)
{
	return mt_height(mas->tree);
}

static inline enum maple_type mte_node_type(const struct maple_enode *entry)
{
	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
		MAPLE_NODE_TYPE_MASK;
}

static inline bool ma_is_dense(const enum maple_type type)
{
	return type < maple_leaf_64;
}

static inline bool ma_is_leaf(const enum maple_type type)
{
	return type < maple_range_64;
}

static inline bool mte_is_leaf(const struct maple_enode *entry)
{
	return ma_is_leaf(mte_node_type(entry));
}

/*
 * We also reserve values with the bottom two bits set to '10' which are
 * below 4096
 */
static inline bool mt_is_reserved(const void *entry)
{
	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
		xa_is_internal(entry);
}

static inline void mas_set_err(struct ma_state *mas, long err)
{
	mas->node = MA_ERROR(err);
}

static inline bool mas_is_ptr(struct ma_state *mas)
{
	return mas->node == MAS_ROOT;
}

static inline bool mas_is_start(struct ma_state *mas)
{
	return mas->node == MAS_START;
}

bool mas_is_err(struct ma_state *mas)
{
	return xa_is_err(mas->node);
}

static inline bool mas_searchable(struct ma_state *mas)
{
	if (mas_is_none(mas))
		return false;

	if (mas_is_ptr(mas))
		return false;

	return true;
}

static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
{
	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
 * @entry: The maple encoded node
 *
 * Return: a maple topiary pointer
 */
static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
{
	return (struct maple_topiary *)
		((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mas_mn() - Get the maple state node.
 * @mas: The maple state
 *
 * Return: the maple node (not encoded - bare pointer).
 */
static inline struct maple_node *mas_mn(const struct ma_state *mas)
{
	return mte_to_node(mas->node);
}

/*
 * mte_set_node_dead() - Set a maple encoded node as dead.
 * @mn: The maple encoded node.
 */
static inline void mte_set_node_dead(struct maple_enode *mn)
{
	mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
	smp_wmb(); /* Needed for RCU */
}

/* Bit 1 indicates the root is a node */
#define MAPLE_ROOT_NODE			0x02
/* maple_type stored bit 3-6 */
#define MAPLE_ENODE_TYPE_SHIFT		0x03
/* Bit 2 means a NULL somewhere below */
#define MAPLE_ENODE_NULL		0x04

static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
		enum maple_type type)
{
	return (void *)((unsigned long)node |
			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
}

static inline void *mte_mk_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
}

static inline void *mte_safe_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}

static inline void *mte_set_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}

static inline void *mte_clear_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
}

static inline bool mte_has_null(const struct maple_enode *node)
{
	return (unsigned long)node & MAPLE_ENODE_NULL;
}
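
/*
 * A worked example of the encodings above (addresses invented for
 * illustration): for a 256B-aligned node at 0x1000,
 * mt_mk_node(node, maple_leaf_64) yields
 *
 *	0x1000 | (1 << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL == 0x100c
 *
 * mte_node_type() shifts the low bits back down to recover type 1
 * (maple_leaf_64), and mte_to_node() masks them off to recover 0x1000.
 */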

static inline bool ma_is_root(struct maple_node *node)
{
	return ((unsigned long)node->parent & MA_ROOT_PARENT);
}

static inline bool mte_is_root(const struct maple_enode *node)
{
	return ma_is_root(mte_to_node(node));
}

static inline bool mas_is_root_limits(const struct ma_state *mas)
{
	return !mas->min && mas->max == ULONG_MAX;
}

static inline bool mt_is_alloc(struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
}

/*
 * The Parent Pointer
 * Excluding root, the parent pointer is 256B aligned like all other tree nodes.
 * When storing 32 or 64 bit values, the offset can fit into 5 bits.  The 16
 * bit values need an extra bit to store the offset.  This extra bit comes from
 * a reuse of the last bit in the node type.  This is possible by using bit 1 to
 * indicate if bit 2 is part of the type or the slot.
 *
 * Note types:
 *  0x??1 = Root
 *  0x?00 = 16 bit nodes
 *  0x010 = 32 bit nodes
 *  0x110 = 64 bit nodes
 *
 * Slot size and alignment
 *  0x??1 : Root
 *  0b?00 : 16 bit values, type in 0-1, slot in 2-7
 *  0b010 : 32 bit values, type in 0-2, slot in 3-7
 *  0b110 : 64 bit values, type in 0-2, slot in 3-7
 */
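
/*
 * A worked example of the layout above (addresses invented for
 * illustration): a child stored in slot 3 of a range64 parent at address
 * 0x1000 gets the parent pointer
 *
 *	0x1000 | (3 << MAPLE_PARENT_SLOT_SHIFT) | MAPLE_PARENT_RANGE64
 *	       == 0x1000 | 0x18 | 0x06 == 0x101e
 *
 * Decoding reverses the steps:
 * (0x101e & MAPLE_PARENT_SLOT_MASK) >> MAPLE_PARENT_SLOT_SHIFT recovers
 * slot 3, and masking off MAPLE_NODE_MASK recovers the node at 0x1000.
 */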

#define MAPLE_PARENT_ROOT		0x01

#define MAPLE_PARENT_SLOT_SHIFT		0x03
#define MAPLE_PARENT_SLOT_MASK		0xF8

#define MAPLE_PARENT_16B_SLOT_SHIFT	0x02
#define MAPLE_PARENT_16B_SLOT_MASK	0xFC

#define MAPLE_PARENT_RANGE64		0x06
#define MAPLE_PARENT_RANGE32		0x04
#define MAPLE_PARENT_NOT_RANGE16	0x02

/*
 * mte_parent_shift() - Get the parent shift for the slot storage.
 * @parent: The parent pointer cast as an unsigned long
 * Return: The shift into that pointer to the start of the slot
 */
static inline unsigned long mte_parent_shift(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_SHIFT;

	return MAPLE_PARENT_16B_SLOT_SHIFT;
}

/*
 * mte_parent_slot_mask() - Get the slot mask for the parent.
 * @parent: The parent pointer cast as an unsigned long.
 * Return: The slot mask for that parent.
 */
static inline unsigned long mte_parent_slot_mask(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_MASK;

	return MAPLE_PARENT_16B_SLOT_MASK;
}

/*
 * mte_parent_enum() - Return the maple_type of the parent from the stored
 * parent type.
 * @p_enode: The maple_enode to extract the parent's enum
 * @mt: The maple tree
 * Return: The node->parent maple_type
 */
static inline
enum maple_type mte_parent_enum(struct maple_enode *p_enode,
				struct maple_tree *mt)
{
	unsigned long p_type;

	p_type = (unsigned long)p_enode;
	if (p_type & MAPLE_PARENT_ROOT)
		return 0; /* Validated in the caller. */

	p_type &= MAPLE_NODE_MASK;
	p_type = p_type & ~(MAPLE_PARENT_ROOT | mte_parent_slot_mask(p_type));

	switch (p_type) {
	case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
		if (mt_is_alloc(mt))
			return maple_arange_64;
		return maple_range_64;
	}

	return 0;
}

static inline
enum maple_type mas_parent_enum(struct ma_state *mas, struct maple_enode *enode)
{
	return mte_parent_enum(ma_enode_ptr(mte_to_node(enode)->parent), mas->tree);
}

/*
 * mte_set_parent() - Set the parent node and encode the slot
 * @enode: The encoded maple node.
 * @parent: The encoded maple node that is the parent of @enode.
 * @slot: The slot that @enode resides in @parent.
 *
 * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the
 * parent type.
 */
static inline
void mte_set_parent(struct maple_enode *enode, const struct maple_enode *parent,
		    unsigned char slot)
{
	unsigned long val = (unsigned long)parent;
	unsigned long shift;
	unsigned long type;
	enum maple_type p_type = mte_node_type(parent);

	BUG_ON(p_type == maple_dense);
	BUG_ON(p_type == maple_leaf_64);

	switch (p_type) {
	case maple_range_64:
	case maple_arange_64:
		shift = MAPLE_PARENT_SLOT_SHIFT;
		type = MAPLE_PARENT_RANGE64;
		break;
	default:
		break;
	}

	val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
	val |= (slot << shift) | type;
	mte_to_node(enode)->parent = ma_parent_ptr(val);
}

/*
 * mte_parent_slot() - get the parent slot of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The slot in the parent node where @enode resides.
 */
static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
{
	unsigned long val = (unsigned long)mte_to_node(enode)->parent;

	if (val & MA_ROOT_PARENT)
		return 0;

	/*
	 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
	 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
	 */
	return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
}

/*
 * mte_parent() - Get the parent of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The parent maple node.
 */
static inline struct maple_node *mte_parent(const struct maple_enode *enode)
{
	return (void *)((unsigned long)
			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
}

/*
 * ma_dead_node() - check if the @node is dead.
 * @node: The maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool ma_dead_node(const struct maple_node *node)
{
	struct maple_node *parent = (void *)((unsigned long)
					     node->parent & ~MAPLE_NODE_MASK);

	return (parent == node);
}

/*
 * mte_dead_node() - check if the @enode is dead.
 * @enode: The encoded maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool mte_dead_node(const struct maple_enode *enode)
{
	struct maple_node *parent, *node;

	node = mte_to_node(enode);
	parent = mte_parent(enode);
	return (parent == node);
}

/*
 * mas_allocated() - Get the number of nodes allocated in a maple state.
 * @mas: The maple state
 *
 * The ma_state alloc member is overloaded to hold a pointer to the first
 * allocated node or to the number of requested nodes to allocate.  If bit 0 is
 * set, then the alloc contains the number of requested nodes.  If there is an
 * allocated node, then the total allocated nodes is in that node.
 *
 * Return: The total number of nodes allocated
 */
static inline unsigned long mas_allocated(const struct ma_state *mas)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
		return 0;

	return mas->alloc->total;
}
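
/*
 * A sketch of the encoding above: after a request for 3 nodes with nothing
 * yet allocated, mas->alloc holds ((3 << 1) | 1) == 0x7, so mas_allocated()
 * reads 0 while mas_alloc_req() reads 3.  Once a node is allocated,
 * mas->alloc becomes a real pointer (bit 0 clear) and both counts live
 * inside that node.
 */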

/*
 * mas_set_alloc_req() - Set the requested number of allocations.
 * @mas: the maple state
 * @count: the number of allocations.
 *
 * The requested number of allocations is either in the first allocated node,
 * located in @mas->alloc->request_count, or directly in @mas->alloc if there is
 * no allocated node.  Set the request either in the node or do the necessary
 * encoding to store in @mas->alloc directly.
 */
static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
		if (!count)
			mas->alloc = NULL;
		else
			mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
		return;
	}

	mas->alloc->request_count = count;
}

/*
 * mas_alloc_req() - get the requested number of allocations.
 * @mas: The maple state
 *
 * The alloc count is either stored directly in @mas, or in
 * @mas->alloc->request_count if there is at least one node allocated.  Decode
 * the request count if it's stored directly in @mas->alloc.
 *
 * Return: The allocation request count.
 */
static inline unsigned int mas_alloc_req(const struct ma_state *mas)
{
	if ((unsigned long)mas->alloc & 0x1)
		return (unsigned long)(mas->alloc) >> 1;
	else if (mas->alloc)
		return mas->alloc->request_count;
	return 0;
}

/*
 * ma_pivots() - Get a pointer to the maple node pivots.
 * @node: The maple node
 * @type: The maple node type
 *
 * In the event of a dead node, this array may be %NULL
 *
 * Return: A pointer to the maple node pivots
 */
static inline unsigned long *ma_pivots(struct maple_node *node,
					   enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot;
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot;
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * ma_gaps() - Get a pointer to the maple node gaps.
 * @node: The maple node
 * @type: The maple node type
 *
 * Return: A pointer to the maple node gaps
 */
static inline unsigned long *ma_gaps(struct maple_node *node,
				     enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.gap;
	case maple_range_64:
	case maple_leaf_64:
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * mte_pivot() - Get the pivot at @piv of the maple encoded node.
 * @mn: The maple encoded node.
 * @piv: The pivot offset.
 *
 * Return: the pivot at @piv of @mn.
 */
static inline unsigned long mte_pivot(const struct maple_enode *mn,
				      unsigned char piv)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	if (piv >= mt_pivots[type]) {
		WARN_ON(1);
		return 0;
	}
	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot[piv];
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot[piv];
	case maple_dense:
		return 0;
	}
	return 0;
}

/*
 * mas_safe_pivot() - get the pivot at @piv or mas->max.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @piv: The pivot to fetch
 * @type: The maple node type
 *
 * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
 * otherwise.
 */
static inline unsigned long
mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
	       unsigned char piv, enum maple_type type)
{
	if (piv >= mt_pivots[type])
		return mas->max;

	return pivots[piv];
}

/*
 * mas_safe_min() - Return the minimum for a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 *
 * Return: The minimum range value that is contained in @offset.
 */
static inline unsigned long
mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
{
	if (likely(offset))
		return pivots[offset - 1] + 1;

	return mas->min;
}
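
/*
 * For example (invented values): with pivots {9, 19, 39}, mas_safe_min() at
 * offset 2 returns pivots[1] + 1 == 20, the first index of that slot's
 * range; at offset 0 there is no previous pivot, so mas->min is used.
 */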

/*
 * mas_logical_pivot() - Get the logical pivot of a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 * @type: The maple node type
 *
 * When there is no value at a pivot (beyond the end of the data), then the
 * pivot is actually @mas->max.
 *
 * Return: the logical pivot of a given @offset.
 */
static inline unsigned long
mas_logical_pivot(struct ma_state *mas, unsigned long *pivots,
		  unsigned char offset, enum maple_type type)
{
	unsigned long lpiv = mas_safe_pivot(mas, pivots, offset, type);

	if (likely(lpiv))
		return lpiv;

	if (likely(offset))
		return mas->max;

	return lpiv;
}

/*
 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
 * @mn: The encoded maple node
 * @piv: The pivot offset
 * @val: The value of the pivot
 */
static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
				 unsigned long val)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	BUG_ON(piv >= mt_pivots[type]);
	switch (type) {
	default:
	case maple_range_64:
	case maple_leaf_64:
		node->mr64.pivot[piv] = val;
		break;
	case maple_arange_64:
		node->ma64.pivot[piv] = val;
		break;
	}
}

/*
 * ma_slots() - Get a pointer to the maple node slots.
 * @mn: The maple node
 * @mt: The maple node type
 *
 * Return: A pointer to the maple node slots
 */
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
	switch (mt) {
	default:
	case maple_arange_64:
		return mn->ma64.slot;
	case maple_range_64:
	case maple_leaf_64:
		return mn->mr64.slot;
	case maple_dense:
		return mn->slot;
	}
}

static inline bool mt_locked(const struct maple_tree *mt)
{
	return mt_external_lock(mt) ? mt_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
}

static inline void *mt_slot(const struct maple_tree *mt,
		void __rcu **slots, unsigned char offset)
{
	return rcu_dereference_check(slots[offset], mt_locked(mt));
}

/*
 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset.
 */
static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
				    unsigned char offset)
{
	return rcu_dereference_protected(slots[offset], mt_locked(mas->tree));
}

/*
 * mas_slot() - Get the slot value when not holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset
 */
static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
			     unsigned char offset)
{
	return mt_slot(mas->tree, slots, offset);
}

/*
 * mas_root() - Get the maple tree root.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root(struct ma_state *mas)
{
	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}

static inline void *mt_root_locked(struct maple_tree *mt)
{
	return rcu_dereference_protected(mt->ma_root, mt_locked(mt));
}

/*
 * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root_locked(struct ma_state *mas)
{
	return mt_root_locked(mas->tree);
}

static inline struct maple_metadata *ma_meta(struct maple_node *mn,
					     enum maple_type mt)
{
	switch (mt) {
	case maple_arange_64:
		return &mn->ma64.meta;
	default:
		return &mn->mr64.meta;
	}
}

/*
 * ma_set_meta() - Set the metadata information of a node.
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The offset of the highest sub-gap in this node.
 * @end: The end of the data in this node.
 */
static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
			       unsigned char offset, unsigned char end)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
	meta->end = end;
}

/*
 * mas_clear_meta() - clear the metadata information of a node, if it exists
 * @mas: The maple state
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline void mas_clear_meta(struct ma_state *mas, struct maple_node *mn,
				  enum maple_type mt)
{
	struct maple_metadata *meta;
	unsigned long *pivots;
	void __rcu **slots;
	void *next;

	switch (mt) {
	case maple_range_64:
		pivots = mn->mr64.pivot;
		if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
			slots = mn->mr64.slot;
			next = mas_slot_locked(mas, slots,
					       MAPLE_RANGE64_SLOTS - 1);
			if (unlikely((mte_to_node(next) && mte_node_type(next))))
				return; /* The last slot is a node, no metadata */
		}
		fallthrough;
	case maple_arange_64:
		meta = ma_meta(mn, mt);
		break;
	default:
		return;
	}

	meta->gap = 0;
	meta->end = 0;
}

/*
 * ma_meta_end() - Get the data end of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_end(struct maple_node *mn,
					enum maple_type mt)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	return meta->end;
}

/*
 * ma_meta_gap() - Get the largest gap location of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_gap(struct maple_node *mn,
					enum maple_type mt)
{
	BUG_ON(mt != maple_arange_64);

	return mn->ma64.meta.gap;
}

/*
 * ma_set_meta_gap() - Set the largest gap location in a node's metadata
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The location of the largest gap.
 */
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
				   unsigned char offset)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
}

/*
 * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
 * @mat: the ma_topiary, a linked list of dead nodes.
 * @dead_enode: the node to be marked as dead and added to the tail of the list
 *
 * Add the @dead_enode to the linked list in @mat.
 */
static inline void mat_add(struct ma_topiary *mat,
			   struct maple_enode *dead_enode)
{
	mte_set_node_dead(dead_enode);
	mte_to_mat(dead_enode)->next = NULL;
	if (!mat->tail) {
		mat->tail = mat->head = dead_enode;
		return;
	}

	mte_to_mat(mat->tail)->next = dead_enode;
	mat->tail = dead_enode;
}

static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
static inline void mas_free(struct ma_state *mas, struct maple_enode *used);

/*
 * mas_mat_free() - Free all nodes in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 *
 * Free walk a dead list.
 */
static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mas_free(mas, mat->head);
		mat->head = next;
	}
}

/*
 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 *
 * Destroy walk a dead list.
 */
static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mte_destroy_walk(mat->head, mat->mtree);
		mat->head = next;
	}
}

/*
 * mas_descend() - Descend into the slot stored in the ma_state.
 * @mas: the maple state.
 *
 * Note: Not RCU safe, only use in write side or debug code.
 */
static inline void mas_descend(struct ma_state *mas)
{
	enum maple_type type;
	unsigned long *pivots;
	struct maple_node *node;
	void __rcu **slots;

	node = mas_mn(mas);
	type = mte_node_type(mas->node);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);

	if (mas->offset)
		mas->min = pivots[mas->offset - 1] + 1;
	mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
	mas->node = mas_slot(mas, slots, mas->offset);
}

/*
 * mte_set_gap() - Set a maple node gap.
 * @mn: The encoded maple node
 * @gap: The offset of the gap to set
 * @val: The gap value
 */
static inline void mte_set_gap(const struct maple_enode *mn,
			       unsigned char gap, unsigned long val)
{
	switch (mte_node_type(mn)) {
	default:
		break;
	case maple_arange_64:
		mte_to_node(mn)->ma64.gap[gap] = val;
		break;
	}
}

/*
 * mas_ascend() - Walk up a level of the tree.
 * @mas: The maple state
 *
 * Sets the @mas->max and @mas->min to the correct values when walking up.  This
 * may cause several levels of walking up to find the correct min and max.
 * May find a dead node which will cause a premature return.
 * Return: 1 on dead node, 0 otherwise
 */
static int mas_ascend(struct ma_state *mas)
{
	struct maple_enode *p_enode; /* parent enode. */
	struct maple_enode *a_enode; /* ancestor enode. */
	struct maple_node *a_node; /* ancestor node. */
	struct maple_node *p_node; /* parent node. */
	unsigned char a_slot;
	enum maple_type a_type;
	unsigned long min, max;
	unsigned long *pivots;
	unsigned char offset;
	bool set_max = false, set_min = false;

	a_node = mas_mn(mas);
	if (ma_is_root(a_node)) {
		mas->offset = 0;
		return 0;
	}

	p_node = mte_parent(mas->node);
	if (unlikely(a_node == p_node))
		return 1;
	a_type = mas_parent_enum(mas, mas->node);
	offset = mte_parent_slot(mas->node);
	a_enode = mt_mk_node(p_node, a_type);

	/* Check to make sure all parent information is still accurate */
	if (p_node != mte_parent(mas->node))
		return 1;

	mas->node = a_enode;
	mas->offset = offset;

	if (mte_is_root(a_enode)) {
		mas->max = ULONG_MAX;
		mas->min = 0;
		return 0;
	}

	min = 0;
	max = ULONG_MAX;
	do {
		p_enode = a_enode;
		a_type = mas_parent_enum(mas, p_enode);
		a_node = mte_parent(p_enode);
		a_slot = mte_parent_slot(p_enode);
		a_enode = mt_mk_node(a_node, a_type);
		pivots = ma_pivots(a_node, a_type);

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (!set_min && a_slot) {
			set_min = true;
			min = pivots[a_slot - 1] + 1;
		}

		if (!set_max && a_slot < mt_pivots[a_type]) {
			set_max = true;
			max = pivots[a_slot];
		}

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (unlikely(ma_is_root(a_node)))
			break;

	} while (!set_min || !set_max);

	mas->max = max;
	mas->min = min;
	return 0;
}

/*
 * mas_pop_node() - Get a previously allocated maple node from the maple state.
 * @mas: The maple state
 *
 * Return: A pointer to a maple node.
 */
static inline struct maple_node *mas_pop_node(struct ma_state *mas)
{
	struct maple_alloc *ret, *node = mas->alloc;
	unsigned long total = mas_allocated(mas);
	unsigned int req = mas_alloc_req(mas);

	/* nothing or a request pending. */
	if (WARN_ON(!total))
		return NULL;

	if (total == 1) {
		/* single allocation in this ma_state */
		mas->alloc = NULL;
		ret = node;
		goto single_node;
	}

	if (node->node_count == 1) {
		/* Single allocation in this node. */
		mas->alloc = node->slot[0];
		mas->alloc->total = node->total - 1;
		ret = node;
		goto new_head;
	}

	node->total--;
	ret = node->slot[--node->node_count];
	node->slot[node->node_count] = NULL;

single_node:
new_head:
	if (req) {
		req++;
		mas_set_alloc_req(mas, req);
	}

	memset(ret, 0, sizeof(*ret));
	return (struct maple_node *)ret;
}

/*
 * mas_push_node() - Push a node back on the maple state allocation.
 * @mas: The maple state
 * @used: The used maple node
 *
 * Stores the maple node back into @mas->alloc for reuse.  Updates allocated and
 * requested node count as necessary.
 */
static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
{
	struct maple_alloc *reuse = (struct maple_alloc *)used;
	struct maple_alloc *head = mas->alloc;
	unsigned long count;
	unsigned int requested = mas_alloc_req(mas);

	count = mas_allocated(mas);

	reuse->request_count = 0;
	reuse->node_count = 0;
	if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
		head->slot[head->node_count++] = reuse;
		head->total++;
		goto done;
	}

	reuse->total = 1;
	if ((head) && !((unsigned long)head & 0x1)) {
		reuse->slot[0] = head;
		reuse->node_count = 1;
		reuse->total += head->total;
	}

	mas->alloc = reuse;
done:
	if (requested > 1)
		mas_set_alloc_req(mas, requested - 1);
}

/*
 * mas_alloc_nodes() - Allocate nodes into a maple state
 * @mas: The maple state
 * @gfp: The GFP Flags
 */
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
{
	struct maple_alloc *node;
	unsigned long allocated = mas_allocated(mas);
	unsigned int requested = mas_alloc_req(mas);
	unsigned int count;
	void **slots = NULL;
	unsigned int max_req = 0;

	if (!requested)
		return;

	mas_set_alloc_req(mas, 0);
	if (mas->mas_flags & MA_STATE_PREALLOC) {
		if (allocated)
			return;
		WARN_ON(!allocated);
	}

	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
		node = (struct maple_alloc *)mt_alloc_one(gfp);
		if (!node)
			goto nomem_one;

		if (allocated) {
			node->slot[0] = mas->alloc;
			node->node_count = 1;
		} else {
			node->node_count = 0;
		}

		mas->alloc = node;
		node->total = ++allocated;
		requested--;
	}

	node = mas->alloc;
	node->request_count = 0;
	while (requested) {
		max_req = MAPLE_ALLOC_SLOTS;
		if (node->node_count) {
			unsigned int offset = node->node_count;

			slots = (void **)&node->slot[offset];
			max_req -= offset;
		} else {
			slots = (void **)&node->slot;
		}

		max_req = min(requested, max_req);
		count = mt_alloc_bulk(gfp, max_req, slots);
		if (!count)
			goto nomem_bulk;

		node->node_count += count;
		allocated += count;
		node = node->slot[0];
		node->node_count = 0;
		node->request_count = 0;
		requested -= count;
	}
	mas->alloc->total = allocated;
	return;

nomem_bulk:
	/* Clean up potential freed allocations on bulk failure */
	memset(slots, 0, max_req * sizeof(unsigned long));
nomem_one:
	mas_set_alloc_req(mas, requested);
	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
		mas->alloc->total = allocated;
	mas_set_err(mas, -ENOMEM);
}

/*
 * mas_free() - Free an encoded maple node
 * @mas: The maple state
 * @used: The encoded maple node to free.
 *
 * Uses rcu free if necessary, pushes @used back on the maple state allocations
 * otherwise.
 */
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
{
	struct maple_node *tmp = mte_to_node(used);

	if (mt_in_rcu(mas->tree))
		ma_free_rcu(tmp);
	else
		mas_push_node(mas, tmp);
}

/*
 * mas_node_count_gfp() - Check if enough nodes are allocated and request more
 * if there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 * @gfp: the gfp flags
 */
static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
{
	unsigned long allocated = mas_allocated(mas);

	if (allocated < count) {
		mas_set_alloc_req(mas, count - allocated);
		mas_alloc_nodes(mas, gfp);
	}
}

/*
 * mas_node_count() - Check if enough nodes are allocated and request more
 * if there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 *
 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
 */
static void mas_node_count(struct ma_state *mas, int count)
{
	return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
}

/*
 * mas_start() - Sets up maple state for operations.
 * @mas: The maple state.
 *
 * If mas->node == MAS_START, then set the min, max and depth to
 * defaults.
 *
 * Return:
 * - If mas->node is an error or not MAS_START, return NULL.
 * - If it's an empty tree:     NULL & mas->node == MAS_NONE
 * - If it's a single entry:    The entry & mas->node == MAS_ROOT
 * - If it's a tree:            NULL & mas->node == safe root node.
 */
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
	if (likely(mas_is_start(mas))) {
		struct maple_enode *root;

		mas->min = 0;
		mas->max = ULONG_MAX;

retry:
		mas->depth = 0;
		root = mas_root(mas);
		/* Tree with nodes */
		if (likely(xa_is_node(root))) {
			mas->depth = 1;
			mas->node = mte_safe_root(root);
			mas->offset = 0;
			if (mte_dead_node(mas->node))
				goto retry;

			return NULL;
		}

		/* empty tree */
		if (unlikely(!root)) {
			mas->node = MAS_NONE;
			mas->offset = MAPLE_NODE_SLOTS;
			return NULL;
		}

		/* Single entry tree */
		mas->node = MAS_ROOT;
		mas->offset = MAPLE_NODE_SLOTS;

		if (mas->index > 0)
			return NULL;

		return root;
	}

	return NULL;
}

/*
 * ma_data_end() - Find the end of the data in a node.
 * @node: The maple node
 * @type: The maple node type
 * @pivots: The array of pivots in the node
 * @max: The maximum value in the node
 *
 * Uses metadata to find the end of the data when possible.
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char ma_data_end(struct maple_node *node,
					enum maple_type type,
					unsigned long *pivots,
					unsigned long max)
{
	unsigned char offset;

	if (!pivots)
		return 0;

	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_data_end() - Find the end of the data (slot).
 * @mas: the maple state
 *
 * This method is optimized to check the metadata of a node if the node type
 * supports data end metadata.
 *
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char mas_data_end(struct ma_state *mas)
{
	enum maple_type type;
	struct maple_node *node;
	unsigned char offset;
	unsigned long *pivots;

	type = mte_node_type(mas->node);
	node = mas_mn(mas);
	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	pivots = ma_pivots(node, type);
	if (unlikely(ma_dead_node(node)))
		return 0;

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == mas->max))
		return offset;

	return mt_pivots[type];
}
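
/*
 * A sketch of the end detection above, assuming a 15-pivot range64 node: if
 * the last pivot is zero, the node is not full and the end is recorded in
 * the metadata; if the last pivot equals the node maximum, the data ends
 * exactly at that offset; otherwise every slot is in use and the end is the
 * slot past the last pivot.
 */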

/*
 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
 * @mas: the maple state
 *
 * Return: The maximum gap in the leaf.
 */
static unsigned long mas_leaf_max_gap(struct ma_state *mas)
{
	enum maple_type mt;
	unsigned long pstart, gap, max_gap;
	struct maple_node *mn;
	unsigned long *pivots;
	void __rcu **slots;
	unsigned char i;
	unsigned char max_piv;

	mt = mte_node_type(mas->node);
	mn = mas_mn(mas);
	slots = ma_slots(mn, mt);
	max_gap = 0;
	if (unlikely(ma_is_dense(mt))) {
		gap = 0;
		for (i = 0; i < mt_slots[mt]; i++) {
			if (slots[i]) {
				if (gap > max_gap)
					max_gap = gap;
				gap = 0;
			} else {
				gap++;
			}
		}
		if (gap > max_gap)
			max_gap = gap;
		return max_gap;
	}

	/*
	 * Checking the first implied pivot optimizes the loop below, and
	 * slot 1 may be skipped if there is a gap in slot 0.
	 */
	pivots = ma_pivots(mn, mt);
	if (likely(!slots[0])) {
		max_gap = pivots[0] - mas->min + 1;
		i = 2;
	} else {
		i = 1;
	}

	/* reduce max_piv as the special case is checked before the loop */
	max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
	/*
	 * Check end implied pivot which can only be a gap on the right most
	 * node.
	 */
	if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
		gap = ULONG_MAX - pivots[max_piv];
		if (gap > max_gap)
			max_gap = gap;
	}

	for (; i <= max_piv; i++) {
		/* data == no gap. */
		if (likely(slots[i]))
			continue;

		pstart = pivots[i - 1];
		gap = pivots[i] - pstart;
		if (gap > max_gap)
			max_gap = gap;

		/* There cannot be two gaps in a row. */
		i++;
	}
	return max_gap;
}
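
/*
 * A worked example for the leaf scan above (invented values): with
 * mas->min == 0, pivots {9, 19, 39} and slots {entryA, NULL, entryB}, the
 * NULL in slot 1 represents the unused range [10, 19], so the gap found is
 * pivots[1] - pivots[0] == 10.
 */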

/*
 * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
 * @node: The maple node
 * @gaps: The pointer to the gaps
 * @mt: The maple node type
 * @off: Pointer to store the offset location of the gap.
 *
 * Uses the metadata data end to scan backwards across set gaps.
 *
 * Return: The maximum gap value
 */
static inline unsigned long
ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
	   unsigned char *off)
{
	unsigned char offset, i;
	unsigned long max_gap = 0;

	i = offset = ma_meta_end(node, mt);
	do {
		if (gaps[i] > max_gap) {
			max_gap = gaps[i];
			offset = i;
		}
	} while (i--);

	*off = offset;
	return max_gap;
}

/*
 * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
 * @mas: The maple state.
 *
 * If the metadata gap is set to MAPLE_ARANGE64_META_MAX, there is no gap.
 *
 * Return: The gap value.
 */
static inline unsigned long mas_max_gap(struct ma_state *mas)
{
	unsigned long *gaps;
	unsigned char offset;
	enum maple_type mt;
	struct maple_node *node;

	mt = mte_node_type(mas->node);
	if (ma_is_leaf(mt))
		return mas_leaf_max_gap(mas);

	node = mas_mn(mas);
	offset = ma_meta_gap(node, mt);
	if (offset == MAPLE_ARANGE64_META_MAX)
		return 0;

	gaps = ma_gaps(node, mt);
	return gaps[offset];
}

/*
 * mas_parent_gap() - Set the parent gap and any gaps above, as needed
 * @mas: The maple state
 * @offset: The gap offset in the parent to set
 * @new: The new gap value.
 *
 * Set the parent gap then continue to set the gap upwards, using the metadata
 * of the parent to see if it is necessary to check the node above.
 */
static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
				  unsigned long new)
{
	unsigned long meta_gap = 0;
	struct maple_node *pnode;
	struct maple_enode *penode;
	unsigned long *pgaps;
	unsigned char meta_offset;
	enum maple_type pmt;

	pnode = mte_parent(mas->node);
	pmt = mas_parent_enum(mas, mas->node);
	penode = mt_mk_node(pnode, pmt);
	pgaps = ma_gaps(pnode, pmt);

ascend:
	meta_offset = ma_meta_gap(pnode, pmt);
	if (meta_offset == MAPLE_ARANGE64_META_MAX)
		meta_gap = 0;
	else
		meta_gap = pgaps[meta_offset];

	pgaps[offset] = new;

	if (meta_gap == new)
		return;

	if (offset != meta_offset) {
		if (meta_gap > new)
			return;

		ma_set_meta_gap(pnode, pmt, offset);
	} else if (new < meta_gap) {
		new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
		ma_set_meta_gap(pnode, pmt, meta_offset);
	}

	if (ma_is_root(pnode))
		return;

	/* Go to the parent node. */
	pnode = mte_parent(penode);
	pmt = mas_parent_enum(mas, penode);
	pgaps = ma_gaps(pnode, pmt);
	offset = mte_parent_slot(penode);
	penode = mt_mk_node(pnode, pmt);
	goto ascend;
}

/*
 * mas_update_gap() - Update a node's gaps and propagate up if necessary.
 * @mas: the maple state.
 */
static inline void mas_update_gap(struct ma_state *mas)
{
	unsigned char pslot;
	unsigned long p_gap;
	unsigned long max_gap;

	if (!mt_is_alloc(mas->tree))
		return;

	if (mte_is_root(mas->node))
		return;

	max_gap = mas_max_gap(mas);

	pslot = mte_parent_slot(mas->node);
	p_gap = ma_gaps(mte_parent(mas->node),
			mas_parent_enum(mas, mas->node))[pslot];

	if (p_gap != max_gap)
		mas_parent_gap(mas, pslot, max_gap);
}

/*
 * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
 * @parent with the slot encoded.
 * @mas: the maple state (for the tree)
 * @parent: the maple encoded node containing the children.
 */
static inline void mas_adopt_children(struct ma_state *mas,
				      struct maple_enode *parent)
{
	enum maple_type type = mte_node_type(parent);
	struct maple_node *node = mas_mn(mas);
	void __rcu **slots = ma_slots(node, type);
	unsigned long *pivots = ma_pivots(node, type);
	struct maple_enode *child;
	unsigned char offset;

	offset = ma_data_end(node, type, pivots, mas->max);
	do {
		child = mas_slot_locked(mas, slots, offset);
		mte_set_parent(child, parent, offset);
	} while (offset--);
}

/*
 * mas_replace() - Replace a maple node in the tree with mas->node.  Uses the
 * parent encoding to locate the maple node in the tree.
 * @mas: the ma_state to use for operations.
 * @advanced: boolean to adopt the child nodes and free the old node (false) or
 * leave the node (true) and handle the adoption and free elsewhere.
 */
static inline void mas_replace(struct ma_state *mas, bool advanced)
	__must_hold(mas->tree->lock)
{
	struct maple_node *mn = mas_mn(mas);
	struct maple_enode *old_enode;
	unsigned char offset = 0;
	void __rcu **slots = NULL;

	if (ma_is_root(mn)) {
		old_enode = mas_root_locked(mas);
	} else {
		offset = mte_parent_slot(mas->node);
		slots = ma_slots(mte_parent(mas->node),
				 mas_parent_enum(mas, mas->node));
		old_enode = mas_slot_locked(mas, slots, offset);
	}

	if (!advanced && !mte_is_leaf(mas->node))
		mas_adopt_children(mas, mas->node);

	if (mte_is_root(mas->node)) {
		mn->parent = ma_parent_ptr(
			((unsigned long)mas->tree | MA_ROOT_PARENT));
		rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
		mas_set_height(mas);
	} else {
		rcu_assign_pointer(slots[offset], mas->node);
	}

	if (!advanced) {
		mte_set_node_dead(old_enode);
		mas_free(mas, old_enode);
	}
}

/*
 * mas_new_child() - Find the new child of a node.
 * @mas: the maple state
 * @child: the maple state to store the child.
 */
static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
	__must_hold(mas->tree->lock)
{
	enum maple_type mt;
	unsigned char offset;
	unsigned char end;
	unsigned long *pivots;
	struct maple_enode *entry;
	struct maple_node *node;
	void __rcu **slots;

	mt = mte_node_type(mas->node);
	node = mas_mn(mas);
	slots = ma_slots(node, mt);
	pivots = ma_pivots(node, mt);
	end = ma_data_end(node, mt, pivots, mas->max);
	for (offset = mas->offset; offset <= end; offset++) {
		entry = mas_slot_locked(mas, slots, offset);
		if (mte_parent(entry) == node) {
			*child = *mas;
			mas->offset = offset + 1;
			child->offset = offset;
			mas_descend(child);
			child->offset = 0;
			return true;
		}
	}
	return false;
}

/*
 * mab_shift_right() - Shift the data in mab right.  Note, does not clean out
 * the old data or set b_node->b_end.
 * @b_node: the maple_big_node
 * @shift: the shift count
 */
static inline void mab_shift_right(struct maple_big_node *b_node,
				   unsigned char shift)
{
	unsigned long size = b_node->b_end * sizeof(unsigned long);

	memmove(b_node->pivot + shift, b_node->pivot, size);
	memmove(b_node->slot + shift, b_node->slot, size);
	if (b_node->type == maple_arange_64)
		memmove(b_node->gap + shift, b_node->gap, size);
}

/*
 * mab_middle_node() - Check if a middle node is needed (unlikely)
 * @b_node: the maple_big_node that contains the data.
 * @split: the potential split location
 * @slot_count: the size that can be stored in a single node being considered.
 *
 * Return: true if a middle node is required.
 */
static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
				   unsigned char slot_count)
{
	unsigned char size = b_node->b_end;

	if (size >= 2 * slot_count)
		return true;

	if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
		return true;

	return false;
}

/*
 * mab_no_null_split() - ensure the split doesn't fall on a NULL
 * @b_node: the maple_big_node with the data
 * @split: the suggested split location
 * @slot_count: the number of slots in the node being considered.
 *
 * Return: the split location.
 */
static inline int mab_no_null_split(struct maple_big_node *b_node,
				    unsigned char split, unsigned char slot_count)
{
	if (!b_node->slot[split]) {
		/*
		 * If the split is less than the max slot && the right side will
		 * still be sufficient, then increment the split on NULL.
		 */
		if ((split < slot_count - 1) &&
		    (b_node->b_end - split) > (mt_min_slots[b_node->type]))
			split++;
		else
			split--;
	}
	return split;
}

/*
 * mab_calc_split() - Calculate the split location and if there needs to be two
 * splits.
 * @mas: The maple state
 * @bn: The maple_big_node with the data
 * @mid_split: The second split, if required.  0 otherwise.
 * @min: The minimum range of the subtree
 *
 * Return: The first split location.  The middle split is set in @mid_split.
 */
static inline int mab_calc_split(struct ma_state *mas,
	 struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
{
	unsigned char b_end = bn->b_end;
	int split = b_end / 2; /* Assume equal split. */
	unsigned char slot_min, slot_count = mt_slots[bn->type];

	/*
	 * To support gap tracking, all NULL entries are kept together and a node cannot
	 * end on a NULL entry, with the exception of the left-most leaf.  The
	 * limitation means that the split of a node must be checked for this condition
	 * and be able to put more data in one direction or the other.
	 */
	if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
		*mid_split = 0;
		split = b_end - mt_min_slots[bn->type];

		if (!ma_is_leaf(bn->type))
			return split;

		mas->mas_flags |= MA_STATE_REBALANCE;
		if (!bn->slot[split])
			split--;
		return split;
	}

	/*
	 * Although extremely rare, it is possible to enter what is known as the 3-way
	 * split scenario.  The 3-way split comes about by means of a store of a range
	 * that overwrites the end and beginning of two full nodes.  The result is a set
	 * of entries that cannot be stored in 2 nodes.  Sometimes, these two nodes can
	 * also be located in different parent nodes which are also full.  This can
	 * carry upwards all the way to the root in the worst case.
	 */
	if (unlikely(mab_middle_node(bn, split, slot_count))) {
		split = b_end / 3;
		*mid_split = split * 2;
	} else {
		slot_min = mt_min_slots[bn->type];

		*mid_split = 0;
		/*
		 * Avoid having a range less than the slot count unless it
		 * causes one node to be deficient.
		 * NOTE: mt_min_slots is 1 based, b_end and split are zero.
		 */
		while (((bn->pivot[split] - min) < slot_count - 1) &&
		       (split < slot_count - 1) && (b_end - split > slot_min))
			split++;
	}

	/* Avoid ending a node on a NULL entry */
	split = mab_no_null_split(bn, split, slot_count);

	if (unlikely(*mid_split))
		*mid_split = mab_no_null_split(bn, *mid_split, slot_count);

	return split;
}
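
/*
 * A worked example of the 3-way split above (illustrative numbers, assuming
 * 16-slot range64 nodes): a big node holding b_end == 32 entries cannot fit
 * in two 16-slot nodes, so mab_middle_node() returns true and the data is
 * cut at split == 32 / 3 == 10 and mid_split == 20, yielding three nodes of
 * roughly a third each.
 */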

/*
 * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
 * and set @b_node->b_end to the next free slot.
 * @mas: The maple state
 * @mas_start: The starting slot to copy
 * @mas_end: The end slot to copy (inclusively)
 * @b_node: The maple_big_node to place the data
 * @mab_start: The starting location in maple_big_node to store the data.
 */
static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
			unsigned char mas_end, struct maple_big_node *b_node,
			unsigned char mab_start)
{
	enum maple_type mt;
	struct maple_node *node;
	void __rcu **slots;
	unsigned long *pivots, *gaps;
	int i = mas_start, j = mab_start;
	unsigned char piv_end;

	node = mas_mn(mas);
	mt = mte_node_type(mas->node);
	pivots = ma_pivots(node, mt);
	if (!i) {
		b_node->pivot[j] = pivots[i++];
		if (unlikely(i > mas_end))
			goto complete;
		j++;
	}

	piv_end = min(mas_end, mt_pivots[mt]);
	for (; i < piv_end; i++, j++) {
		b_node->pivot[j] = pivots[i];
		if (unlikely(!b_node->pivot[j]))
			break;

		if (unlikely(mas->max == b_node->pivot[j]))
			goto complete;
	}

	if (likely(i <= mas_end))
		b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);

complete:
	b_node->b_end = ++j;
	j -= mab_start;
	slots = ma_slots(node, mt);
	memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
	if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
		gaps = ma_gaps(node, mt);
		memcpy(b_node->gap + mab_start, gaps + mas_start,
		       sizeof(unsigned long) * j);
	}
}

/*
 * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
 * @mas: The maple state
 * @node: The maple node
 * @pivots: pointer to the maple node pivots
 * @mt: The maple type
 * @end: The assumed end
 *
 * Note, end may be incremented within this function but not modified at the
 * source.  This is fine since the metadata is the last thing to be stored in a
 * node during a write.
 */
static inline void mas_leaf_set_meta(struct ma_state *mas,
		struct maple_node *node, unsigned long *pivots,
		enum maple_type mt, unsigned char end)
{
	/* There is no room for metadata already */
	if (mt_pivots[mt] <= end)
		return;

	if (pivots[end] && pivots[end] < mas->max)
		end++;

	if (end < mt_slots[mt] - 1)
		ma_set_meta(node, mt, 0, end);
}

/*
 * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
 * @b_node: the maple_big_node that has the data
 * @mab_start: the start location in @b_node.
 * @mab_end: The end location in @b_node (inclusively)
 * @mas: The maple state with the maple encoded node.
 * @new_max: Update @mas->max to the last copied pivot.
 */
static inline void mab_mas_cp(struct maple_big_node *b_node,
			      unsigned char mab_start, unsigned char mab_end,
			      struct ma_state *mas, bool new_max)
{
	int i, j = 0;
	enum maple_type mt = mte_node_type(mas->node);
	struct maple_node *node = mte_to_node(mas->node);
	void __rcu **slots = ma_slots(node, mt);
	unsigned long *pivots = ma_pivots(node, mt);
	unsigned long *gaps = NULL;
	unsigned char end;

	if (mab_end - mab_start > mt_pivots[mt])
		mab_end--;

	if (!pivots[mt_pivots[mt] - 1])
		slots[mt_pivots[mt]] = NULL;

	i = mab_start;
	do {
		pivots[j++] = b_node->pivot[i++];
	} while (i <= mab_end && likely(b_node->pivot[i]));

	memcpy(slots, b_node->slot + mab_start,
	       sizeof(void *) * (i - mab_start));

	if (new_max)
		mas->max = b_node->pivot[i - 1];

	end = j - 1;
	if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
		unsigned long max_gap = 0;
		unsigned char offset = 15;

		gaps = ma_gaps(node, mt);
		do {
			gaps[--j] = b_node->gap[--i];
			if (gaps[j] > max_gap) {
				offset = j;
				max_gap = gaps[j];
			}
		} while (j);

		ma_set_meta(node, mt, offset, end);
	} else {
		mas_leaf_set_meta(mas, node, pivots, mt, end);
	}
}

/*
 * mas_descend_adopt() - Descend through a sub-tree and adopt children.
 * @mas: the maple state with the maple encoded node of the sub-tree.
 *
 * Descend through a sub-tree and adopt children who do not have the correct
 * parents set.  Follow the parents which have the correct parents as they are
 * the new entries which need to be followed to find other incorrectly set
 * parents.
 */
static inline void mas_descend_adopt(struct ma_state *mas)
{
	struct ma_state list[3], next[3];
	int i, n;

	/*
	 * At each level there may be up to 3 correct parent pointers which indicates
	 * the new nodes which need to be walked to find any new nodes at a lower level.
	 */

	for (i = 0; i < 3; i++) {
		list[i] = *mas;
		list[i].offset = 0;
		next[i].offset = 0;
	}
	next[0] = *mas;

	while (!mte_is_leaf(list[0].node)) {
		n = 0;
		for (i = 0; i < 3; i++) {
			if (mas_is_none(&list[i]))
				continue;

			if (i && list[i-1].node == list[i].node)
				continue;

			while ((n < 3) && (mas_new_child(&list[i], &next[n])))
				n++;

			mas_adopt_children(&list[i], list[i].node);
		}

		while (n < 3)
			next[n++].node = MAS_NONE;

		/* descend by setting the list to the children */
		for (i = 0; i < 3; i++)
			list[i] = next[i];
	}
}

/*
 * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
 * @mas: The maple state
 * @end: The maple node end
 * @mt: The maple node type
 */
static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
				      enum maple_type mt)
{
	if (!(mas->mas_flags & MA_STATE_BULK))
		return;

	if (mte_is_root(mas->node))
		return;

	if (end > mt_min_slots[mt]) {
		mas->mas_flags &= ~MA_STATE_REBALANCE;
		return;
	}
}

/*
 * mas_store_b_node() - Store an @entry into the b_node while also copying the
 * data from a maple encoded node.
 * @wr_mas: the maple write state
 * @b_node: the maple_big_node to fill with data
 * @offset_end: the offset to end copying
 *
 * Sets @b_node->b_end to the actual end of the data stored in @b_node.
 */
static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
		struct maple_big_node *b_node, unsigned char offset_end)
{
	unsigned char slot;
	unsigned char b_end;
	/* Possible underflow of piv will wrap back to 0 before use. */
	unsigned long piv;
	struct ma_state *mas = wr_mas->mas;

	b_node->type = wr_mas->type;
	b_end = 0;
	slot = mas->offset;
	if (slot) {
		/* Copy start data up to insert. */
		mas_mab_cp(mas, 0, slot - 1, b_node, 0);
		b_end = b_node->b_end;
		piv = b_node->pivot[b_end - 1];
	} else {
		piv = mas->min - 1;
	}

	if (piv + 1 < mas->index) {
		/* Handle range starting after old range */
		b_node->slot[b_end] = wr_mas->content;
		if (!wr_mas->content)
			b_node->gap[b_end] = mas->index - 1 - piv;
		b_node->pivot[b_end++] = mas->index - 1;
	}

	/* Store the new entry. */
	mas->offset = b_end;
	b_node->slot[b_end] = wr_mas->entry;
	b_node->pivot[b_end] = mas->last;

	/* Appended. */
	if (mas->last >= mas->max)
		goto b_end;

	/* Handle new range ending before old range ends */
	piv = mas_logical_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
	if (piv > mas->last) {
		if (piv == ULONG_MAX)
			mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);

		if (offset_end != slot)
			wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
							  offset_end);

		b_node->slot[++b_end] = wr_mas->content;
		if (!wr_mas->content)
			b_node->gap[b_end] = piv - mas->last + 1;
		b_node->pivot[b_end] = piv;
	}

	slot = offset_end + 1;
	if (slot > wr_mas->node_end)
		goto b_end;

	/* Copy end data to the end of the node. */
	mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
	return;

b_end:
	b_node->b_end = b_end;
}

/*
 * mas_prev_sibling() - Find the previous node with the same parent.
 * @mas: the maple state
 *
 * Return: True if there is a previous sibling, false otherwise.
 */
static inline bool mas_prev_sibling(struct ma_state *mas)
{
	unsigned int p_slot = mte_parent_slot(mas->node);

	if (mte_is_root(mas->node))
		return false;

	if (!p_slot)
		return false;

	mas_ascend(mas);
	mas->offset = p_slot - 1;
	mas_descend(mas);
	return true;
}

/*
 * mas_next_sibling() - Find the next node with the same parent.
 * @mas: the maple state
 *
 * Return: true if there is a next sibling, false otherwise.
 */
static inline bool mas_next_sibling(struct ma_state *mas)
{
	MA_STATE(parent, mas->tree, mas->index, mas->last);

	if (mte_is_root(mas->node))
		return false;

	parent = *mas;
	mas_ascend(&parent);
	parent.offset = mte_parent_slot(mas->node) + 1;
	if (parent.offset > mas_data_end(&parent))
		return false;

	*mas = parent;
	mas_descend(mas);
	return true;
}

/*
 * mte_node_or_none() - Return the encoded node or MAS_NONE.
 * @enode: The encoded maple node.
 *
 * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
 *
 * Return: @enode or MAS_NONE
 */
static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
{
	if (enode)
		return enode;

	return ma_enode_ptr(MAS_NONE);
}

/*
 * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
 * @wr_mas: The maple write state
 *
 * Uses mas_slot_locked() and does not need to worry about dead nodes.
 */
static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;
	unsigned char count;
	unsigned char offset;
	unsigned long index, min, max;

	if (unlikely(ma_is_dense(wr_mas->type))) {
		wr_mas->r_max = wr_mas->r_min = mas->index;
		mas->offset = mas->index = mas->min;
		return;
	}

	wr_mas->node = mas_mn(wr_mas->mas);
	wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
	count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
					       wr_mas->pivots, mas->max);
	offset = mas->offset;
	min = mas_safe_min(mas, wr_mas->pivots, offset);
	if (unlikely(offset == count))
		goto max;

	max = wr_mas->pivots[offset];
	index = mas->index;
	if (unlikely(index <= max))
		goto done;

	if (unlikely(!max && offset))
		goto max;

	min = max + 1;
	while (++offset < count) {
		max = wr_mas->pivots[offset];
		if (index <= max)
			goto done;
		else if (unlikely(!max))
			break;

		min = max + 1;
	}

max:
	max = mas->max;
done:
	wr_mas->r_max = max;
	wr_mas->r_min = min;
	wr_mas->offset_end = mas->offset = offset;
}

/*
 * mas_topiary_range() - Add a range of slots to the topiary.
 * @mas: The maple state
 * @destroy: The topiary to add the slots (usually destroy)
 * @start: The starting slot inclusively
 * @end: The end slot inclusively
 */
static inline void mas_topiary_range(struct ma_state *mas,
	struct ma_topiary *destroy, unsigned char start, unsigned char end)
{
	void __rcu **slots;
	unsigned char offset;

	MT_BUG_ON(mas->tree, mte_is_leaf(mas->node));
	slots = ma_slots(mas_mn(mas), mte_node_type(mas->node));
	for (offset = start; offset <= end; offset++) {
		struct maple_enode *enode = mas_slot_locked(mas, slots, offset);

		if (mte_dead_node(enode))
			continue;

		mat_add(destroy, enode);
	}
}

/*
 * mast_topiary() - Add the portions of the tree to the removal list; either to
 * be freed or discarded (destroy walk).
 * @mast: The maple_subtree_state.
 */
static inline void mast_topiary(struct maple_subtree_state *mast)
{
	MA_WR_STATE(wr_mas, mast->orig_l, NULL);
	unsigned char r_start, r_end;
	unsigned char l_start, l_end;
	void __rcu **l_slots, **r_slots;

	wr_mas.type = mte_node_type(mast->orig_l->node);
	mast->orig_l->index = mast->orig_l->last;
	mas_wr_node_walk(&wr_mas);
	l_start = mast->orig_l->offset + 1;
	l_end = mas_data_end(mast->orig_l);
	r_start = 0;
	r_end = mast->orig_r->offset;

	if (r_end)
		r_end--;

	l_slots = ma_slots(mas_mn(mast->orig_l),
			   mte_node_type(mast->orig_l->node));

	r_slots = ma_slots(mas_mn(mast->orig_r),
			   mte_node_type(mast->orig_r->node));

	if ((l_start < l_end) &&
	    mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_start))) {
		l_start++;
	}

	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_end))) {
		if (r_end)
			r_end--;
	}

	if ((l_start > r_end) && (mast->orig_l->node == mast->orig_r->node))
		return;

	/* At the node where left and right sides meet, add the parts between */
	if (mast->orig_l->node == mast->orig_r->node) {
		return mas_topiary_range(mast->orig_l, mast->destroy,
					 l_start, r_end);
	}

	/* mast->orig_r is different and consumed. */
	if (mte_is_leaf(mast->orig_r->node))
		return;

	if (mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_end)))
		l_end--;

	if (l_start <= l_end)
		mas_topiary_range(mast->orig_l, mast->destroy, l_start, l_end);

	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_start)))
		r_start++;

	if (r_start <= r_end)
		mas_topiary_range(mast->orig_r, mast->destroy, 0, r_end);
}
2448 * mast_rebalance_next() - Rebalance against the next node
2449 * @mast: The maple subtree state
2452 static inline void mast_rebalance_next(struct maple_subtree_state *mast)
2454 unsigned char b_end = mast->bn->b_end;
2456 mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
2458 mast->orig_r->last = mast->orig_r->max;
2462 * mast_rebalance_prev() - Rebalance against the previous node
2463 * @mast: The maple subtree state
2466 static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
2468 unsigned char end = mas_data_end(mast->orig_l) + 1;
2469 unsigned char b_end = mast->bn->b_end;
2471 mab_shift_right(mast->bn, end);
2472 mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
2473 mast->l->min = mast->orig_l->min;
2474 mast->orig_l->index = mast->orig_l->min;
2475 mast->bn->b_end = end + b_end;
2476 mast->l->offset += end;
2480 * mast_spanning_rebalance() - Rebalance nodes with the nearest neighbour,
2481 * favouring the node to the right.  The nodes to the right, then to the left,
2482 * are checked at each level upwards until the root is reached.  Free and destroy as needed.
2483 * Data is copied into the @mast->bn.
2484 * @mast: The maple_subtree_state.
2487 bool mast_spanning_rebalance(struct maple_subtree_state *mast)
2489 struct ma_state r_tmp = *mast->orig_r;
2490 struct ma_state l_tmp = *mast->orig_l;
2491 struct maple_enode *ancestor = NULL;
2492 unsigned char start, end;
2493 unsigned char depth = 0;
2498 mas_ascend(mast->orig_r);
2499 mas_ascend(mast->orig_l);
2502 (mast->orig_r->node == mast->orig_l->node)) {
2503 ancestor = mast->orig_r->node;
2504 end = mast->orig_r->offset - 1;
2505 start = mast->orig_l->offset + 1;
2508 if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
2510 ancestor = mast->orig_r->node;
2514 mast->orig_r->offset++;
2516 mas_descend(mast->orig_r);
2517 mast->orig_r->offset = 0;
2521 mast_rebalance_next(mast);
2523 unsigned char l_off = 0;
2524 struct maple_enode *child = r_tmp.node;
2527 if (ancestor == r_tmp.node)
2533 if (l_off < r_tmp.offset)
2534 mas_topiary_range(&r_tmp, mast->destroy,
2535 l_off, r_tmp.offset);
2537 if (l_tmp.node != child)
2538 mat_add(mast->free, child);
2540 } while (r_tmp.node != ancestor);
2542 *mast->orig_l = l_tmp;
2545 } else if (mast->orig_l->offset != 0) {
2547 ancestor = mast->orig_l->node;
2548 end = mas_data_end(mast->orig_l);
2551 mast->orig_l->offset--;
2553 mas_descend(mast->orig_l);
2554 mast->orig_l->offset =
2555 mas_data_end(mast->orig_l);
2559 mast_rebalance_prev(mast);
2561 unsigned char r_off;
2562 struct maple_enode *child = l_tmp.node;
2565 if (ancestor == l_tmp.node)
2568 r_off = mas_data_end(&l_tmp);
2570 if (l_tmp.offset < r_off)
2573 if (l_tmp.offset < r_off)
2574 mas_topiary_range(&l_tmp, mast->destroy,
2575 l_tmp.offset, r_off);
2577 if (r_tmp.node != child)
2578 mat_add(mast->free, child);
2580 } while (l_tmp.node != ancestor);
2582 *mast->orig_r = r_tmp;
2585 } while (!mte_is_root(mast->orig_r->node));
2587 *mast->orig_r = r_tmp;
2588 *mast->orig_l = l_tmp;
2593 * mast_ascend_free() - Add current original maple state nodes to the free list
2595 * @mast: the maple subtree state.
2597 * Ascend the original left and right sides and add the previous nodes to the
2598 * free list. Set the slots to point to the correct location in the new nodes.
2601 mast_ascend_free(struct maple_subtree_state *mast)
2603 MA_WR_STATE(wr_mas, mast->orig_r, NULL);
2604 struct maple_enode *left = mast->orig_l->node;
2605 struct maple_enode *right = mast->orig_r->node;
2607 mas_ascend(mast->orig_l);
2608 mas_ascend(mast->orig_r);
2609 mat_add(mast->free, left);
2612 mat_add(mast->free, right);
2614 mast->orig_r->offset = 0;
2615 mast->orig_r->index = mast->r->max;
2616 /* last should be larger than or equal to index */
2617 if (mast->orig_r->last < mast->orig_r->index)
2618 mast->orig_r->last = mast->orig_r->index;
2620 * The node may not contain the value so set slot to ensure all
2621 * of the node's contents are freed or destroyed.
2623 wr_mas.type = mte_node_type(mast->orig_r->node);
2624 mas_wr_node_walk(&wr_mas);
2625 /* Set up the left side of things */
2626 mast->orig_l->offset = 0;
2627 mast->orig_l->index = mast->l->min;
2628 wr_mas.mas = mast->orig_l;
2629 wr_mas.type = mte_node_type(mast->orig_l->node);
2630 mas_wr_node_walk(&wr_mas);
2632 mast->bn->type = wr_mas.type;
2636 * mas_new_ma_node() - Create and return a new maple node. Helper function.
2637 * @mas: the maple state with the allocations.
2638 * @b_node: the maple_big_node with the type encoding.
2640 * Use the node type from the maple_big_node to allocate a new node from the
2641 * ma_state. This function exists mainly for code readability.
2643 * Return: A new maple encoded node
2645 static inline struct maple_enode
2646 *mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
2648 return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
2652 * mas_mab_to_node() - Set up right and middle nodes
2654 * @mas: the maple state that contains the allocations.
2655 * @b_node: the node which contains the data.
2656 * @left: The pointer which will have the left node
2657 * @right: The pointer which may have the right node
2658 * @middle: the pointer which may have the middle node (rare)
2659 * @mid_split: the split location for the middle node
 * @min: the minimum value passed to the split calculation
2661 * Return: the split of left.
2663 static inline unsigned char mas_mab_to_node(struct ma_state *mas,
2664 struct maple_big_node *b_node, struct maple_enode **left,
2665 struct maple_enode **right, struct maple_enode **middle,
2666 unsigned char *mid_split, unsigned long min)
2668 unsigned char split = 0;
2669 unsigned char slot_count = mt_slots[b_node->type];
2671 *left = mas_new_ma_node(mas, b_node);
2676 if (b_node->b_end < slot_count) {
2677 split = b_node->b_end;
2679 split = mab_calc_split(mas, b_node, mid_split, min);
2680 *right = mas_new_ma_node(mas, b_node);
2684 *middle = mas_new_ma_node(mas, b_node);
2691 * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
2693 * @b_node: the big node to add the entry to
2694 * @mas: the maple state to get the pivot (mas->max)
2695 * @entry: the entry to add; if NULL nothing happens.
2697 static inline void mab_set_b_end(struct maple_big_node *b_node,
2698 struct ma_state *mas,
2704 b_node->slot[b_node->b_end] = entry;
2705 if (mt_is_alloc(mas->tree))
2706 b_node->gap[b_node->b_end] = mas_max_gap(mas);
2707 b_node->pivot[b_node->b_end++] = mas->max;
2711 * mas_set_split_parent() - combine_then_separate helper function. Sets the parent
2712 * of @mas->node to either @left or @right, depending on @slot and @split.
2714 * @mas: the maple state with the node that needs a parent
2715 * @left: possible parent 1
2716 * @right: possible parent 2
2717 * @slot: the slot where @mas->node was placed
2718 * @split: the split location between @left and @right
2720 static inline void mas_set_split_parent(struct ma_state *mas,
2721 struct maple_enode *left,
2722 struct maple_enode *right,
2723 unsigned char *slot, unsigned char split)
2725 if (mas_is_none(mas))
2728 if ((*slot) <= split)
2729 mte_set_parent(mas->node, left, *slot);
2731 mte_set_parent(mas->node, right, (*slot) - split - 1);
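
/*
 * Example (illustrative sketch, not part of the tree code) of the remap done
 * by mas_set_split_parent() above: a child at offset 'slot' stays under the
 * left parent when slot <= split, otherwise it moves under the right parent
 * with its offset rebased past the split.
 */
static inline unsigned char example_split_offset(unsigned char slot,
		unsigned char split, bool *left_parent)
{
	*left_parent = (slot <= split);

	return *left_parent ? slot : slot - split - 1;
}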
2737 * mte_mid_split_check() - Check if the next node passes the mid-split
2738 * @l: Pointer to the left encoded maple node.
2739 * @r: Pointer to the right encoded maple node.
2740 * @right: The encoded maple node to the right of the split.
 * @slot: The offset of the child being checked.
2742 * @split: The split location.
2743 * @mid_split: The middle split.
2745 static inline void mte_mid_split_check(struct maple_enode **l,
2746 struct maple_enode **r,
2747 struct maple_enode *right,
2749 unsigned char *split,
2750 unsigned char mid_split)
2755 if (slot < mid_split)
2764 * mast_set_split_parents() - Helper function to set three nodes' parents.  Slot
2765 * is taken from @mast->l.
2766 * @mast: the maple subtree state
2767 * @left: the left node
 * @middle: the middle node (may be NULL)
2768 * @right: the right node
2769 * @split: the split location
 * @mid_split: the split location between @middle and @right
2771 static inline void mast_set_split_parents(struct maple_subtree_state *mast,
2772 struct maple_enode *left,
2773 struct maple_enode *middle,
2774 struct maple_enode *right,
2775 unsigned char split,
2776 unsigned char mid_split)
2779 struct maple_enode *l = left;
2780 struct maple_enode *r = right;
2782 if (mas_is_none(mast->l))
2788 slot = mast->l->offset;
2790 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2791 mas_set_split_parent(mast->l, l, r, &slot, split);
2793 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2794 mas_set_split_parent(mast->m, l, r, &slot, split);
2796 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2797 mas_set_split_parent(mast->r, l, r, &slot, split);
2801 * mas_wmb_replace() - Write memory barrier and replace
2802 * @mas: The maple state
2803 * @free: the maple topiary list of nodes to free
2804 * @destroy: The maple topiary list of nodes to destroy (walk and free)
2806 * Updates gap as necessary.
2808 static inline void mas_wmb_replace(struct ma_state *mas,
2809 struct ma_topiary *free,
2810 struct ma_topiary *destroy)
2812 /* All nodes must see old data as dead prior to replacing that data */
2813 smp_wmb(); /* Needed for RCU */
2815 /* Insert the new data in the tree */
2816 mas_replace(mas, true);
2818 if (!mte_is_leaf(mas->node))
2819 mas_descend_adopt(mas);
2821 mas_mat_free(mas, free);
2824 mas_mat_destroy(mas, destroy);
2826 if (mte_is_leaf(mas->node))
2829 mas_update_gap(mas);
2833 * mast_new_root() - Set a new tree root during subtree creation
2834 * @mast: The maple subtree state
2835 * @mas: The maple state
2837 static inline void mast_new_root(struct maple_subtree_state *mast,
2838 struct ma_state *mas)
2840 mas_mn(mast->l)->parent =
2841 ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT));
2842 if (!mte_dead_node(mast->orig_l->node) &&
2843 !mte_is_root(mast->orig_l->node)) {
2845 mast_ascend_free(mast);
2847 } while (!mte_is_root(mast->orig_l->node));
2849 if ((mast->orig_l->node != mas->node) &&
2850 (mast->l->depth > mas_mt_height(mas))) {
2851 mat_add(mast->free, mas->node);
2856 * mast_cp_to_nodes() - Copy data out to nodes.
2857 * @mast: The maple subtree state
2858 * @left: The left encoded maple node
2859 * @middle: The middle encoded maple node
2860 * @right: The right encoded maple node
2861 * @split: The location to split between left and (middle ? middle : right)
2862 * @mid_split: The location to split between middle and right.
2864 static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
2865 struct maple_enode *left, struct maple_enode *middle,
2866 struct maple_enode *right, unsigned char split, unsigned char mid_split)
2868 bool new_lmax = true;
2870 mast->l->node = mte_node_or_none(left);
2871 mast->m->node = mte_node_or_none(middle);
2872 mast->r->node = mte_node_or_none(right);
2874 mast->l->min = mast->orig_l->min;
2875 if (split == mast->bn->b_end) {
2876 mast->l->max = mast->orig_r->max;
2880 mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);
2883 mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
2884 mast->m->min = mast->bn->pivot[split] + 1;
2888 mast->r->max = mast->orig_r->max;
2890 mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
2891 mast->r->min = mast->bn->pivot[split] + 1;
2896 * mast_combine_cp_left() - Copy in the original left side of the tree into the
2897 * combined data set in the maple subtree state big node.
2898 * @mast: The maple subtree state
2900 static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
2902 unsigned char l_slot = mast->orig_l->offset;
2907 mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
2911 * mast_combine_cp_right() - Copy in the original right side of the tree into the
2912 * combined data set in the maple subtree state big node.
2913 * @mast: The maple subtree state
2915 static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
2917 if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
2920 mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
2921 mt_slot_count(mast->orig_r->node), mast->bn,
2923 mast->orig_r->last = mast->orig_r->max;
2927 * mast_sufficient() - Check if the maple subtree state has enough data in the big
2928 * node to create at least one sufficient node
2929 * @mast: the maple subtree state
2931 static inline bool mast_sufficient(struct maple_subtree_state *mast)
2933 if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
2940 * mast_overflow() - Check if there is too much data in the subtree state for a single node.
2942 * @mast: The maple subtree state
2944 static inline bool mast_overflow(struct maple_subtree_state *mast)
2946 if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
2952 static inline void *mtree_range_walk(struct ma_state *mas)
2954 unsigned long *pivots;
2955 unsigned char offset;
2956 struct maple_node *node;
2957 struct maple_enode *next, *last;
2958 enum maple_type type;
2961 unsigned long max, min;
2962 unsigned long prev_max, prev_min;
2970 node = mte_to_node(next);
2971 type = mte_node_type(next);
2972 pivots = ma_pivots(node, type);
2973 end = ma_data_end(node, type, pivots, max);
2974 if (unlikely(ma_dead_node(node)))
2977 if (pivots[offset] >= mas->index) {
2980 max = pivots[offset];
2986 } while ((offset < end) && (pivots[offset] < mas->index));
2989 min = pivots[offset - 1] + 1;
2991 if (likely(offset < end && pivots[offset]))
2992 max = pivots[offset];
2995 slots = ma_slots(node, type);
2996 next = mt_slot(mas->tree, slots, offset);
2997 if (unlikely(ma_dead_node(node)))
2999 } while (!ma_is_leaf(type));
3001 mas->offset = offset;
3004 mas->min = prev_min;
3005 mas->max = prev_max;
3007 return (void *)next;
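
/*
 * Usage sketch (illustrative; locking is handled by the public API): the
 * range walk above resolves any index within a stored range to the same
 * entry.
 */
static inline void *example_range_walk_usage(struct maple_tree *mt)
{
	/* Store [10, 19]; any lookup within the range finds the entry. */
	mtree_store_range(mt, 10, 19, xa_mk_value(1), GFP_KERNEL);

	return mtree_load(mt, 15);	/* returns xa_mk_value(1) */
}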
3015 * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
3016 * @mas: The starting maple state
3017 * @mast: The maple_subtree_state, keeps track of 4 maple states.
3018 * @count: The estimated count of iterations needed.
3020 * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
3021 * is hit. First @b_node is split into two entries which are inserted into the
3022 * next iteration of the loop. @b_node is returned populated with the final
3023 * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the
3024 * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
3025 * to account for what has been copied into the new sub-tree. The update of
3026 * orig_l_mas->last is used in mas_consume to find the slots that will need to
3027 * be either freed or destroyed. orig_l_mas->depth keeps track of the height of
3028 * the new sub-tree in case the sub-tree becomes the full tree.
3030 * Return: the number of elements in b_node during the last loop.
3032 static int mas_spanning_rebalance(struct ma_state *mas,
3033 struct maple_subtree_state *mast, unsigned char count)
3035 unsigned char split, mid_split;
3036 unsigned char slot = 0;
3037 struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
3039 MA_STATE(l_mas, mas->tree, mas->index, mas->index);
3040 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3041 MA_STATE(m_mas, mas->tree, mas->index, mas->index);
3042 MA_TOPIARY(free, mas->tree);
3043 MA_TOPIARY(destroy, mas->tree);
3046 * The tree needs to be rebalanced and leaves need to be kept at the same level.
3047 * Rebalancing is done by use of the ``struct maple_topiary``.
3053 mast->destroy = &destroy;
3054 l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
3056 /* Check if this is not root and has sufficient data. */
3057 if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
3058 unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
3059 mast_spanning_rebalance(mast);
3061 mast->orig_l->depth = 0;
3064 * Each level of the tree is examined and balanced, pushing data to the left or
3065 * right, or rebalancing against left or right nodes is employed to avoid
3066 * rippling up the tree to limit the amount of churn. Once a new sub-section of
3067 * the tree is created, there may be a mix of new and old nodes. The old nodes
3068 * will have the incorrect parent pointers and currently be in two trees: the
3069 * original tree and the partially new tree. To remedy the parent pointers in
3070 * the old tree, the new data is swapped into the active tree and a walk down
3071 * the tree is performed and the parent pointers are updated.
3072 * See mas_descend_adopt() for more information.
3076 mast->bn->type = mte_node_type(mast->orig_l->node);
3077 split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
3078 &mid_split, mast->orig_l->min);
3079 mast_set_split_parents(mast, left, middle, right, split,
3081 mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
3084 * Copy data from next level in the tree to mast->bn from next
3087 memset(mast->bn, 0, sizeof(struct maple_big_node));
3088 mast->bn->type = mte_node_type(left);
3089 mast->orig_l->depth++;
3091 /* Root already stored in l->node. */
3092 if (mas_is_root_limits(mast->l))
3095 mast_ascend_free(mast);
3096 mast_combine_cp_left(mast);
3097 l_mas.offset = mast->bn->b_end;
3098 mab_set_b_end(mast->bn, &l_mas, left);
3099 mab_set_b_end(mast->bn, &m_mas, middle);
3100 mab_set_b_end(mast->bn, &r_mas, right);
3102 /* Copy anything necessary out of the right node. */
3103 mast_combine_cp_right(mast);
3105 mast->orig_l->last = mast->orig_l->max;
3107 if (mast_sufficient(mast))
3110 if (mast_overflow(mast))
3113 /* May be a new root stored in mast->bn */
3114 if (mas_is_root_limits(mast->orig_l))
3117 mast_spanning_rebalance(mast);
3119 /* rebalancing from other nodes may require another loop. */
3124 l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
3125 mte_node_type(mast->orig_l->node));
3126 mast->orig_l->depth++;
3127 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
3128 mte_set_parent(left, l_mas.node, slot);
3130 mte_set_parent(middle, l_mas.node, ++slot);
3133 mte_set_parent(right, l_mas.node, ++slot);
3135 if (mas_is_root_limits(mast->l)) {
3137 mast_new_root(mast, mas);
3139 mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
3142 if (!mte_dead_node(mast->orig_l->node))
3143 mat_add(&free, mast->orig_l->node);
3145 mas->depth = mast->orig_l->depth;
3146 *mast->orig_l = l_mas;
3147 mte_set_node_dead(mas->node);
3149 /* Set up mas for insertion. */
3150 mast->orig_l->depth = mas->depth;
3151 mast->orig_l->alloc = mas->alloc;
3152 *mas = *mast->orig_l;
3153 mas_wmb_replace(mas, &free, &destroy);
3154 mtree_range_walk(mas);
3155 return mast->bn->b_end;
3159 * mas_rebalance() - Rebalance a given node.
3160 * @mas: The maple state
3161 * @b_node: The big maple node.
3163 * Rebalance two nodes into a single node or two new nodes that are sufficient.
3164 * Continue upwards until tree is sufficient.
3166 * Return: the number of elements in b_node during the last loop.
3168 static inline int mas_rebalance(struct ma_state *mas,
3169 struct maple_big_node *b_node)
3171 char empty_count = mas_mt_height(mas);
3172 struct maple_subtree_state mast;
3173 unsigned char shift, b_end = ++b_node->b_end;
3175 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3176 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3178 trace_ma_op(__func__, mas);
3181 * Rebalancing occurs if a node is insufficient. Data is rebalanced
3182 * against the node to the right if it exists, otherwise the node to the
3183 * left of this node is rebalanced against this node. If rebalancing
3184 * causes just one node to be produced instead of two, then the parent
3185 * is also examined and rebalanced if it is insufficient. Every level
3186 * tries to combine the data in the same way. If one node contains the
3187 * entire range of the tree, then that node is used as a new root node.
3189 mas_node_count(mas, 1 + empty_count * 3);
3190 if (mas_is_err(mas))
3193 mast.orig_l = &l_mas;
3194 mast.orig_r = &r_mas;
3196 mast.bn->type = mte_node_type(mas->node);
3198 l_mas = r_mas = *mas;
3200 if (mas_next_sibling(&r_mas)) {
3201 mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
3202 r_mas.last = r_mas.index = r_mas.max;
3204 mas_prev_sibling(&l_mas);
3205 shift = mas_data_end(&l_mas) + 1;
3206 mab_shift_right(b_node, shift);
3207 mas->offset += shift;
3208 mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
3209 b_node->b_end = shift + b_end;
3210 l_mas.index = l_mas.last = l_mas.min;
3213 return mas_spanning_rebalance(mas, &mast, empty_count);
3217 * mas_destroy_rebalance() - Rebalance the left-most node while destroying the maple tree.
3219 * @mas: The maple state
3220 * @end: The end of the left-most node.
3222 * During a mass-insert event (such as forking), it may be necessary to
3223 * rebalance the left-most node when it is not sufficient.
3225 static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
3227 enum maple_type mt = mte_node_type(mas->node);
3228 struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
3229 struct maple_enode *eparent;
3230 unsigned char offset, tmp, split = mt_slots[mt] / 2;
3231 void __rcu **l_slots, **slots;
3232 unsigned long *l_pivs, *pivs, gap;
3233 bool in_rcu = mt_in_rcu(mas->tree);
3235 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3238 mas_prev_sibling(&l_mas);
3242 /* Allocate for both left and right as well as parent. */
3243 mas_node_count(mas, 3);
3244 if (mas_is_err(mas))
3247 newnode = mas_pop_node(mas);
3253 newnode->parent = node->parent;
3254 slots = ma_slots(newnode, mt);
3255 pivs = ma_pivots(newnode, mt);
3256 left = mas_mn(&l_mas);
3257 l_slots = ma_slots(left, mt);
3258 l_pivs = ma_pivots(left, mt);
3259 if (!l_slots[split])
3261 tmp = mas_data_end(&l_mas) - split;
3263 memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
3264 memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
3265 pivs[tmp] = l_mas.max;
3266 memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
3267 memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);
3269 l_mas.max = l_pivs[split];
3270 mas->min = l_mas.max + 1;
3271 eparent = mt_mk_node(mte_parent(l_mas.node),
3272 mas_parent_enum(&l_mas, l_mas.node));
3275 unsigned char max_p = mt_pivots[mt];
3276 unsigned char max_s = mt_slots[mt];
3279 memset(pivs + tmp, 0,
3280 sizeof(unsigned long) * (max_p - tmp));
3282 if (tmp < mt_slots[mt])
3283 memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3285 memcpy(node, newnode, sizeof(struct maple_node));
3286 ma_set_meta(node, mt, 0, tmp - 1);
3287 mte_set_pivot(eparent, mte_parent_slot(l_mas.node),
3290 /* Remove data from l_pivs. */
3292 memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
3293 memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3294 ma_set_meta(left, mt, 0, split);
3299 /* RCU requires replacing both l_mas, mas, and parent. */
3300 mas->node = mt_mk_node(newnode, mt);
3301 ma_set_meta(newnode, mt, 0, tmp);
3303 new_left = mas_pop_node(mas);
3304 new_left->parent = left->parent;
3305 mt = mte_node_type(l_mas.node);
3306 slots = ma_slots(new_left, mt);
3307 pivs = ma_pivots(new_left, mt);
3308 memcpy(slots, l_slots, sizeof(void *) * split);
3309 memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
3310 ma_set_meta(new_left, mt, 0, split);
3311 l_mas.node = mt_mk_node(new_left, mt);
3313 /* replace parent. */
3314 offset = mte_parent_slot(mas->node);
3315 mt = mas_parent_enum(&l_mas, l_mas.node);
3316 parent = mas_pop_node(mas);
3317 slots = ma_slots(parent, mt);
3318 pivs = ma_pivots(parent, mt);
3319 memcpy(parent, mte_to_node(eparent), sizeof(struct maple_node));
3320 rcu_assign_pointer(slots[offset], mas->node);
3321 rcu_assign_pointer(slots[offset - 1], l_mas.node);
3322 pivs[offset - 1] = l_mas.max;
3323 eparent = mt_mk_node(parent, mt);
3325 gap = mas_leaf_max_gap(mas);
3326 mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
3327 gap = mas_leaf_max_gap(&l_mas);
3328 mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
3332 mas_replace(mas, false);
3334 mas_update_gap(mas);
3338 * mas_split_final_node() - Split the final node in a subtree operation.
3339 * @mast: the maple subtree state
3340 * @mas: The maple state
3341 * @height: The height of the tree in case it's a new root.
3343 static inline bool mas_split_final_node(struct maple_subtree_state *mast,
3344 struct ma_state *mas, int height)
3346 struct maple_enode *ancestor;
3348 if (mte_is_root(mas->node)) {
3349 if (mt_is_alloc(mas->tree))
3350 mast->bn->type = maple_arange_64;
3352 mast->bn->type = maple_range_64;
3353 mas->depth = height;
3356 * Only a single node is used here, could be root.
3357 * The Big_node data should just fit in a single node.
3359 ancestor = mas_new_ma_node(mas, mast->bn);
3360 mte_set_parent(mast->l->node, ancestor, mast->l->offset);
3361 mte_set_parent(mast->r->node, ancestor, mast->r->offset);
3362 mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
3364 mast->l->node = ancestor;
3365 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
3366 mas->offset = mast->bn->b_end - 1;
3371 * mast_fill_bnode() - Copy data into the big node in the subtree state
3372 * @mast: The maple subtree state
3373 * @mas: the maple state
3374 * @skip: The number of entries to skip for new nodes insertion.
3376 static inline void mast_fill_bnode(struct maple_subtree_state *mast,
3377 struct ma_state *mas,
3381 struct maple_enode *old = mas->node;
3382 unsigned char split;
3384 memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
3385 memset(mast->bn->slot, 0, sizeof(void *) * ARRAY_SIZE(mast->bn->slot));
3386 memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
3387 mast->bn->b_end = 0;
3389 if (mte_is_root(mas->node)) {
3393 mat_add(mast->free, old);
3394 mas->offset = mte_parent_slot(mas->node);
3397 if (cp && mast->l->offset)
3398 mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);
3400 split = mast->bn->b_end;
3401 mab_set_b_end(mast->bn, mast->l, mast->l->node);
3402 mast->r->offset = mast->bn->b_end;
3403 mab_set_b_end(mast->bn, mast->r, mast->r->node);
3404 if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
3408 mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
3409 mast->bn, mast->bn->b_end);
3412 mast->bn->type = mte_node_type(mas->node);
3416 * mast_split_data() - Split the data in the subtree state big node into regular
3418 * @mast: The maple subtree state
3419 * @mas: The maple state
3420 * @split: The location to split the big node
3422 static inline void mast_split_data(struct maple_subtree_state *mast,
3423 struct ma_state *mas, unsigned char split)
3425 unsigned char p_slot;
3427 mab_mas_cp(mast->bn, 0, split, mast->l, true);
3428 mte_set_pivot(mast->r->node, 0, mast->r->max);
3429 mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
3430 mast->l->offset = mte_parent_slot(mas->node);
3431 mast->l->max = mast->bn->pivot[split];
3432 mast->r->min = mast->l->max + 1;
3433 if (mte_is_leaf(mas->node))
3436 p_slot = mast->orig_l->offset;
3437 mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
3439 mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
3444 * mas_push_data() - Instead of splitting a node, it is beneficial to push the
3445 * data to the right or left node if there is room.
3446 * @mas: The maple state
3447 * @height: The current height of the maple state
3448 * @mast: The maple subtree state
3449 * @left: Push left or not.
3451 * Keeping the height of the tree low means faster lookups.
3453 * Return: True if pushed, false otherwise.
3455 static inline bool mas_push_data(struct ma_state *mas, int height,
3456 struct maple_subtree_state *mast, bool left)
3458 unsigned char slot_total = mast->bn->b_end;
3459 unsigned char end, space, split;
3461 MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
3463 tmp_mas.depth = mast->l->depth;
3465 if (left && !mas_prev_sibling(&tmp_mas))
3467 else if (!left && !mas_next_sibling(&tmp_mas))
3470 end = mas_data_end(&tmp_mas);
3472 space = 2 * mt_slot_count(mas->node) - 2;
3473 /* -2 instead of -1 to ensure there isn't a triple split */
3474 if (ma_is_leaf(mast->bn->type))
3477 if (mas->max == ULONG_MAX)
3480 if (slot_total >= space)
3483 /* Get the data; Fill mast->bn */
3486 mab_shift_right(mast->bn, end + 1);
3487 mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
3488 mast->bn->b_end = slot_total + 1;
3490 mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
3493 /* Configure mast for splitting of mast->bn */
3494 split = mt_slots[mast->bn->type] - 2;
3496 /* Switch mas to prev node */
3497 mat_add(mast->free, mas->node);
3499 /* Start using mast->l for the left side. */
3500 tmp_mas.node = mast->l->node;
3503 mat_add(mast->free, tmp_mas.node);
3504 tmp_mas.node = mast->r->node;
3506 split = slot_total - split;
3508 split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
3509 /* Update parent slot for split calculation. */
3511 mast->orig_l->offset += end + 1;
3513 mast_split_data(mast, mas, split);
3514 mast_fill_bnode(mast, mas, 2);
3515 mas_split_final_node(mast, mas, height + 1);
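
/*
 * Example (illustrative sketch, not part of the tree code) of the capacity
 * test in mas_push_data() above: pushing is only attempted when the combined
 * data fits in two nodes while keeping the slack described above (an extra
 * guard slot for leaves and another when the node ends at ULONG_MAX).
 */
static inline bool example_can_push(unsigned char combined_data,
		unsigned char slot_count, bool is_leaf, bool is_rightmost)
{
	unsigned char space = 2 * slot_count - 2;

	if (is_leaf)
		space--;
	if (is_rightmost)
		space--;

	return combined_data < space;
}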
3520 * mas_split() - Split data that is too big for one node into two.
3521 * @mas: The maple state
3522 * @b_node: The maple big node
3523 * Return: 1 on success, 0 on failure.
3525 static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
3527 struct maple_subtree_state mast;
3529 unsigned char mid_split, split = 0;
3532 * Splitting is handled differently from any other B-tree; the Maple
3533 * Tree splits upwards. Splitting up means that the split operation
3534 * occurs when the walk of the tree hits the leaves and not on the way
3535 * down. The reason for splitting up is that it is impossible to know
3536 * how much space will be needed until the leaf is (or leaves are)
3537 * reached. Since overwriting data is allowed and a range could
3538 * overwrite more than one range or result in changing one entry into 3
3539 * entries, it is impossible to know if a split is required until the
3542 * Splitting is a balancing act between keeping allocations to a minimum
3543 * and avoiding a 'jitter' event where a tree is expanded to make room
3544 * for an entry followed by a contraction when the entry is removed. To
3545 * accomplish the balance, there are empty slots remaining in both left
3546 * and right nodes after a split.
3548 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3549 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3550 MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
3551 MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
3552 MA_TOPIARY(mat, mas->tree);
3554 trace_ma_op(__func__, mas);
3555 mas->depth = mas_mt_height(mas);
3556 /* Allocation failures will happen early. */
3557 mas_node_count(mas, 1 + mas->depth * 2);
3558 if (mas_is_err(mas))
3563 mast.orig_l = &prev_l_mas;
3564 mast.orig_r = &prev_r_mas;
3568 while (height++ <= mas->depth) {
3569 if (mt_slots[b_node->type] > b_node->b_end) {
3570 mas_split_final_node(&mast, mas, height);
3574 l_mas = r_mas = *mas;
3575 l_mas.node = mas_new_ma_node(mas, b_node);
3576 r_mas.node = mas_new_ma_node(mas, b_node);
3578 * Another way that 'jitter' is avoided is to terminate a split up early if the
3579 * left or right node has space to spare. This is referred to as "pushing left"
3580 * or "pushing right" and is similar to the B* tree, except the nodes left or
3581 * right can rarely be reused due to RCU, but the ripple upwards is halted,
3582 * which is a significant saving.
3584 /* Try to push left. */
3585 if (mas_push_data(mas, height, &mast, true))
3588 /* Try to push right. */
3589 if (mas_push_data(mas, height, &mast, false))
3592 split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
3593 mast_split_data(&mast, mas, split);
3595 * Usually correct; mab_mas_cp() in the above call overwrites r->max.
3598 mast.r->max = mas->max;
3599 mast_fill_bnode(&mast, mas, 1);
3600 prev_l_mas = *mast.l;
3601 prev_r_mas = *mast.r;
3604 /* Set the original node as dead */
3605 mat_add(mast.free, mas->node);
3606 mas->node = l_mas.node;
3607 mas_wmb_replace(mas, mast.free, NULL);
3608 mtree_range_walk(mas);
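
/*
 * Usage sketch (illustrative): repeated single-index stores create one range
 * per index; once a leaf holds more data than its slot count allows, the
 * store is committed through mas_split() as described above.
 */
static inline void example_fill_until_split(struct maple_tree *mt)
{
	unsigned long i;

	/* More entries than one leaf can hold forces at least one split. */
	for (i = 0; i < 100; i++)
		mtree_store(mt, i, xa_mk_value(i), GFP_KERNEL);
}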
3613 * mas_reuse_node() - Reuse the node to store the data.
3614 * @wr_mas: The maple write state
3615 * @bn: The maple big node
3616 * @end: The end of the data.
3618 * Will always return false in RCU mode.
3620 * Return: True if node was reused, false otherwise.
3622 static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
3623 struct maple_big_node *bn, unsigned char end)
3625 /* Need to be rcu safe. */
3626 if (mt_in_rcu(wr_mas->mas->tree))
3629 if (end > bn->b_end) {
3630 int clear = mt_slots[wr_mas->type] - bn->b_end;
3632 memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
3633 memset(wr_mas->pivots + bn->b_end, 0, sizeof(unsigned long) * clear);
3635 mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
3640 * mas_commit_b_node() - Commit the big node into the tree.
3641 * @wr_mas: The maple write state
3642 * @b_node: The maple big node
3643 * @end: The end of the data.
3645 static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
3646 struct maple_big_node *b_node, unsigned char end)
3648 struct maple_node *node;
3649 unsigned char b_end = b_node->b_end;
3650 enum maple_type b_type = b_node->type;
3652 if ((b_end < mt_min_slots[b_type]) &&
3653 (!mte_is_root(wr_mas->mas->node)) &&
3654 (mas_mt_height(wr_mas->mas) > 1))
3655 return mas_rebalance(wr_mas->mas, b_node);
3657 if (b_end >= mt_slots[b_type])
3658 return mas_split(wr_mas->mas, b_node);
3660 if (mas_reuse_node(wr_mas, b_node, end))
3663 mas_node_count(wr_mas->mas, 1);
3664 if (mas_is_err(wr_mas->mas))
3667 node = mas_pop_node(wr_mas->mas);
3668 node->parent = mas_mn(wr_mas->mas)->parent;
3669 wr_mas->mas->node = mt_mk_node(node, b_type);
3670 mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
3671 mas_replace(wr_mas->mas, false);
3673 mas_update_gap(wr_mas->mas);
3678 * mas_root_expand() - Expand a root to a node
3679 * @mas: The maple state
3680 * @entry: The entry to store into the tree
3682 static inline int mas_root_expand(struct ma_state *mas, void *entry)
3684 void *contents = mas_root_locked(mas);
3685 enum maple_type type = maple_leaf_64;
3686 struct maple_node *node;
3688 unsigned long *pivots;
3691 mas_node_count(mas, 1);
3692 if (unlikely(mas_is_err(mas)))
3695 node = mas_pop_node(mas);
3696 pivots = ma_pivots(node, type);
3697 slots = ma_slots(node, type);
3698 node->parent = ma_parent_ptr(
3699 ((unsigned long)mas->tree | MA_ROOT_PARENT));
3700 mas->node = mt_mk_node(node, type);
3704 rcu_assign_pointer(slots[slot], contents);
3705 if (likely(mas->index > 1))
3708 pivots[slot++] = mas->index - 1;
3711 rcu_assign_pointer(slots[slot], entry);
3713 pivots[slot] = mas->last;
3714 if (mas->last != ULONG_MAX)
3717 mas_set_height(mas);
3719 /* swap the new root into the tree */
3720 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3721 ma_set_meta(node, maple_leaf_64, 0, slot);
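
/*
 * Usage sketch (illustrative): the first store at a nonzero index expands
 * the root pointer into a leaf node as above, with NULL ranges filling the
 * space around the new entry.
 */
static inline void example_root_expand_usage(struct maple_tree *mt)
{
	mtree_store(mt, 5, xa_mk_value(1), GFP_KERNEL);
	/* Leaf: [0, 4] = NULL, [5, 5] = entry, [6, ULONG_MAX] = NULL */
}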
3725 static inline void mas_store_root(struct ma_state *mas, void *entry)
3727 if (likely((mas->last != 0) || (mas->index != 0)))
3728 mas_root_expand(mas, entry);
3729 else if (((unsigned long) (entry) & 3) == 2)
3730 mas_root_expand(mas, entry);
3732 rcu_assign_pointer(mas->tree->ma_root, entry);
3733 mas->node = MAS_START;
3738 * mas_is_span_wr() - Check if the write needs to be treated as a write that
 * spans the node.
3740 * @wr_mas: The maple write state
3745 * Spanning writes are writes that start in one node and end in another OR if
3746 * the write of a %NULL will cause the node to end with a %NULL.
3748 * Return: True if this is a spanning write, false otherwise.
3750 static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
3753 unsigned long last = wr_mas->mas->last;
3754 unsigned long piv = wr_mas->r_max;
3755 enum maple_type type = wr_mas->type;
3756 void *entry = wr_mas->entry;
3758 /* Contained in this pivot */
3762 max = wr_mas->mas->max;
3763 if (unlikely(ma_is_leaf(type))) {
3764 /* Fits in the node, but may span slots. */
3768 /* Writes to the end of the node but not null. */
3769 if ((last == max) && entry)
3773 * Writing ULONG_MAX is not a spanning write regardless of the
3774 * value being written as long as the range fits in the node.
3776 if ((last == ULONG_MAX) && (last == max))
3778 } else if (piv == last) {
3782 /* Detect spanning store wr walk */
3783 if (last == ULONG_MAX)
3787 trace_ma_write(__func__, wr_mas->mas, piv, entry);
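
/*
 * Example (illustrative sketch, not part of the tree code) of the leaf-level
 * test above: a write spans when it runs past the node's maximum, or when it
 * stores a NULL ending exactly at the maximum of a node that does not end at
 * ULONG_MAX (the NULL may need to merge with a neighbouring range).
 */
static inline bool example_is_spanning(unsigned long last,
		unsigned long node_max, void *entry)
{
	if (last < node_max)
		return false;		/* contained within this node */

	if (last == node_max)
		return !entry && node_max != ULONG_MAX;

	return true;			/* runs past this node */
}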
3792 static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
3794 wr_mas->type = mte_node_type(wr_mas->mas->node);
3795 mas_wr_node_walk(wr_mas);
3796 wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
3799 static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
3801 wr_mas->mas->max = wr_mas->r_max;
3802 wr_mas->mas->min = wr_mas->r_min;
3803 wr_mas->mas->node = wr_mas->content;
3804 wr_mas->mas->offset = 0;
3805 wr_mas->mas->depth++;
3808 * mas_wr_walk() - Walk the tree for a write.
3809 * @wr_mas: The maple write state
3811 * Uses mas_slot_locked() and does not need to worry about dead nodes.
3813 * Return: True if it's contained in a node, false on spanning write.
3815 static bool mas_wr_walk(struct ma_wr_state *wr_mas)
3817 struct ma_state *mas = wr_mas->mas;
3820 mas_wr_walk_descend(wr_mas);
3821 if (unlikely(mas_is_span_wr(wr_mas)))
3824 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3826 if (ma_is_leaf(wr_mas->type))
3829 mas_wr_walk_traverse(wr_mas);
3835 static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
3837 struct ma_state *mas = wr_mas->mas;
3840 mas_wr_walk_descend(wr_mas);
3841 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3843 if (ma_is_leaf(wr_mas->type))
3845 mas_wr_walk_traverse(wr_mas);
3851 * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
3852 * @l_wr_mas: The left maple write state
3853 * @r_wr_mas: The right maple write state
3855 static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
3856 struct ma_wr_state *r_wr_mas)
3858 struct ma_state *r_mas = r_wr_mas->mas;
3859 struct ma_state *l_mas = l_wr_mas->mas;
3860 unsigned char l_slot;
3862 l_slot = l_mas->offset;
3863 if (!l_wr_mas->content)
3864 l_mas->index = l_wr_mas->r_min;
3866 if ((l_mas->index == l_wr_mas->r_min) &&
3868 !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
3870 l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
3872 l_mas->index = l_mas->min;
3874 l_mas->offset = l_slot - 1;
3877 if (!r_wr_mas->content) {
3878 if (r_mas->last < r_wr_mas->r_max)
3879 r_mas->last = r_wr_mas->r_max;
3881 } else if ((r_mas->last == r_wr_mas->r_max) &&
3882 (r_mas->last < r_mas->max) &&
3883 !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
3884 r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
3885 r_wr_mas->type, r_mas->offset + 1);
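
/*
 * Usage sketch (illustrative): storing NULL absorbs neighbouring NULL ranges
 * so the tree never holds two adjacent NULL ranges; for spanning stores this
 * extension is performed by mas_extend_spanning_null() above.
 */
static inline void example_null_extension(struct maple_tree *mt)
{
	mtree_store_range(mt, 10, 19, xa_mk_value(1), GFP_KERNEL);
	mtree_store_range(mt, 20, 29, xa_mk_value(2), GFP_KERNEL);

	/* Erasing [10, 19] merges with the NULL range below it. */
	mtree_store_range(mt, 10, 19, NULL, GFP_KERNEL);
	/* Result: [0, 19] = NULL, [20, 29] = entry, [30, ULONG_MAX] = NULL */
}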
3890 static inline void *mas_state_walk(struct ma_state *mas)
3894 entry = mas_start(mas);
3895 if (mas_is_none(mas))
3898 if (mas_is_ptr(mas))
3901 return mtree_range_walk(mas);
3905 * mtree_lookup_walk() - Internal quick lookup that does not keep the maple state up to date.
3908 * @mas: The maple state.
3910 * Note: Leaves @mas in an undesirable state.
3911 * Return: The entry for @mas->index or %NULL on dead node.
3913 static inline void *mtree_lookup_walk(struct ma_state *mas)
3915 unsigned long *pivots;
3916 unsigned char offset;
3917 struct maple_node *node;
3918 struct maple_enode *next;
3919 enum maple_type type;
3928 node = mte_to_node(next);
3929 type = mte_node_type(next);
3930 pivots = ma_pivots(node, type);
3931 end = ma_data_end(node, type, pivots, max);
3932 if (unlikely(ma_dead_node(node)))
3935 if (pivots[offset] >= mas->index)
3940 } while ((offset < end) && (pivots[offset] < mas->index));
3942 if (likely(offset < end))
3943 max = pivots[offset];
3946 slots = ma_slots(node, type);
3947 next = mt_slot(mas->tree, slots, offset);
3948 if (unlikely(ma_dead_node(node)))
3950 } while (!ma_is_leaf(type));
3952 return (void *)next;
3960 * mas_new_root() - Create a new root node that only contains the entry passed
3962 * @mas: The maple state
3963 * @entry: The entry to store.
3965 * Only valid when mas->index == 0 and mas->last == ULONG_MAX.
3967 * Return: 0 on error, 1 on success.
3969 static inline int mas_new_root(struct ma_state *mas, void *entry)
3971 struct maple_enode *root = mas_root_locked(mas);
3972 enum maple_type type = maple_leaf_64;
3973 struct maple_node *node;
3975 unsigned long *pivots;
3977 if (!entry && !mas->index && mas->last == ULONG_MAX) {
3979 mas_set_height(mas);
3980 rcu_assign_pointer(mas->tree->ma_root, entry);
3981 mas->node = MAS_START;
3985 mas_node_count(mas, 1);
3986 if (mas_is_err(mas))
3989 node = mas_pop_node(mas);
3990 pivots = ma_pivots(node, type);
3991 slots = ma_slots(node, type);
3992 node->parent = ma_parent_ptr(
3993 ((unsigned long)mas->tree | MA_ROOT_PARENT));
3994 mas->node = mt_mk_node(node, type);
3995 rcu_assign_pointer(slots[0], entry);
3996 pivots[0] = mas->last;
3998 mas_set_height(mas);
3999 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
4002 if (xa_is_node(root))
4003 mte_destroy_walk(root, mas->tree);
4008 * mas_wr_spanning_store() - Create a subtree with the store operation completed
4009 * and new nodes where necessary, then place the sub-tree in the actual tree.
4010 * Note that mas is expected to point to the node which caused the store to span.
4012 * @wr_mas: The maple write state
4014 * Return: 0 on error, positive on success.
4016 static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
4018 struct maple_subtree_state mast;
4019 struct maple_big_node b_node;
4020 struct ma_state *mas;
4021 unsigned char height;
4023 /* Left and Right side of spanning store */
4024 MA_STATE(l_mas, NULL, 0, 0);
4025 MA_STATE(r_mas, NULL, 0, 0);
4027 MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
4028 MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
4031 * A store operation that spans multiple nodes is called a spanning
4032 * store and is handled early in the store call stack by the function
4033 * mas_is_span_wr(). When a spanning store is identified, the maple
4034 * state is duplicated. The first maple state walks the left tree path
4035 * to ``index``, the duplicate walks the right tree path to ``last``.
4036 * The data in the two nodes are combined into a single node, two nodes,
4037 * or possibly three nodes (see the 3-way split above). A ``NULL``
4038 * written to the last entry of a node is considered a spanning store as
4039 * a rebalance is required for the operation to complete and an overflow
4040 * of data may happen.
4043 trace_ma_op(__func__, mas);
4045 if (unlikely(!mas->index && mas->last == ULONG_MAX))
4046 return mas_new_root(mas, wr_mas->entry);
4048 * Node rebalancing may occur due to this store, so there may be three new
4049 * entries per level plus a new root.
4051 height = mas_mt_height(mas);
4052 mas_node_count(mas, 1 + height * 3);
4053 if (mas_is_err(mas))
4057 * Set up right side. Need to get to the next offset after the spanning
4058 * store to ensure it's not NULL and to combine both the next node and
4059 * the node with the start together.
4062 /* Avoid overflow, walk to next slot in the tree. */
4066 r_mas.index = r_mas.last;
4067 mas_wr_walk_index(&r_wr_mas);
4068 r_mas.last = r_mas.index = mas->last;
4070 /* Set up left side. */
4072 mas_wr_walk_index(&l_wr_mas);
4074 if (!wr_mas->entry) {
4075 mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
4076 mas->offset = l_mas.offset;
4077 mas->index = l_mas.index;
4078 mas->last = l_mas.last = r_mas.last;
4081 /* expanding NULLs may make this cover the entire range */
4082 if (!l_mas.index && r_mas.last == ULONG_MAX) {
4083 mas_set_range(mas, 0, ULONG_MAX);
4084 return mas_new_root(mas, wr_mas->entry);
4087 memset(&b_node, 0, sizeof(struct maple_big_node));
4088 /* Copy l_mas and store the value in b_node. */
4089 mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
4090 /* Copy r_mas into b_node. */
4091 if (r_mas.offset <= r_wr_mas.node_end)
4092 mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
4093 &b_node, b_node.b_end + 1);
4097 /* Stop spanning searches by searching for just index. */
4098 l_mas.index = l_mas.last = mas->index;
4101 mast.orig_l = &l_mas;
4102 mast.orig_r = &r_mas;
4103 /* Combine l_mas and r_mas and split them up evenly again. */
4104 return mas_spanning_rebalance(mas, &mast, height + 1);
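
/*
 * Usage sketch (illustrative): a single store can overwrite parts of several
 * existing ranges; when the affected ranges live in different leaf nodes the
 * store takes the spanning path above.
 */
static inline void example_spanning_overwrite(struct maple_tree *mt)
{
	mtree_store_range(mt, 0, 9, xa_mk_value(1), GFP_KERNEL);
	mtree_store_range(mt, 10, 29, xa_mk_value(2), GFP_KERNEL);

	/* Overwrites the tail of [0, 9] and the head of [10, 29]. */
	mtree_store_range(mt, 5, 24, xa_mk_value(3), GFP_KERNEL);
	/* Result: [0, 4] = 1, [5, 24] = 3, [25, 29] = 2 */
}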
4108 * mas_wr_node_store() - Attempt to store the value in a node
4109 * @wr_mas: The maple write state
4111 * Attempts to reuse the node, but may allocate.
4113 * Return: True if stored, false otherwise
4115 static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
4117 struct ma_state *mas = wr_mas->mas;
4118 void __rcu **dst_slots;
4119 unsigned long *dst_pivots;
4120 unsigned char dst_offset;
4121 unsigned char new_end = wr_mas->node_end;
4122 unsigned char offset;
4123 unsigned char node_slots = mt_slots[wr_mas->type];
4124 struct maple_node reuse, *newnode;
4125 unsigned char copy_size, max_piv = mt_pivots[wr_mas->type];
4126 bool in_rcu = mt_in_rcu(mas->tree);
4128 offset = mas->offset;
4129 if (mas->last == wr_mas->r_max) {
4130 /* runs right to the end of the node */
4131 if (mas->last == mas->max)
4133 /* don't copy this offset */
4134 wr_mas->offset_end++;
4135 } else if (mas->last < wr_mas->r_max) {
4136 /* new range ends in this range */
4137 if (unlikely(wr_mas->r_max == ULONG_MAX))
4138 mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
4142 if (wr_mas->end_piv == mas->last)
4143 wr_mas->offset_end++;
4145 new_end -= wr_mas->offset_end - offset - 1;
4148 /* new range starts within a range */
4149 if (wr_mas->r_min < mas->index)
4152 /* Not enough room */
4153 if (new_end >= node_slots)
4156 /* Not enough data. */
4157 if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
4158 !(mas->mas_flags & MA_STATE_BULK))
4163 mas_node_count(mas, 1);
4164 if (mas_is_err(mas))
4167 newnode = mas_pop_node(mas);
4169 memset(&reuse, 0, sizeof(struct maple_node));
4173 newnode->parent = mas_mn(mas)->parent;
4174 dst_pivots = ma_pivots(newnode, wr_mas->type);
4175 dst_slots = ma_slots(newnode, wr_mas->type);
4176 /* Copy from start to insert point */
4177 memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * (offset + 1));
4178 memcpy(dst_slots, wr_mas->slots, sizeof(void *) * (offset + 1));
4179 dst_offset = offset;
4181 /* Handle insert of new range starting after old range */
4182 if (wr_mas->r_min < mas->index) {
4184 rcu_assign_pointer(dst_slots[dst_offset], wr_mas->content);
4185 dst_pivots[dst_offset++] = mas->index - 1;
4188 /* Store the new entry and range end. */
4189 if (dst_offset < max_piv)
4190 dst_pivots[dst_offset] = mas->last;
4191 mas->offset = dst_offset;
4192 rcu_assign_pointer(dst_slots[dst_offset], wr_mas->entry);
4195 * This range wrote to the end of the node, or it overwrote the rest of the data.
4198 if (wr_mas->offset_end > wr_mas->node_end || mas->last >= mas->max) {
4199 new_end = dst_offset;
4204 /* Copy to the end of node if necessary. */
4205 copy_size = wr_mas->node_end - wr_mas->offset_end + 1;
4206 memcpy(dst_slots + dst_offset, wr_mas->slots + wr_mas->offset_end,
4207 sizeof(void *) * copy_size);
4208 if (dst_offset < max_piv) {
4209 if (copy_size > max_piv - dst_offset)
4210 copy_size = max_piv - dst_offset;
4212 memcpy(dst_pivots + dst_offset,
4213 wr_mas->pivots + wr_mas->offset_end,
4214 sizeof(unsigned long) * copy_size);
4217 if ((wr_mas->node_end == node_slots - 1) && (new_end < node_slots - 1))
4218 dst_pivots[new_end] = mas->max;
4221 mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
4223 mte_set_node_dead(mas->node);
4224 mas->node = mt_mk_node(newnode, wr_mas->type);
4225 mas_replace(mas, false);
4227 memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
4229 trace_ma_write(__func__, mas, 0, wr_mas->entry);
4230 mas_update_gap(mas);
4235 * mas_wr_slot_store() - Attempt to store a value in a slot.
4236 * @wr_mas: the maple write state
4238 * Return: True if stored, false otherwise
4240 static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
4242 struct ma_state *mas = wr_mas->mas;
4243 unsigned long lmax; /* Logical max. */
4244 unsigned char offset = mas->offset;
4246 if ((wr_mas->r_max > mas->last) && ((wr_mas->r_min != mas->index) ||
4247 (offset != wr_mas->node_end)))
4250 if (offset == wr_mas->node_end - 1)
4253 lmax = wr_mas->pivots[offset + 1];
4255 /* going to overwrite too many slots. */
4256 if (lmax < mas->last)
4259 if (wr_mas->r_min == mas->index) {
4260 /* overwriting two or more ranges with one. */
4261 if (lmax == mas->last)
4264 /* Overwriting all of offset and a portion of offset + 1. */
4265 rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry);
4266 wr_mas->pivots[offset] = mas->last;
4270 /* Doesn't end on the next range end. */
4271 if (lmax != mas->last)
4274 /* Overwriting a portion of offset and all of offset + 1 */
4275 if ((offset + 1 < mt_pivots[wr_mas->type]) &&
4276 (wr_mas->entry || wr_mas->pivots[offset + 1]))
4277 wr_mas->pivots[offset + 1] = mas->last;
4279 rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry);
4280 wr_mas->pivots[offset] = mas->index - 1;
4281 mas->offset++; /* Keep mas accurate. */
4284 trace_ma_write(__func__, mas, 0, wr_mas->entry);
4285 mas_update_gap(mas);
4289 static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
4291 while ((wr_mas->mas->last > wr_mas->end_piv) &&
4292 (wr_mas->offset_end < wr_mas->node_end))
4293 wr_mas->end_piv = wr_mas->pivots[++wr_mas->offset_end];
4295 if (wr_mas->mas->last > wr_mas->end_piv)
4296 wr_mas->end_piv = wr_mas->mas->max;
4299 static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
4301 struct ma_state *mas = wr_mas->mas;
4303 if (mas->last < wr_mas->end_piv && !wr_mas->slots[wr_mas->offset_end])
4304 mas->last = wr_mas->end_piv;
4306 /* Check next slot(s) if we are overwriting the end */
4307 if ((mas->last == wr_mas->end_piv) &&
4308 (wr_mas->node_end != wr_mas->offset_end) &&
4309 !wr_mas->slots[wr_mas->offset_end + 1]) {
4310 wr_mas->offset_end++;
4311 if (wr_mas->offset_end == wr_mas->node_end)
4312 mas->last = mas->max;
4314 mas->last = wr_mas->pivots[wr_mas->offset_end];
4315 wr_mas->end_piv = mas->last;
4318 if (!wr_mas->content) {
4319 /* If this one is null, the next and prev are not */
4320 mas->index = wr_mas->r_min;
4322 /* Check prev slot if we are overwriting the start */
4323 if (mas->index == wr_mas->r_min && mas->offset &&
4324 !wr_mas->slots[mas->offset - 1]) {
4326 wr_mas->r_min = mas->index =
4327 mas_safe_min(mas, wr_mas->pivots, mas->offset);
4328 wr_mas->r_max = wr_mas->pivots[mas->offset];
4333 static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
4335 unsigned char end = wr_mas->node_end;
4336 unsigned char new_end = end + 1;
4337 struct ma_state *mas = wr_mas->mas;
4338 unsigned char node_pivots = mt_pivots[wr_mas->type];
4340 if ((mas->index != wr_mas->r_min) && (mas->last == wr_mas->r_max)) {
4341 if (new_end < node_pivots)
4342 wr_mas->pivots[new_end] = wr_mas->pivots[end];
4344 if (new_end < node_pivots)
4345 ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
4347 rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry);
4348 mas->offset = new_end;
4349 wr_mas->pivots[end] = mas->index - 1;
4354 if ((mas->index == wr_mas->r_min) && (mas->last < wr_mas->r_max)) {
4355 if (new_end < node_pivots)
4356 wr_mas->pivots[new_end] = wr_mas->pivots[end];
4358 rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content);
4359 if (new_end < node_pivots)
4360 ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
4362 wr_mas->pivots[end] = mas->last;
4363 rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry);
4371 * mas_wr_bnode() - Slow path for a modification.
4372 * @wr_mas: The write maple state
4374 * This is where split and rebalance end up.
4376 static void mas_wr_bnode(struct ma_wr_state *wr_mas)
4378 struct maple_big_node b_node;
4380 trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
4381 memset(&b_node, 0, sizeof(struct maple_big_node));
4382 mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
4383 mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
4386 static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
4388 unsigned char node_slots;
4389 unsigned char node_size;
4390 struct ma_state *mas = wr_mas->mas;
4392 /* Direct replacement */
4393 if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
4394 rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
4395 if (!!wr_mas->entry ^ !!wr_mas->content)
4396 mas_update_gap(mas);
4400 /* Attempt to append */
4401 node_slots = mt_slots[wr_mas->type];
4402 node_size = wr_mas->node_end - wr_mas->offset_end + mas->offset + 2;
4403 if (mas->max == ULONG_MAX)
4406 /* slot and node store will not fit, go to the slow path */
4407 if (unlikely(node_size >= node_slots))
4410 if (wr_mas->entry && (wr_mas->node_end < node_slots - 1) &&
4411 (mas->offset == wr_mas->node_end) && mas_wr_append(wr_mas)) {
4412 if (!wr_mas->content || !wr_mas->entry)
4413 mas_update_gap(mas);
4417 if ((wr_mas->offset_end - mas->offset <= 1) && mas_wr_slot_store(wr_mas))
4419 else if (mas_wr_node_store(wr_mas))
4422 if (mas_is_err(mas))
4426 mas_wr_bnode(wr_mas);
4430 * mas_wr_store_entry() - Internal call to store a value
4431 * @wr_mas: The maple write state
4434 * Return: The contents that were stored at the index.
4436 static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
4438 struct ma_state *mas = wr_mas->mas;
4440 wr_mas->content = mas_start(mas);
4441 if (mas_is_none(mas) || mas_is_ptr(mas)) {
4442 mas_store_root(mas, wr_mas->entry);
4443 return wr_mas->content;
4446 if (unlikely(!mas_wr_walk(wr_mas))) {
4447 mas_wr_spanning_store(wr_mas);
4448 return wr_mas->content;
4451 /* At this point, we are at the leaf node that needs to be altered. */
4452 wr_mas->end_piv = wr_mas->r_max;
4453 mas_wr_end_piv(wr_mas);
4456 mas_wr_extend_null(wr_mas);
4458 /* New root for a single pointer */
4459 if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
4460 mas_new_root(mas, wr_mas->entry);
4461 return wr_mas->content;
4464 mas_wr_modify(wr_mas);
4465 return wr_mas->content;
4469 * mas_insert() - Internal call to insert a value
4470 * @mas: The maple state
4471 * @entry: The entry to store
4473 * Return: %NULL on success, or the contents that already exist at the requested
4474 * index otherwise. The maple state needs to be checked for error conditions.
4476 static inline void *mas_insert(struct ma_state *mas, void *entry)
4478 MA_WR_STATE(wr_mas, mas, entry);
4481 * Inserting a new range inserts either 0, 1, or 2 pivots within the
4482 * tree. If the insert fits exactly into an existing gap with a value
4483 * of NULL, then the slot only needs to be written with the new value.
4484 * If the range being inserted is adjacent to another range, then only a
4485 * single pivot needs to be inserted (as well as writing the entry). If
4486 * the new range is within a gap but does not touch any other ranges,
4487 * then two pivots need to be inserted: the start - 1, and the end. As
4488 * usual, the entry must be written. Most operations require a new node
4489 * to be allocated and replace an existing node to ensure RCU safety,
4490 * when in RCU mode. The exception to requiring a newly allocated node
4491 * is when inserting at the end of a node (appending). When done
4492 * carefully, appending can reuse the node in place.
4494 wr_mas.content = mas_start(mas);
4498 if (mas_is_none(mas) || mas_is_ptr(mas)) {
4499 mas_store_root(mas, entry);
4503 /* spanning writes always overwrite something */
4504 if (!mas_wr_walk(&wr_mas))
4507 /* At this point, we are at the leaf node that needs to be altered. */
4508 wr_mas.offset_end = mas->offset;
4509 wr_mas.end_piv = wr_mas.r_max;
4511 if (wr_mas.content || (mas->last > wr_mas.r_max))
4517 mas_wr_modify(&wr_mas);
4518 return wr_mas.content;
4521 mas_set_err(mas, -EEXIST);
4522 return wr_mas.content;
* mas_prev_node() - Find the previous non-null entry at the same level in the
* tree. The result will be in mas->node[mas->offset] or MAS_NONE.
* @mas: The maple state
* @min: The lower limit to search
4533 * Return: 1 if the node is dead, 0 otherwise.
4535 static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
4540 struct maple_node *node;
4541 struct maple_enode *enode;
4542 unsigned long *pivots;
4544 if (mas_is_none(mas))
4550 if (ma_is_root(node))
4554 if (unlikely(mas_ascend(mas)))
4556 offset = mas->offset;
4561 mt = mte_node_type(mas->node);
4563 slots = ma_slots(node, mt);
4564 pivots = ma_pivots(node, mt);
4565 if (unlikely(ma_dead_node(node)))
4568 mas->max = pivots[offset];
4570 mas->min = pivots[offset - 1] + 1;
4571 if (unlikely(ma_dead_node(node)))
4579 enode = mas_slot(mas, slots, offset);
4580 if (unlikely(ma_dead_node(node)))
4584 mt = mte_node_type(mas->node);
4586 slots = ma_slots(node, mt);
4587 pivots = ma_pivots(node, mt);
4588 offset = ma_data_end(node, mt, pivots, mas->max);
4589 if (unlikely(ma_dead_node(node)))
4593 mas->min = pivots[offset - 1] + 1;
4595 if (offset < mt_pivots[mt])
4596 mas->max = pivots[offset];
4602 mas->node = mas_slot(mas, slots, offset);
4603 if (unlikely(ma_dead_node(node)))
4606 mas->offset = mas_data_end(mas);
4607 if (unlikely(mte_dead_node(mas->node)))
4613 mas->offset = offset;
4615 mas->min = pivots[offset - 1] + 1;
4617 if (unlikely(ma_dead_node(node)))
4620 mas->node = MAS_NONE;
4625 * mas_next_node() - Get the next node at the same level in the tree.
* @mas: The maple state
* @node: The current maple node
4627 * @max: The maximum pivot value to check.
4629 * The next value will be mas->node[mas->offset] or MAS_NONE.
4630 * Return: 1 on dead node, 0 otherwise.
4632 static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
4635 unsigned long min, pivot;
4636 unsigned long *pivots;
4637 struct maple_enode *enode;
4639 unsigned char offset;
4640 unsigned char node_end;
4644 if (mas->max >= max)
4649 if (ma_is_root(node))
4656 if (unlikely(mas_ascend(mas)))
4659 offset = mas->offset;
4662 mt = mte_node_type(mas->node);
4663 pivots = ma_pivots(node, mt);
4664 node_end = ma_data_end(node, mt, pivots, mas->max);
4665 if (unlikely(ma_dead_node(node)))
4668 } while (unlikely(offset == node_end));
4670 slots = ma_slots(node, mt);
4671 pivot = mas_safe_pivot(mas, pivots, ++offset, mt);
4672 while (unlikely(level > 1)) {
4673 /* Descend, if necessary */
4674 enode = mas_slot(mas, slots, offset);
4675 if (unlikely(ma_dead_node(node)))
4681 mt = mte_node_type(mas->node);
4682 slots = ma_slots(node, mt);
4683 pivots = ma_pivots(node, mt);
4684 if (unlikely(ma_dead_node(node)))
4691 enode = mas_slot(mas, slots, offset);
4692 if (unlikely(ma_dead_node(node)))
4701 if (unlikely(ma_dead_node(node)))
4704 mas->node = MAS_NONE;
4709 * mas_next_nentry() - Get the next node entry
4710 * @mas: The maple state
* @node: The maple node to check
* @max: The maximum value to check
* @type: The node type of @node
4714 * Sets @mas->offset to the offset of the next node entry, @mas->last to the
4715 * pivot of the entry.
4717 * Return: The next entry, %NULL otherwise
4719 static inline void *mas_next_nentry(struct ma_state *mas,
4720 struct maple_node *node, unsigned long max, enum maple_type type)
4722 unsigned char count;
4723 unsigned long pivot;
4724 unsigned long *pivots;
4728 if (mas->last == mas->max) {
4729 mas->index = mas->max;
4733 slots = ma_slots(node, type);
4734 pivots = ma_pivots(node, type);
4735 count = ma_data_end(node, type, pivots, mas->max);
4736 if (unlikely(ma_dead_node(node)))
4739 mas->index = mas_safe_min(mas, pivots, mas->offset);
4740 if (unlikely(ma_dead_node(node)))
4743 if (mas->index > max)
4746 if (mas->offset > count)
4749 while (mas->offset < count) {
4750 pivot = pivots[mas->offset];
4751 entry = mas_slot(mas, slots, mas->offset);
4752 if (ma_dead_node(node))
4761 mas->index = pivot + 1;
4765 if (mas->index > mas->max) {
4766 mas->index = mas->last;
4770 pivot = mas_safe_pivot(mas, pivots, mas->offset, type);
4771 entry = mas_slot(mas, slots, mas->offset);
4772 if (ma_dead_node(node))
4786 static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
4789 mas_set(mas, index);
4790 mas_state_walk(mas);
4791 if (mas_is_start(mas))
4796 * mas_next_entry() - Internal function to get the next entry.
4797 * @mas: The maple state
4798 * @limit: The maximum range start.
* Sets @mas->node to the node holding the next entry and @mas->index to the
* beginning value of that entry's range. Does not check beyond @limit.
4802 * Sets @mas->index and @mas->last to the limit if it is hit.
4803 * Restarts on dead nodes.
4805 * Return: the next entry or %NULL.
4807 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
4810 struct maple_enode *prev_node;
4811 struct maple_node *node;
4812 unsigned char offset;
4816 if (mas->index > limit) {
4817 mas->index = mas->last = limit;
4823 offset = mas->offset;
4824 prev_node = mas->node;
4826 mt = mte_node_type(mas->node);
4828 if (unlikely(mas->offset >= mt_slots[mt])) {
4829 mas->offset = mt_slots[mt] - 1;
4833 while (!mas_is_none(mas)) {
4834 entry = mas_next_nentry(mas, node, limit, mt);
4835 if (unlikely(ma_dead_node(node))) {
4836 mas_rewalk(mas, last);
4843 if (unlikely((mas->index > limit)))
4847 prev_node = mas->node;
4848 offset = mas->offset;
4849 if (unlikely(mas_next_node(mas, node, limit))) {
4850 mas_rewalk(mas, last);
4855 mt = mte_node_type(mas->node);
4858 mas->index = mas->last = limit;
4859 mas->offset = offset;
4860 mas->node = prev_node;
4865 * mas_prev_nentry() - Get the previous node entry.
4866 * @mas: The maple state.
* @limit: The lower limit to check for a value.
* @index: The index to rewalk from if the node has been freed (dead).
4869 * Return: the entry, %NULL otherwise.
4871 static inline void *mas_prev_nentry(struct ma_state *mas, unsigned long limit,
4872 unsigned long index)
4874 unsigned long pivot, min;
4875 unsigned char offset;
4876 struct maple_node *mn;
4878 unsigned long *pivots;
4887 mt = mte_node_type(mas->node);
4888 offset = mas->offset - 1;
4889 if (offset >= mt_slots[mt])
4890 offset = mt_slots[mt] - 1;
4892 slots = ma_slots(mn, mt);
4893 pivots = ma_pivots(mn, mt);
4894 if (unlikely(ma_dead_node(mn))) {
4895 mas_rewalk(mas, index);
4899 if (offset == mt_pivots[mt])
4902 pivot = pivots[offset];
4904 if (unlikely(ma_dead_node(mn))) {
4905 mas_rewalk(mas, index);
4909 while (offset && ((!mas_slot(mas, slots, offset) && pivot >= limit) ||
4911 pivot = pivots[--offset];
4913 min = mas_safe_min(mas, pivots, offset);
4914 entry = mas_slot(mas, slots, offset);
4915 if (unlikely(ma_dead_node(mn))) {
4916 mas_rewalk(mas, index);
4920 if (likely(entry)) {
4921 mas->offset = offset;
4928 static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min)
4932 if (mas->index < min) {
4933 mas->index = mas->last = min;
4934 mas->node = MAS_NONE;
4938 while (likely(!mas_is_none(mas))) {
4939 entry = mas_prev_nentry(mas, min, mas->index);
4940 if (unlikely(mas->last < min))
4946 if (unlikely(mas_prev_node(mas, min))) {
4947 mas_rewalk(mas, mas->index);
4956 mas->index = mas->last = min;
4961 * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the
4962 * highest gap address of a given size in a given node and descend.
4963 * @mas: The maple state
4964 * @size: The needed size.
4966 * Return: True if found in a leaf, false otherwise.
4969 static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
4971 enum maple_type type = mte_node_type(mas->node);
4972 struct maple_node *node = mas_mn(mas);
4973 unsigned long *pivots, *gaps;
4975 unsigned long gap = 0;
4976 unsigned long max, min;
4977 unsigned char offset;
4979 if (unlikely(mas_is_err(mas)))
4982 if (ma_is_dense(type)) {
4984 mas->offset = (unsigned char)(mas->index - mas->min);
4988 pivots = ma_pivots(node, type);
4989 slots = ma_slots(node, type);
4990 gaps = ma_gaps(node, type);
4991 offset = mas->offset;
4992 min = mas_safe_min(mas, pivots, offset);
4993 /* Skip out of bounds. */
4994 while (mas->last < min)
4995 min = mas_safe_min(mas, pivots, --offset);
4997 max = mas_safe_pivot(mas, pivots, offset, type);
4998 while (mas->index <= max) {
5002 else if (!mas_slot(mas, slots, offset))
5003 gap = max - min + 1;
5006 if ((size <= gap) && (size <= mas->last - min + 1))
5010 /* Skip the next slot, it cannot be a gap. */
5015 max = pivots[offset];
5016 min = mas_safe_min(mas, pivots, offset);
5026 min = mas_safe_min(mas, pivots, offset);
5029 if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
5032 if (unlikely(ma_is_leaf(type))) {
5033 mas->offset = offset;
5035 mas->max = min + gap - 1;
5039 /* descend, only happens under lock. */
5040 mas->node = mas_slot(mas, slots, offset);
5043 mas->offset = mas_data_end(mas);
5047 if (!mte_is_root(mas->node))
5051 mas_set_err(mas, -EBUSY);
5055 static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
5057 enum maple_type type = mte_node_type(mas->node);
5058 unsigned long pivot, min, gap = 0;
5059 unsigned char offset;
5060 unsigned long *gaps;
5061 unsigned long *pivots = ma_pivots(mas_mn(mas), type);
5062 void __rcu **slots = ma_slots(mas_mn(mas), type);
5065 if (ma_is_dense(type)) {
5066 mas->offset = (unsigned char)(mas->index - mas->min);
5070 gaps = ma_gaps(mte_to_node(mas->node), type);
5071 offset = mas->offset;
5072 min = mas_safe_min(mas, pivots, offset);
5073 for (; offset < mt_slots[type]; offset++) {
5074 pivot = mas_safe_pivot(mas, pivots, offset, type);
5075 if (offset && !pivot)
5078 /* Not within lower bounds */
5079 if (mas->index > pivot)
5084 else if (!mas_slot(mas, slots, offset))
5085 gap = min(pivot, mas->last) - max(mas->index, min) + 1;
5090 if (ma_is_leaf(type)) {
5094 if (mas->index <= pivot) {
5095 mas->node = mas_slot(mas, slots, offset);
5104 if (mas->last <= pivot) {
5105 mas_set_err(mas, -EBUSY);
5110 if (mte_is_root(mas->node))
5113 mas->offset = offset;
5118 * mas_walk() - Search for @mas->index in the tree.
5119 * @mas: The maple state.
5121 * mas->index and mas->last will be set to the range if there is a value. If
5122 * mas->node is MAS_NONE, reset to MAS_START.
5124 * Return: the entry at the location or %NULL.
5126 void *mas_walk(struct ma_state *mas)
5131 entry = mas_state_walk(mas);
5132 if (mas_is_start(mas))
5135 if (mas_is_ptr(mas)) {
5140 mas->last = ULONG_MAX;
5145 if (mas_is_none(mas)) {
5147 mas->last = ULONG_MAX;
5152 EXPORT_SYMBOL_GPL(mas_walk);
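/*
 * Usage sketch for mas_walk() (illustrative only, not an in-tree caller;
 * the tree @mt and the index are hypothetical):
 *
 *	MA_STATE(mas, &mt, 12, 12);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	entry = mas_walk(&mas);
 *	rcu_read_unlock();
 *
 * On return, mas.index and mas.last span the range holding the returned
 * entry, or the NULL range covering index 12 when nothing is stored there.
 */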
5154 static inline bool mas_rewind_node(struct ma_state *mas)
5159 if (mte_is_root(mas->node)) {
5169 mas->offset = --slot;
5174 * mas_skip_node() - Internal function. Skip over a node.
5175 * @mas: The maple state.
5177 * Return: true if there is another node, false otherwise.
5179 static inline bool mas_skip_node(struct ma_state *mas)
5181 if (mas_is_err(mas))
5185 if (mte_is_root(mas->node)) {
5186 if (mas->offset >= mas_data_end(mas)) {
5187 mas_set_err(mas, -EBUSY);
5193 } while (mas->offset >= mas_data_end(mas));
* mas_awalk() - Allocation walk. Search from low address to high for a gap
* of @size.
5202 * @mas: The maple state
5203 * @size: The size of the gap required
5205 * Search between @mas->index and @mas->last for a gap of @size.
5207 static inline void mas_awalk(struct ma_state *mas, unsigned long size)
5209 struct maple_enode *last = NULL;
5212 * There are 4 options:
5213 * go to child (descend)
5214 * go back to parent (ascend)
5215 * no gap found. (return, slot == MAPLE_NODE_SLOTS)
5216 * found the gap. (return, slot != MAPLE_NODE_SLOTS)
5218 while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
5219 if (last == mas->node)
5227 * mas_fill_gap() - Fill a located gap with @entry.
5228 * @mas: The maple state
5229 * @entry: The value to store
5230 * @slot: The offset into the node to store the @entry
5231 * @size: The size of the entry
5232 * @index: The start location
5234 static inline void mas_fill_gap(struct ma_state *mas, void *entry,
5235 unsigned char slot, unsigned long size, unsigned long *index)
5237 MA_WR_STATE(wr_mas, mas, entry);
5238 unsigned char pslot = mte_parent_slot(mas->node);
5239 struct maple_enode *mn = mas->node;
5240 unsigned long *pivots;
5241 enum maple_type ptype;
5243 * mas->index is the start address for the search
5244 * which may no longer be needed.
5245 * mas->last is the end address for the search
5248 *index = mas->index;
5249 mas->last = mas->index + size - 1;
5252 * It is possible that using mas->max and mas->min to correctly
5253 * calculate the index and last will cause an issue in the gap
5254 * calculation, so fix the ma_state here
5257 ptype = mte_node_type(mas->node);
5258 pivots = ma_pivots(mas_mn(mas), ptype);
5259 mas->max = mas_safe_pivot(mas, pivots, pslot, ptype);
5260 mas->min = mas_safe_min(mas, pivots, pslot);
5263 mas_wr_store_entry(&wr_mas);
5267 * mas_sparse_area() - Internal function. Return upper or lower limit when
5268 * searching for a gap in an empty tree.
5269 * @mas: The maple state
5270 * @min: the minimum range
5271 * @max: The maximum range
5272 * @size: The size of the gap
5273 * @fwd: Searching forward or back
5275 static inline void mas_sparse_area(struct ma_state *mas, unsigned long min,
5276 unsigned long max, unsigned long size, bool fwd)
5278 unsigned long start = 0;
	if (likely(!mas_is_none(mas)))
5289 mas->last = start + size - 1;
5297 * mas_empty_area() - Get the lowest address within the range that is
5298 * sufficient for the size requested.
5299 * @mas: The maple state
5300 * @min: The lowest value of the range
5301 * @max: The highest value of the range
* @size: The size needed
*
* Return: 0 on success, otherwise a negative errno (-EBUSY if no gap is
* found).
5304 int mas_empty_area(struct ma_state *mas, unsigned long min,
5305 unsigned long max, unsigned long size)
5307 unsigned char offset;
5308 unsigned long *pivots;
5311 if (mas_is_start(mas))
5313 else if (mas->offset >= 2)
5315 else if (!mas_skip_node(mas))
5319 if (mas_is_none(mas) || mas_is_ptr(mas)) {
5320 mas_sparse_area(mas, min, max, size, true);
5324 /* The start of the window can only be within these values */
5327 mas_awalk(mas, size);
5329 if (unlikely(mas_is_err(mas)))
5330 return xa_err(mas->node);
5332 offset = mas->offset;
5333 if (unlikely(offset == MAPLE_NODE_SLOTS))
5336 mt = mte_node_type(mas->node);
5337 pivots = ma_pivots(mas_mn(mas), mt);
5339 mas->min = pivots[offset - 1] + 1;
5341 if (offset < mt_pivots[mt])
5342 mas->max = pivots[offset];
5344 if (mas->index < mas->min)
5345 mas->index = mas->min;
5347 mas->last = mas->index + size - 1;
5350 EXPORT_SYMBOL_GPL(mas_empty_area);
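/*
 * Usage sketch for mas_empty_area() (illustrative; assumes a hypothetical
 * tree @mt created with MT_FLAGS_ALLOC_RANGE and a hypothetical entry ptr):
 * find the lowest 0x1000-sized gap in [0, 0xffffff] and fill it.  On
 * success, mas.index and mas.last already describe the chosen window, so a
 * store can follow directly.
 *
 *	MA_STATE(mas, &mt, 0, 0);
 *	int ret;
 *
 *	mas_lock(&mas);
 *	ret = mas_empty_area(&mas, 0, 0xffffff, 0x1000);
 *	if (!ret)
 *		ret = mas_store_gfp(&mas, ptr, GFP_KERNEL);
 *	mas_unlock(&mas);
 */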
5353 * mas_empty_area_rev() - Get the highest address within the range that is
5354 * sufficient for the size requested.
5355 * @mas: The maple state
5356 * @min: The lowest value of the range
5357 * @max: The highest value of the range
* @size: The size needed
*
* Return: 0 on success, otherwise a negative errno (-EBUSY if no gap is
* found).
5360 int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
5361 unsigned long max, unsigned long size)
5363 struct maple_enode *last = mas->node;
5365 if (mas_is_start(mas)) {
5367 mas->offset = mas_data_end(mas);
5368 } else if (mas->offset >= 2) {
5370 } else if (!mas_rewind_node(mas)) {
5375 if (mas_is_none(mas) || mas_is_ptr(mas)) {
5376 mas_sparse_area(mas, min, max, size, false);
5380 /* The start of the window can only be within these values. */
5384 while (!mas_rev_awalk(mas, size)) {
5385 if (last == mas->node) {
5386 if (!mas_rewind_node(mas))
5393 if (mas_is_err(mas))
5394 return xa_err(mas->node);
5396 if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
5400 * mas_rev_awalk() has set mas->min and mas->max to the gap values. If
5401 * the maximum is outside the window we are searching, then use the last
5402 * location in the search.
* mas->max and mas->min are the range of the gap.
5404 * mas->index and mas->last are currently set to the search range.
5407 /* Trim the upper limit to the max. */
5408 if (mas->max <= mas->last)
5409 mas->last = mas->max;
5411 mas->index = mas->last - size + 1;
5414 EXPORT_SYMBOL_GPL(mas_empty_area_rev);
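/*
 * The reverse search is symmetric: with the same hypothetical setup as the
 * mas_empty_area() sketch above, mas_empty_area_rev(&mas, 0, 0xffffff,
 * 0x1000) leaves mas.index and mas.last at the highest suitable
 * 0x1000-sized window instead of the lowest.
 */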
5416 static inline int mas_alloc(struct ma_state *mas, void *entry,
5417 unsigned long size, unsigned long *index)
5422 if (mas_is_none(mas) || mas_is_ptr(mas)) {
5423 mas_root_expand(mas, entry);
5424 if (mas_is_err(mas))
5425 return xa_err(mas->node);
5428 return mte_pivot(mas->node, 0);
5429 return mte_pivot(mas->node, 1);
5432 /* Must be walking a tree. */
5433 mas_awalk(mas, size);
5434 if (mas_is_err(mas))
5435 return xa_err(mas->node);
5437 if (mas->offset == MAPLE_NODE_SLOTS)
5441 * At this point, mas->node points to the right node and we have an
5442 * offset that has a sufficient gap.
5446 min = mte_pivot(mas->node, mas->offset - 1) + 1;
5448 if (mas->index < min)
5451 mas_fill_gap(mas, entry, mas->offset, size, index);
5458 static inline int mas_rev_alloc(struct ma_state *mas, unsigned long min,
5459 unsigned long max, void *entry,
5460 unsigned long size, unsigned long *index)
5464 ret = mas_empty_area_rev(mas, min, max, size);
5468 if (mas_is_err(mas))
5469 return xa_err(mas->node);
5471 if (mas->offset == MAPLE_NODE_SLOTS)
5474 mas_fill_gap(mas, entry, mas->offset, size, index);
5482 * mas_dead_leaves() - Mark all leaves of a node as dead.
5483 * @mas: The maple state
5484 * @slots: Pointer to the slot array
* @mt: The maple node type
5487 * Must hold the write lock.
5489 * Return: The number of leaves marked as dead.
5492 unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots,
5495 struct maple_node *node;
5496 enum maple_type type;
5500 for (offset = 0; offset < mt_slots[mt]; offset++) {
5501 entry = mas_slot_locked(mas, slots, offset);
5502 type = mte_node_type(entry);
5503 node = mte_to_node(entry);
5504 /* Use both node and type to catch LE & BE metadata */
5508 mte_set_node_dead(entry);
5510 rcu_assign_pointer(slots[offset], node);
5516 static void __rcu **mas_dead_walk(struct ma_state *mas, unsigned char offset)
5518 struct maple_node *next;
5519 void __rcu **slots = NULL;
5523 mas->node = mt_mk_node(next, next->type);
5524 slots = ma_slots(next, next->type);
5525 next = mas_slot_locked(mas, slots, offset);
5527 } while (!ma_is_leaf(next->type));
5532 static void mt_free_walk(struct rcu_head *head)
5535 struct maple_node *node, *start;
5536 struct maple_tree mt;
5537 unsigned char offset;
5538 enum maple_type type;
5539 MA_STATE(mas, &mt, 0, 0);
5541 node = container_of(head, struct maple_node, rcu);
5543 if (ma_is_leaf(node->type))
5546 mt_init_flags(&mt, node->ma_flags);
5549 mas.node = mt_mk_node(node, node->type);
5550 slots = mas_dead_walk(&mas, 0);
5551 node = mas_mn(&mas);
5553 mt_free_bulk(node->slot_len, slots);
5554 offset = node->parent_slot + 1;
5555 mas.node = node->piv_parent;
5556 if (mas_mn(&mas) == node)
5557 goto start_slots_free;
5559 type = mte_node_type(mas.node);
5560 slots = ma_slots(mte_to_node(mas.node), type);
5561 if ((offset < mt_slots[type]) && (slots[offset]))
5562 slots = mas_dead_walk(&mas, offset);
5564 node = mas_mn(&mas);
5565 } while ((node != start) || (node->slot_len < offset));
5567 slots = ma_slots(node, node->type);
5568 mt_free_bulk(node->slot_len, slots);
5573 mt_free_rcu(&node->rcu);
5576 static inline void __rcu **mas_destroy_descend(struct ma_state *mas,
5577 struct maple_enode *prev, unsigned char offset)
5579 struct maple_node *node;
5580 struct maple_enode *next = mas->node;
5581 void __rcu **slots = NULL;
5586 slots = ma_slots(node, mte_node_type(mas->node));
5587 next = mas_slot_locked(mas, slots, 0);
5588 if ((mte_dead_node(next))) {
5589 mte_to_node(next)->type = mte_node_type(next);
5590 next = mas_slot_locked(mas, slots, 1);
5593 mte_set_node_dead(mas->node);
5594 node->type = mte_node_type(mas->node);
5595 mas_clear_meta(mas, node, node->type);
5596 node->piv_parent = prev;
5597 node->parent_slot = offset;
5600 } while (!mte_is_leaf(next));
5605 static void mt_destroy_walk(struct maple_enode *enode, unsigned char ma_flags,
5609 struct maple_node *node = mte_to_node(enode);
5610 struct maple_enode *start;
5611 struct maple_tree mt;
5613 MA_STATE(mas, &mt, 0, 0);
5616 if (mte_is_leaf(enode)) {
5617 node->type = mte_node_type(enode);
5621 ma_flags &= ~MT_FLAGS_LOCK_MASK;
5622 mt_init_flags(&mt, ma_flags);
5625 mte_to_node(enode)->ma_flags = ma_flags;
5627 slots = mas_destroy_descend(&mas, start, 0);
5628 node = mas_mn(&mas);
5630 enum maple_type type;
5631 unsigned char offset;
5632 struct maple_enode *parent, *tmp;
5634 node->type = mte_node_type(mas.node);
5635 node->slot_len = mas_dead_leaves(&mas, slots, node->type);
5637 mt_free_bulk(node->slot_len, slots);
5638 offset = node->parent_slot + 1;
5639 mas.node = node->piv_parent;
5640 if (mas_mn(&mas) == node)
5641 goto start_slots_free;
5643 type = mte_node_type(mas.node);
5644 slots = ma_slots(mte_to_node(mas.node), type);
5645 if (offset >= mt_slots[type])
5648 tmp = mas_slot_locked(&mas, slots, offset);
5649 if (mte_node_type(tmp) && mte_to_node(tmp)) {
5652 slots = mas_destroy_descend(&mas, parent, offset);
5655 node = mas_mn(&mas);
5656 } while (start != mas.node);
5658 node = mas_mn(&mas);
5659 node->type = mte_node_type(mas.node);
5660 node->slot_len = mas_dead_leaves(&mas, slots, node->type);
5662 mt_free_bulk(node->slot_len, slots);
5669 mt_free_rcu(&node->rcu);
5671 mas_clear_meta(&mas, node, node->type);
5675 * mte_destroy_walk() - Free a tree or sub-tree.
5676 * @enode: the encoded maple node (maple_enode) to start
5677 * @mt: the tree to free - needed for node types.
5679 * Must hold the write lock.
5681 static inline void mte_destroy_walk(struct maple_enode *enode,
5682 struct maple_tree *mt)
5684 struct maple_node *node = mte_to_node(enode);
5686 if (mt_in_rcu(mt)) {
5687 mt_destroy_walk(enode, mt->ma_flags, false);
5688 call_rcu(&node->rcu, mt_free_walk);
5690 mt_destroy_walk(enode, mt->ma_flags, true);
5694 static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
5696 if (unlikely(mas_is_paused(wr_mas->mas)))
5697 mas_reset(wr_mas->mas);
5699 if (!mas_is_start(wr_mas->mas)) {
5700 if (mas_is_none(wr_mas->mas)) {
5701 mas_reset(wr_mas->mas);
5703 wr_mas->r_max = wr_mas->mas->max;
5704 wr_mas->type = mte_node_type(wr_mas->mas->node);
5705 if (mas_is_span_wr(wr_mas))
5706 mas_reset(wr_mas->mas);
5714 * mas_store() - Store an @entry.
5715 * @mas: The maple state.
5716 * @entry: The entry to store.
* The @mas->index and @mas->last are used to set the range for the @entry.
5719 * Note: The @mas should have pre-allocated entries to ensure there is memory to
5720 * store the entry. Please see mas_expected_entries()/mas_destroy() for more details.
5722 * Return: the first entry between mas->index and mas->last or %NULL.
5724 void *mas_store(struct ma_state *mas, void *entry)
5726 MA_WR_STATE(wr_mas, mas, entry);
5728 trace_ma_write(__func__, mas, 0, entry);
5729 #ifdef CONFIG_DEBUG_MAPLE_TREE
5730 if (mas->index > mas->last)
5731 pr_err("Error %lu > %lu %p\n", mas->index, mas->last, entry);
5732 MT_BUG_ON(mas->tree, mas->index > mas->last);
5733 if (mas->index > mas->last) {
5734 mas_set_err(mas, -EINVAL);
5741 * Storing is the same operation as insert with the added caveat that it
5742 * can overwrite entries. Although this seems simple enough, one may
5743 * want to examine what happens if a single store operation was to
5744 * overwrite multiple entries within a self-balancing B-Tree.
5746 mas_wr_store_setup(&wr_mas);
5747 mas_wr_store_entry(&wr_mas);
5748 return wr_mas.content;
5750 EXPORT_SYMBOL_GPL(mas_store);
5753 * mas_store_gfp() - Store a value into the tree.
5754 * @mas: The maple state
5755 * @entry: The entry to store
5756 * @gfp: The GFP_FLAGS to use for allocations if necessary.
* Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
* be allocated.
5761 int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
5763 MA_WR_STATE(wr_mas, mas, entry);
5765 mas_wr_store_setup(&wr_mas);
5766 trace_ma_write(__func__, mas, 0, entry);
5768 mas_wr_store_entry(&wr_mas);
5769 if (unlikely(mas_nomem(mas, gfp)))
5772 if (unlikely(mas_is_err(mas)))
5773 return xa_err(mas->node);
5777 EXPORT_SYMBOL_GPL(mas_store_gfp);
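/*
 * Usage sketch for mas_store_gfp() (illustrative; @mt is a hypothetical
 * tree): overwrite whatever occupies the range 5-10, letting the call
 * allocate nodes as needed.
 *
 *	MA_STATE(mas, &mt, 5, 10);
 *	int ret;
 *
 *	mas_lock(&mas);
 *	ret = mas_store_gfp(&mas, xa_mk_value(0xc0de), GFP_KERNEL);
 *	mas_unlock(&mas);
 *
 * Note that mas_nomem() may drop and retake the internal tree lock for a
 * blocking allocation, so the range may be re-walked internally.
 */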
5780 * mas_store_prealloc() - Store a value into the tree using memory
5781 * preallocated in the maple state.
5782 * @mas: The maple state
5783 * @entry: The entry to store.
5785 void mas_store_prealloc(struct ma_state *mas, void *entry)
5787 MA_WR_STATE(wr_mas, mas, entry);
5789 mas_wr_store_setup(&wr_mas);
5790 trace_ma_write(__func__, mas, 0, entry);
5791 mas_wr_store_entry(&wr_mas);
5792 BUG_ON(mas_is_err(mas));
5795 EXPORT_SYMBOL_GPL(mas_store_prealloc);
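/*
 * Usage sketch of the preallocation pattern (illustrative; @mt, first, last
 * and entry are hypothetical).  Nodes are allocated up front so the store
 * itself cannot fail, which helps when the write lock also protects state
 * that would be hard to unwind:
 *
 *	MA_STATE(mas, &mt, first, last);
 *
 *	if (mas_preallocate(&mas, GFP_KERNEL))
 *		return -ENOMEM;
 *	mas_store_prealloc(&mas, entry);
 *
 * mas_store_prealloc() releases any unused preallocated nodes on completion.
 */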
5798 * mas_preallocate() - Preallocate enough nodes for a store operation
5799 * @mas: The maple state
5800 * @gfp: The GFP_FLAGS to use for allocations.
5802 * Return: 0 on success, -ENOMEM if memory could not be allocated.
5804 int mas_preallocate(struct ma_state *mas, gfp_t gfp)
5808 mas_node_count_gfp(mas, 1 + mas_mt_height(mas) * 3, gfp);
5809 mas->mas_flags |= MA_STATE_PREALLOC;
5810 if (likely(!mas_is_err(mas)))
5813 mas_set_alloc_req(mas, 0);
5814 ret = xa_err(mas->node);
5822 * mas_destroy() - destroy a maple state.
5823 * @mas: The maple state
5825 * Upon completion, check the left-most node and rebalance against the node to
* the right if necessary. Frees any allocated nodes associated with this maple
* state.
5829 void mas_destroy(struct ma_state *mas)
5831 struct maple_alloc *node;
5832 unsigned long total;
5835 * When using mas_for_each() to insert an expected number of elements,
5836 * it is possible that the number inserted is less than the expected
5837 * number. To fix an invalid final node, a check is performed here to
5838 * rebalance the previous node with the final node.
5840 if (mas->mas_flags & MA_STATE_REBALANCE) {
5843 if (mas_is_start(mas))
5846 mtree_range_walk(mas);
5847 end = mas_data_end(mas) + 1;
5848 if (end < mt_min_slot_count(mas->node) - 1)
5849 mas_destroy_rebalance(mas, end);
5851 mas->mas_flags &= ~MA_STATE_REBALANCE;
5853 mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
5855 total = mas_allocated(mas);
5858 mas->alloc = node->slot[0];
5859 if (node->node_count > 1) {
5860 size_t count = node->node_count - 1;
5862 mt_free_bulk(count, (void __rcu **)&node->slot[1]);
5865 kmem_cache_free(maple_node_cache, node);
5871 EXPORT_SYMBOL_GPL(mas_destroy);
5874 * mas_expected_entries() - Set the expected number of entries that will be inserted.
5875 * @mas: The maple state
5876 * @nr_entries: The number of expected entries.
5878 * This will attempt to pre-allocate enough nodes to store the expected number
5879 * of entries. The allocations will occur using the bulk allocator interface
5880 * for speed. Please call mas_destroy() on the @mas after inserting the entries
5881 * to ensure any unused nodes are freed.
5883 * Return: 0 on success, -ENOMEM if memory could not be allocated.
5885 int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
5887 int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
5888 struct maple_enode *enode = mas->node;
5893 * Sometimes it is necessary to duplicate a tree to a new tree, such as
5894 * forking a process and duplicating the VMAs from one tree to a new
5895 * tree. When such a situation arises, it is known that the new tree is
5896 * not going to be used until the entire tree is populated. For
5897 * performance reasons, it is best to use a bulk load with RCU disabled.
5898 * This allows for optimistic splitting that favours the left and reuse
5899 * of nodes during the operation.
5902 /* Optimize splitting for bulk insert in-order */
5903 mas->mas_flags |= MA_STATE_BULK;
5906 * Avoid overflow, assume a gap between each entry and a trailing null.
5907 * If this is wrong, it just means allocation can happen during
5908 * insertion of entries.
5910 nr_nodes = max(nr_entries, nr_entries * 2 + 1);
5911 if (!mt_is_alloc(mas->tree))
5912 nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
5914 /* Leaves; reduce slots to keep space for expansion */
5915 nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
5916 /* Internal nodes */
5917 nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
5918 /* Add working room for split (2 nodes) + new parents */
5919 mas_node_count(mas, nr_nodes + 3);
5921 /* Detect if allocations run out */
5922 mas->mas_flags |= MA_STATE_PREALLOC;
5924 if (!mas_is_err(mas))
5927 ret = xa_err(mas->node);
5933 EXPORT_SYMBOL_GPL(mas_expected_entries);
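/*
 * Usage sketch for bulk loading (illustrative; dst, nr, start[], end[] and
 * entries[] are hypothetical).  This is the fork-like pattern the comment
 * above describes: announce the expected count, store in order, then call
 * mas_destroy() to return unused nodes and fix up the last node.
 *
 *	MA_STATE(mas, &dst, 0, 0);
 *	unsigned long i;
 *	int ret;
 *
 *	ret = mas_expected_entries(&mas, nr);
 *	if (ret)
 *		return ret;
 *	mas_lock(&mas);
 *	for (i = 0; i < nr; i++) {
 *		mas_set_range(&mas, start[i], end[i]);
 *		mas_store(&mas, entries[i]);
 *	}
 *	mas_unlock(&mas);
 *	mas_destroy(&mas);
 */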
5936 * mas_next() - Get the next entry.
5937 * @mas: The maple state
5938 * @max: The maximum index to check.
5940 * Returns the next entry after @mas->index.
5941 * Must hold rcu_read_lock or the write lock.
5942 * Can return the zero entry.
5944 * Return: The next entry or %NULL
5946 void *mas_next(struct ma_state *mas, unsigned long max)
5948 if (mas_is_none(mas) || mas_is_paused(mas))
5949 mas->node = MAS_START;
5951 if (mas_is_start(mas))
5952 mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
5954 if (mas_is_ptr(mas)) {
5957 mas->last = ULONG_MAX;
5962 if (mas->last == ULONG_MAX)
5965 /* Retries on dead nodes handled by mas_next_entry */
5966 return mas_next_entry(mas, max);
5968 EXPORT_SYMBOL_GPL(mas_next);
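/*
 * Usage sketch for mas_next() (illustrative; @mt and index are
 * hypothetical): step to the entry after the current range, e.g. following
 * a mas_walk().
 *
 *	MA_STATE(mas, &mt, index, index);
 *	void *entry, *next;
 *
 *	rcu_read_lock();
 *	entry = mas_walk(&mas);
 *	next = mas_next(&mas, ULONG_MAX);
 *	rcu_read_unlock();
 *
 * next is the entry following mas.last, or NULL when nothing remains at or
 * below the limit.
 */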
5971 * mt_next() - get the next value in the maple tree
5972 * @mt: The maple tree
5973 * @index: The start index
5974 * @max: The maximum index to check
5976 * Return: The entry at @index or higher, or %NULL if nothing is found.
5978 void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
5981 MA_STATE(mas, mt, index, index);
5984 entry = mas_next(&mas, max);
5988 EXPORT_SYMBOL_GPL(mt_next);
5991 * mas_prev() - Get the previous entry
5992 * @mas: The maple state
5993 * @min: The minimum value to check.
5995 * Must hold rcu_read_lock or the write lock.
* Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not
* searchable nodes.
5999 * Return: the previous value or %NULL.
6001 void *mas_prev(struct ma_state *mas, unsigned long min)
6004 /* Nothing comes before 0 */
6006 mas->node = MAS_NONE;
6010 if (unlikely(mas_is_ptr(mas)))
6013 if (mas_is_none(mas) || mas_is_paused(mas))
6014 mas->node = MAS_START;
6016 if (mas_is_start(mas)) {
6022 if (mas_is_ptr(mas)) {
6028 mas->index = mas->last = 0;
6029 return mas_root_locked(mas);
6031 return mas_prev_entry(mas, min);
6033 EXPORT_SYMBOL_GPL(mas_prev);
6036 * mt_prev() - get the previous value in the maple tree
6037 * @mt: The maple tree
6038 * @index: The start index
6039 * @min: The minimum index to check
6041 * Return: The entry at @index or lower, or %NULL if nothing is found.
6043 void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
6046 MA_STATE(mas, mt, index, index);
6049 entry = mas_prev(&mas, min);
6053 EXPORT_SYMBOL_GPL(mt_prev);
6056 * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
6057 * @mas: The maple state to pause
6059 * Some users need to pause a walk and drop the lock they're holding in
6060 * order to yield to a higher priority thread or carry out an operation
6061 * on an entry. Those users should call this function before they drop
6062 * the lock. It resets the @mas to be suitable for the next iteration
6063 * of the loop after the user has reacquired the lock. If most entries
6064 * found during a walk require you to call mas_pause(), the mt_for_each()
6065 * iterator may be more appropriate.
6068 void mas_pause(struct ma_state *mas)
6070 mas->node = MAS_PAUSE;
6072 EXPORT_SYMBOL_GPL(mas_pause);
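/*
 * Usage sketch for mas_pause() (illustrative): yield the RCU read lock in
 * the middle of an iteration over a hypothetical tree @mt.
 *
 *	MA_STATE(mas, &mt, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	mas_for_each(&mas, entry, ULONG_MAX) {
 *		if (need_resched()) {
 *			mas_pause(&mas);
 *			rcu_read_unlock();
 *			cond_resched();
 *			rcu_read_lock();
 *		}
 *	}
 *	rcu_read_unlock();
 */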
6075 * mas_find() - On the first call, find the entry at or after mas->index up to
6076 * %max. Otherwise, find the entry after mas->index.
6077 * @mas: The maple state
6078 * @max: The maximum value to check.
6080 * Must hold rcu_read_lock or the write lock.
6081 * If an entry exists, last and index are updated accordingly.
6082 * May set @mas->node to MAS_NONE.
6084 * Return: The entry or %NULL.
6086 void *mas_find(struct ma_state *mas, unsigned long max)
6088 if (unlikely(mas_is_paused(mas))) {
6089 if (unlikely(mas->last == ULONG_MAX)) {
6090 mas->node = MAS_NONE;
6093 mas->node = MAS_START;
6094 mas->index = ++mas->last;
6097 if (unlikely(mas_is_none(mas)))
6098 mas->node = MAS_START;
6100 if (unlikely(mas_is_start(mas))) {
6101 /* First run or continue */
6104 if (mas->index > max)
6107 entry = mas_walk(mas);
6112 if (unlikely(!mas_searchable(mas)))
6115 /* Retries on dead nodes handled by mas_next_entry */
6116 return mas_next_entry(mas, max);
6118 EXPORT_SYMBOL_GPL(mas_find);
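/*
 * Usage sketch (illustrative): mas_find() is the workhorse behind the
 * mas_for_each() iterator; the open-coded equivalent over a hypothetical
 * tree @mt, with a hypothetical consumer use(), is:
 *
 *	MA_STATE(mas, &mt, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	while ((entry = mas_find(&mas, ULONG_MAX)) != NULL)
 *		use(entry);
 *	rcu_read_unlock();
 *
 * mas.index and mas.last are set to the range of each returned entry.
 */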
* mas_find_rev() - On the first call, find the first non-null entry at or below
6122 * mas->index down to %min. Otherwise find the first non-null entry below
6123 * mas->index down to %min.
6124 * @mas: The maple state
6125 * @min: The minimum value to check.
6127 * Must hold rcu_read_lock or the write lock.
6128 * If an entry exists, last and index are updated accordingly.
6129 * May set @mas->node to MAS_NONE.
6131 * Return: The entry or %NULL.
6133 void *mas_find_rev(struct ma_state *mas, unsigned long min)
6135 if (unlikely(mas_is_paused(mas))) {
6136 if (unlikely(mas->last == ULONG_MAX)) {
6137 mas->node = MAS_NONE;
6140 mas->node = MAS_START;
6141 mas->last = --mas->index;
6144 if (unlikely(mas_is_start(mas))) {
6145 /* First run or continue */
6148 if (mas->index < min)
6151 entry = mas_walk(mas);
6156 if (unlikely(!mas_searchable(mas)))
6159 if (mas->index < min)
6162 /* Retries on dead nodes handled by mas_prev_entry */
6163 return mas_prev_entry(mas, min);
6165 EXPORT_SYMBOL_GPL(mas_find_rev);
* mas_erase() - Find the range in which index resides and erase the entire
* range.
6170 * @mas: The maple state
6172 * Must hold the write lock.
6173 * Searches for @mas->index, sets @mas->index and @mas->last to the range and
6174 * erases that range.
6176 * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated.
6178 void *mas_erase(struct ma_state *mas)
6181 MA_WR_STATE(wr_mas, mas, NULL);
6183 if (mas_is_none(mas) || mas_is_paused(mas))
6184 mas->node = MAS_START;
6186 /* Retry unnecessary when holding the write lock. */
6187 entry = mas_state_walk(mas);
6192 /* Must reset to ensure spanning writes of last slot are detected */
6194 mas_wr_store_setup(&wr_mas);
6195 mas_wr_store_entry(&wr_mas);
6196 if (mas_nomem(mas, GFP_KERNEL))
6201 EXPORT_SYMBOL_GPL(mas_erase);
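/*
 * Usage sketch for mas_erase() (illustrative; @mt is hypothetical): erase
 * whatever range covers index 42.
 *
 *	MA_STATE(mas, &mt, 42, 42);
 *	void *old;
 *
 *	mas_lock(&mas);
 *	old = mas_erase(&mas);
 *	mas_unlock(&mas);
 *
 * old is the erased entry (or NULL) and mas.index/mas.last are set to the
 * range that held it.
 */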
* mas_nomem() - Check if there was an error allocating and do the allocation
* if necessary. If there are allocations, then free them.
6206 * @mas: The maple state
6207 * @gfp: The GFP_FLAGS to use for allocations
6208 * Return: true on allocation, false otherwise.
6210 bool mas_nomem(struct ma_state *mas, gfp_t gfp)
6211 __must_hold(mas->tree->lock)
6213 if (likely(mas->node != MA_ERROR(-ENOMEM))) {
6218 if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
6219 mtree_unlock(mas->tree);
6220 mas_alloc_nodes(mas, gfp);
6221 mtree_lock(mas->tree);
6223 mas_alloc_nodes(mas, gfp);
6226 if (!mas_allocated(mas))
6229 mas->node = MAS_START;
6233 void __init maple_tree_init(void)
6235 maple_node_cache = kmem_cache_create("maple_node",
6236 sizeof(struct maple_node), sizeof(struct maple_node),
6241 * mtree_load() - Load a value stored in a maple tree
6242 * @mt: The maple tree
6243 * @index: The index to load
6245 * Return: the entry or %NULL
6247 void *mtree_load(struct maple_tree *mt, unsigned long index)
6249 MA_STATE(mas, mt, index, index);
6252 trace_ma_read(__func__, &mas);
6255 entry = mas_start(&mas);
6256 if (unlikely(mas_is_none(&mas)))
6259 if (unlikely(mas_is_ptr(&mas))) {
6266 entry = mtree_lookup_walk(&mas);
6267 if (!entry && unlikely(mas_is_start(&mas)))
6271 if (xa_is_zero(entry))
6276 EXPORT_SYMBOL(mtree_load);
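/*
 * Usage sketch for the basic API (illustrative): no external locking is
 * needed; a tree can be defined, written and read as below.
 *
 *	static DEFINE_MTREE(mt);
 *	void *entry;
 *
 *	mtree_store(&mt, 7, xa_mk_value(7), GFP_KERNEL);
 *	entry = mtree_load(&mt, 7);
 *
 * entry is now xa_mk_value(7); unset indexes load as NULL.
 */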
6279 * mtree_store_range() - Store an entry at a given range.
6280 * @mt: The maple tree
6281 * @index: The start of the range
6282 * @last: The end of the range
6283 * @entry: The entry to store
6284 * @gfp: The GFP_FLAGS to use for allocations
* Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
* be allocated.
6289 int mtree_store_range(struct maple_tree *mt, unsigned long index,
6290 unsigned long last, void *entry, gfp_t gfp)
6292 MA_STATE(mas, mt, index, last);
6293 MA_WR_STATE(wr_mas, &mas, entry);
6295 trace_ma_write(__func__, &mas, 0, entry);
6296 if (WARN_ON_ONCE(xa_is_advanced(entry)))
6304 mas_wr_store_entry(&wr_mas);
6305 if (mas_nomem(&mas, gfp))
6309 if (mas_is_err(&mas))
6310 return xa_err(mas.node);
6314 EXPORT_SYMBOL(mtree_store_range);
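/*
 * Usage sketch (illustrative; @mt and ptr are hypothetical): one call
 * covers the whole range, so all three loads below return the same entry.
 *
 *	void *a, *b, *c;
 *
 *	mtree_store_range(&mt, 10, 20, ptr, GFP_KERNEL);
 *	a = mtree_load(&mt, 10);
 *	b = mtree_load(&mt, 15);
 *	c = mtree_load(&mt, 20);
 *
 * a == b == c == ptr, while indexes 9 and 21 still load as NULL (assuming
 * they were not previously set).
 */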
6317 * mtree_store() - Store an entry at a given index.
6318 * @mt: The maple tree
6319 * @index: The index to store the value
6320 * @entry: The entry to store
6321 * @gfp: The GFP_FLAGS to use for allocations
* Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
* be allocated.
6326 int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
6329 return mtree_store_range(mt, index, index, entry, gfp);
6331 EXPORT_SYMBOL(mtree_store);
* mtree_insert_range() - Insert an entry at a given range if there is no value.
6335 * @mt: The maple tree
6336 * @first: The start of the range
6337 * @last: The end of the range
6338 * @entry: The entry to store
6339 * @gfp: The GFP_FLAGS to use for allocations.
* Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6342 * request, -ENOMEM if memory could not be allocated.
6344 int mtree_insert_range(struct maple_tree *mt, unsigned long first,
6345 unsigned long last, void *entry, gfp_t gfp)
6347 MA_STATE(ms, mt, first, last);
6349 if (WARN_ON_ONCE(xa_is_advanced(entry)))
6357 mas_insert(&ms, entry);
6358 if (mas_nomem(&ms, gfp))
6362 if (mas_is_err(&ms))
6363 return xa_err(ms.node);
6367 EXPORT_SYMBOL(mtree_insert_range);
* mtree_insert() - Insert an entry at a given index if there is no value.
6371 * @mt: The maple tree
6372 * @index : The index to store the value
6373 * @entry: The entry to store
* @gfp: The GFP_FLAGS to use for allocations.
* Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6377 * request, -ENOMEM if memory could not be allocated.
6379 int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
6382 return mtree_insert_range(mt, index, index, entry, gfp);
6384 EXPORT_SYMBOL(mtree_insert);
6386 int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
6387 void *entry, unsigned long size, unsigned long min,
6388 unsigned long max, gfp_t gfp)
6392 MA_STATE(mas, mt, min, max - size);
6393 if (!mt_is_alloc(mt))
6396 if (WARN_ON_ONCE(mt_is_reserved(entry)))
6412 mas.last = max - size;
6413 ret = mas_alloc(&mas, entry, size, startp);
6414 if (mas_nomem(&mas, gfp))
6420 EXPORT_SYMBOL(mtree_alloc_range);
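/*
 * Usage sketch for mtree_alloc_range() (illustrative; mt and ptr are
 * hypothetical): carve a 0x1000-sized range out of [0, 0x100000) in an
 * allocation tree and learn where it was placed.
 *
 *	struct maple_tree mt;
 *	unsigned long start;
 *	int ret;
 *
 *	mt_init_flags(&mt, MT_FLAGS_ALLOC_RANGE);
 *	ret = mtree_alloc_range(&mt, &start, ptr, 0x1000, 0, 0x100000,
 *				GFP_KERNEL);
 *
 * On success, ret is 0 and start holds the lowest address of the allocated
 * range.
 */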
6422 int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
6423 void *entry, unsigned long size, unsigned long min,
6424 unsigned long max, gfp_t gfp)
6428 MA_STATE(mas, mt, min, max - size);
6429 if (!mt_is_alloc(mt))
6432 if (WARN_ON_ONCE(mt_is_reserved(entry)))
6446 ret = mas_rev_alloc(&mas, min, max, entry, size, startp);
6447 if (mas_nomem(&mas, gfp))
6453 EXPORT_SYMBOL(mtree_alloc_rrange);
6456 * mtree_erase() - Find an index and erase the entire range.
6457 * @mt: The maple tree
6458 * @index: The index to erase
6460 * Erasing is the same as a walk to an entry then a store of a NULL to that
6461 * ENTIRE range. In fact, it is implemented as such using the advanced API.
6463 * Return: The entry stored at the @index or %NULL
6465 void *mtree_erase(struct maple_tree *mt, unsigned long index)
6469 MA_STATE(mas, mt, index, index);
6470 trace_ma_op(__func__, &mas);
6473 entry = mas_erase(&mas);
6478 EXPORT_SYMBOL(mtree_erase);
6481 * __mt_destroy() - Walk and free all nodes of a locked maple tree.
6482 * @mt: The maple tree
6484 * Note: Does not handle locking.
6486 void __mt_destroy(struct maple_tree *mt)
6488 void *root = mt_root_locked(mt);
6490 rcu_assign_pointer(mt->ma_root, NULL);
6491 if (xa_is_node(root))
6492 mte_destroy_walk(root, mt);
6496 EXPORT_SYMBOL_GPL(__mt_destroy);
6499 * mtree_destroy() - Destroy a maple tree
6500 * @mt: The maple tree
6502 * Frees all resources used by the tree. Handles locking.
6504 void mtree_destroy(struct maple_tree *mt)
6510 EXPORT_SYMBOL(mtree_destroy);
6513 * mt_find() - Search from the start up until an entry is found.
6514 * @mt: The maple tree
6515 * @index: Pointer which contains the start location of the search
6516 * @max: The maximum value to check
6518 * Handles locking. @index will be incremented to one beyond the range.
6520 * Return: The entry at or after the @index or %NULL
6522 void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
6524 MA_STATE(mas, mt, *index, *index);
6526 #ifdef CONFIG_DEBUG_MAPLE_TREE
6527 unsigned long copy = *index;
6530 trace_ma_read(__func__, &mas);
6537 entry = mas_state_walk(&mas);
6538 if (mas_is_start(&mas))
6541 if (unlikely(xa_is_zero(entry)))
6547 while (mas_searchable(&mas) && (mas.index < max)) {
6548 entry = mas_next_entry(&mas, max);
6549 if (likely(entry && !xa_is_zero(entry)))
6553 if (unlikely(xa_is_zero(entry)))
6557 if (likely(entry)) {
6558 *index = mas.last + 1;
6559 #ifdef CONFIG_DEBUG_MAPLE_TREE
6560 if ((*index) && (*index) <= copy)
6561 pr_err("index not increased! %lx <= %lx\n",
6563 MT_BUG_ON(mt, (*index) && ((*index) <= copy));
6569 EXPORT_SYMBOL(mt_find);
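/*
 * Usage sketch for mt_find() (illustrative; @mt is hypothetical and use()
 * is a placeholder consumer): because *index is advanced one past each
 * returned range, repeated calls walk the whole tree, which is essentially
 * what the mt_for_each() helper does.
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	while ((entry = mt_find(&mt, &index, ULONG_MAX)) != NULL)
 *		use(entry);
 */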
6572 * mt_find_after() - Search from the start up until an entry is found.
6573 * @mt: The maple tree
6574 * @index: Pointer which contains the start location of the search
6575 * @max: The maximum value to check
6577 * Handles locking, detects wrapping on index == 0
6579 * Return: The entry at or after the @index or %NULL
6581 void *mt_find_after(struct maple_tree *mt, unsigned long *index,
6587 return mt_find(mt, index, max);
6589 EXPORT_SYMBOL(mt_find_after);
6591 #ifdef CONFIG_DEBUG_MAPLE_TREE
6592 atomic_t maple_tree_tests_run;
6593 EXPORT_SYMBOL_GPL(maple_tree_tests_run);
6594 atomic_t maple_tree_tests_passed;
6595 EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
6598 extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
6599 void mt_set_non_kernel(unsigned int val)
6601 kmem_cache_set_non_kernel(maple_node_cache, val);
6604 extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
6605 unsigned long mt_get_alloc_size(void)
6607 return kmem_cache_get_alloc(maple_node_cache);
6610 extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
6611 void mt_zero_nr_tallocated(void)
6613 kmem_cache_zero_nr_tallocated(maple_node_cache);
6616 extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
6617 unsigned int mt_nr_tallocated(void)
6619 return kmem_cache_nr_tallocated(maple_node_cache);
6622 extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
6623 unsigned int mt_nr_allocated(void)
6625 return kmem_cache_nr_allocated(maple_node_cache);
6629 * mas_dead_node() - Check if the maple state is pointing to a dead node.
6630 * @mas: The maple state
6631 * @index: The index to restore in @mas.
6633 * Used in test code.
6634 * Return: 1 if @mas has been reset to MAS_START, 0 otherwise.
6636 static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
6638 if (unlikely(!mas_searchable(mas) || mas_is_start(mas)))
6641 if (likely(!mte_dead_node(mas->node)))
6644 mas_rewalk(mas, index);
6648 void mt_cache_shrink(void)
6653 * mt_cache_shrink() - For testing, don't use this.
6655 * Certain testcases can trigger an OOM when combined with other memory
6656 * debugging configuration options. This function is used to reduce the
* possibility of an out of memory event due to kmem_cache objects remaining
6658 * around for longer than usual.
6660 void mt_cache_shrink(void)
6662 kmem_cache_shrink(maple_node_cache);
6665 EXPORT_SYMBOL_GPL(mt_cache_shrink);
6667 #endif /* not defined __KERNEL__ */
6669 * mas_get_slot() - Get the entry in the maple state node stored at @offset.
6670 * @mas: The maple state
6671 * @offset: The offset into the slot array to fetch.
6673 * Return: The entry stored at @offset.
6675 static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
6676 unsigned char offset)
6678 return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
* mas_first_entry() - Go to the first leaf and find the first entry.
6685 * @mas: the maple state.
* @mn: The starting maple node.
* @limit: the maximum index to check.
* @mt: The node type of @mn.
*
* Sets mas->offset to the offset of the entry and mas->index to the range
* minimum.
6691 * Return: The first entry or MAS_NONE.
6693 static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
6694 unsigned long limit, enum maple_type mt)
6698 unsigned long *pivots;
6702 mas->index = mas->min;
6703 if (mas->index > limit)
6708 while (likely(!ma_is_leaf(mt))) {
6709 MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
6710 slots = ma_slots(mn, mt);
6711 entry = mas_slot(mas, slots, 0);
6712 pivots = ma_pivots(mn, mt);
6713 if (unlikely(ma_dead_node(mn)))
6718 mt = mte_node_type(mas->node);
6720 MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
6723 slots = ma_slots(mn, mt);
6724 entry = mas_slot(mas, slots, 0);
6725 if (unlikely(ma_dead_node(mn)))
6728 /* Slot 0 or 1 must be set */
6729 if (mas->index > limit)
6736 entry = mas_slot(mas, slots, 1);
6737 pivots = ma_pivots(mn, mt);
6738 if (unlikely(ma_dead_node(mn)))
6741 mas->index = pivots[0] + 1;
6742 if (mas->index > limit)
6749 if (likely(!ma_dead_node(mn)))
6750 mas->node = MAS_NONE;
6754 /* Depth first search, post-order */
6755 static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
6758 struct maple_enode *p = MAS_NONE, *mn = mas->node;
6759 unsigned long p_min, p_max;
6761 mas_next_node(mas, mas_mn(mas), max);
6762 if (!mas_is_none(mas))
6765 if (mte_is_root(mn))
6770 while (mas->node != MAS_NONE) {
6774 mas_prev_node(mas, 0);
6785 /* Tree validations */
6786 static void mt_dump_node(const struct maple_tree *mt, void *entry,
6787 unsigned long min, unsigned long max, unsigned int depth);
6788 static void mt_dump_range(unsigned long min, unsigned long max,
6791 static const char spaces[] = " ";
6794 pr_info("%.*s%lu: ", depth * 2, spaces, min);
6796 pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
6799 static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
6802 mt_dump_range(min, max, depth);
6804 if (xa_is_value(entry))
6805 pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
6806 xa_to_value(entry), entry);
6807 else if (xa_is_zero(entry))
6808 pr_cont("zero (%ld)\n", xa_to_internal(entry));
6809 else if (mt_is_reserved(entry))
6810 pr_cont("UNKNOWN ENTRY (%p)\n", entry);
6812 pr_cont("%p\n", entry);
6815 static void mt_dump_range64(const struct maple_tree *mt, void *entry,
6816 unsigned long min, unsigned long max, unsigned int depth)
6818 struct maple_range_64 *node = &mte_to_node(entry)->mr64;
6819 bool leaf = mte_is_leaf(entry);
6820 unsigned long first = min;
6823 pr_cont(" contents: ");
6824 for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++)
6825 pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6826 pr_cont("%p\n", node->slot[i]);
6827 for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
6828 unsigned long last = max;
6830 if (i < (MAPLE_RANGE64_SLOTS - 1))
6831 last = node->pivot[i];
6832 else if (!node->slot[i] && max != mt_node_max(entry))
6834 if (last == 0 && i > 0)
6837 mt_dump_entry(mt_slot(mt, node->slot, i),
6838 first, last, depth + 1);
6839 else if (node->slot[i])
6840 mt_dump_node(mt, mt_slot(mt, node->slot, i),
6841 first, last, depth + 1);
6846 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6847 node, last, max, i);
6854 static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
6855 unsigned long min, unsigned long max, unsigned int depth)
6857 struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
6858 bool leaf = mte_is_leaf(entry);
6859 unsigned long first = min;
6862 pr_cont(" contents: ");
6863 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++)
6864 pr_cont("%lu ", node->gap[i]);
6865 pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
6866 for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++)
6867 pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6868 pr_cont("%p\n", node->slot[i]);
6869 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
6870 unsigned long last = max;
6872 if (i < (MAPLE_ARANGE64_SLOTS - 1))
6873 last = node->pivot[i];
6874 else if (!node->slot[i])
6876 if (last == 0 && i > 0)
6879 mt_dump_entry(mt_slot(mt, node->slot, i),
6880 first, last, depth + 1);
6881 else if (node->slot[i])
6882 mt_dump_node(mt, mt_slot(mt, node->slot, i),
6883 first, last, depth + 1);
6888 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6889 node, last, max, i);
6896 static void mt_dump_node(const struct maple_tree *mt, void *entry,
6897 unsigned long min, unsigned long max, unsigned int depth)
6899 struct maple_node *node = mte_to_node(entry);
6900 unsigned int type = mte_node_type(entry);
6903 mt_dump_range(min, max, depth);
6905 pr_cont("node %p depth %d type %d parent %p", node, depth, type,
6906 node ? node->parent : NULL);
6910 for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
6912 pr_cont("OUT OF RANGE: ");
6913 mt_dump_entry(mt_slot(mt, node->slot, i),
6914 min + i, min + i, depth);
6918 case maple_range_64:
6919 mt_dump_range64(mt, entry, min, max, depth);
6921 case maple_arange_64:
6922 mt_dump_arange64(mt, entry, min, max, depth);
6926 pr_cont(" UNKNOWN TYPE\n");
6930 void mt_dump(const struct maple_tree *mt)
6932 void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));
6934 pr_info("maple_tree(%p) flags %X, height %u root %p\n",
6935 mt, mt->ma_flags, mt_height(mt), entry);
6936 if (!xa_is_node(entry))
6937 mt_dump_entry(entry, 0, 0, 0);
6939 mt_dump_node(mt, entry, 0, mt_node_max(entry), 0);
6941 EXPORT_SYMBOL_GPL(mt_dump);
6944 * Calculate the maximum gap in a node and check if that's what is reported in
6945 * the parent (unless root).
6947 static void mas_validate_gaps(struct ma_state *mas)
6949 struct maple_enode *mte = mas->node;
6950 struct maple_node *p_mn;
6951 unsigned long gap = 0, max_gap = 0;
6952 unsigned long p_end, p_start = mas->min;
6953 unsigned char p_slot;
6954 unsigned long *gaps = NULL;
6955 unsigned long *pivots = ma_pivots(mte_to_node(mte), mte_node_type(mte));
6958 if (ma_is_dense(mte_node_type(mte))) {
6959 for (i = 0; i < mt_slot_count(mte); i++) {
6960 if (mas_get_slot(mas, i)) {
6971 gaps = ma_gaps(mte_to_node(mte), mte_node_type(mte));
6972 for (i = 0; i < mt_slot_count(mte); i++) {
6973 p_end = mas_logical_pivot(mas, pivots, i, mte_node_type(mte));
6976 if (mas_get_slot(mas, i)) {
6981 gap += p_end - p_start + 1;
6983 void *entry = mas_get_slot(mas, i);
6987 if (gap != p_end - p_start + 1) {
6988 pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n",
6990 mas_get_slot(mas, i), gap,
6994 MT_BUG_ON(mas->tree,
6995 gap != p_end - p_start + 1);
6998 if (gap > p_end - p_start + 1) {
6999 pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
7000 mas_mn(mas), i, gap, p_end, p_start,
7001 p_end - p_start + 1);
7002 MT_BUG_ON(mas->tree,
7003 gap > p_end - p_start + 1);
7011 p_start = p_end + 1;
7012 if (p_end >= mas->max)
7017 if (mte_is_root(mte))
7020 p_slot = mte_parent_slot(mas->node);
7021 p_mn = mte_parent(mte);
7022 MT_BUG_ON(mas->tree, max_gap > mas->max);
7023 if (ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap) {
7024 pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
7028 MT_BUG_ON(mas->tree,
7029 ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap);
7032 static void mas_validate_parent_slot(struct ma_state *mas)
7034 struct maple_node *parent;
7035 struct maple_enode *node;
7036 enum maple_type p_type = mas_parent_enum(mas, mas->node);
7037 unsigned char p_slot = mte_parent_slot(mas->node);
7041 if (mte_is_root(mas->node))
7044 parent = mte_parent(mas->node);
7045 slots = ma_slots(parent, p_type);
7046 MT_BUG_ON(mas->tree, mas_mn(mas) == parent);
7048 /* Check prev/next parent slot for duplicate node entry */
7050 for (i = 0; i < mt_slots[p_type]; i++) {
7051 node = mas_slot(mas, slots, i);
7053 if (node != mas->node)
7054 pr_err("parent %p[%u] does not have %p\n",
7055 parent, i, mas_mn(mas));
7056 MT_BUG_ON(mas->tree, node != mas->node);
7057 } else if (node == mas->node) {
7058 pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
7059 mas_mn(mas), parent, i, p_slot);
7060 MT_BUG_ON(mas->tree, node == mas->node);
7065 static void mas_validate_child_slot(struct ma_state *mas)
7067 enum maple_type type = mte_node_type(mas->node);
7068 void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7069 unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
7070 struct maple_enode *child;
7073 if (mte_is_leaf(mas->node))
7076 for (i = 0; i < mt_slots[type]; i++) {
7077 child = mas_slot(mas, slots, i);
7078 if (!pivots[i] || pivots[i] == mas->max)
7084 if (mte_parent_slot(child) != i) {
7085 pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
7086 mas_mn(mas), i, mte_to_node(child),
7087 mte_parent_slot(child));
7088 MT_BUG_ON(mas->tree, 1);
7091 if (mte_parent(child) != mte_to_node(mas->node)) {
7092 pr_err("child %p has parent %p not %p\n",
7093 mte_to_node(child), mte_parent(child),
7094 mte_to_node(mas->node));
7095 MT_BUG_ON(mas->tree, 1);
7101 * Validate all pivots are within mas->min and mas->max.
7103 static void mas_validate_limits(struct ma_state *mas)
7106 unsigned long prev_piv = 0;
7107 enum maple_type type = mte_node_type(mas->node);
7108 void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7109 unsigned long *pivots = ma_pivots(mas_mn(mas), type);
7111 /* all limits are fine here. */
7112 if (mte_is_root(mas->node))
7115 for (i = 0; i < mt_slots[type]; i++) {
7118 piv = mas_safe_pivot(mas, pivots, i, type);
7120 if (!piv && (i != 0))
7123 if (!mte_is_leaf(mas->node)) {
7124 void *entry = mas_slot(mas, slots, i);
7127 pr_err("%p[%u] cannot be null\n",
7130 MT_BUG_ON(mas->tree, !entry);
7133 if (prev_piv > piv) {
7134 pr_err("%p[%u] piv %lu < prev_piv %lu\n",
7135 mas_mn(mas), i, piv, prev_piv);
7136 MT_BUG_ON(mas->tree, piv < prev_piv);
7139 if (piv < mas->min) {
7140 pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
7142 MT_BUG_ON(mas->tree, piv < mas->min);
7144 if (piv > mas->max) {
7145 pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
7147 MT_BUG_ON(mas->tree, piv > mas->max);
7150 if (piv == mas->max)
7153 for (i += 1; i < mt_slots[type]; i++) {
7154 void *entry = mas_slot(mas, slots, i);
7156 if (entry && (i != mt_slots[type] - 1)) {
7157 pr_err("%p[%u] should not have entry %p\n", mas_mn(mas),
7159 MT_BUG_ON(mas->tree, entry != NULL);
7162 if (i < mt_pivots[type]) {
7163 unsigned long piv = pivots[i];
7168 pr_err("%p[%u] should not have piv %lu\n",
7169 mas_mn(mas), i, piv);
7170 MT_BUG_ON(mas->tree, i < mt_pivots[type] - 1);
7175 static void mt_validate_nulls(struct maple_tree *mt)
7177 void *entry, *last = (void *)1;
7178 unsigned char offset = 0;
7180 MA_STATE(mas, mt, 0, 0);
7183 if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
7186 while (!mte_is_leaf(mas.node))
7189 slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
7191 entry = mas_slot(&mas, slots, offset);
7192 if (!last && !entry) {
7193 pr_err("Sequential nulls end at %p[%u]\n",
7194 mas_mn(&mas), offset);
7196 MT_BUG_ON(mt, !last && !entry);
7198 if (offset == mas_data_end(&mas)) {
7199 mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
7200 if (mas_is_none(&mas))
7203 slots = ma_slots(mte_to_node(mas.node),
7204 mte_node_type(mas.node));
7209 } while (!mas_is_none(&mas));
7213 * validate a maple tree by checking:
7214 * 1. The limits (pivots are within mas->min to mas->max)
7215 * 2. The gap is correctly set in the parents
7217 void mt_validate(struct maple_tree *mt)
7221 MA_STATE(mas, mt, 0, 0);
7224 if (!mas_searchable(&mas))
7227 mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node));
7228 while (!mas_is_none(&mas)) {
7229 MT_BUG_ON(mas.tree, mte_dead_node(mas.node));
7230 if (!mte_is_root(mas.node)) {
7231 end = mas_data_end(&mas);
7232 if ((end < mt_min_slot_count(mas.node)) &&
7233 (mas.max != ULONG_MAX)) {
7234 pr_err("Invalid size %u of %p\n", end,
7236 MT_BUG_ON(mas.tree, 1);
7240 mas_validate_parent_slot(&mas);
7241 mas_validate_child_slot(&mas);
7242 mas_validate_limits(&mas);
7243 if (mt_is_alloc(mt))
7244 mas_validate_gaps(&mas);
7245 mas_dfs_postorder(&mas, ULONG_MAX);
7247 mt_validate_nulls(mt);
7252 EXPORT_SYMBOL_GPL(mt_validate);
7254 #endif /* CONFIG_DEBUG_MAPLE_TREE */