Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright (C) 2001 Momchil Velikov | |
3 | * Portions Copyright (C) 2001 Christoph Hellwig | |
cde53535 | 4 | * Copyright (C) 2005 SGI, Christoph Lameter |
7cf9c2c7 | 5 | * Copyright (C) 2006 Nick Piggin |
78c1d784 | 6 | * Copyright (C) 2012 Konstantin Khlebnikov |
6b053b8e MW |
7 | * Copyright (C) 2016 Intel, Matthew Wilcox |
8 | * Copyright (C) 2016 Intel, Ross Zwisler | |
1da177e4 LT |
9 | * |
10 | * This program is free software; you can redistribute it and/or | |
11 | * modify it under the terms of the GNU General Public License as | |
12 | * published by the Free Software Foundation; either version 2, or (at | |
13 | * your option) any later version. | |
14 | * | |
15 | * This program is distributed in the hope that it will be useful, but | |
16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
18 | * General Public License for more details. | |
19 | * | |
20 | * You should have received a copy of the GNU General Public License | |
21 | * along with this program; if not, write to the Free Software | |
22 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | |
23 | */ | |
24 | ||
0a835c4f MW |
25 | #include <linux/bitmap.h> |
26 | #include <linux/bitops.h> | |
460488c5 | 27 | #include <linux/bug.h> |
e157b555 | 28 | #include <linux/cpu.h> |
1da177e4 | 29 | #include <linux/errno.h> |
0a835c4f MW |
30 | #include <linux/export.h> |
31 | #include <linux/idr.h> | |
1da177e4 LT |
32 | #include <linux/init.h> |
33 | #include <linux/kernel.h> | |
0a835c4f | 34 | #include <linux/kmemleak.h> |
1da177e4 | 35 | #include <linux/percpu.h> |
0a835c4f MW |
36 | #include <linux/preempt.h> /* in_interrupt() */ |
37 | #include <linux/radix-tree.h> | |
38 | #include <linux/rcupdate.h> | |
1da177e4 | 39 | #include <linux/slab.h> |
1da177e4 | 40 | #include <linux/string.h> |
1da177e4 LT |
41 | |
42 | ||
c78c66d1 KS |
43 | /* Number of nodes in fully populated tree of given height */ |
44 | static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly; | |
45 | ||
1da177e4 LT |
46 | /* |
47 | * Radix tree node cache. | |
48 | */ | |
e18b890b | 49 | static struct kmem_cache *radix_tree_node_cachep; |
1da177e4 | 50 | |
55368052 NP |
51 | /* |
52 | * The radix tree is variable-height, so an insert operation not only has | |
53 | * to build the branch to its corresponding item, it also has to build the | |
54 | * branch to existing items if the size has to be increased (by | |
55 | * radix_tree_extend). | |
56 | * | |
57 | * The worst case is a zero height tree with just a single item at index 0, | |
58 | * and then inserting an item at index ULONG_MAX. This requires 2 new branches | |
59 | * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared. | |
60 | * Hence: | |
61 | */ | |
62 | #define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1) | |
63 | ||
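/*
 * For example, assuming the usual 64-bit configuration where
 * RADIX_TREE_MAP_SHIFT == 6: RADIX_TREE_MAX_PATH == DIV_ROUND_UP(64, 6) == 11,
 * so RADIX_TREE_PRELOAD_SIZE == 2 * 11 - 1 == 21 preallocated nodes are
 * enough to guarantee that a single insertion cannot fail.
 */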
0a835c4f MW |
64 | /* |
65 | * The IDR does not have to be as high as the radix tree since it uses | |
66 | * signed integers, not unsigned longs. | |
67 | */ | |
68 | #define IDR_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(int) - 1) | |
69 | #define IDR_MAX_PATH (DIV_ROUND_UP(IDR_INDEX_BITS, \ | |
70 | RADIX_TREE_MAP_SHIFT)) | |
71 | #define IDR_PRELOAD_SIZE (IDR_MAX_PATH * 2 - 1) | |
72 | ||
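/*
 * With the same assumed RADIX_TREE_MAP_SHIFT of 6, IDR_INDEX_BITS == 31,
 * IDR_MAX_PATH == DIV_ROUND_UP(31, 6) == 6, and IDR_PRELOAD_SIZE == 11.
 */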
7ad3d4d8 MW |
73 | /* |
74 | * The IDA is even shorter since it uses a bitmap at the last level. | |
75 | */ | |
76 | #define IDA_INDEX_BITS (8 * sizeof(int) - 1 - ilog2(IDA_BITMAP_BITS)) | |
77 | #define IDA_MAX_PATH (DIV_ROUND_UP(IDA_INDEX_BITS, \ | |
78 | RADIX_TREE_MAP_SHIFT)) | |
79 | #define IDA_PRELOAD_SIZE (IDA_MAX_PATH * 2 - 1) | |
80 | ||
1da177e4 LT |
81 | /* |
82 | * Per-cpu pool of preloaded nodes | |
83 | */ | |
84 | struct radix_tree_preload { | |
2fcd9005 | 85 | unsigned nr; |
1293d5c5 | 86 | /* nodes->parent points to next preallocated node */ |
9d2a8da0 | 87 | struct radix_tree_node *nodes; |
1da177e4 | 88 | }; |
8cef7d57 | 89 | static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; |
1da177e4 | 90 | |
148deab2 MW |
91 | static inline struct radix_tree_node *entry_to_node(void *ptr) |
92 | { | |
93 | return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE); | |
94 | } | |
95 | ||
a4db4dce | 96 | static inline void *node_to_entry(void *ptr) |
27d20fdd | 97 | { |
30ff46cc | 98 | return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE); |
27d20fdd NP |
99 | } |
100 | ||
a4db4dce | 101 | #define RADIX_TREE_RETRY node_to_entry(NULL) |
afe0e395 | 102 | |
db050f29 MW |
103 | #ifdef CONFIG_RADIX_TREE_MULTIORDER |
104 | /* Sibling slots point directly to another slot in the same node */ | |
35534c86 MW |
105 | static inline |
106 | bool is_sibling_entry(const struct radix_tree_node *parent, void *node) | |
db050f29 | 107 | { |
d7b62727 | 108 | void __rcu **ptr = node; |
db050f29 MW |
109 | return (parent->slots <= ptr) && |
110 | (ptr < parent->slots + RADIX_TREE_MAP_SIZE); | |
111 | } | |
112 | #else | |
35534c86 MW |
113 | static inline |
114 | bool is_sibling_entry(const struct radix_tree_node *parent, void *node) | |
db050f29 MW |
115 | { |
116 | return false; | |
117 | } | |
118 | #endif | |
119 | ||
d7b62727 MW |
120 | static inline unsigned long |
121 | get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot) | |
db050f29 MW |
122 | { |
123 | return slot - parent->slots; | |
124 | } | |
125 | ||
35534c86 | 126 | static unsigned int radix_tree_descend(const struct radix_tree_node *parent, |
9e85d811 | 127 | struct radix_tree_node **nodep, unsigned long index) |
db050f29 | 128 | { |
9e85d811 | 129 | unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK; |
d7b62727 | 130 | void __rcu **entry = rcu_dereference_raw(parent->slots[offset]); |
db050f29 MW |
131 | |
132 | #ifdef CONFIG_RADIX_TREE_MULTIORDER | |
b194d16c | 133 | if (radix_tree_is_internal_node(entry)) { |
8d2c0d36 | 134 | if (is_sibling_entry(parent, entry)) { |
d7b62727 MW |
135 | void __rcu **sibentry; |
136 | sibentry = (void __rcu **) entry_to_node(entry); | |
8d2c0d36 LT |
137 | offset = get_slot_offset(parent, sibentry); |
138 | entry = rcu_dereference_raw(*sibentry); | |
db050f29 MW |
139 | } |
140 | } | |
141 | #endif | |
142 | ||
143 | *nodep = (void *)entry; | |
144 | return offset; | |
145 | } | |
146 | ||
35534c86 | 147 | static inline gfp_t root_gfp_mask(const struct radix_tree_root *root) |
612d6c19 NP |
148 | { |
149 | return root->gfp_mask & __GFP_BITS_MASK; | |
150 | } | |
151 | ||
643b52b9 NP |
152 | static inline void tag_set(struct radix_tree_node *node, unsigned int tag, |
153 | int offset) | |
154 | { | |
155 | __set_bit(offset, node->tags[tag]); | |
156 | } | |
157 | ||
158 | static inline void tag_clear(struct radix_tree_node *node, unsigned int tag, | |
159 | int offset) | |
160 | { | |
161 | __clear_bit(offset, node->tags[tag]); | |
162 | } | |
163 | ||
35534c86 | 164 | static inline int tag_get(const struct radix_tree_node *node, unsigned int tag, |
643b52b9 NP |
165 | int offset) |
166 | { | |
167 | return test_bit(offset, node->tags[tag]); | |
168 | } | |
169 | ||
35534c86 | 170 | static inline void root_tag_set(struct radix_tree_root *root, unsigned tag) |
643b52b9 | 171 | { |
0a835c4f | 172 | root->gfp_mask |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT)); |
643b52b9 NP |
173 | } |
174 | ||
2fcd9005 | 175 | static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag) |
643b52b9 | 176 | { |
0a835c4f | 177 | root->gfp_mask &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT)); |
643b52b9 NP |
178 | } |
179 | ||
180 | static inline void root_tag_clear_all(struct radix_tree_root *root) | |
181 | { | |
0a835c4f | 182 | root->gfp_mask &= (1 << ROOT_TAG_SHIFT) - 1; |
643b52b9 NP |
183 | } |
184 | ||
35534c86 | 185 | static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag) |
643b52b9 | 186 | { |
0a835c4f | 187 | return (__force int)root->gfp_mask & (1 << (tag + ROOT_TAG_SHIFT)); |
643b52b9 NP |
188 | } |
189 | ||
35534c86 | 190 | static inline unsigned root_tags_get(const struct radix_tree_root *root) |
643b52b9 | 191 | { |
0a835c4f | 192 | return (__force unsigned)root->gfp_mask >> ROOT_TAG_SHIFT; |
643b52b9 NP |
193 | } |
194 | ||
0a835c4f | 195 | static inline bool is_idr(const struct radix_tree_root *root) |
7b60e9ad | 196 | { |
0a835c4f | 197 | return !!(root->gfp_mask & ROOT_IS_IDR); |
7b60e9ad MW |
198 | } |
199 | ||
643b52b9 NP |
200 | /* |
201 | * Returns 1 if any slot in the node has this tag set. | |
202 | * Otherwise returns 0. | |
203 | */ | |
35534c86 MW |
204 | static inline int any_tag_set(const struct radix_tree_node *node, |
205 | unsigned int tag) | |
643b52b9 | 206 | { |
2fcd9005 | 207 | unsigned idx; |
643b52b9 NP |
208 | for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { |
209 | if (node->tags[tag][idx]) | |
210 | return 1; | |
211 | } | |
212 | return 0; | |
213 | } | |
78c1d784 | 214 | |
0a835c4f MW |
215 | static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag) |
216 | { | |
217 | bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE); | |
218 | } | |
219 | ||
78c1d784 KK |
220 | /** |
221 | * radix_tree_find_next_bit - find the next set bit in a memory region | |
222 | * | |
223 | * @node: the radix tree node whose tag bitmap is searched
224 | * @tag: the tag index selecting which bitmap to search
225 | * @offset: the bit number to start searching at
226 | *
227 | * Unrollable variant of find_next_bit() for constant size arrays.
228 | * Tail bits from RADIX_TREE_MAP_SIZE up to the next BITS_PER_LONG boundary must be zero.
229 | * Returns the next set bit's offset, or RADIX_TREE_MAP_SIZE if nothing is found.
230 | */ | |
231 | static __always_inline unsigned long | |
bc412fca MW |
232 | radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag, |
233 | unsigned long offset) | |
78c1d784 | 234 | { |
bc412fca | 235 | const unsigned long *addr = node->tags[tag]; |
78c1d784 | 236 | |
bc412fca | 237 | if (offset < RADIX_TREE_MAP_SIZE) { |
78c1d784 KK |
238 | unsigned long tmp; |
239 | ||
240 | addr += offset / BITS_PER_LONG; | |
241 | tmp = *addr >> (offset % BITS_PER_LONG); | |
242 | if (tmp) | |
243 | return __ffs(tmp) + offset; | |
244 | offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1); | |
bc412fca | 245 | while (offset < RADIX_TREE_MAP_SIZE) { |
78c1d784 KK |
246 | tmp = *++addr; |
247 | if (tmp) | |
248 | return __ffs(tmp) + offset; | |
249 | offset += BITS_PER_LONG; | |
250 | } | |
251 | } | |
bc412fca | 252 | return RADIX_TREE_MAP_SIZE; |
78c1d784 KK |
253 | } |
254 | ||
268f42de MW |
255 | static unsigned int iter_offset(const struct radix_tree_iter *iter) |
256 | { | |
257 | return (iter->index >> iter_shift(iter)) & RADIX_TREE_MAP_MASK; | |
258 | } | |
259 | ||
218ed750 MW |
260 | /* |
261 | * The maximum index which can be stored in a radix tree | |
262 | */ | |
263 | static inline unsigned long shift_maxindex(unsigned int shift) | |
264 | { | |
265 | return (RADIX_TREE_MAP_SIZE << shift) - 1; | |
266 | } | |
267 | ||
35534c86 | 268 | static inline unsigned long node_maxindex(const struct radix_tree_node *node) |
218ed750 MW |
269 | { |
270 | return shift_maxindex(node->shift); | |
271 | } | |
272 | ||
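/*
 * For instance, with an assumed RADIX_TREE_MAP_SHIFT of 6 (so
 * RADIX_TREE_MAP_SIZE == 64), shift_maxindex(0) == 63, shift_maxindex(6) ==
 * 4095 and shift_maxindex(12) == 262143: each additional level multiplies
 * the covered index range by RADIX_TREE_MAP_SIZE.
 */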
0a835c4f MW |
273 | static unsigned long next_index(unsigned long index, |
274 | const struct radix_tree_node *node, | |
275 | unsigned long offset) | |
276 | { | |
277 | return (index & ~node_maxindex(node)) + (offset << node->shift); | |
278 | } | |
279 | ||
0796c583 | 280 | #ifndef __KERNEL__ |
d0891265 | 281 | static void dump_node(struct radix_tree_node *node, unsigned long index) |
7cf19af4 | 282 | { |
0796c583 | 283 | unsigned long i; |
7cf19af4 | 284 | |
218ed750 MW |
285 | pr_debug("radix node: %p offset %d indices %lu-%lu parent %p tags %lx %lx %lx shift %d count %d exceptional %d\n", |
286 | node, node->offset, index, index | node_maxindex(node), | |
287 | node->parent, | |
0796c583 | 288 | node->tags[0][0], node->tags[1][0], node->tags[2][0], |
218ed750 | 289 | node->shift, node->count, node->exceptional); |
0796c583 RZ |
290 | |
291 | for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) { | |
d0891265 MW |
292 | unsigned long first = index | (i << node->shift); |
293 | unsigned long last = first | ((1UL << node->shift) - 1); | |
0796c583 RZ |
294 | void *entry = node->slots[i]; |
295 | if (!entry) | |
296 | continue; | |
218ed750 MW |
297 | if (entry == RADIX_TREE_RETRY) { |
298 | pr_debug("radix retry offset %ld indices %lu-%lu parent %p\n", | |
299 | i, first, last, node); | |
b194d16c | 300 | } else if (!radix_tree_is_internal_node(entry)) { |
218ed750 MW |
301 | pr_debug("radix entry %p offset %ld indices %lu-%lu parent %p\n", |
302 | entry, i, first, last, node); | |
303 | } else if (is_sibling_entry(node, entry)) { | |
304 | pr_debug("radix sblng %p offset %ld indices %lu-%lu parent %p val %p\n", | |
305 | entry, i, first, last, node, | |
306 | *(void **)entry_to_node(entry)); | |
0796c583 | 307 | } else { |
4dd6c098 | 308 | dump_node(entry_to_node(entry), first); |
0796c583 RZ |
309 | } |
310 | } | |
7cf19af4 MW |
311 | } |
312 | ||
313 | /* For debug */ | |
314 | static void radix_tree_dump(struct radix_tree_root *root) | |
315 | { | |
d0891265 MW |
316 | pr_debug("radix root: %p rnode %p tags %x\n", |
317 | root, root->rnode, | |
0a835c4f | 318 | root->gfp_mask >> ROOT_TAG_SHIFT); |
b194d16c | 319 | if (!radix_tree_is_internal_node(root->rnode)) |
7cf19af4 | 320 | return; |
4dd6c098 | 321 | dump_node(entry_to_node(root->rnode), 0); |
7cf19af4 | 322 | } |
0a835c4f MW |
323 | |
324 | static void dump_ida_node(void *entry, unsigned long index) | |
325 | { | |
326 | unsigned long i; | |
327 | ||
328 | if (!entry) | |
329 | return; | |
330 | ||
331 | if (radix_tree_is_internal_node(entry)) { | |
332 | struct radix_tree_node *node = entry_to_node(entry); | |
333 | ||
334 | pr_debug("ida node: %p offset %d indices %lu-%lu parent %p free %lx shift %d count %d\n", | |
335 | node, node->offset, index * IDA_BITMAP_BITS, | |
336 | ((index | node_maxindex(node)) + 1) * | |
337 | IDA_BITMAP_BITS - 1, | |
338 | node->parent, node->tags[0][0], node->shift, | |
339 | node->count); | |
340 | for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) | |
341 | dump_ida_node(node->slots[i], | |
342 | index | (i << node->shift)); | |
d37cacc5 MW |
343 | } else if (radix_tree_exceptional_entry(entry)) { |
344 | pr_debug("ida excp: %p offset %d indices %lu-%lu data %lx\n", | |
345 | entry, (int)(index & RADIX_TREE_MAP_MASK), | |
346 | index * IDA_BITMAP_BITS, | |
347 | index * IDA_BITMAP_BITS + BITS_PER_LONG - | |
348 | RADIX_TREE_EXCEPTIONAL_SHIFT, | |
349 | (unsigned long)entry >> | |
350 | RADIX_TREE_EXCEPTIONAL_SHIFT); | |
0a835c4f MW |
351 | } else { |
352 | struct ida_bitmap *bitmap = entry; | |
353 | ||
354 | pr_debug("ida btmp: %p offset %d indices %lu-%lu data", bitmap, | |
355 | (int)(index & RADIX_TREE_MAP_MASK), | |
356 | index * IDA_BITMAP_BITS, | |
357 | (index + 1) * IDA_BITMAP_BITS - 1); | |
358 | for (i = 0; i < IDA_BITMAP_LONGS; i++) | |
359 | pr_cont(" %lx", bitmap->bitmap[i]); | |
360 | pr_cont("\n"); | |
361 | } | |
362 | } | |
363 | ||
364 | static void ida_dump(struct ida *ida) | |
365 | { | |
366 | struct radix_tree_root *root = &ida->ida_rt; | |
7ad3d4d8 MW |
367 | pr_debug("ida: %p node %p free %d\n", ida, root->rnode, |
368 | root->gfp_mask >> ROOT_TAG_SHIFT); | |
0a835c4f MW |
369 | dump_ida_node(root->rnode, 0); |
370 | } | |
7cf19af4 MW |
371 | #endif |
372 | ||
1da177e4 LT |
373 | /* |
374 | * This assumes that the caller has performed appropriate preallocation, and | |
375 | * that the caller has pinned this thread of control to the current CPU. | |
376 | */ | |
377 | static struct radix_tree_node * | |
0a835c4f | 378 | radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent, |
d58275bc | 379 | struct radix_tree_root *root, |
e8de4340 MW |
380 | unsigned int shift, unsigned int offset, |
381 | unsigned int count, unsigned int exceptional) | |
1da177e4 | 382 | { |
e2848a0e | 383 | struct radix_tree_node *ret = NULL; |
1da177e4 | 384 | |
5e4c0d97 | 385 | /* |
2fcd9005 MW |
386 | * Preload code isn't irq safe and it doesn't make sense to use |
387 | * preloading during an interrupt anyway as all the allocations have | |
388 | * to be atomic. So just do normal allocation when in interrupt. | |
5e4c0d97 | 389 | */ |
d0164adc | 390 | if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) { |
1da177e4 LT |
391 | struct radix_tree_preload *rtp; |
392 | ||
58e698af VD |
393 | /* |
394 | * Even if the caller has preloaded, try to allocate from the | |
05eb6e72 VD |
395 | * cache first for the new node to get accounted to the memory |
396 | * cgroup. | |
58e698af VD |
397 | */ |
398 | ret = kmem_cache_alloc(radix_tree_node_cachep, | |
05eb6e72 | 399 | gfp_mask | __GFP_NOWARN); |
58e698af VD |
400 | if (ret) |
401 | goto out; | |
402 | ||
e2848a0e NP |
403 | /* |
404 | * Provided the caller has preloaded here, we will always | |
405 | * succeed in getting a node here (and never reach | |
406 | * kmem_cache_alloc) | |
407 | */ | |
7c8e0181 | 408 | rtp = this_cpu_ptr(&radix_tree_preloads); |
1da177e4 | 409 | if (rtp->nr) { |
9d2a8da0 | 410 | ret = rtp->nodes; |
1293d5c5 | 411 | rtp->nodes = ret->parent; |
1da177e4 LT |
412 | rtp->nr--; |
413 | } | |
ce80b067 CM |
414 | /* |
415 | * Update the allocation stack trace as this is more useful | |
416 | * for debugging. | |
417 | */ | |
418 | kmemleak_update_trace(ret); | |
58e698af | 419 | goto out; |
1da177e4 | 420 | } |
05eb6e72 | 421 | ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); |
58e698af | 422 | out: |
b194d16c | 423 | BUG_ON(radix_tree_is_internal_node(ret)); |
e8de4340 | 424 | if (ret) { |
e8de4340 MW |
425 | ret->shift = shift; |
426 | ret->offset = offset; | |
427 | ret->count = count; | |
428 | ret->exceptional = exceptional; | |
d58275bc MW |
429 | ret->parent = parent; |
430 | ret->root = root; | |
e8de4340 | 431 | } |
1da177e4 LT |
432 | return ret; |
433 | } | |
434 | ||
7cf9c2c7 NP |
435 | static void radix_tree_node_rcu_free(struct rcu_head *head) |
436 | { | |
437 | struct radix_tree_node *node = | |
438 | container_of(head, struct radix_tree_node, rcu_head); | |
643b52b9 NP |
439 | |
440 | /* | |
175542f5 MW |
441 | * Must only free zeroed nodes into the slab. We can be left with |
442 | * non-NULL entries by radix_tree_free_nodes, so clear the entries | |
443 | * and tags here. | |
643b52b9 | 444 | */ |
175542f5 MW |
445 | memset(node->slots, 0, sizeof(node->slots)); |
446 | memset(node->tags, 0, sizeof(node->tags)); | |
91d9c05a | 447 | INIT_LIST_HEAD(&node->private_list); |
643b52b9 | 448 | |
7cf9c2c7 NP |
449 | kmem_cache_free(radix_tree_node_cachep, node); |
450 | } | |
451 | ||
1da177e4 LT |
452 | static inline void |
453 | radix_tree_node_free(struct radix_tree_node *node) | |
454 | { | |
7cf9c2c7 | 455 | call_rcu(&node->rcu_head, radix_tree_node_rcu_free); |
1da177e4 LT |
456 | } |
457 | ||
458 | /* | |
459 | * Load up this CPU's radix_tree_node buffer with sufficient objects to | |
460 | * ensure that the addition of a single element in the tree cannot fail. On | |
461 | * success, return zero, with preemption disabled. On error, return -ENOMEM | |
462 | * with preemption not disabled. | |
b34df792 DH |
463 | * |
464 | * To make use of this facility, the radix tree must be initialised without | |
d0164adc | 465 | * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE(). |
1da177e4 | 466 | */ |
bc9ae224 | 467 | static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr) |
1da177e4 LT |
468 | { |
469 | struct radix_tree_preload *rtp; | |
470 | struct radix_tree_node *node; | |
471 | int ret = -ENOMEM; | |
472 | ||
05eb6e72 VD |
473 | /* |
474 | * Nodes preloaded by one cgroup can be used by another cgroup, so
475 | * they should never be accounted to any particular memory cgroup. | |
476 | */ | |
477 | gfp_mask &= ~__GFP_ACCOUNT; | |
478 | ||
1da177e4 | 479 | preempt_disable(); |
7c8e0181 | 480 | rtp = this_cpu_ptr(&radix_tree_preloads); |
c78c66d1 | 481 | while (rtp->nr < nr) { |
1da177e4 | 482 | preempt_enable(); |
488514d1 | 483 | node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); |
1da177e4 LT |
484 | if (node == NULL) |
485 | goto out; | |
486 | preempt_disable(); | |
7c8e0181 | 487 | rtp = this_cpu_ptr(&radix_tree_preloads); |
c78c66d1 | 488 | if (rtp->nr < nr) { |
1293d5c5 | 489 | node->parent = rtp->nodes; |
9d2a8da0 KS |
490 | rtp->nodes = node; |
491 | rtp->nr++; | |
492 | } else { | |
1da177e4 | 493 | kmem_cache_free(radix_tree_node_cachep, node); |
9d2a8da0 | 494 | } |
1da177e4 LT |
495 | } |
496 | ret = 0; | |
497 | out: | |
498 | return ret; | |
499 | } | |
5e4c0d97 JK |
500 | |
501 | /* | |
502 | * Load up this CPU's radix_tree_node buffer with sufficient objects to | |
503 | * ensure that the addition of a single element in the tree cannot fail. On | |
504 | * success, return zero, with preemption disabled. On error, return -ENOMEM | |
505 | * with preemption not disabled. | |
506 | * | |
507 | * To make use of this facility, the radix tree must be initialised without | |
d0164adc | 508 | * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE(). |
5e4c0d97 JK |
509 | */ |
510 | int radix_tree_preload(gfp_t gfp_mask) | |
511 | { | |
512 | /* Warn on non-sensical use... */ | |
d0164adc | 513 | WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask)); |
c78c66d1 | 514 | return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE); |
5e4c0d97 | 515 | } |
d7f0923d | 516 | EXPORT_SYMBOL(radix_tree_preload); |
1da177e4 | 517 | |
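/*
 * A minimal usage sketch (the tree, lock and item names are illustrative,
 * and the tree is assumed to have been initialised with a gfp mask that
 * does not include __GFP_DIRECT_RECLAIM):
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	err = radix_tree_insert(&my_tree, index, item);
 *	spin_unlock(&my_lock);
 *	radix_tree_preload_end();
 *
 * radix_tree_preload() returns with preemption disabled on success, so the
 * matching radix_tree_preload_end() must run on the same CPU afterwards.
 */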
5e4c0d97 JK |
518 | /* |
519 | * The same as the function above, except we don't guarantee preloading happens.
520 | * We do it if we decide it helps. On success, return zero with preemption
521 | * disabled. On error, return -ENOMEM with preemption not disabled. | |
522 | */ | |
523 | int radix_tree_maybe_preload(gfp_t gfp_mask) | |
524 | { | |
d0164adc | 525 | if (gfpflags_allow_blocking(gfp_mask)) |
c78c66d1 | 526 | return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE); |
5e4c0d97 JK |
527 | /* Preloading doesn't help anything with this gfp mask, skip it */ |
528 | preempt_disable(); | |
529 | return 0; | |
530 | } | |
531 | EXPORT_SYMBOL(radix_tree_maybe_preload); | |
532 | ||
2791653a MW |
533 | #ifdef CONFIG_RADIX_TREE_MULTIORDER |
534 | /* | |
535 | * Preload with enough objects to ensure that we can split a single entry | |
536 | * of order @old_order into many entries of size @new_order | |
537 | */ | |
538 | int radix_tree_split_preload(unsigned int old_order, unsigned int new_order, | |
539 | gfp_t gfp_mask) | |
540 | { | |
541 | unsigned top = 1 << (old_order % RADIX_TREE_MAP_SHIFT); | |
542 | unsigned layers = (old_order / RADIX_TREE_MAP_SHIFT) - | |
543 | (new_order / RADIX_TREE_MAP_SHIFT); | |
544 | unsigned nr = 0; | |
545 | ||
546 | WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask)); | |
547 | BUG_ON(new_order >= old_order); | |
548 | ||
549 | while (layers--) | |
550 | nr = nr * RADIX_TREE_MAP_SIZE + 1; | |
551 | return __radix_tree_preload(gfp_mask, top * nr); | |
552 | } | |
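/*
 * For example, splitting an order-9 entry into order-0 entries with an
 * assumed RADIX_TREE_MAP_SHIFT of 6: top == 1 << (9 % 6) == 8 and one layer
 * of new nodes is needed (nr == 1), so 8 nodes are preloaded -- one new
 * leaf node for each of the 8 slots the old entry occupied.
 */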
553 | #endif | |
554 | ||
c78c66d1 KS |
555 | /* |
556 | * The same as the function above, but preload the number of nodes required
557 | * to insert (1 << order) contiguous, naturally-aligned elements.
558 | */ | |
559 | int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order) | |
560 | { | |
561 | unsigned long nr_subtrees; | |
562 | int nr_nodes, subtree_height; | |
563 | ||
564 | /* Preloading doesn't help anything with this gfp mask, skip it */ | |
565 | if (!gfpflags_allow_blocking(gfp_mask)) { | |
566 | preempt_disable(); | |
567 | return 0; | |
568 | } | |
569 | ||
570 | /* | |
571 | * Calculate number and height of fully populated subtrees it takes to | |
572 | * store (1 << order) elements. | |
573 | */ | |
574 | nr_subtrees = 1 << order; | |
575 | for (subtree_height = 0; nr_subtrees > RADIX_TREE_MAP_SIZE; | |
576 | subtree_height++) | |
577 | nr_subtrees >>= RADIX_TREE_MAP_SHIFT; | |
578 | ||
579 | /* | |
580 | * The worst case is a zero height tree with a single item at index 0 and
581 | * then inserting items starting at ULONG_MAX - (1 << order). | |
582 | * | |
583 | * This requires RADIX_TREE_MAX_PATH nodes to build branch from root to | |
584 | * 0-index item. | |
585 | */ | |
586 | nr_nodes = RADIX_TREE_MAX_PATH; | |
587 | ||
588 | /* Plus branch to fully populated subtrees. */ | |
589 | nr_nodes += RADIX_TREE_MAX_PATH - subtree_height; | |
590 | ||
591 | /* Root node is shared. */ | |
592 | nr_nodes--; | |
593 | ||
594 | /* Plus nodes required to build subtrees. */ | |
595 | nr_nodes += nr_subtrees * height_to_maxnodes[subtree_height]; | |
596 | ||
597 | return __radix_tree_preload(gfp_mask, nr_nodes); | |
598 | } | |
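/*
 * A worked example, again assuming RADIX_TREE_MAP_SHIFT == 6 on 64-bit:
 * for order == 9 (512 contiguous entries) there are 8 fully populated
 * subtrees of height 1, so nr_nodes == 11 + (11 - 1) - 1 +
 * 8 * height_to_maxnodes[1] (which is 1) == 28 preloaded nodes.
 */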
599 | ||
35534c86 | 600 | static unsigned radix_tree_load_root(const struct radix_tree_root *root, |
1456a439 MW |
601 | struct radix_tree_node **nodep, unsigned long *maxindex) |
602 | { | |
603 | struct radix_tree_node *node = rcu_dereference_raw(root->rnode); | |
604 | ||
605 | *nodep = node; | |
606 | ||
b194d16c | 607 | if (likely(radix_tree_is_internal_node(node))) { |
4dd6c098 | 608 | node = entry_to_node(node); |
1456a439 | 609 | *maxindex = node_maxindex(node); |
c12e51b0 | 610 | return node->shift + RADIX_TREE_MAP_SHIFT; |
1456a439 MW |
611 | } |
612 | ||
613 | *maxindex = 0; | |
614 | return 0; | |
615 | } | |
616 | ||
1da177e4 LT |
617 | /* |
618 | * Extend a radix tree so it can store key @index. | |
619 | */ | |
0a835c4f | 620 | static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp, |
d0891265 | 621 | unsigned long index, unsigned int shift) |
1da177e4 | 622 | { |
d7b62727 | 623 | void *entry; |
d0891265 | 624 | unsigned int maxshift; |
1da177e4 LT |
625 | int tag; |
626 | ||
d0891265 MW |
627 | /* Figure out what the shift should be. */ |
628 | maxshift = shift; | |
629 | while (index > shift_maxindex(maxshift)) | |
630 | maxshift += RADIX_TREE_MAP_SHIFT; | |
1da177e4 | 631 | |
d7b62727 MW |
632 | entry = rcu_dereference_raw(root->rnode); |
633 | if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE))) | |
1da177e4 | 634 | goto out; |
1da177e4 | 635 | |
1da177e4 | 636 | do { |
0a835c4f | 637 | struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL, |
d58275bc | 638 | root, shift, 0, 1, 0); |
2fcd9005 | 639 | if (!node) |
1da177e4 LT |
640 | return -ENOMEM; |
641 | ||
0a835c4f MW |
642 | if (is_idr(root)) { |
643 | all_tag_set(node, IDR_FREE); | |
644 | if (!root_tag_get(root, IDR_FREE)) { | |
645 | tag_clear(node, IDR_FREE, 0); | |
646 | root_tag_set(root, IDR_FREE); | |
647 | } | |
648 | } else { | |
649 | /* Propagate the aggregated tag info to the new child */ | |
650 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { | |
651 | if (root_tag_get(root, tag)) | |
652 | tag_set(node, tag, 0); | |
653 | } | |
1da177e4 LT |
654 | } |
655 | ||
d0891265 | 656 | BUG_ON(shift > BITS_PER_LONG); |
d7b62727 MW |
657 | if (radix_tree_is_internal_node(entry)) { |
658 | entry_to_node(entry)->parent = node; | |
659 | } else if (radix_tree_exceptional_entry(entry)) { | |
f7942430 | 660 | /* Moving an exceptional root->rnode to a node */ |
e8de4340 | 661 | node->exceptional = 1; |
f7942430 | 662 | } |
d7b62727 MW |
663 | /* |
664 | * entry was already in the radix tree, so we do not need | |
665 | * rcu_assign_pointer here | |
666 | */ | |
667 | node->slots[0] = (void __rcu *)entry; | |
668 | entry = node_to_entry(node); | |
669 | rcu_assign_pointer(root->rnode, entry); | |
d0891265 | 670 | shift += RADIX_TREE_MAP_SHIFT; |
d0891265 | 671 | } while (shift <= maxshift); |
1da177e4 | 672 | out: |
d0891265 | 673 | return maxshift + RADIX_TREE_MAP_SHIFT; |
1da177e4 LT |
674 | } |
675 | ||
f4b109c6 JW |
676 | /** |
677 | * radix_tree_shrink - shrink radix tree to minimum height | |
678 | * @root:	radix tree root
 * @update_node:	callback for changing leaf nodes
679 | */ | |
0ac398ef | 680 | static inline bool radix_tree_shrink(struct radix_tree_root *root, |
c7df8ad2 | 681 | radix_tree_update_node_t update_node) |
f4b109c6 | 682 | { |
0ac398ef MW |
683 | bool shrunk = false; |
684 | ||
f4b109c6 | 685 | for (;;) { |
12320d0f | 686 | struct radix_tree_node *node = rcu_dereference_raw(root->rnode); |
f4b109c6 JW |
687 | struct radix_tree_node *child; |
688 | ||
689 | if (!radix_tree_is_internal_node(node)) | |
690 | break; | |
691 | node = entry_to_node(node); | |
692 | ||
693 | /* | |
694 | * If the candidate node has more than one child, or its child
695 | * is not at the leftmost slot, or the child is a multiorder
696 | * entry, we cannot shrink.
697 | */ | |
698 | if (node->count != 1) | |
699 | break; | |
12320d0f | 700 | child = rcu_dereference_raw(node->slots[0]); |
f4b109c6 JW |
701 | if (!child) |
702 | break; | |
703 | if (!radix_tree_is_internal_node(child) && node->shift) | |
704 | break; | |
705 | ||
706 | if (radix_tree_is_internal_node(child)) | |
707 | entry_to_node(child)->parent = NULL; | |
708 | ||
709 | /* | |
710 | * We don't need rcu_assign_pointer(), since we are simply | |
711 | * moving the node from one part of the tree to another: if it | |
712 | * was safe to dereference the old pointer to it | |
713 | * (node->slots[0]), it will be safe to dereference the new | |
714 | * one (root->rnode) as far as dependent read barriers go. | |
715 | */ | |
d7b62727 | 716 | root->rnode = (void __rcu *)child; |
0a835c4f MW |
717 | if (is_idr(root) && !tag_get(node, IDR_FREE, 0)) |
718 | root_tag_clear(root, IDR_FREE); | |
f4b109c6 JW |
719 | |
720 | /* | |
721 | * We have a dilemma here. The node's slot[0] must not be | |
722 | * NULLed in case there are concurrent lookups expecting to | |
723 | * find the item. However if this was a bottom-level node, | |
724 | * then it may be subject to the slot pointer being visible | |
725 | * to callers dereferencing it. If the item corresponding to
726 | * slot[0] is subsequently deleted, these callers would expect | |
727 | * their slot to become empty sooner or later. | |
728 | * | |
729 | * For example, lockless pagecache will look up a slot, deref | |
730 | * the page pointer, and if the page has 0 refcount it means it | |
731 | * was concurrently deleted from pagecache so try the deref | |
732 | * again. Fortunately there is already a requirement for logic | |
733 | * to retry the entire slot lookup -- the indirect pointer | |
735 | * problem (replacing a direct root node with an indirect pointer
735 | * also results in a stale slot). So tag the slot as indirect | |
736 | * to force callers to retry. | |
737 | */ | |
4d693d08 JW |
738 | node->count = 0; |
739 | if (!radix_tree_is_internal_node(child)) { | |
d7b62727 | 740 | node->slots[0] = (void __rcu *)RADIX_TREE_RETRY; |
4d693d08 | 741 | if (update_node) |
c7df8ad2 | 742 | update_node(node); |
4d693d08 | 743 | } |
f4b109c6 | 744 | |
ea07b862 | 745 | WARN_ON_ONCE(!list_empty(&node->private_list)); |
f4b109c6 | 746 | radix_tree_node_free(node); |
0ac398ef | 747 | shrunk = true; |
f4b109c6 | 748 | } |
0ac398ef MW |
749 | |
750 | return shrunk; | |
f4b109c6 JW |
751 | } |
752 | ||
0ac398ef | 753 | static bool delete_node(struct radix_tree_root *root, |
4d693d08 | 754 | struct radix_tree_node *node, |
c7df8ad2 | 755 | radix_tree_update_node_t update_node) |
f4b109c6 | 756 | { |
0ac398ef MW |
757 | bool deleted = false; |
758 | ||
f4b109c6 JW |
759 | do { |
760 | struct radix_tree_node *parent; | |
761 | ||
762 | if (node->count) { | |
12320d0f MW |
763 | if (node_to_entry(node) == |
764 | rcu_dereference_raw(root->rnode)) | |
c7df8ad2 MG |
765 | deleted |= radix_tree_shrink(root, |
766 | update_node); | |
0ac398ef | 767 | return deleted; |
f4b109c6 JW |
768 | } |
769 | ||
770 | parent = node->parent; | |
771 | if (parent) { | |
772 | parent->slots[node->offset] = NULL; | |
773 | parent->count--; | |
774 | } else { | |
0a835c4f MW |
775 | /* |
776 | * Shouldn't the tags already have all been cleared | |
777 | * by the caller? | |
778 | */ | |
779 | if (!is_idr(root)) | |
780 | root_tag_clear_all(root); | |
f4b109c6 JW |
781 | root->rnode = NULL; |
782 | } | |
783 | ||
ea07b862 | 784 | WARN_ON_ONCE(!list_empty(&node->private_list)); |
f4b109c6 | 785 | radix_tree_node_free(node); |
0ac398ef | 786 | deleted = true; |
f4b109c6 JW |
787 | |
788 | node = parent; | |
789 | } while (node); | |
0ac398ef MW |
790 | |
791 | return deleted; | |
f4b109c6 JW |
792 | } |
793 | ||
1da177e4 | 794 | /** |
139e5616 | 795 | * __radix_tree_create - create a slot in a radix tree |
1da177e4 LT |
796 | * @root: radix tree root |
797 | * @index: index key | |
e6145236 | 798 | * @order: index occupies 2^order aligned slots |
139e5616 JW |
799 | * @nodep: returns node |
800 | * @slotp: returns slot | |
1da177e4 | 801 | * |
139e5616 JW |
802 | * Create, if necessary, and return the node and slot for an item |
803 | * at position @index in the radix tree @root. | |
804 | * | |
805 | * Until there is more than one item in the tree, no nodes are | |
806 | * allocated and @root->rnode is used as a direct slot instead of | |
807 | * pointing to a node, in which case *@nodep will be NULL. | |
808 | * | |
809 | * Returns -ENOMEM, or 0 for success. | |
1da177e4 | 810 | */ |
139e5616 | 811 | int __radix_tree_create(struct radix_tree_root *root, unsigned long index, |
e6145236 | 812 | unsigned order, struct radix_tree_node **nodep, |
d7b62727 | 813 | void __rcu ***slotp) |
1da177e4 | 814 | { |
89148aa4 | 815 | struct radix_tree_node *node = NULL, *child; |
d7b62727 | 816 | void __rcu **slot = (void __rcu **)&root->rnode; |
49ea6ebc | 817 | unsigned long maxindex; |
89148aa4 | 818 | unsigned int shift, offset = 0; |
49ea6ebc | 819 | unsigned long max = index | ((1UL << order) - 1); |
0a835c4f | 820 | gfp_t gfp = root_gfp_mask(root); |
49ea6ebc | 821 | |
89148aa4 | 822 | shift = radix_tree_load_root(root, &child, &maxindex); |
1da177e4 LT |
823 | |
824 | /* Make sure the tree is high enough. */ | |
175542f5 MW |
825 | if (order > 0 && max == ((1UL << order) - 1)) |
826 | max++; | |
49ea6ebc | 827 | if (max > maxindex) { |
0a835c4f | 828 | int error = radix_tree_extend(root, gfp, max, shift); |
49ea6ebc | 829 | if (error < 0) |
1da177e4 | 830 | return error; |
49ea6ebc | 831 | shift = error; |
12320d0f | 832 | child = rcu_dereference_raw(root->rnode); |
1da177e4 LT |
833 | } |
834 | ||
e6145236 | 835 | while (shift > order) { |
c12e51b0 | 836 | shift -= RADIX_TREE_MAP_SHIFT; |
89148aa4 | 837 | if (child == NULL) { |
1da177e4 | 838 | /* Have to add a child node. */ |
d58275bc | 839 | child = radix_tree_node_alloc(gfp, node, root, shift, |
e8de4340 | 840 | offset, 0, 0); |
89148aa4 | 841 | if (!child) |
1da177e4 | 842 | return -ENOMEM; |
89148aa4 MW |
843 | rcu_assign_pointer(*slot, node_to_entry(child)); |
844 | if (node) | |
1da177e4 | 845 | node->count++; |
89148aa4 | 846 | } else if (!radix_tree_is_internal_node(child)) |
e6145236 | 847 | break; |
1da177e4 LT |
848 | |
849 | /* Go a level down */ | |
89148aa4 | 850 | node = entry_to_node(child); |
9e85d811 | 851 | offset = radix_tree_descend(node, &child, index); |
89148aa4 | 852 | slot = &node->slots[offset]; |
e6145236 MW |
853 | } |
854 | ||
175542f5 MW |
855 | if (nodep) |
856 | *nodep = node; | |
857 | if (slotp) | |
858 | *slotp = slot; | |
859 | return 0; | |
860 | } | |
861 | ||
175542f5 MW |
862 | /* |
863 | * Free any nodes below this node. The tree is presumed to not need | |
864 | * shrinking, and any user data in the tree is presumed to not need a | |
865 | * destructor called on it. If we need to add a destructor, we can | |
866 | * add that functionality later. Note that we may not clear tags or | |
867 | * slots from the tree as an RCU walker may still have a pointer into | |
868 | * this subtree. We could replace the entries with RADIX_TREE_RETRY, | |
869 | * but we'll still have to clear those in rcu_free. | |
870 | */ | |
871 | static void radix_tree_free_nodes(struct radix_tree_node *node) | |
872 | { | |
873 | unsigned offset = 0; | |
874 | struct radix_tree_node *child = entry_to_node(node); | |
875 | ||
876 | for (;;) { | |
12320d0f | 877 | void *entry = rcu_dereference_raw(child->slots[offset]); |
175542f5 MW |
878 | if (radix_tree_is_internal_node(entry) && |
879 | !is_sibling_entry(child, entry)) { | |
880 | child = entry_to_node(entry); | |
881 | offset = 0; | |
882 | continue; | |
883 | } | |
884 | offset++; | |
885 | while (offset == RADIX_TREE_MAP_SIZE) { | |
886 | struct radix_tree_node *old = child; | |
887 | offset = child->offset + 1; | |
888 | child = child->parent; | |
dd040b6f | 889 | WARN_ON_ONCE(!list_empty(&old->private_list)); |
175542f5 MW |
890 | radix_tree_node_free(old); |
891 | if (old == entry_to_node(node)) | |
892 | return; | |
893 | } | |
894 | } | |
895 | } | |
896 | ||
0a835c4f | 897 | #ifdef CONFIG_RADIX_TREE_MULTIORDER |
d7b62727 MW |
898 | static inline int insert_entries(struct radix_tree_node *node, |
899 | void __rcu **slot, void *item, unsigned order, bool replace) | |
175542f5 MW |
900 | { |
901 | struct radix_tree_node *child; | |
902 | unsigned i, n, tag, offset, tags = 0; | |
903 | ||
904 | if (node) { | |
e157b555 MW |
905 | if (order > node->shift) |
906 | n = 1 << (order - node->shift); | |
907 | else | |
908 | n = 1; | |
175542f5 MW |
909 | offset = get_slot_offset(node, slot); |
910 | } else { | |
911 | n = 1; | |
912 | offset = 0; | |
913 | } | |
914 | ||
915 | if (n > 1) { | |
e6145236 | 916 | offset = offset & ~(n - 1); |
89148aa4 | 917 | slot = &node->slots[offset]; |
175542f5 MW |
918 | } |
919 | child = node_to_entry(slot); | |
920 | ||
921 | for (i = 0; i < n; i++) { | |
922 | if (slot[i]) { | |
923 | if (replace) { | |
924 | node->count--; | |
925 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) | |
926 | if (tag_get(node, tag, offset + i)) | |
927 | tags |= 1 << tag; | |
928 | } else | |
e6145236 MW |
929 | return -EEXIST; |
930 | } | |
175542f5 | 931 | } |
e6145236 | 932 | |
175542f5 | 933 | for (i = 0; i < n; i++) { |
12320d0f | 934 | struct radix_tree_node *old = rcu_dereference_raw(slot[i]); |
175542f5 | 935 | if (i) { |
89148aa4 | 936 | rcu_assign_pointer(slot[i], child); |
175542f5 MW |
937 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) |
938 | if (tags & (1 << tag)) | |
939 | tag_clear(node, tag, offset + i); | |
940 | } else { | |
941 | rcu_assign_pointer(slot[i], item); | |
942 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) | |
943 | if (tags & (1 << tag)) | |
944 | tag_set(node, tag, offset); | |
e6145236 | 945 | } |
175542f5 | 946 | if (radix_tree_is_internal_node(old) && |
e157b555 MW |
947 | !is_sibling_entry(node, old) && |
948 | (old != RADIX_TREE_RETRY)) | |
175542f5 MW |
949 | radix_tree_free_nodes(old); |
950 | if (radix_tree_exceptional_entry(old)) | |
951 | node->exceptional--; | |
612d6c19 | 952 | } |
175542f5 MW |
953 | if (node) { |
954 | node->count += n; | |
955 | if (radix_tree_exceptional_entry(item)) | |
956 | node->exceptional += n; | |
957 | } | |
958 | return n; | |
139e5616 | 959 | } |
175542f5 | 960 | #else |
d7b62727 MW |
961 | static inline int insert_entries(struct radix_tree_node *node, |
962 | void __rcu **slot, void *item, unsigned order, bool replace) | |
175542f5 MW |
963 | { |
964 | if (*slot) | |
965 | return -EEXIST; | |
966 | rcu_assign_pointer(*slot, item); | |
967 | if (node) { | |
968 | node->count++; | |
969 | if (radix_tree_exceptional_entry(item)) | |
970 | node->exceptional++; | |
971 | } | |
972 | return 1; | |
973 | } | |
974 | #endif | |
139e5616 JW |
975 | |
976 | /** | |
e6145236 | 977 | * __radix_tree_insert - insert into a radix tree |
139e5616 JW |
978 | * @root: radix tree root |
979 | * @index: index key | |
e6145236 | 980 | * @order: key covers the 2^order indices around index |
139e5616 JW |
981 | * @item: item to insert |
982 | * | |
983 | * Insert an item into the radix tree at position @index. | |
984 | */ | |
e6145236 MW |
985 | int __radix_tree_insert(struct radix_tree_root *root, unsigned long index, |
986 | unsigned order, void *item) | |
139e5616 JW |
987 | { |
988 | struct radix_tree_node *node; | |
d7b62727 | 989 | void __rcu **slot; |
139e5616 JW |
990 | int error; |
991 | ||
b194d16c | 992 | BUG_ON(radix_tree_is_internal_node(item)); |
139e5616 | 993 | |
e6145236 | 994 | error = __radix_tree_create(root, index, order, &node, &slot); |
139e5616 JW |
995 | if (error) |
996 | return error; | |
175542f5 MW |
997 | |
998 | error = insert_entries(node, slot, item, order, false); | |
999 | if (error < 0) | |
1000 | return error; | |
201b6264 | 1001 | |
612d6c19 | 1002 | if (node) { |
7b60e9ad | 1003 | unsigned offset = get_slot_offset(node, slot); |
7b60e9ad MW |
1004 | BUG_ON(tag_get(node, 0, offset)); |
1005 | BUG_ON(tag_get(node, 1, offset)); | |
1006 | BUG_ON(tag_get(node, 2, offset)); | |
612d6c19 | 1007 | } else { |
7b60e9ad | 1008 | BUG_ON(root_tags_get(root)); |
612d6c19 | 1009 | } |
1da177e4 | 1010 | |
1da177e4 LT |
1011 | return 0; |
1012 | } | |
e6145236 | 1013 | EXPORT_SYMBOL(__radix_tree_insert); |
1da177e4 | 1014 | |
139e5616 JW |
1015 | /** |
1016 | * __radix_tree_lookup - lookup an item in a radix tree | |
1017 | * @root: radix tree root | |
1018 | * @index: index key | |
1019 | * @nodep: returns node | |
1020 | * @slotp: returns slot | |
1021 | * | |
1022 | * Lookup and return the item at position @index in the radix | |
1023 | * tree @root. | |
1024 | * | |
1025 | * Until there is more than one item in the tree, no nodes are | |
1026 | * allocated and @root->rnode is used as a direct slot instead of | |
1027 | * pointing to a node, in which case *@nodep will be NULL. | |
7cf9c2c7 | 1028 | */ |
35534c86 MW |
1029 | void *__radix_tree_lookup(const struct radix_tree_root *root, |
1030 | unsigned long index, struct radix_tree_node **nodep, | |
d7b62727 | 1031 | void __rcu ***slotp) |
1da177e4 | 1032 | { |
139e5616 | 1033 | struct radix_tree_node *node, *parent; |
85829954 | 1034 | unsigned long maxindex; |
d7b62727 | 1035 | void __rcu **slot; |
612d6c19 | 1036 | |
85829954 MW |
1037 | restart: |
1038 | parent = NULL; | |
d7b62727 | 1039 | slot = (void __rcu **)&root->rnode; |
9e85d811 | 1040 | radix_tree_load_root(root, &node, &maxindex); |
85829954 | 1041 | if (index > maxindex) |
1da177e4 LT |
1042 | return NULL; |
1043 | ||
b194d16c | 1044 | while (radix_tree_is_internal_node(node)) { |
85829954 | 1045 | unsigned offset; |
1da177e4 | 1046 | |
85829954 MW |
1047 | if (node == RADIX_TREE_RETRY) |
1048 | goto restart; | |
4dd6c098 | 1049 | parent = entry_to_node(node); |
9e85d811 | 1050 | offset = radix_tree_descend(parent, &node, index); |
85829954 MW |
1051 | slot = parent->slots + offset; |
1052 | } | |
1da177e4 | 1053 | |
139e5616 JW |
1054 | if (nodep) |
1055 | *nodep = parent; | |
1056 | if (slotp) | |
1057 | *slotp = slot; | |
1058 | return node; | |
b72b71c6 HS |
1059 | } |
1060 | ||
1061 | /** | |
1062 | * radix_tree_lookup_slot - lookup a slot in a radix tree | |
1063 | * @root: radix tree root | |
1064 | * @index: index key | |
1065 | * | |
1066 | * Returns: the slot corresponding to the position @index in the | |
1067 | * radix tree @root. This is useful for update-if-exists operations. | |
1068 | * | |
1069 | * This function can be called under rcu_read_lock iff the slot is not | |
1070 | * modified by radix_tree_replace_slot, otherwise it must be called | |
1071 | * with other writers excluded. Any dereference of the slot must be done
1072 | * using radix_tree_deref_slot. | |
1073 | */ | |
d7b62727 | 1074 | void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *root, |
35534c86 | 1075 | unsigned long index) |
b72b71c6 | 1076 | { |
d7b62727 | 1077 | void __rcu **slot; |
139e5616 JW |
1078 | |
1079 | if (!__radix_tree_lookup(root, index, NULL, &slot)) | |
1080 | return NULL; | |
1081 | return slot; | |
a4331366 | 1082 | } |
a4331366 HR |
1083 | EXPORT_SYMBOL(radix_tree_lookup_slot); |
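
/*
 * A lockless lookup sketch (names are illustrative): the RCU read lock keeps
 * the node containing the slot alive while it is dereferenced, and
 * RADIX_TREE_RETRY is handled by retrying the lookup.
 *
 *	rcu_read_lock();
 * repeat:
 *	entry = NULL;
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	if (slot) {
 *		entry = radix_tree_deref_slot(slot);
 *		if (radix_tree_deref_retry(entry))
 *			goto repeat;
 *	}
 *	rcu_read_unlock();
 */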
1084 | ||
1085 | /** | |
1086 | * radix_tree_lookup - perform lookup operation on a radix tree | |
1087 | * @root: radix tree root | |
1088 | * @index: index key | |
1089 | * | |
1090 | * Lookup the item at the position @index in the radix tree @root. | |
7cf9c2c7 NP |
1091 | * |
1092 | * This function can be called under rcu_read_lock, however the caller | |
1093 | * must manage lifetimes of leaf nodes (eg. RCU may also be used to free | |
1094 | * them safely). No RCU barriers are required to access or modify the | |
1095 | * returned item, however. | |
a4331366 | 1096 | */ |
35534c86 | 1097 | void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index) |
a4331366 | 1098 | { |
139e5616 | 1099 | return __radix_tree_lookup(root, index, NULL, NULL); |
1da177e4 LT |
1100 | } |
1101 | EXPORT_SYMBOL(radix_tree_lookup); | |
1102 | ||
0a835c4f | 1103 | static inline void replace_sibling_entries(struct radix_tree_node *node, |
d7b62727 | 1104 | void __rcu **slot, int count, int exceptional) |
a90eb3a2 | 1105 | { |
a90eb3a2 MW |
1106 | #ifdef CONFIG_RADIX_TREE_MULTIORDER |
1107 | void *ptr = node_to_entry(slot); | |
0a835c4f | 1108 | unsigned offset = get_slot_offset(node, slot) + 1; |
a90eb3a2 | 1109 | |
0a835c4f | 1110 | while (offset < RADIX_TREE_MAP_SIZE) { |
12320d0f | 1111 | if (rcu_dereference_raw(node->slots[offset]) != ptr) |
a90eb3a2 | 1112 | break; |
0a835c4f MW |
1113 | if (count < 0) { |
1114 | node->slots[offset] = NULL; | |
1115 | node->count--; | |
1116 | } | |
1117 | node->exceptional += exceptional; | |
1118 | offset++; | |
a90eb3a2 MW |
1119 | } |
1120 | #endif | |
a90eb3a2 MW |
1121 | } |
1122 | ||
d7b62727 MW |
1123 | static void replace_slot(void __rcu **slot, void *item, |
1124 | struct radix_tree_node *node, int count, int exceptional) | |
f7942430 | 1125 | { |
0a835c4f MW |
1126 | if (WARN_ON_ONCE(radix_tree_is_internal_node(item))) |
1127 | return; | |
f7942430 | 1128 | |
0a835c4f | 1129 | if (node && (count || exceptional)) { |
f4b109c6 | 1130 | node->count += count; |
0a835c4f MW |
1131 | node->exceptional += exceptional; |
1132 | replace_sibling_entries(node, slot, count, exceptional); | |
f4b109c6 | 1133 | } |
f7942430 JW |
1134 | |
1135 | rcu_assign_pointer(*slot, item); | |
1136 | } | |
1137 | ||
0a835c4f MW |
1138 | static bool node_tag_get(const struct radix_tree_root *root, |
1139 | const struct radix_tree_node *node, | |
1140 | unsigned int tag, unsigned int offset) | |
a90eb3a2 | 1141 | { |
0a835c4f MW |
1142 | if (node) |
1143 | return tag_get(node, tag, offset); | |
1144 | return root_tag_get(root, tag); | |
1145 | } | |
a90eb3a2 | 1146 | |
0a835c4f MW |
1147 | /* |
1148 | * IDR users want to be able to store NULL in the tree, so if the slot isn't | |
1149 | * free, don't adjust the count, even if it's transitioning between NULL and | |
1150 | * non-NULL. For the IDA, we mark slots as being IDR_FREE while they still | |
1151 | * have empty bits, but it only stores NULL in slots when they're being | |
1152 | * deleted. | |
1153 | */ | |
1154 | static int calculate_count(struct radix_tree_root *root, | |
d7b62727 | 1155 | struct radix_tree_node *node, void __rcu **slot, |
0a835c4f MW |
1156 | void *item, void *old) |
1157 | { | |
1158 | if (is_idr(root)) { | |
1159 | unsigned offset = get_slot_offset(node, slot); | |
1160 | bool free = node_tag_get(root, node, IDR_FREE, offset); | |
1161 | if (!free) | |
1162 | return 0; | |
1163 | if (!old) | |
1164 | return 1; | |
a90eb3a2 | 1165 | } |
0a835c4f | 1166 | return !!item - !!old; |
a90eb3a2 MW |
1167 | } |
1168 | ||
6d75f366 JW |
1169 | /** |
1170 | * __radix_tree_replace - replace item in a slot | |
4d693d08 JW |
1171 | * @root: radix tree root |
1172 | * @node: pointer to tree node | |
1173 | * @slot: pointer to slot in @node | |
1174 | * @item: new item to store in the slot. | |
1175 | * @update_node: callback for changing leaf nodes | |
6d75f366 JW |
1176 | * |
1177 | * For use with __radix_tree_lookup(). Caller must hold tree write locked | |
1178 | * across slot lookup and replacement. | |
1179 | */ | |
1180 | void __radix_tree_replace(struct radix_tree_root *root, | |
1181 | struct radix_tree_node *node, | |
d7b62727 | 1182 | void __rcu **slot, void *item, |
c7df8ad2 | 1183 | radix_tree_update_node_t update_node) |
6d75f366 | 1184 | { |
0a835c4f MW |
1185 | void *old = rcu_dereference_raw(*slot); |
1186 | int exceptional = !!radix_tree_exceptional_entry(item) - | |
1187 | !!radix_tree_exceptional_entry(old); | |
1188 | int count = calculate_count(root, node, slot, item, old); | |
1189 | ||
6d75f366 | 1190 | /* |
f4b109c6 JW |
1191 | * This function supports replacing exceptional entries and |
1192 | * deleting entries, but that needs accounting against the | |
1193 | * node unless the slot is root->rnode. | |
6d75f366 | 1194 | */ |
d7b62727 | 1195 | WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->rnode) && |
0a835c4f MW |
1196 | (count || exceptional)); |
1197 | replace_slot(slot, item, node, count, exceptional); | |
f4b109c6 | 1198 | |
4d693d08 JW |
1199 | if (!node) |
1200 | return; | |
1201 | ||
1202 | if (update_node) | |
c7df8ad2 | 1203 | update_node(node); |
4d693d08 | 1204 | |
c7df8ad2 | 1205 | delete_node(root, node, update_node); |
6d75f366 JW |
1206 | } |
1207 | ||
1208 | /** | |
1209 | * radix_tree_replace_slot - replace item in a slot | |
1210 | * @root: radix tree root | |
1211 | * @slot: pointer to slot | |
1212 | * @item: new item to store in the slot. | |
1213 | * | |
1214 | * For use with radix_tree_lookup_slot(), radix_tree_gang_lookup_slot(), | |
1215 | * radix_tree_gang_lookup_tag_slot(). Caller must hold tree write locked | |
1216 | * across slot lookup and replacement. | |
1217 | * | |
1218 | * NOTE: This cannot be used to switch between non-entries (empty slots), | |
1219 | * regular entries, and exceptional entries, as that requires accounting | |
f4b109c6 | 1220 | * inside the radix tree node. When switching from one type of entry to
e157b555 MW |
1221 | * another or deleting an entry, use __radix_tree_lookup() and __radix_tree_replace() or
1222 | * radix_tree_iter_replace(). | |
6d75f366 JW |
1223 | */ |
1224 | void radix_tree_replace_slot(struct radix_tree_root *root, | |
d7b62727 | 1225 | void __rcu **slot, void *item) |
6d75f366 | 1226 | { |
c7df8ad2 | 1227 | __radix_tree_replace(root, NULL, slot, item, NULL); |
6d75f366 | 1228 | } |
10257d71 | 1229 | EXPORT_SYMBOL(radix_tree_replace_slot); |
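
/*
 * A replacement sketch under the caller's tree lock (the tree, lock and item
 * names are illustrative, and new_item is assumed to be a regular non-NULL
 * entry per the NOTE above); the slot is looked up and replaced without
 * dropping the lock:
 *
 *	spin_lock(&my_lock);
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	if (slot && radix_tree_deref_slot_protected(slot, &my_lock))
 *		radix_tree_replace_slot(&my_tree, slot, new_item);
 *	spin_unlock(&my_lock);
 */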
6d75f366 | 1230 | |
e157b555 MW |
1231 | /** |
1232 | * radix_tree_iter_replace - replace item in a slot | |
1233 | * @root: radix tree root | |
1234 | * @slot: pointer to slot | |
1235 | * @item: new item to store in the slot. | |
1236 | * | |
1237 | * For use with radix_tree_split() and radix_tree_for_each_slot(). | |
1238 | * Caller must hold tree write locked across split and replacement. | |
1239 | */ | |
1240 | void radix_tree_iter_replace(struct radix_tree_root *root, | |
d7b62727 MW |
1241 | const struct radix_tree_iter *iter, |
1242 | void __rcu **slot, void *item) | |
e157b555 | 1243 | { |
c7df8ad2 | 1244 | __radix_tree_replace(root, iter->node, slot, item, NULL); |
e157b555 MW |
1245 | } |
1246 | ||
175542f5 MW |
1247 | #ifdef CONFIG_RADIX_TREE_MULTIORDER |
1248 | /** | |
1249 | * radix_tree_join - replace multiple entries with one multiorder entry | |
1250 | * @root: radix tree root | |
1251 | * @index: an index inside the new entry | |
1252 | * @order: order of the new entry | |
1253 | * @item: new entry | |
1254 | * | |
1255 | * Call this function to replace several entries with one larger entry. | |
1256 | * The existing entries are presumed to not need freeing as a result of | |
1257 | * this call. | |
1258 | * | |
1259 | * The replacement entry will have all the tags set on it that were set | |
1260 | * on any of the entries it is replacing. | |
1261 | */ | |
1262 | int radix_tree_join(struct radix_tree_root *root, unsigned long index, | |
1263 | unsigned order, void *item) | |
1264 | { | |
1265 | struct radix_tree_node *node; | |
d7b62727 | 1266 | void __rcu **slot; |
175542f5 MW |
1267 | int error; |
1268 | ||
1269 | BUG_ON(radix_tree_is_internal_node(item)); | |
1270 | ||
1271 | error = __radix_tree_create(root, index, order, &node, &slot); | |
1272 | if (!error) | |
1273 | error = insert_entries(node, slot, item, order, true); | |
1274 | if (error > 0) | |
1275 | error = 0; | |
1276 | ||
1277 | return error; | |
1278 | } | |
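
/*
 * For instance (a sketch; names are illustrative), the entries covering a
 * naturally-aligned 512-slot range can be replaced by one order-9 entry with:
 *
 *	err = radix_tree_join(&my_tree, index, 9, huge_entry);
 */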
e157b555 MW |
1279 | |
1280 | /** | |
1281 | * radix_tree_split - Split an entry into smaller entries | |
1282 | * @root: radix tree root | |
1283 | * @index: An index within the large entry | |
1284 | * @order: Order of new entries | |
1285 | * | |
1286 | * Call this function as the first step in replacing a multiorder entry | |
1287 | * with several entries of lower order. After this function returns, | |
1288 | * loop over the relevant portion of the tree using radix_tree_for_each_slot() | |
1289 | * and call radix_tree_iter_replace() to set up each new entry. | |
1290 | * | |
1291 | * The tags from this entry are replicated to all the new entries. | |
1292 | * | |
1293 | * The radix tree should be locked against modification during the entire | |
1294 | * replacement operation. Lock-free lookups will see RADIX_TREE_RETRY which | |
1295 | * should prompt RCU walkers to restart the lookup from the root. | |
1296 | */ | |
1297 | int radix_tree_split(struct radix_tree_root *root, unsigned long index, | |
1298 | unsigned order) | |
1299 | { | |
1300 | struct radix_tree_node *parent, *node, *child; | |
d7b62727 | 1301 | void __rcu **slot; |
e157b555 MW |
1302 | unsigned int offset, end; |
1303 | unsigned n, tag, tags = 0; | |
0a835c4f | 1304 | gfp_t gfp = root_gfp_mask(root); |
e157b555 MW |
1305 | |
1306 | if (!__radix_tree_lookup(root, index, &parent, &slot)) | |
1307 | return -ENOENT; | |
1308 | if (!parent) | |
1309 | return -ENOENT; | |
1310 | ||
1311 | offset = get_slot_offset(parent, slot); | |
1312 | ||
1313 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) | |
1314 | if (tag_get(parent, tag, offset)) | |
1315 | tags |= 1 << tag; | |
1316 | ||
1317 | for (end = offset + 1; end < RADIX_TREE_MAP_SIZE; end++) { | |
12320d0f MW |
1318 | if (!is_sibling_entry(parent, |
1319 | rcu_dereference_raw(parent->slots[end]))) | |
e157b555 MW |
1320 | break; |
1321 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) | |
1322 | if (tags & (1 << tag)) | |
1323 | tag_set(parent, tag, end); | |
1324 | /* rcu_assign_pointer ensures tags are set before RETRY */ | |
1325 | rcu_assign_pointer(parent->slots[end], RADIX_TREE_RETRY); | |
1326 | } | |
1327 | rcu_assign_pointer(parent->slots[offset], RADIX_TREE_RETRY); | |
1328 | parent->exceptional -= (end - offset); | |
1329 | ||
1330 | if (order == parent->shift) | |
1331 | return 0; | |
1332 | if (order > parent->shift) { | |
1333 | while (offset < end) | |
1334 | offset += insert_entries(parent, &parent->slots[offset], | |
1335 | RADIX_TREE_RETRY, order, true); | |
1336 | return 0; | |
1337 | } | |
1338 | ||
1339 | node = parent; | |
1340 | ||
1341 | for (;;) { | |
1342 | if (node->shift > order) { | |
d58275bc | 1343 | child = radix_tree_node_alloc(gfp, node, root, |
e8de4340 MW |
1344 | node->shift - RADIX_TREE_MAP_SHIFT, |
1345 | offset, 0, 0); | |
e157b555 MW |
1346 | if (!child) |
1347 | goto nomem; | |
e157b555 MW |
1348 | if (node != parent) { |
1349 | node->count++; | |
12320d0f MW |
1350 | rcu_assign_pointer(node->slots[offset], |
1351 | node_to_entry(child)); | |
e157b555 MW |
1352 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) |
1353 | if (tags & (1 << tag)) | |
1354 | tag_set(node, tag, offset); | |
1355 | } | |
1356 | ||
1357 | node = child; | |
1358 | offset = 0; | |
1359 | continue; | |
1360 | } | |
1361 | ||
1362 | n = insert_entries(node, &node->slots[offset], | |
1363 | RADIX_TREE_RETRY, order, false); | |
1364 | BUG_ON(n > RADIX_TREE_MAP_SIZE); | |
1365 | ||
1366 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) | |
1367 | if (tags & (1 << tag)) | |
1368 | tag_set(node, tag, offset); | |
1369 | offset += n; | |
1370 | ||
1371 | while (offset == RADIX_TREE_MAP_SIZE) { | |
1372 | if (node == parent) | |
1373 | break; | |
1374 | offset = node->offset; | |
1375 | child = node; | |
1376 | node = node->parent; | |
1377 | rcu_assign_pointer(node->slots[offset], | |
1378 | node_to_entry(child)); | |
1379 | offset++; | |
1380 | } | |
1381 | if ((node == parent) && (offset == end)) | |
1382 | return 0; | |
1383 | } | |
1384 | ||
1385 | nomem: | |
1386 | /* Shouldn't happen; did user forget to preload? */ | |
1387 | /* TODO: free all the allocated nodes */ | |
1388 | WARN_ON(1); | |
1389 | return -ENOMEM; | |
1390 | } | |
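
/*
 * A split sketch (names are illustrative): replace one order-9 entry with
 * 512 order-0 copies of new_item. The tree lock is held throughout and
 * radix_tree_split_preload() is assumed to have been called beforehand.
 *
 *	unsigned long end = index | ((1UL << 9) - 1);
 *
 *	err = radix_tree_split(&my_tree, index, 0);
 *	if (!err) {
 *		radix_tree_for_each_slot(slot, &my_tree, &iter, index) {
 *			if (iter.index > end)
 *				break;
 *			radix_tree_iter_replace(&my_tree, &iter, slot, new_item);
 *		}
 *	}
 */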
175542f5 MW |
1391 | #endif |
1392 | ||
30b888ba MW |
1393 | static void node_tag_set(struct radix_tree_root *root, |
1394 | struct radix_tree_node *node, | |
1395 | unsigned int tag, unsigned int offset) | |
1396 | { | |
1397 | while (node) { | |
1398 | if (tag_get(node, tag, offset)) | |
1399 | return; | |
1400 | tag_set(node, tag, offset); | |
1401 | offset = node->offset; | |
1402 | node = node->parent; | |
1403 | } | |
1404 | ||
1405 | if (!root_tag_get(root, tag)) | |
1406 | root_tag_set(root, tag); | |
1407 | } | |
1408 | ||
1da177e4 LT |
1409 | /** |
1410 | * radix_tree_tag_set - set a tag on a radix tree node | |
1411 | * @root: radix tree root | |
1412 | * @index: index key | |
2fcd9005 | 1413 | * @tag: tag index |
1da177e4 | 1414 | * |
daff89f3 JC |
1415 | * Set the search tag (which must be < RADIX_TREE_MAX_TAGS) |
1416 | * corresponding to @index in the radix tree, from
1da177e4 LT |
1417 | * the root all the way down to the leaf node. |
1418 | * | |
2fcd9005 | 1419 | * Returns the address of the tagged item. Setting a tag on a not-present |
1da177e4 LT |
1420 | * item is a bug. |
1421 | */ | |
1422 | void *radix_tree_tag_set(struct radix_tree_root *root, | |
daff89f3 | 1423 | unsigned long index, unsigned int tag) |
1da177e4 | 1424 | { |
fb969909 RZ |
1425 | struct radix_tree_node *node, *parent; |
1426 | unsigned long maxindex; | |
1da177e4 | 1427 | |
9e85d811 | 1428 | radix_tree_load_root(root, &node, &maxindex); |
fb969909 | 1429 | BUG_ON(index > maxindex); |
1da177e4 | 1430 | |
b194d16c | 1431 | while (radix_tree_is_internal_node(node)) { |
fb969909 | 1432 | unsigned offset; |
1da177e4 | 1433 | |
4dd6c098 | 1434 | parent = entry_to_node(node); |
9e85d811 | 1435 | offset = radix_tree_descend(parent, &node, index); |
fb969909 RZ |
1436 | BUG_ON(!node); |
1437 | ||
1438 | if (!tag_get(parent, tag, offset)) | |
1439 | tag_set(parent, tag, offset); | |
1da177e4 LT |
1440 | } |
1441 | ||
612d6c19 | 1442 | /* set the root's tag bit */ |
fb969909 | 1443 | if (!root_tag_get(root, tag)) |
612d6c19 NP |
1444 | root_tag_set(root, tag); |
1445 | ||
fb969909 | 1446 | return node; |
1da177e4 LT |
1447 | } |
1448 | EXPORT_SYMBOL(radix_tree_tag_set); | |
1449 | ||
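/*
 * Illustrative usage sketch (not part of the original file).  The names
 * my_tree, my_item and MY_TAG_DIRTY are hypothetical; the tag index must be
 * < RADIX_TREE_MAX_TAGS, and the entry must already be present because
 * setting a tag on a not-present item is a bug.
 *
 *	RADIX_TREE(my_tree, GFP_KERNEL);
 *	#define MY_TAG_DIRTY	0
 *
 *	if (radix_tree_insert(&my_tree, 42, my_item) == 0)
 *		radix_tree_tag_set(&my_tree, 42, MY_TAG_DIRTY);
 */
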
30b888ba MW |
1450 | /** |
1451 | * radix_tree_iter_tag_set - set a tag on the current iterator entry | |
1452 | * @root: radix tree root | |
1453 | * @iter: iterator state | |
1454 | * @tag: tag to set | |
1455 | */ | |
1456 | void radix_tree_iter_tag_set(struct radix_tree_root *root, | |
1457 | const struct radix_tree_iter *iter, unsigned int tag) | |
1458 | { | |
1459 | node_tag_set(root, iter->node, tag, iter_offset(iter)); | |
1460 | } | |
1461 | ||
d604c324 MW |
1462 | static void node_tag_clear(struct radix_tree_root *root, |
1463 | struct radix_tree_node *node, | |
1464 | unsigned int tag, unsigned int offset) | |
1465 | { | |
1466 | while (node) { | |
1467 | if (!tag_get(node, tag, offset)) | |
1468 | return; | |
1469 | tag_clear(node, tag, offset); | |
1470 | if (any_tag_set(node, tag)) | |
1471 | return; | |
1472 | ||
1473 | offset = node->offset; | |
1474 | node = node->parent; | |
1475 | } | |
1476 | ||
1477 | /* clear the root's tag bit */ | |
1478 | if (root_tag_get(root, tag)) | |
1479 | root_tag_clear(root, tag); | |
1480 | } | |
1481 | ||
1da177e4 LT |
1482 | /** |
1483 | * radix_tree_tag_clear - clear a tag on a radix tree node | |
1484 | * @root: radix tree root | |
1485 | * @index: index key | |
2fcd9005 | 1486 | * @tag: tag index |
1da177e4 | 1487 | * |
daff89f3 | 1488 | * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS) |
2fcd9005 MW |
1489 | * corresponding to @index in the radix tree. If this causes |
1490 | * the leaf node to have no tags set then clear the tag in the | |
1da177e4 LT |
1491 | * next-to-leaf node, etc. |
1492 | * | |
1493 | * Returns the address of the tagged item on success, else NULL; i.e. it
1494 | * has the same return value and semantics as radix_tree_lookup(). | |
1495 | */ | |
1496 | void *radix_tree_tag_clear(struct radix_tree_root *root, | |
daff89f3 | 1497 | unsigned long index, unsigned int tag) |
1da177e4 | 1498 | { |
00f47b58 RZ |
1499 | struct radix_tree_node *node, *parent; |
1500 | unsigned long maxindex; | |
e2bdb933 | 1501 | int uninitialized_var(offset); |
1da177e4 | 1502 | |
9e85d811 | 1503 | radix_tree_load_root(root, &node, &maxindex); |
00f47b58 RZ |
1504 | if (index > maxindex) |
1505 | return NULL; | |
1da177e4 | 1506 | |
00f47b58 | 1507 | parent = NULL; |
1da177e4 | 1508 | |
b194d16c | 1509 | while (radix_tree_is_internal_node(node)) { |
4dd6c098 | 1510 | parent = entry_to_node(node); |
9e85d811 | 1511 | offset = radix_tree_descend(parent, &node, index); |
1da177e4 LT |
1512 | } |
1513 | ||
d604c324 MW |
1514 | if (node) |
1515 | node_tag_clear(root, parent, tag, offset); | |
1da177e4 | 1516 | |
00f47b58 | 1517 | return node; |
1da177e4 LT |
1518 | } |
1519 | EXPORT_SYMBOL(radix_tree_tag_clear); | |
1520 | ||
30b888ba MW |
1521 | /** |
1522 | * radix_tree_iter_tag_clear - clear a tag on the current iterator entry | |
1523 | * @root: radix tree root | |
1524 | * @iter: iterator state | |
1525 | * @tag: tag to clear | |
1526 | */ | |
1527 | void radix_tree_iter_tag_clear(struct radix_tree_root *root, | |
1528 | const struct radix_tree_iter *iter, unsigned int tag) | |
1529 | { | |
1530 | node_tag_clear(root, iter->node, tag, iter_offset(iter)); | |
1531 | } | |
1532 | ||
1da177e4 | 1533 | /** |
32605a18 MT |
1534 | * radix_tree_tag_get - get a tag on a radix tree node |
1535 | * @root: radix tree root | |
1536 | * @index: index key | |
2fcd9005 | 1537 | * @tag: tag index (< RADIX_TREE_MAX_TAGS) |
1da177e4 | 1538 | * |
32605a18 | 1539 | * Return values: |
1da177e4 | 1540 | * |
612d6c19 NP |
1541 | * 0: tag not present or not set |
1542 | * 1: tag set | |
ce82653d DH |
1543 | * |
1544 | * Note that the return value of this function may not be relied on, even if | |
1545 | * the RCU lock is held, unless tag modification and node deletion are excluded | |
1546 | * from concurrency. | |
1da177e4 | 1547 | */ |
35534c86 | 1548 | int radix_tree_tag_get(const struct radix_tree_root *root, |
daff89f3 | 1549 | unsigned long index, unsigned int tag) |
1da177e4 | 1550 | { |
4589ba6d RZ |
1551 | struct radix_tree_node *node, *parent; |
1552 | unsigned long maxindex; | |
1da177e4 | 1553 | |
612d6c19 NP |
1554 | if (!root_tag_get(root, tag)) |
1555 | return 0; | |
1556 | ||
9e85d811 | 1557 | radix_tree_load_root(root, &node, &maxindex); |
4589ba6d RZ |
1558 | if (index > maxindex) |
1559 | return 0; | |
7cf9c2c7 | 1560 | |
b194d16c | 1561 | while (radix_tree_is_internal_node(node)) { |
9e85d811 | 1562 | unsigned offset; |
1da177e4 | 1563 | |
4dd6c098 | 1564 | parent = entry_to_node(node); |
9e85d811 | 1565 | offset = radix_tree_descend(parent, &node, index); |
1da177e4 | 1566 | |
4589ba6d | 1567 | if (!tag_get(parent, tag, offset)) |
3fa36acb | 1568 | return 0; |
4589ba6d RZ |
1569 | if (node == RADIX_TREE_RETRY) |
1570 | break; | |
1da177e4 | 1571 | } |
4589ba6d RZ |
1572 | |
1573 | return 1; | |
1da177e4 LT |
1574 | } |
1575 | EXPORT_SYMBOL(radix_tree_tag_get); | |
1da177e4 | 1576 | |
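/*
 * Illustrative sketch (not part of the original file): testing a tag and
 * clearing it once the work is done.  Reuses the hypothetical my_tree and
 * MY_TAG_DIRTY names from the sketch above; writeback_my_item() is likewise
 * made up.  As noted above, the result of radix_tree_tag_get() is only
 * meaningful if the caller excludes concurrent tag changes and deletions.
 *
 *	if (radix_tree_tag_get(&my_tree, 42, MY_TAG_DIRTY)) {
 *		writeback_my_item(radix_tree_lookup(&my_tree, 42));
 *		radix_tree_tag_clear(&my_tree, 42, MY_TAG_DIRTY);
 *	}
 */
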
21ef5339 RZ |
1577 | static inline void __set_iter_shift(struct radix_tree_iter *iter, |
1578 | unsigned int shift) | |
1579 | { | |
1580 | #ifdef CONFIG_RADIX_TREE_MULTIORDER | |
1581 | iter->shift = shift; | |
1582 | #endif | |
1583 | } | |
1584 | ||
148deab2 MW |
1585 | /* Construct iter->tags bit-mask from node->tags[tag] array */ |
1586 | static void set_iter_tags(struct radix_tree_iter *iter, | |
1587 | struct radix_tree_node *node, unsigned offset, | |
1588 | unsigned tag) | |
1589 | { | |
1590 | unsigned tag_long = offset / BITS_PER_LONG; | |
1591 | unsigned tag_bit = offset % BITS_PER_LONG; | |
1592 | ||
0a835c4f MW |
1593 | if (!node) { |
1594 | iter->tags = 1; | |
1595 | return; | |
1596 | } | |
1597 | ||
148deab2 MW |
1598 | iter->tags = node->tags[tag][tag_long] >> tag_bit; |
1599 | ||
1600 | /* This never happens if RADIX_TREE_TAG_LONGS == 1 */ | |
1601 | if (tag_long < RADIX_TREE_TAG_LONGS - 1) { | |
1602 | /* Pick tags from next element */ | |
1603 | if (tag_bit) | |
1604 | iter->tags |= node->tags[tag][tag_long + 1] << | |
1605 | (BITS_PER_LONG - tag_bit); | |
1606 | /* Clip chunk size, here only BITS_PER_LONG tags */ | |
1607 | iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG); | |
1608 | } | |
1609 | } | |
1610 | ||
1611 | #ifdef CONFIG_RADIX_TREE_MULTIORDER | |
d7b62727 MW |
1612 | static void __rcu **skip_siblings(struct radix_tree_node **nodep, |
1613 | void __rcu **slot, struct radix_tree_iter *iter) | |
148deab2 MW |
1614 | { |
1615 | void *sib = node_to_entry(slot - 1); | |
1616 | ||
1617 | while (iter->index < iter->next_index) { | |
1618 | *nodep = rcu_dereference_raw(*slot); | |
1619 | if (*nodep && *nodep != sib) | |
1620 | return slot; | |
1621 | slot++; | |
1622 | iter->index = __radix_tree_iter_add(iter, 1); | |
1623 | iter->tags >>= 1; | |
1624 | } | |
1625 | ||
1626 | *nodep = NULL; | |
1627 | return NULL; | |
1628 | } | |
1629 | ||
d7b62727 MW |
1630 | void __rcu **__radix_tree_next_slot(void __rcu **slot, |
1631 | struct radix_tree_iter *iter, unsigned flags) | |
148deab2 MW |
1632 | { |
1633 | unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; | |
1634 | struct radix_tree_node *node = rcu_dereference_raw(*slot); | |
1635 | ||
1636 | slot = skip_siblings(&node, slot, iter); | |
1637 | ||
1638 | while (radix_tree_is_internal_node(node)) { | |
1639 | unsigned offset; | |
1640 | unsigned long next_index; | |
1641 | ||
1642 | if (node == RADIX_TREE_RETRY) | |
1643 | return slot; | |
1644 | node = entry_to_node(node); | |
268f42de | 1645 | iter->node = node; |
148deab2 MW |
1646 | iter->shift = node->shift; |
1647 | ||
1648 | if (flags & RADIX_TREE_ITER_TAGGED) { | |
1649 | offset = radix_tree_find_next_bit(node, tag, 0); | |
1650 | if (offset == RADIX_TREE_MAP_SIZE) | |
1651 | return NULL; | |
1652 | slot = &node->slots[offset]; | |
1653 | iter->index = __radix_tree_iter_add(iter, offset); | |
1654 | set_iter_tags(iter, node, offset, tag); | |
1655 | node = rcu_dereference_raw(*slot); | |
1656 | } else { | |
1657 | offset = 0; | |
1658 | slot = &node->slots[0]; | |
1659 | for (;;) { | |
1660 | node = rcu_dereference_raw(*slot); | |
1661 | if (node) | |
1662 | break; | |
1663 | slot++; | |
1664 | offset++; | |
1665 | if (offset == RADIX_TREE_MAP_SIZE) | |
1666 | return NULL; | |
1667 | } | |
1668 | iter->index = __radix_tree_iter_add(iter, offset); | |
1669 | } | |
1670 | if ((flags & RADIX_TREE_ITER_CONTIG) && (offset > 0)) | |
1671 | goto none; | |
1672 | next_index = (iter->index | shift_maxindex(iter->shift)) + 1; | |
1673 | if (next_index < iter->next_index) | |
1674 | iter->next_index = next_index; | |
1675 | } | |
1676 | ||
1677 | return slot; | |
1678 | none: | |
1679 | iter->next_index = 0; | |
1680 | return NULL; | |
1681 | } | |
1682 | EXPORT_SYMBOL(__radix_tree_next_slot); | |
1683 | #else | |
d7b62727 MW |
1684 | static void __rcu **skip_siblings(struct radix_tree_node **nodep, |
1685 | void __rcu **slot, struct radix_tree_iter *iter) | |
148deab2 MW |
1686 | { |
1687 | return slot; | |
1688 | } | |
1689 | #endif | |
1690 | ||
d7b62727 MW |
1691 | void __rcu **radix_tree_iter_resume(void __rcu **slot, |
1692 | struct radix_tree_iter *iter) | |
148deab2 MW |
1693 | { |
1694 | struct radix_tree_node *node; | |
1695 | ||
1696 | slot++; | |
1697 | iter->index = __radix_tree_iter_add(iter, 1); | |
148deab2 MW |
1698 | skip_siblings(&node, slot, iter); |
1699 | iter->next_index = iter->index; | |
1700 | iter->tags = 0; | |
1701 | return NULL; | |
1702 | } | |
1703 | EXPORT_SYMBOL(radix_tree_iter_resume); | |
1704 | ||
78c1d784 KK |
1705 | /** |
1706 | * radix_tree_next_chunk - find next chunk of slots for iteration | |
1707 | * | |
1708 | * @root: radix tree root | |
1709 | * @iter: iterator state | |
1710 | * @flags: RADIX_TREE_ITER_* flags and tag index | |
1711 | * Returns: pointer to the first slot of the chunk, or NULL if iteration is over
1712 | */ | |
d7b62727 | 1713 | void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root, |
78c1d784 KK |
1714 | struct radix_tree_iter *iter, unsigned flags) |
1715 | { | |
9e85d811 | 1716 | unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; |
8c1244de | 1717 | struct radix_tree_node *node, *child; |
21ef5339 | 1718 | unsigned long index, offset, maxindex; |
78c1d784 KK |
1719 | |
1720 | if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag)) | |
1721 | return NULL; | |
1722 | ||
1723 | /* | |
1724 | * Catch next_index overflow after ~0UL. iter->index never overflows | |
1725 | * during iterating; it can be zero only at the beginning. | |
1726 | * And we cannot overflow iter->next_index in a single step, | |
1727 | * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG. | |
fffaee36 KK |
1728 | * |
1729 | * This condition is also used by radix_tree_next_slot() to stop
91b9677c | 1730 | * contiguous iteration, and to forbid switching to the next chunk.
78c1d784 KK |
1731 | */ |
1732 | index = iter->next_index; | |
1733 | if (!index && iter->index) | |
1734 | return NULL; | |
1735 | ||
21ef5339 | 1736 | restart: |
9e85d811 | 1737 | radix_tree_load_root(root, &child, &maxindex); |
21ef5339 RZ |
1738 | if (index > maxindex) |
1739 | return NULL; | |
8c1244de MW |
1740 | if (!child) |
1741 | return NULL; | |
21ef5339 | 1742 | |
8c1244de | 1743 | if (!radix_tree_is_internal_node(child)) { |
78c1d784 | 1744 | /* Single-slot tree */ |
21ef5339 RZ |
1745 | iter->index = index; |
1746 | iter->next_index = maxindex + 1; | |
78c1d784 | 1747 | iter->tags = 1; |
268f42de | 1748 | iter->node = NULL; |
8c1244de | 1749 | __set_iter_shift(iter, 0); |
d7b62727 | 1750 | return (void __rcu **)&root->rnode; |
8c1244de | 1751 | } |
21ef5339 | 1752 | |
8c1244de MW |
1753 | do { |
1754 | node = entry_to_node(child); | |
9e85d811 | 1755 | offset = radix_tree_descend(node, &child, index); |
21ef5339 | 1756 | |
78c1d784 | 1757 | if ((flags & RADIX_TREE_ITER_TAGGED) ? |
8c1244de | 1758 | !tag_get(node, tag, offset) : !child) { |
78c1d784 KK |
1759 | /* Hole detected */ |
1760 | if (flags & RADIX_TREE_ITER_CONTIG) | |
1761 | return NULL; | |
1762 | ||
1763 | if (flags & RADIX_TREE_ITER_TAGGED) | |
bc412fca | 1764 | offset = radix_tree_find_next_bit(node, tag, |
78c1d784 KK |
1765 | offset + 1); |
1766 | else | |
1767 | while (++offset < RADIX_TREE_MAP_SIZE) { | |
12320d0f MW |
1768 | void *slot = rcu_dereference_raw( |
1769 | node->slots[offset]); | |
21ef5339 RZ |
1770 | if (is_sibling_entry(node, slot)) |
1771 | continue; | |
1772 | if (slot) | |
78c1d784 KK |
1773 | break; |
1774 | } | |
8c1244de | 1775 | index &= ~node_maxindex(node); |
9e85d811 | 1776 | index += offset << node->shift; |
78c1d784 KK |
1777 | /* Overflow after ~0UL */ |
1778 | if (!index) | |
1779 | return NULL; | |
1780 | if (offset == RADIX_TREE_MAP_SIZE) | |
1781 | goto restart; | |
8c1244de | 1782 | child = rcu_dereference_raw(node->slots[offset]); |
78c1d784 KK |
1783 | } |
1784 | ||
e157b555 | 1785 | if (!child) |
78c1d784 | 1786 | goto restart; |
e157b555 MW |
1787 | if (child == RADIX_TREE_RETRY) |
1788 | break; | |
8c1244de | 1789 | } while (radix_tree_is_internal_node(child)); |
78c1d784 KK |
1790 | |
1791 | /* Update the iterator state */ | |
8c1244de MW |
1792 | iter->index = (index &~ node_maxindex(node)) | (offset << node->shift); |
1793 | iter->next_index = (index | node_maxindex(node)) + 1; | |
268f42de | 1794 | iter->node = node; |
9e85d811 | 1795 | __set_iter_shift(iter, node->shift); |
78c1d784 | 1796 | |
148deab2 MW |
1797 | if (flags & RADIX_TREE_ITER_TAGGED) |
1798 | set_iter_tags(iter, node, offset, tag); | |
78c1d784 KK |
1799 | |
1800 | return node->slots + offset; | |
1801 | } | |
1802 | EXPORT_SYMBOL(radix_tree_next_chunk); | |
1803 | ||
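/*
 * Illustrative sketch (not part of the original file).  radix_tree_next_chunk()
 * is normally reached through the iteration macros in linux/radix-tree.h
 * rather than called directly.  A typical RCU-protected walk over a
 * hypothetical my_tree (use_item() is also made up) might look like:
 *
 *	struct radix_tree_iter iter;
 *	void __rcu **slot;
 *
 *	rcu_read_lock();
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		void *item = radix_tree_deref_slot(slot);
 *		if (radix_tree_deref_retry(item)) {
 *			slot = radix_tree_iter_retry(&iter);
 *			continue;
 *		}
 *		if (!item)
 *			continue;
 *		use_item(iter.index, item);
 *	}
 *	rcu_read_unlock();
 */
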
1da177e4 LT |
1804 | /** |
1805 | * radix_tree_gang_lookup - perform multiple lookup on a radix tree | |
1806 | * @root: radix tree root | |
1807 | * @results: where the results of the lookup are placed | |
1808 | * @first_index: start the lookup from this key | |
1809 | * @max_items: place up to this many items at *results | |
1810 | * | |
1811 | * Performs an index-ascending scan of the tree for present items. Places | |
1812 | * them at *@results and returns the number of items which were placed at | |
1813 | * *@results. | |
1814 | * | |
1815 | * The implementation is naive. | |
7cf9c2c7 NP |
1816 | * |
1817 | * Like radix_tree_lookup, radix_tree_gang_lookup may be called under | |
1818 | * rcu_read_lock. In this case, rather than the returned results being | |
2fcd9005 MW |
1819 | * an atomic snapshot of the tree at a single point in time, the |
1820 | * semantics of an RCU protected gang lookup are as though multiple | |
1821 | * radix_tree_lookups have been issued under individual locks, with the
1822 | * results stored in 'results'.
1da177e4 LT |
1823 | */ |
1824 | unsigned int | |
35534c86 | 1825 | radix_tree_gang_lookup(const struct radix_tree_root *root, void **results, |
1da177e4 LT |
1826 | unsigned long first_index, unsigned int max_items) |
1827 | { | |
cebbd29e | 1828 | struct radix_tree_iter iter; |
d7b62727 | 1829 | void __rcu **slot; |
cebbd29e | 1830 | unsigned int ret = 0; |
7cf9c2c7 | 1831 | |
cebbd29e | 1832 | if (unlikely(!max_items)) |
7cf9c2c7 | 1833 | return 0; |
1da177e4 | 1834 | |
cebbd29e | 1835 | radix_tree_for_each_slot(slot, root, &iter, first_index) { |
46437f9a | 1836 | results[ret] = rcu_dereference_raw(*slot); |
cebbd29e KK |
1837 | if (!results[ret]) |
1838 | continue; | |
b194d16c | 1839 | if (radix_tree_is_internal_node(results[ret])) { |
46437f9a MW |
1840 | slot = radix_tree_iter_retry(&iter); |
1841 | continue; | |
1842 | } | |
cebbd29e | 1843 | if (++ret == max_items) |
1da177e4 | 1844 | break; |
1da177e4 | 1845 | } |
7cf9c2c7 | 1846 | |
1da177e4 LT |
1847 | return ret; |
1848 | } | |
1849 | EXPORT_SYMBOL(radix_tree_gang_lookup); | |
1850 | ||
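/*
 * Illustrative sketch (not part of the original file): looking up a batch of
 * present items starting at index 0.  my_tree and handle_item() are
 * hypothetical; the items are consumed inside the RCU read-side section.
 *
 *	void *batch[16];
 *	unsigned int i, n;
 *
 *	rcu_read_lock();
 *	n = radix_tree_gang_lookup(&my_tree, batch, 0, ARRAY_SIZE(batch));
 *	for (i = 0; i < n; i++)
 *		handle_item(batch[i]);
 *	rcu_read_unlock();
 */
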
47feff2c NP |
1851 | /** |
1852 | * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree | |
1853 | * @root: radix tree root | |
1854 | * @results: where the results of the lookup are placed | |
6328650b | 1855 | * @indices: where their indices should be placed (but usually NULL) |
47feff2c NP |
1856 | * @first_index: start the lookup from this key |
1857 | * @max_items: place up to this many items at *results | |
1858 | * | |
1859 | * Performs an index-ascending scan of the tree for present items. Places | |
1860 | * their slots at *@results and returns the number of items which were | |
1861 | * placed at *@results. | |
1862 | * | |
1863 | * The implementation is naive. | |
1864 | * | |
1865 | * Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must | |
1866 | * be dereferenced with radix_tree_deref_slot, and if using only RCU | |
1867 | * protection, radix_tree_deref_slot may fail requiring a retry. | |
1868 | */ | |
1869 | unsigned int | |
35534c86 | 1870 | radix_tree_gang_lookup_slot(const struct radix_tree_root *root, |
d7b62727 | 1871 | void __rcu ***results, unsigned long *indices, |
47feff2c NP |
1872 | unsigned long first_index, unsigned int max_items) |
1873 | { | |
cebbd29e | 1874 | struct radix_tree_iter iter; |
d7b62727 | 1875 | void __rcu **slot; |
cebbd29e | 1876 | unsigned int ret = 0; |
47feff2c | 1877 | |
cebbd29e | 1878 | if (unlikely(!max_items)) |
47feff2c NP |
1879 | return 0; |
1880 | ||
cebbd29e KK |
1881 | radix_tree_for_each_slot(slot, root, &iter, first_index) { |
1882 | results[ret] = slot; | |
6328650b | 1883 | if (indices) |
cebbd29e KK |
1884 | indices[ret] = iter.index; |
1885 | if (++ret == max_items) | |
47feff2c | 1886 | break; |
47feff2c NP |
1887 | } |
1888 | ||
1889 | return ret; | |
1890 | } | |
1891 | EXPORT_SYMBOL(radix_tree_gang_lookup_slot); | |
1892 | ||
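/*
 * Illustrative sketch (not part of the original file): the slot variant
 * returns pointers into the tree, so under RCU-only protection each slot is
 * dereferenced with radix_tree_deref_slot() and rechecked, as described
 * above.  my_tree and handle_item() are hypothetical; skipping on retry is a
 * simplification of a full restart.
 *
 *	void __rcu **slots[16];
 *	unsigned long indices[16];
 *	unsigned int i, n;
 *
 *	rcu_read_lock();
 *	n = radix_tree_gang_lookup_slot(&my_tree, slots, indices, 0,
 *					ARRAY_SIZE(slots));
 *	for (i = 0; i < n; i++) {
 *		void *item = radix_tree_deref_slot(slots[i]);
 *		if (!item || radix_tree_deref_retry(item))
 *			continue;
 *		handle_item(indices[i], item);
 *	}
 *	rcu_read_unlock();
 */
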
1da177e4 LT |
1893 | /** |
1894 | * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree | |
1895 | * based on a tag | |
1896 | * @root: radix tree root | |
1897 | * @results: where the results of the lookup are placed | |
1898 | * @first_index: start the lookup from this key | |
1899 | * @max_items: place up to this many items at *results | |
daff89f3 | 1900 | * @tag: the tag index (< RADIX_TREE_MAX_TAGS) |
1da177e4 LT |
1901 | * |
1902 | * Performs an index-ascending scan of the tree for present items which | |
1903 | * have the tag indexed by @tag set. Places the items at *@results and | |
1904 | * returns the number of items which were placed at *@results. | |
1905 | */ | |
1906 | unsigned int | |
35534c86 | 1907 | radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results, |
daff89f3 JC |
1908 | unsigned long first_index, unsigned int max_items, |
1909 | unsigned int tag) | |
1da177e4 | 1910 | { |
cebbd29e | 1911 | struct radix_tree_iter iter; |
d7b62727 | 1912 | void __rcu **slot; |
cebbd29e | 1913 | unsigned int ret = 0; |
612d6c19 | 1914 | |
cebbd29e | 1915 | if (unlikely(!max_items)) |
7cf9c2c7 NP |
1916 | return 0; |
1917 | ||
cebbd29e | 1918 | radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { |
46437f9a | 1919 | results[ret] = rcu_dereference_raw(*slot); |
cebbd29e KK |
1920 | if (!results[ret]) |
1921 | continue; | |
b194d16c | 1922 | if (radix_tree_is_internal_node(results[ret])) { |
46437f9a MW |
1923 | slot = radix_tree_iter_retry(&iter); |
1924 | continue; | |
1925 | } | |
cebbd29e | 1926 | if (++ret == max_items) |
1da177e4 | 1927 | break; |
1da177e4 | 1928 | } |
7cf9c2c7 | 1929 | |
1da177e4 LT |
1930 | return ret; |
1931 | } | |
1932 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag); | |
1933 | ||
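/*
 * Illustrative sketch (not part of the original file): gathering only the
 * entries that carry a given tag.  my_tree, MY_TAG_DIRTY and
 * writeback_my_item() are hypothetical names.
 *
 *	void *batch[16];
 *	unsigned int i, n;
 *
 *	rcu_read_lock();
 *	n = radix_tree_gang_lookup_tag(&my_tree, batch, 0, ARRAY_SIZE(batch),
 *					MY_TAG_DIRTY);
 *	for (i = 0; i < n; i++)
 *		writeback_my_item(batch[i]);
 *	rcu_read_unlock();
 */
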
47feff2c NP |
1934 | /** |
1935 | * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a | |
1936 | * radix tree based on a tag | |
1937 | * @root: radix tree root | |
1938 | * @results: where the results of the lookup are placed | |
1939 | * @first_index: start the lookup from this key | |
1940 | * @max_items: place up to this many items at *results | |
1941 | * @tag: the tag index (< RADIX_TREE_MAX_TAGS) | |
1942 | * | |
1943 | * Performs an index-ascending scan of the tree for present items which | |
1944 | * have the tag indexed by @tag set. Places the slots at *@results and | |
1945 | * returns the number of slots which were placed at *@results. | |
1946 | */ | |
1947 | unsigned int | |
35534c86 | 1948 | radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root, |
d7b62727 | 1949 | void __rcu ***results, unsigned long first_index, |
35534c86 | 1950 | unsigned int max_items, unsigned int tag) |
47feff2c | 1951 | { |
cebbd29e | 1952 | struct radix_tree_iter iter; |
d7b62727 | 1953 | void __rcu **slot; |
cebbd29e | 1954 | unsigned int ret = 0; |
47feff2c | 1955 | |
cebbd29e | 1956 | if (unlikely(!max_items)) |
47feff2c NP |
1957 | return 0; |
1958 | ||
cebbd29e KK |
1959 | radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { |
1960 | results[ret] = slot; | |
1961 | if (++ret == max_items) | |
47feff2c | 1962 | break; |
47feff2c NP |
1963 | } |
1964 | ||
1965 | return ret; | |
1966 | } | |
1967 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); | |
1968 | ||
139e5616 JW |
1969 | /** |
1970 | * __radix_tree_delete_node - try to free node after clearing a slot | |
1971 | * @root: radix tree root | |
139e5616 | 1972 | * @node: node containing the slot that was cleared
ea07b862 | 1973 | * @update_node: callback for changing leaf nodes |
139e5616 JW |
1974 | * |
1975 | * After clearing a slot in @node of the radix tree
1976 | * rooted at @root, call this function to attempt freeing the | |
1977 | * node and shrinking the tree. | |
139e5616 | 1978 | */ |
14b46879 | 1979 | void __radix_tree_delete_node(struct radix_tree_root *root, |
ea07b862 | 1980 | struct radix_tree_node *node, |
c7df8ad2 | 1981 | radix_tree_update_node_t update_node) |
139e5616 | 1982 | { |
c7df8ad2 | 1983 | delete_node(root, node, update_node); |
139e5616 JW |
1984 | } |
1985 | ||
0ac398ef | 1986 | static bool __radix_tree_delete(struct radix_tree_root *root, |
d7b62727 | 1987 | struct radix_tree_node *node, void __rcu **slot) |
0ac398ef | 1988 | { |
0a835c4f MW |
1989 | void *old = rcu_dereference_raw(*slot); |
1990 | int exceptional = radix_tree_exceptional_entry(old) ? -1 : 0; | |
0ac398ef MW |
1991 | unsigned offset = get_slot_offset(node, slot); |
1992 | int tag; | |
1993 | ||
0a835c4f MW |
1994 | if (is_idr(root)) |
1995 | node_tag_set(root, node, IDR_FREE, offset); | |
1996 | else | |
1997 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) | |
1998 | node_tag_clear(root, node, tag, offset); | |
0ac398ef | 1999 | |
0a835c4f | 2000 | replace_slot(slot, NULL, node, -1, exceptional); |
c7df8ad2 | 2001 | return node && delete_node(root, node, NULL); |
0ac398ef MW |
2002 | } |
2003 | ||
1da177e4 | 2004 | /** |
0ac398ef MW |
2005 | * radix_tree_iter_delete - delete the entry at this iterator position |
2006 | * @root: radix tree root | |
2007 | * @iter: iterator state | |
2008 | * @slot: pointer to slot | |
1da177e4 | 2009 | * |
0ac398ef MW |
2010 | * Delete the entry at the position currently pointed to by the iterator. |
2011 | * This may result in the current node being freed; if it is, the iterator | |
2012 | * is advanced so that it will not reference the freed memory. This | |
2013 | * function may be called without any locking if there are no other threads | |
2014 | * which can access this tree. | |
2015 | */ | |
2016 | void radix_tree_iter_delete(struct radix_tree_root *root, | |
d7b62727 | 2017 | struct radix_tree_iter *iter, void __rcu **slot) |
0ac398ef MW |
2018 | { |
2019 | if (__radix_tree_delete(root, iter->node, slot)) | |
2020 | iter->index = iter->next_index; | |
2021 | } | |
d1b48c1e | 2022 | EXPORT_SYMBOL(radix_tree_iter_delete); |
0ac398ef MW |
2023 | |
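/*
 * Illustrative sketch (not part of the original file): emptying a tree from
 * within an iteration.  Because radix_tree_iter_delete() advances the
 * iterator when it frees the current node, the loop needs no extra handling.
 * No locking is shown since, as documented above, this assumes no other
 * thread can access the tree.  my_tree is hypothetical.
 *
 *	struct radix_tree_iter iter;
 *	void __rcu **slot;
 *
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0)
 *		radix_tree_iter_delete(&my_tree, &iter, slot);
 */
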
2024 | /** | |
2025 | * radix_tree_delete_item - delete an item from a radix tree | |
2026 | * @root: radix tree root | |
2027 | * @index: index key | |
2028 | * @item: expected item | |
1da177e4 | 2029 | * |
0ac398ef | 2030 | * Remove @item at @index from the radix tree rooted at @root. |
1da177e4 | 2031 | * |
0ac398ef MW |
2032 | * Return: the deleted entry, or %NULL if it was not present |
2033 | * or the entry at the given @index was not @item. | |
1da177e4 | 2034 | */ |
53c59f26 JW |
2035 | void *radix_tree_delete_item(struct radix_tree_root *root, |
2036 | unsigned long index, void *item) | |
1da177e4 | 2037 | { |
0a835c4f | 2038 | struct radix_tree_node *node = NULL; |
d7b62727 | 2039 | void __rcu **slot; |
139e5616 | 2040 | void *entry; |
1da177e4 | 2041 | |
139e5616 | 2042 | entry = __radix_tree_lookup(root, index, &node, &slot); |
0a835c4f MW |
2043 | if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE, |
2044 | get_slot_offset(node, slot)))) | |
139e5616 | 2045 | return NULL; |
1da177e4 | 2046 | |
139e5616 JW |
2047 | if (item && entry != item) |
2048 | return NULL; | |
2049 | ||
0ac398ef | 2050 | __radix_tree_delete(root, node, slot); |
612d6c19 | 2051 | |
139e5616 | 2052 | return entry; |
1da177e4 | 2053 | } |
53c59f26 JW |
2054 | EXPORT_SYMBOL(radix_tree_delete_item); |
2055 | ||
2056 | /** | |
0ac398ef MW |
2057 | * radix_tree_delete - delete an entry from a radix tree |
2058 | * @root: radix tree root | |
2059 | * @index: index key | |
53c59f26 | 2060 | * |
0ac398ef | 2061 | * Remove the entry at @index from the radix tree rooted at @root. |
53c59f26 | 2062 | * |
0ac398ef | 2063 | * Return: The deleted entry, or %NULL if it was not present. |
53c59f26 JW |
2064 | */ |
2065 | void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) | |
2066 | { | |
2067 | return radix_tree_delete_item(root, index, NULL); | |
2068 | } | |
1da177e4 LT |
2069 | EXPORT_SYMBOL(radix_tree_delete); |
2070 | ||
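/*
 * Illustrative sketch (not part of the original file).  my_tree, my_item and
 * free_my_item() are hypothetical, and the caller is assumed to exclude
 * other modifications of the tree while these run.  Remove index 42 only if
 * it still holds my_item, then unconditionally remove index 43:
 *
 *	void *old;
 *
 *	if (radix_tree_delete_item(&my_tree, 42, my_item) == my_item)
 *		free_my_item(my_item);
 *	old = radix_tree_delete(&my_tree, 43);
 */
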
d3798ae8 JW |
2071 | void radix_tree_clear_tags(struct radix_tree_root *root, |
2072 | struct radix_tree_node *node, | |
d7b62727 | 2073 | void __rcu **slot) |
d604c324 | 2074 | { |
d604c324 MW |
2075 | if (node) { |
2076 | unsigned int tag, offset = get_slot_offset(node, slot); | |
2077 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) | |
2078 | node_tag_clear(root, node, tag, offset); | |
2079 | } else { | |
0a835c4f | 2080 | root_tag_clear_all(root); |
d604c324 | 2081 | } |
d604c324 MW |
2082 | } |
2083 | ||
1da177e4 LT |
2084 | /** |
2085 | * radix_tree_tagged - test whether any items in the tree are tagged | |
2086 | * @root: radix tree root | |
2087 | * @tag: tag to test | |
2088 | */ | |
35534c86 | 2089 | int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag) |
1da177e4 | 2090 | { |
612d6c19 | 2091 | return root_tag_get(root, tag); |
1da177e4 LT |
2092 | } |
2093 | EXPORT_SYMBOL(radix_tree_tagged); | |
2094 | ||
0a835c4f MW |
2095 | /** |
2096 | * idr_preload - preload for idr_alloc() | |
2097 | * @gfp_mask: allocation mask to use for preloading | |
2098 | * | |
2099 | * Preallocate memory to use for the next call to idr_alloc(). This function | |
2100 | * returns with preemption disabled. It will be enabled by idr_preload_end(). | |
2101 | */ | |
2102 | void idr_preload(gfp_t gfp_mask) | |
2103 | { | |
bc9ae224 ED |
2104 | if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE)) |
2105 | preempt_disable(); | |
0a835c4f MW |
2106 | } |
2107 | EXPORT_SYMBOL(idr_preload); | |
2108 | ||
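/*
 * Illustrative sketch (not part of the original file): the usual pairing of
 * idr_preload()/idr_preload_end() around an allocation that must not sleep.
 * The GFP_KERNEL preallocation happens outside the lock; idr_alloc() then
 * uses GFP_NOWAIT and falls back to the preloaded nodes.  my_idr, my_lock
 * and my_ptr are hypothetical.
 *
 *	int id;
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&my_lock);
 *	id = idr_alloc(&my_idr, my_ptr, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&my_lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		return id;
 */
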
7ad3d4d8 MW |
2109 | /** |
2110 | * ida_pre_get - reserve resources for ida allocation | |
2111 | * @ida: ida handle | |
2112 | * @gfp: memory allocation flags | |
2113 | * | |
2114 | * This function should be called before calling ida_get_new_above(). If it | |
2115 | * is unable to allocate memory, it will return %0. On success, it returns %1. | |
2116 | */ | |
2117 | int ida_pre_get(struct ida *ida, gfp_t gfp) | |
2118 | { | |
7ad3d4d8 MW |
2119 | /* |
2120 | * The IDA API has no preload_end() equivalent. Instead, | |
2121 | * ida_get_new() can return -EAGAIN, prompting the caller | |
2122 | * to return to the ida_pre_get() step. | |
2123 | */ | |
bc9ae224 ED |
2124 | if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE)) |
2125 | preempt_enable(); | |
7ad3d4d8 MW |
2126 | |
2127 | if (!this_cpu_read(ida_bitmap)) { | |
b1a8a7a7 | 2128 | struct ida_bitmap *bitmap = kzalloc(sizeof(*bitmap), gfp); |
7ad3d4d8 MW |
2129 | if (!bitmap) |
2130 | return 0; | |
4ecd9542 MW |
2131 | if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap)) |
2132 | kfree(bitmap); | |
7ad3d4d8 MW |
2133 | } |
2134 | ||
2135 | return 1; | |
2136 | } | |
2137 | EXPORT_SYMBOL(ida_pre_get); | |
2138 | ||
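/*
 * Illustrative sketch (not part of the original file): the retry loop
 * implied by the comment above, using ida_get_new().  my_ida and my_lock are
 * hypothetical; the lock stands in for whatever exclusion the caller uses.
 *
 *	int id, err;
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = ida_get_new(&my_ida, &id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 *	if (err)
 *		return err;
 */
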
460488c5 | 2139 | void __rcu **idr_get_free(struct radix_tree_root *root, |
388f79fd CM |
2140 | struct radix_tree_iter *iter, gfp_t gfp, |
2141 | unsigned long max) | |
0a835c4f MW |
2142 | { |
2143 | struct radix_tree_node *node = NULL, *child; | |
d7b62727 | 2144 | void __rcu **slot = (void __rcu **)&root->rnode; |
0a835c4f | 2145 | unsigned long maxindex, start = iter->next_index; |
0a835c4f MW |
2146 | unsigned int shift, offset = 0; |
2147 | ||
2148 | grow: | |
2149 | shift = radix_tree_load_root(root, &child, &maxindex); | |
2150 | if (!radix_tree_tagged(root, IDR_FREE)) | |
2151 | start = max(start, maxindex + 1); | |
2152 | if (start > max) | |
2153 | return ERR_PTR(-ENOSPC); | |
2154 | ||
2155 | if (start > maxindex) { | |
2156 | int error = radix_tree_extend(root, gfp, start, shift); | |
2157 | if (error < 0) | |
2158 | return ERR_PTR(error); | |
2159 | shift = error; | |
2160 | child = rcu_dereference_raw(root->rnode); | |
2161 | } | |
2162 | ||
2163 | while (shift) { | |
2164 | shift -= RADIX_TREE_MAP_SHIFT; | |
2165 | if (child == NULL) { | |
2166 | /* Have to add a child node. */ | |
d58275bc MW |
2167 | child = radix_tree_node_alloc(gfp, node, root, shift, |
2168 | offset, 0, 0); | |
0a835c4f MW |
2169 | if (!child) |
2170 | return ERR_PTR(-ENOMEM); | |
2171 | all_tag_set(child, IDR_FREE); | |
2172 | rcu_assign_pointer(*slot, node_to_entry(child)); | |
2173 | if (node) | |
2174 | node->count++; | |
2175 | } else if (!radix_tree_is_internal_node(child)) | |
2176 | break; | |
2177 | ||
2178 | node = entry_to_node(child); | |
2179 | offset = radix_tree_descend(node, &child, start); | |
2180 | if (!tag_get(node, IDR_FREE, offset)) { | |
2181 | offset = radix_tree_find_next_bit(node, IDR_FREE, | |
2182 | offset + 1); | |
2183 | start = next_index(start, node, offset); | |
2184 | if (start > max) | |
2185 | return ERR_PTR(-ENOSPC); | |
2186 | while (offset == RADIX_TREE_MAP_SIZE) { | |
2187 | offset = node->offset + 1; | |
2188 | node = node->parent; | |
2189 | if (!node) | |
2190 | goto grow; | |
2191 | shift = node->shift; | |
2192 | } | |
2193 | child = rcu_dereference_raw(node->slots[offset]); | |
2194 | } | |
2195 | slot = &node->slots[offset]; | |
2196 | } | |
2197 | ||
2198 | iter->index = start; | |
2199 | if (node) | |
2200 | iter->next_index = 1 + min(max, (start | node_maxindex(node))); | |
2201 | else | |
2202 | iter->next_index = 1; | |
2203 | iter->node = node; | |
2204 | __set_iter_shift(iter, shift); | |
2205 | set_iter_tags(iter, node, offset, IDR_FREE); | |
2206 | ||
2207 | return slot; | |
2208 | } | |
2209 | ||
2210 | /** | |
2211 | * idr_destroy - release all internal memory from an IDR | |
2212 | * @idr: idr handle | |
2213 | * | |
2214 | * After this function is called, the IDR is empty, and may be reused or | |
2215 | * the data structure containing it may be freed. | |
2216 | * | |
2217 | * A typical clean-up sequence for objects stored in an idr tree will use | |
2218 | * idr_for_each() to free all objects, if necessary, then idr_destroy() to | |
2219 | * free the memory used to keep track of those objects. | |
2220 | */ | |
2221 | void idr_destroy(struct idr *idr) | |
2222 | { | |
2223 | struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.rnode); | |
2224 | if (radix_tree_is_internal_node(node)) | |
2225 | radix_tree_free_nodes(node); | |
2226 | idr->idr_rt.rnode = NULL; | |
2227 | root_tag_set(&idr->idr_rt, IDR_FREE); | |
2228 | } | |
2229 | EXPORT_SYMBOL(idr_destroy); | |
2230 | ||
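/*
 * Illustrative sketch (not part of the original file): the clean-up sequence
 * described above.  my_idr and free_my_object() are hypothetical.
 *
 *	static int free_my_object(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_my_object, NULL);
 *	idr_destroy(&my_idr);
 */
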
1da177e4 | 2231 | static void |
449dd698 | 2232 | radix_tree_node_ctor(void *arg) |
1da177e4 | 2233 | { |
449dd698 JW |
2234 | struct radix_tree_node *node = arg; |
2235 | ||
2236 | memset(node, 0, sizeof(*node)); | |
2237 | INIT_LIST_HEAD(&node->private_list); | |
1da177e4 LT |
2238 | } |
2239 | ||
c78c66d1 KS |
2240 | static __init unsigned long __maxindex(unsigned int height) |
2241 | { | |
2242 | unsigned int width = height * RADIX_TREE_MAP_SHIFT; | |
2243 | int shift = RADIX_TREE_INDEX_BITS - width; | |
2244 | ||
2245 | if (shift < 0) | |
2246 | return ~0UL; | |
2247 | if (shift >= BITS_PER_LONG) | |
2248 | return 0UL; | |
2249 | return ~0UL >> shift; | |
2250 | } | |
2251 | ||
2252 | static __init void radix_tree_init_maxnodes(void) | |
2253 | { | |
2254 | unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1]; | |
2255 | unsigned int i, j; | |
2256 | ||
2257 | for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++) | |
2258 | height_to_maxindex[i] = __maxindex(i); | |
2259 | for (i = 0; i < ARRAY_SIZE(height_to_maxnodes); i++) { | |
2260 | for (j = i; j > 0; j--) | |
2261 | height_to_maxnodes[i] += height_to_maxindex[j - 1] + 1; | |
2262 | } | |
2263 | } | |
2264 | ||
d544abd5 | 2265 | static int radix_tree_cpu_dead(unsigned int cpu) |
1da177e4 | 2266 | { |
2fcd9005 MW |
2267 | struct radix_tree_preload *rtp; |
2268 | struct radix_tree_node *node; | |
2269 | ||
2270 | /* Free per-cpu pool of preloaded nodes */ | |
d544abd5 SAS |
2271 | rtp = &per_cpu(radix_tree_preloads, cpu); |
2272 | while (rtp->nr) { | |
2273 | node = rtp->nodes; | |
1293d5c5 | 2274 | rtp->nodes = node->parent; |
d544abd5 SAS |
2275 | kmem_cache_free(radix_tree_node_cachep, node); |
2276 | rtp->nr--; | |
2fcd9005 | 2277 | } |
7ad3d4d8 MW |
2278 | kfree(per_cpu(ida_bitmap, cpu)); |
2279 | per_cpu(ida_bitmap, cpu) = NULL; | |
d544abd5 | 2280 | return 0; |
1da177e4 | 2281 | } |
1da177e4 LT |
2282 | |
2283 | void __init radix_tree_init(void) | |
2284 | { | |
d544abd5 | 2285 | int ret; |
7e784422 MH |
2286 | |
2287 | BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32); | |
1da177e4 LT |
2288 | radix_tree_node_cachep = kmem_cache_create("radix_tree_node", |
2289 | sizeof(struct radix_tree_node), 0, | |
488514d1 CL |
2290 | SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, |
2291 | radix_tree_node_ctor); | |
c78c66d1 | 2292 | radix_tree_init_maxnodes(); |
d544abd5 SAS |
2293 | ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead", |
2294 | NULL, radix_tree_cpu_dead); | |
2295 | WARN_ON(ret < 0); | |
1da177e4 | 2296 | } |