// SPDX-License-Identifier: GPL-2.0+
/*
 * XArray implementation
 * Copyright (c) 2017 Microsoft Corporation
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/xarray.h>

/*
 * Coding conventions in this file:
 *
 * @xa is used to refer to the entire xarray.
 * @xas is the 'xarray operation state'. It may be either a pointer to
 * an xa_state, or an xa_state stored on the stack. This is an unfortunate
 * ambiguity.
 * @index is the index of the entry being operated on.
 * @mark is an xa_mark_t; a small number indicating one of the mark bits.
 * @node refers to an xa_node; usually the primary one being operated on by
 * this function.
 * @offset is the index into the slots array inside an xa_node.
 * @parent refers to the @xa_node closer to the head than @node.
 * @entry refers to something stored in a slot in the xarray.
 */

static inline unsigned int xa_lock_type(const struct xarray *xa)
{
	return (__force unsigned int)xa->xa_flags & 3;
}

static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == XA_LOCK_IRQ)
		xas_lock_irq(xas);
	else if (lock_type == XA_LOCK_BH)
		xas_lock_bh(xas);
	else
		xas_lock(xas);
}

static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == XA_LOCK_IRQ)
		xas_unlock_irq(xas);
	else if (lock_type == XA_LOCK_BH)
		xas_unlock_bh(xas);
	else
		xas_unlock(xas);
}

static inline bool xa_track_free(const struct xarray *xa)
{
	return xa->xa_flags & XA_FLAGS_TRACK_FREE;
}

static inline bool xa_zero_busy(const struct xarray *xa)
{
	return xa->xa_flags & XA_FLAGS_ZERO_BUSY;
}

static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
{
	if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
		xa->xa_flags |= XA_FLAGS_MARK(mark);
}

static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
{
	if (xa->xa_flags & XA_FLAGS_MARK(mark))
		xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
}

static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
{
	return node->marks[(__force unsigned)mark];
}

static inline bool node_get_mark(struct xa_node *node,
		unsigned int offset, xa_mark_t mark)
{
	return test_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_set_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_clear_bit(offset, node_marks(node, mark));
}

static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
{
	return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
}

static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
{
	bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
}

#define mark_inc(mark) do { \
	mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
} while (0)

/*
 * xas_squash_marks() - Merge all marks to the first entry
 * @xas: Array operation state.
 *
 * Set a mark on the first entry if any entry has it set. Clear marks on
 * all sibling entries.
 */
static void xas_squash_marks(const struct xa_state *xas)
{
	unsigned int mark = 0;
	unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;

	if (!xas->xa_sibs)
		return;

	do {
		unsigned long *marks = xas->xa_node->marks[mark];
		if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
			continue;
		__set_bit(xas->xa_offset, marks);
		bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
	} while (mark++ != (__force unsigned)XA_MARK_MAX);
}

/* extracts the offset within this node from the index */
static unsigned int get_offset(unsigned long index, struct xa_node *node)
{
	return (index >> node->shift) & XA_CHUNK_MASK;
}

static void xas_set_offset(struct xa_state *xas)
{
	xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
}

/* move the index either forwards (find) or backwards (sibling slot) */
static void xas_move_index(struct xa_state *xas, unsigned long offset)
{
	unsigned int shift = xas->xa_node->shift;
	xas->xa_index &= ~XA_CHUNK_MASK << shift;
	xas->xa_index += offset << shift;
}

static void xas_advance(struct xa_state *xas)
{
	xas->xa_offset++;
	xas_move_index(xas, xas->xa_offset);
}

static void *set_bounds(struct xa_state *xas)
{
	xas->xa_node = XAS_BOUNDS;
	return NULL;
}

/*
 * Starts a walk. If the @xas is already valid, we assume that it's on
 * the right path and just return where we've got to. If we're in an
 * error state, return NULL. If the index is outside the current scope
 * of the xarray, return NULL without changing @xas->xa_node. Otherwise
 * set @xas->xa_node to NULL and return the current head of the array.
 */
static void *xas_start(struct xa_state *xas)
{
	void *entry;

	if (xas_valid(xas))
		return xas_reload(xas);
	if (xas_error(xas))
		return NULL;

	entry = xa_head(xas->xa);
	if (!xa_is_node(entry)) {
		if (xas->xa_index)
			return set_bounds(xas);
	} else {
		if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
			return set_bounds(xas);
	}

	xas->xa_node = NULL;
	return entry;
}

static void *xas_descend(struct xa_state *xas, struct xa_node *node)
{
	unsigned int offset = get_offset(xas->xa_index, node);
	void *entry = xa_entry(xas->xa, node, offset);

	xas->xa_node = node;
	if (xa_is_sibling(entry)) {
		offset = xa_to_sibling(entry);
		entry = xa_entry(xas->xa, node, offset);
	}

	xas->xa_offset = offset;
	return entry;
}

/**
 * xas_load() - Load an entry from the XArray (advanced).
 * @xas: XArray operation state.
 *
 * Usually walks the @xas to the appropriate state to load the entry
 * stored at xa_index. However, it will do nothing and return %NULL if
 * @xas is in an error state. xas_load() will never expand the tree.
 *
 * If the xa_state is set up to operate on a multi-index entry, xas_load()
 * may return %NULL or an internal entry, even if there are entries
 * present within the range specified by @xas.
 *
 * Context: Any context. The caller should hold the xa_lock or the RCU lock.
 * Return: Usually an entry in the XArray, but see description for exceptions.
 */
void *xas_load(struct xa_state *xas)
{
	void *entry = xas_start(xas);

	while (xa_is_node(entry)) {
		struct xa_node *node = xa_to_node(entry);

		if (xas->xa_shift > node->shift)
			break;
		entry = xas_descend(xas, node);
		if (node->shift == 0)
			break;
	}
	return entry;
}
EXPORT_SYMBOL_GPL(xas_load);
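
/*
 * Illustrative sketch (not part of the original file): a minimal caller
 * of xas_load() under the RCU read lock, retrying if a retry entry is
 * observed. This mirrors the xa_load() convenience wrapper defined later
 * in this file; the function name example_load is hypothetical.
 */
static inline void *example_load(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry;
}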
246 | ||
58d6ea30 MW |
247 | /* Move the radix tree node cache here */ |
248 | extern struct kmem_cache *radix_tree_node_cachep; | |
249 | extern void radix_tree_node_rcu_free(struct rcu_head *head); | |
250 | ||
251 | #define XA_RCU_FREE ((struct xarray *)1) | |
252 | ||
253 | static void xa_node_free(struct xa_node *node) | |
254 | { | |
255 | XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); | |
256 | node->array = XA_RCU_FREE; | |
257 | call_rcu(&node->rcu_head, radix_tree_node_rcu_free); | |
258 | } | |
259 | ||
260 | /* | |
261 | * xas_destroy() - Free any resources allocated during the XArray operation. | |
262 | * @xas: XArray operation state. | |
263 | * | |
264 | * This function is now internal-only. | |
265 | */ | |
266 | static void xas_destroy(struct xa_state *xas) | |
267 | { | |
268 | struct xa_node *node = xas->xa_alloc; | |
269 | ||
270 | if (!node) | |
271 | return; | |
272 | XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); | |
273 | kmem_cache_free(radix_tree_node_cachep, node); | |
274 | xas->xa_alloc = NULL; | |
275 | } | |
276 | ||
277 | /** | |
278 | * xas_nomem() - Allocate memory if needed. | |
279 | * @xas: XArray operation state. | |
280 | * @gfp: Memory allocation flags. | |
281 | * | |
282 | * If we need to add new nodes to the XArray, we try to allocate memory | |
283 | * with GFP_NOWAIT while holding the lock, which will usually succeed. | |
284 | * If it fails, @xas is flagged as needing memory to continue. The caller | |
285 | * should drop the lock and call xas_nomem(). If xas_nomem() succeeds, | |
286 | * the caller should retry the operation. | |
287 | * | |
288 | * Forward progress is guaranteed as one node is allocated here and | |
289 | * stored in the xa_state where it will be found by xas_alloc(). More | |
290 | * nodes will likely be found in the slab allocator, but we do not tie | |
291 | * them up here. | |
292 | * | |
293 | * Return: true if memory was needed, and was successfully allocated. | |
294 | */ | |
295 | bool xas_nomem(struct xa_state *xas, gfp_t gfp) | |
296 | { | |
297 | if (xas->xa_node != XA_ERROR(-ENOMEM)) { | |
298 | xas_destroy(xas); | |
299 | return false; | |
300 | } | |
7b785645 JW |
301 | if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT) |
302 | gfp |= __GFP_ACCOUNT; | |
58d6ea30 MW |
303 | xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); |
304 | if (!xas->xa_alloc) | |
305 | return false; | |
306 | XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list)); | |
307 | xas->xa_node = XAS_RESTART; | |
308 | return true; | |
309 | } | |
310 | EXPORT_SYMBOL_GPL(xas_nomem); | |
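
/*
 * Illustrative sketch (not part of the original file): the canonical
 * xas_nomem() retry loop. The store is attempted under the lock; if it
 * failed for lack of memory, the lock is dropped, memory is allocated,
 * and the operation is retried. example_store is a hypothetical name.
 */
static inline int example_store(struct xarray *xa, unsigned long index,
		void *item)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, item);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}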
311 | ||
312 | /* | |
313 | * __xas_nomem() - Drop locks and allocate memory if needed. | |
314 | * @xas: XArray operation state. | |
315 | * @gfp: Memory allocation flags. | |
316 | * | |
317 | * Internal variant of xas_nomem(). | |
318 | * | |
319 | * Return: true if memory was needed, and was successfully allocated. | |
320 | */ | |
321 | static bool __xas_nomem(struct xa_state *xas, gfp_t gfp) | |
322 | __must_hold(xas->xa->xa_lock) | |
323 | { | |
324 | unsigned int lock_type = xa_lock_type(xas->xa); | |
325 | ||
326 | if (xas->xa_node != XA_ERROR(-ENOMEM)) { | |
327 | xas_destroy(xas); | |
328 | return false; | |
329 | } | |
7b785645 JW |
330 | if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT) |
331 | gfp |= __GFP_ACCOUNT; | |
58d6ea30 MW |
332 | if (gfpflags_allow_blocking(gfp)) { |
333 | xas_unlock_type(xas, lock_type); | |
334 | xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); | |
335 | xas_lock_type(xas, lock_type); | |
336 | } else { | |
337 | xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); | |
338 | } | |
339 | if (!xas->xa_alloc) | |
340 | return false; | |
341 | XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list)); | |
342 | xas->xa_node = XAS_RESTART; | |
343 | return true; | |
344 | } | |
345 | ||
346 | static void xas_update(struct xa_state *xas, struct xa_node *node) | |
347 | { | |
348 | if (xas->xa_update) | |
349 | xas->xa_update(node); | |
350 | else | |
351 | XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); | |
352 | } | |
353 | ||
354 | static void *xas_alloc(struct xa_state *xas, unsigned int shift) | |
355 | { | |
356 | struct xa_node *parent = xas->xa_node; | |
357 | struct xa_node *node = xas->xa_alloc; | |
358 | ||
359 | if (xas_invalid(xas)) | |
360 | return NULL; | |
361 | ||
362 | if (node) { | |
363 | xas->xa_alloc = NULL; | |
364 | } else { | |
7b785645 JW |
365 | gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; |
366 | ||
367 | if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT) | |
368 | gfp |= __GFP_ACCOUNT; | |
369 | ||
370 | node = kmem_cache_alloc(radix_tree_node_cachep, gfp); | |
58d6ea30 MW |
371 | if (!node) { |
372 | xas_set_err(xas, -ENOMEM); | |
373 | return NULL; | |
374 | } | |
375 | } | |
376 | ||
377 | if (parent) { | |
378 | node->offset = xas->xa_offset; | |
379 | parent->count++; | |
380 | XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE); | |
381 | xas_update(xas, parent); | |
382 | } | |
383 | XA_NODE_BUG_ON(node, shift > BITS_PER_LONG); | |
384 | XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); | |
385 | node->shift = shift; | |
386 | node->count = 0; | |
387 | node->nr_values = 0; | |
388 | RCU_INIT_POINTER(node->parent, xas->xa_node); | |
389 | node->array = xas->xa; | |
390 | ||
391 | return node; | |
392 | } | |
393 | ||
0e9446c3 MW |
394 | #ifdef CONFIG_XARRAY_MULTI |
395 | /* Returns the number of indices covered by a given xa_state */ | |
396 | static unsigned long xas_size(const struct xa_state *xas) | |
397 | { | |
398 | return (xas->xa_sibs + 1UL) << xas->xa_shift; | |
399 | } | |
400 | #endif | |
401 | ||
58d6ea30 MW |
402 | /* |
403 | * Use this to calculate the maximum index that will need to be created | |
404 | * in order to add the entry described by @xas. Because we cannot store a | |
405 | * multiple-index entry at index 0, the calculation is a little more complex | |
406 | * than you might expect. | |
407 | */ | |
408 | static unsigned long xas_max(struct xa_state *xas) | |
409 | { | |
410 | unsigned long max = xas->xa_index; | |
411 | ||
412 | #ifdef CONFIG_XARRAY_MULTI | |
413 | if (xas->xa_shift || xas->xa_sibs) { | |
0e9446c3 | 414 | unsigned long mask = xas_size(xas) - 1; |
58d6ea30 MW |
415 | max |= mask; |
416 | if (mask == max) | |
417 | max++; | |
418 | } | |
419 | #endif | |
420 | ||
421 | return max; | |
422 | } | |
423 | ||
424 | /* The maximum index that can be contained in the array without expanding it */ | |
425 | static unsigned long max_index(void *entry) | |
426 | { | |
427 | if (!xa_is_node(entry)) | |
428 | return 0; | |
429 | return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1; | |
430 | } | |
431 | ||
432 | static void xas_shrink(struct xa_state *xas) | |
433 | { | |
434 | struct xarray *xa = xas->xa; | |
435 | struct xa_node *node = xas->xa_node; | |
436 | ||
437 | for (;;) { | |
438 | void *entry; | |
439 | ||
440 | XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE); | |
441 | if (node->count != 1) | |
442 | break; | |
443 | entry = xa_entry_locked(xa, node, 0); | |
444 | if (!entry) | |
445 | break; | |
446 | if (!xa_is_node(entry) && node->shift) | |
447 | break; | |
3ccaf57a MW |
448 | if (xa_is_zero(entry) && xa_zero_busy(xa)) |
449 | entry = NULL; | |
58d6ea30 MW |
450 | xas->xa_node = XAS_BOUNDS; |
451 | ||
452 | RCU_INIT_POINTER(xa->xa_head, entry); | |
371c752d MW |
453 | if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK)) |
454 | xa_mark_clear(xa, XA_FREE_MARK); | |
58d6ea30 MW |
455 | |
456 | node->count = 0; | |
457 | node->nr_values = 0; | |
458 | if (!xa_is_node(entry)) | |
459 | RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY); | |
460 | xas_update(xas, node); | |
461 | xa_node_free(node); | |
462 | if (!xa_is_node(entry)) | |
463 | break; | |
464 | node = xa_to_node(entry); | |
465 | node->parent = NULL; | |
466 | } | |
467 | } | |
468 | ||
469 | /* | |
470 | * xas_delete_node() - Attempt to delete an xa_node | |
471 | * @xas: Array operation state. | |
472 | * | |
473 | * Attempts to delete the @xas->xa_node. This will fail if xa->node has | |
474 | * a non-zero reference count. | |
475 | */ | |
476 | static void xas_delete_node(struct xa_state *xas) | |
477 | { | |
478 | struct xa_node *node = xas->xa_node; | |
479 | ||
480 | for (;;) { | |
481 | struct xa_node *parent; | |
482 | ||
483 | XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE); | |
484 | if (node->count) | |
485 | break; | |
486 | ||
487 | parent = xa_parent_locked(xas->xa, node); | |
488 | xas->xa_node = parent; | |
489 | xas->xa_offset = node->offset; | |
490 | xa_node_free(node); | |
491 | ||
492 | if (!parent) { | |
493 | xas->xa->xa_head = NULL; | |
494 | xas->xa_node = XAS_BOUNDS; | |
495 | return; | |
496 | } | |
497 | ||
498 | parent->slots[xas->xa_offset] = NULL; | |
499 | parent->count--; | |
500 | XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE); | |
501 | node = parent; | |
502 | xas_update(xas, node); | |
503 | } | |
504 | ||
505 | if (!node->parent) | |
506 | xas_shrink(xas); | |
507 | } | |
508 | ||
509 | /** | |
510 | * xas_free_nodes() - Free this node and all nodes that it references | |
511 | * @xas: Array operation state. | |
512 | * @top: Node to free | |
513 | * | |
514 | * This node has been removed from the tree. We must now free it and all | |
515 | * of its subnodes. There may be RCU walkers with references into the tree, | |
516 | * so we must replace all entries with retry markers. | |
517 | */ | |
518 | static void xas_free_nodes(struct xa_state *xas, struct xa_node *top) | |
519 | { | |
520 | unsigned int offset = 0; | |
521 | struct xa_node *node = top; | |
522 | ||
523 | for (;;) { | |
524 | void *entry = xa_entry_locked(xas->xa, node, offset); | |
525 | ||
76b4e529 | 526 | if (node->shift && xa_is_node(entry)) { |
58d6ea30 MW |
527 | node = xa_to_node(entry); |
528 | offset = 0; | |
529 | continue; | |
530 | } | |
531 | if (entry) | |
532 | RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY); | |
533 | offset++; | |
534 | while (offset == XA_CHUNK_SIZE) { | |
535 | struct xa_node *parent; | |
536 | ||
537 | parent = xa_parent_locked(xas->xa, node); | |
538 | offset = node->offset + 1; | |
539 | node->count = 0; | |
540 | node->nr_values = 0; | |
541 | xas_update(xas, node); | |
542 | xa_node_free(node); | |
543 | if (node == top) | |
544 | return; | |
545 | node = parent; | |
546 | } | |
547 | } | |
548 | } | |
549 | ||
550 | /* | |
551 | * xas_expand adds nodes to the head of the tree until it has reached | |
552 | * sufficient height to be able to contain @xas->xa_index | |
553 | */ | |
554 | static int xas_expand(struct xa_state *xas, void *head) | |
555 | { | |
556 | struct xarray *xa = xas->xa; | |
557 | struct xa_node *node = NULL; | |
558 | unsigned int shift = 0; | |
559 | unsigned long max = xas_max(xas); | |
560 | ||
561 | if (!head) { | |
562 | if (max == 0) | |
563 | return 0; | |
564 | while ((max >> shift) >= XA_CHUNK_SIZE) | |
565 | shift += XA_CHUNK_SHIFT; | |
566 | return shift + XA_CHUNK_SHIFT; | |
567 | } else if (xa_is_node(head)) { | |
568 | node = xa_to_node(head); | |
569 | shift = node->shift + XA_CHUNK_SHIFT; | |
570 | } | |
571 | xas->xa_node = NULL; | |
572 | ||
573 | while (max > max_index(head)) { | |
574 | xa_mark_t mark = 0; | |
575 | ||
576 | XA_NODE_BUG_ON(node, shift > BITS_PER_LONG); | |
577 | node = xas_alloc(xas, shift); | |
578 | if (!node) | |
579 | return -ENOMEM; | |
580 | ||
581 | node->count = 1; | |
582 | if (xa_is_value(head)) | |
583 | node->nr_values = 1; | |
584 | RCU_INIT_POINTER(node->slots[0], head); | |
585 | ||
586 | /* Propagate the aggregated mark info to the new child */ | |
587 | for (;;) { | |
371c752d MW |
588 | if (xa_track_free(xa) && mark == XA_FREE_MARK) { |
589 | node_mark_all(node, XA_FREE_MARK); | |
590 | if (!xa_marked(xa, XA_FREE_MARK)) { | |
591 | node_clear_mark(node, 0, XA_FREE_MARK); | |
592 | xa_mark_set(xa, XA_FREE_MARK); | |
593 | } | |
594 | } else if (xa_marked(xa, mark)) { | |
58d6ea30 | 595 | node_set_mark(node, 0, mark); |
371c752d | 596 | } |
58d6ea30 MW |
597 | if (mark == XA_MARK_MAX) |
598 | break; | |
599 | mark_inc(mark); | |
600 | } | |
601 | ||
602 | /* | |
603 | * Now that the new node is fully initialised, we can add | |
604 | * it to the tree | |
605 | */ | |
606 | if (xa_is_node(head)) { | |
607 | xa_to_node(head)->offset = 0; | |
608 | rcu_assign_pointer(xa_to_node(head)->parent, node); | |
609 | } | |
610 | head = xa_mk_node(node); | |
611 | rcu_assign_pointer(xa->xa_head, head); | |
612 | xas_update(xas, node); | |
613 | ||
614 | shift += XA_CHUNK_SHIFT; | |
615 | } | |
616 | ||
617 | xas->xa_node = node; | |
618 | return shift; | |
619 | } | |
620 | ||
621 | /* | |
622 | * xas_create() - Create a slot to store an entry in. | |
623 | * @xas: XArray operation state. | |
76b4e529 | 624 | * @allow_root: %true if we can store the entry in the root directly |
58d6ea30 MW |
625 | * |
626 | * Most users will not need to call this function directly, as it is called | |
627 | * by xas_store(). It is useful for doing conditional store operations | |
628 | * (see the xa_cmpxchg() implementation for an example). | |
629 | * | |
630 | * Return: If the slot already existed, returns the contents of this slot. | |
804dfaf0 MW |
631 | * If the slot was newly created, returns %NULL. If it failed to create the |
632 | * slot, returns %NULL and indicates the error in @xas. | |
58d6ea30 | 633 | */ |
76b4e529 | 634 | static void *xas_create(struct xa_state *xas, bool allow_root) |
58d6ea30 MW |
635 | { |
636 | struct xarray *xa = xas->xa; | |
637 | void *entry; | |
638 | void __rcu **slot; | |
639 | struct xa_node *node = xas->xa_node; | |
640 | int shift; | |
641 | unsigned int order = xas->xa_shift; | |
642 | ||
643 | if (xas_top(node)) { | |
644 | entry = xa_head_locked(xa); | |
645 | xas->xa_node = NULL; | |
3ccaf57a MW |
646 | if (!entry && xa_zero_busy(xa)) |
647 | entry = XA_ZERO_ENTRY; | |
58d6ea30 MW |
648 | shift = xas_expand(xas, entry); |
649 | if (shift < 0) | |
650 | return NULL; | |
76b4e529 MW |
651 | if (!shift && !allow_root) |
652 | shift = XA_CHUNK_SHIFT; | |
58d6ea30 MW |
653 | entry = xa_head_locked(xa); |
654 | slot = &xa->xa_head; | |
655 | } else if (xas_error(xas)) { | |
656 | return NULL; | |
657 | } else if (node) { | |
658 | unsigned int offset = xas->xa_offset; | |
659 | ||
660 | shift = node->shift; | |
661 | entry = xa_entry_locked(xa, node, offset); | |
662 | slot = &node->slots[offset]; | |
663 | } else { | |
664 | shift = 0; | |
665 | entry = xa_head_locked(xa); | |
666 | slot = &xa->xa_head; | |
667 | } | |
668 | ||
669 | while (shift > order) { | |
670 | shift -= XA_CHUNK_SHIFT; | |
671 | if (!entry) { | |
672 | node = xas_alloc(xas, shift); | |
673 | if (!node) | |
674 | break; | |
371c752d MW |
675 | if (xa_track_free(xa)) |
676 | node_mark_all(node, XA_FREE_MARK); | |
58d6ea30 MW |
677 | rcu_assign_pointer(*slot, xa_mk_node(node)); |
678 | } else if (xa_is_node(entry)) { | |
679 | node = xa_to_node(entry); | |
680 | } else { | |
681 | break; | |
682 | } | |
683 | entry = xas_descend(xas, node); | |
684 | slot = &node->slots[xas->xa_offset]; | |
685 | } | |
686 | ||
687 | return entry; | |
688 | } | |
689 | ||
2264f513 MW |
690 | /** |
691 | * xas_create_range() - Ensure that stores to this range will succeed | |
692 | * @xas: XArray operation state. | |
693 | * | |
694 | * Creates all of the slots in the range covered by @xas. Sets @xas to | |
695 | * create single-index entries and positions it at the beginning of the | |
696 | * range. This is for the benefit of users which have not yet been | |
697 | * converted to use multi-index entries. | |
698 | */ | |
699 | void xas_create_range(struct xa_state *xas) | |
700 | { | |
701 | unsigned long index = xas->xa_index; | |
702 | unsigned char shift = xas->xa_shift; | |
703 | unsigned char sibs = xas->xa_sibs; | |
704 | ||
705 | xas->xa_index |= ((sibs + 1) << shift) - 1; | |
706 | if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift) | |
707 | xas->xa_offset |= sibs; | |
708 | xas->xa_shift = 0; | |
709 | xas->xa_sibs = 0; | |
710 | ||
711 | for (;;) { | |
76b4e529 | 712 | xas_create(xas, true); |
2264f513 MW |
713 | if (xas_error(xas)) |
714 | goto restore; | |
715 | if (xas->xa_index <= (index | XA_CHUNK_MASK)) | |
716 | goto success; | |
717 | xas->xa_index -= XA_CHUNK_SIZE; | |
718 | ||
719 | for (;;) { | |
720 | struct xa_node *node = xas->xa_node; | |
721 | xas->xa_node = xa_parent_locked(xas->xa, node); | |
722 | xas->xa_offset = node->offset - 1; | |
723 | if (node->offset != 0) | |
724 | break; | |
725 | } | |
726 | } | |
727 | ||
728 | restore: | |
729 | xas->xa_shift = shift; | |
730 | xas->xa_sibs = sibs; | |
731 | xas->xa_index = index; | |
732 | return; | |
733 | success: | |
734 | xas->xa_index = index; | |
735 | if (xas->xa_node) | |
736 | xas_set_offset(xas); | |
737 | } | |
738 | EXPORT_SYMBOL_GPL(xas_create_range); | |
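
/*
 * Illustrative sketch (not part of the original file): preallocating an
 * order-aligned range of slots with xas_create_range(), assuming @index
 * is aligned to @order. XA_STATE_ORDER sets up the multi-index state;
 * example_create_range is a hypothetical name.
 */
static inline int example_create_range(struct xarray *xa,
		unsigned long index, unsigned int order)
{
	XA_STATE_ORDER(xas, xa, index, order);

	do {
		xas_lock(&xas);
		xas_create_range(&xas);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}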
739 | ||
58d6ea30 MW |
740 | static void update_node(struct xa_state *xas, struct xa_node *node, |
741 | int count, int values) | |
742 | { | |
743 | if (!node || (!count && !values)) | |
744 | return; | |
745 | ||
746 | node->count += count; | |
747 | node->nr_values += values; | |
748 | XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE); | |
749 | XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE); | |
750 | xas_update(xas, node); | |
751 | if (count < 0) | |
752 | xas_delete_node(xas); | |
753 | } | |
754 | ||
755 | /** | |
756 | * xas_store() - Store this entry in the XArray. | |
757 | * @xas: XArray operation state. | |
758 | * @entry: New entry. | |
759 | * | |
760 | * If @xas is operating on a multi-index entry, the entry returned by this | |
761 | * function is essentially meaningless (it may be an internal entry or it | |
762 | * may be %NULL, even if there are non-NULL entries at some of the indices | |
763 | * covered by the range). This is not a problem for any current users, | |
764 | * and can be changed if needed. | |
765 | * | |
766 | * Return: The old entry at this index. | |
767 | */ | |
768 | void *xas_store(struct xa_state *xas, void *entry) | |
769 | { | |
770 | struct xa_node *node; | |
771 | void __rcu **slot = &xas->xa->xa_head; | |
772 | unsigned int offset, max; | |
773 | int count = 0; | |
774 | int values = 0; | |
775 | void *first, *next; | |
776 | bool value = xa_is_value(entry); | |
777 | ||
4a5c8d89 MW |
778 | if (entry) { |
779 | bool allow_root = !xa_is_node(entry) && !xa_is_zero(entry); | |
780 | first = xas_create(xas, allow_root); | |
781 | } else { | |
58d6ea30 | 782 | first = xas_load(xas); |
4a5c8d89 | 783 | } |
58d6ea30 MW |
784 | |
785 | if (xas_invalid(xas)) | |
786 | return first; | |
787 | node = xas->xa_node; | |
788 | if (node && (xas->xa_shift < node->shift)) | |
789 | xas->xa_sibs = 0; | |
790 | if ((first == entry) && !xas->xa_sibs) | |
791 | return first; | |
792 | ||
793 | next = first; | |
794 | offset = xas->xa_offset; | |
795 | max = xas->xa_offset + xas->xa_sibs; | |
796 | if (node) { | |
797 | slot = &node->slots[offset]; | |
798 | if (xas->xa_sibs) | |
799 | xas_squash_marks(xas); | |
800 | } | |
801 | if (!entry) | |
802 | xas_init_marks(xas); | |
803 | ||
804 | for (;;) { | |
805 | /* | |
806 | * Must clear the marks before setting the entry to NULL, | |
807 | * otherwise xas_for_each_marked may find a NULL entry and | |
808 | * stop early. rcu_assign_pointer contains a release barrier | |
809 | * so the mark clearing will appear to happen before the | |
810 | * entry is set to NULL. | |
811 | */ | |
812 | rcu_assign_pointer(*slot, entry); | |
2fbe967b | 813 | if (xa_is_node(next) && (!node || node->shift)) |
58d6ea30 MW |
814 | xas_free_nodes(xas, xa_to_node(next)); |
815 | if (!node) | |
816 | break; | |
817 | count += !next - !entry; | |
818 | values += !xa_is_value(first) - !value; | |
819 | if (entry) { | |
820 | if (offset == max) | |
821 | break; | |
822 | if (!xa_is_sibling(entry)) | |
823 | entry = xa_mk_sibling(xas->xa_offset); | |
824 | } else { | |
825 | if (offset == XA_CHUNK_MASK) | |
826 | break; | |
827 | } | |
828 | next = xa_entry_locked(xas->xa, node, ++offset); | |
829 | if (!xa_is_sibling(next)) { | |
830 | if (!entry && (offset > max)) | |
831 | break; | |
832 | first = next; | |
833 | } | |
834 | slot++; | |
835 | } | |
836 | ||
837 | update_node(xas, node, count, values); | |
838 | return first; | |
839 | } | |
840 | EXPORT_SYMBOL_GPL(xas_store); | |
841 | ||
9b89a035 MW |
842 | /** |
843 | * xas_get_mark() - Returns the state of this mark. | |
844 | * @xas: XArray operation state. | |
845 | * @mark: Mark number. | |
846 | * | |
847 | * Return: true if the mark is set, false if the mark is clear or @xas | |
848 | * is in an error state. | |
849 | */ | |
850 | bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark) | |
851 | { | |
852 | if (xas_invalid(xas)) | |
853 | return false; | |
854 | if (!xas->xa_node) | |
855 | return xa_marked(xas->xa, mark); | |
856 | return node_get_mark(xas->xa_node, xas->xa_offset, mark); | |
857 | } | |
858 | EXPORT_SYMBOL_GPL(xas_get_mark); | |
859 | ||
860 | /** | |
861 | * xas_set_mark() - Sets the mark on this entry and its parents. | |
862 | * @xas: XArray operation state. | |
863 | * @mark: Mark number. | |
864 | * | |
865 | * Sets the specified mark on this entry, and walks up the tree setting it | |
866 | * on all the ancestor entries. Does nothing if @xas has not been walked to | |
867 | * an entry, or is in an error state. | |
868 | */ | |
869 | void xas_set_mark(const struct xa_state *xas, xa_mark_t mark) | |
870 | { | |
871 | struct xa_node *node = xas->xa_node; | |
872 | unsigned int offset = xas->xa_offset; | |
873 | ||
874 | if (xas_invalid(xas)) | |
875 | return; | |
876 | ||
877 | while (node) { | |
878 | if (node_set_mark(node, offset, mark)) | |
879 | return; | |
880 | offset = node->offset; | |
881 | node = xa_parent_locked(xas->xa, node); | |
882 | } | |
883 | ||
884 | if (!xa_marked(xas->xa, mark)) | |
885 | xa_mark_set(xas->xa, mark); | |
886 | } | |
887 | EXPORT_SYMBOL_GPL(xas_set_mark); | |
888 | ||
889 | /** | |
890 | * xas_clear_mark() - Clears the mark on this entry and its parents. | |
891 | * @xas: XArray operation state. | |
892 | * @mark: Mark number. | |
893 | * | |
894 | * Clears the specified mark on this entry, and walks back to the head | |
895 | * attempting to clear it on all the ancestor entries. Does nothing if | |
896 | * @xas has not been walked to an entry, or is in an error state. | |
897 | */ | |
898 | void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark) | |
899 | { | |
900 | struct xa_node *node = xas->xa_node; | |
901 | unsigned int offset = xas->xa_offset; | |
902 | ||
903 | if (xas_invalid(xas)) | |
904 | return; | |
905 | ||
906 | while (node) { | |
907 | if (!node_clear_mark(node, offset, mark)) | |
908 | return; | |
909 | if (node_any_mark(node, mark)) | |
910 | return; | |
911 | ||
912 | offset = node->offset; | |
913 | node = xa_parent_locked(xas->xa, node); | |
914 | } | |
915 | ||
916 | if (xa_marked(xas->xa, mark)) | |
917 | xa_mark_clear(xas->xa, mark); | |
918 | } | |
919 | EXPORT_SYMBOL_GPL(xas_clear_mark); | |
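
/*
 * Illustrative sketch (not part of the original file): setting a mark on
 * an existing entry and testing it. XA_MARK_0 is one of the mark bits
 * available to users; example_tag is a hypothetical name.
 */
static inline bool example_tag(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	bool tagged = false;

	xas_lock(&xas);
	if (xas_load(&xas)) {
		xas_set_mark(&xas, XA_MARK_0);
		tagged = xas_get_mark(&xas, XA_MARK_0);
	}
	xas_unlock(&xas);

	return tagged;
}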
920 | ||
58d6ea30 MW |
921 | /** |
922 | * xas_init_marks() - Initialise all marks for the entry | |
923 | * @xas: Array operations state. | |
924 | * | |
925 | * Initialise all marks for the entry specified by @xas. If we're tracking | |
926 | * free entries with a mark, we need to set it on all entries. All other | |
927 | * marks are cleared. | |
928 | * | |
929 | * This implementation is not as efficient as it could be; we may walk | |
930 | * up the tree multiple times. | |
931 | */ | |
932 | void xas_init_marks(const struct xa_state *xas) | |
933 | { | |
934 | xa_mark_t mark = 0; | |
935 | ||
936 | for (;;) { | |
371c752d MW |
937 | if (xa_track_free(xas->xa) && mark == XA_FREE_MARK) |
938 | xas_set_mark(xas, mark); | |
939 | else | |
940 | xas_clear_mark(xas, mark); | |
58d6ea30 MW |
941 | if (mark == XA_MARK_MAX) |
942 | break; | |
943 | mark_inc(mark); | |
944 | } | |
945 | } | |
946 | EXPORT_SYMBOL_GPL(xas_init_marks); | |
947 | ||
b803b428 MW |
948 | /** |
949 | * xas_pause() - Pause a walk to drop a lock. | |
950 | * @xas: XArray operation state. | |
951 | * | |
952 | * Some users need to pause a walk and drop the lock they're holding in | |
953 | * order to yield to a higher priority thread or carry out an operation | |
954 | * on an entry. Those users should call this function before they drop | |
955 | * the lock. It resets the @xas to be suitable for the next iteration | |
956 | * of the loop after the user has reacquired the lock. If most entries | |
957 | * found during a walk require you to call xas_pause(), the xa_for_each() | |
958 | * iterator may be more appropriate. | |
959 | * | |
960 | * Note that xas_pause() only works for forward iteration. If a user needs | |
961 | * to pause a reverse iteration, we will need a xas_pause_rev(). | |
962 | */ | |
963 | void xas_pause(struct xa_state *xas) | |
964 | { | |
965 | struct xa_node *node = xas->xa_node; | |
966 | ||
967 | if (xas_invalid(xas)) | |
968 | return; | |
969 | ||
970 | if (node) { | |
971 | unsigned int offset = xas->xa_offset; | |
972 | while (++offset < XA_CHUNK_SIZE) { | |
973 | if (!xa_is_sibling(xa_entry(xas->xa, node, offset))) | |
974 | break; | |
975 | } | |
976 | xas->xa_index += (offset - xas->xa_offset) << node->shift; | |
977 | } else { | |
978 | xas->xa_index++; | |
979 | } | |
980 | xas->xa_node = XAS_RESTART; | |
981 | } | |
982 | EXPORT_SYMBOL_GPL(xas_pause); | |
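
/*
 * Illustrative sketch (not part of the original file): dropping the lock
 * mid-walk with xas_pause() to give the scheduler a chance to run.
 * example_walk is a hypothetical name; the per-entry processing is elided.
 */
static inline void example_walk(struct xarray *xa, unsigned long max)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xas_lock(&xas);
	xas_for_each(&xas, entry, max) {
		/* ... process entry ... */
		if (need_resched()) {
			xas_pause(&xas);
			xas_unlock(&xas);
			cond_resched();
			xas_lock(&xas);
		}
	}
	xas_unlock(&xas);
}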
983 | ||
64d3e9a9 MW |
984 | /* |
985 | * __xas_prev() - Find the previous entry in the XArray. | |
986 | * @xas: XArray operation state. | |
987 | * | |
988 | * Helper function for xas_prev() which handles all the complex cases | |
989 | * out of line. | |
990 | */ | |
991 | void *__xas_prev(struct xa_state *xas) | |
992 | { | |
993 | void *entry; | |
994 | ||
995 | if (!xas_frozen(xas->xa_node)) | |
996 | xas->xa_index--; | |
91abab83 MWO |
997 | if (!xas->xa_node) |
998 | return set_bounds(xas); | |
64d3e9a9 MW |
999 | if (xas_not_node(xas->xa_node)) |
1000 | return xas_load(xas); | |
1001 | ||
1002 | if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node)) | |
1003 | xas->xa_offset--; | |
1004 | ||
1005 | while (xas->xa_offset == 255) { | |
1006 | xas->xa_offset = xas->xa_node->offset - 1; | |
1007 | xas->xa_node = xa_parent(xas->xa, xas->xa_node); | |
1008 | if (!xas->xa_node) | |
1009 | return set_bounds(xas); | |
1010 | } | |
1011 | ||
1012 | for (;;) { | |
1013 | entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); | |
1014 | if (!xa_is_node(entry)) | |
1015 | return entry; | |
1016 | ||
1017 | xas->xa_node = xa_to_node(entry); | |
1018 | xas_set_offset(xas); | |
1019 | } | |
1020 | } | |
1021 | EXPORT_SYMBOL_GPL(__xas_prev); | |
1022 | ||
1023 | /* | |
1024 | * __xas_next() - Find the next entry in the XArray. | |
1025 | * @xas: XArray operation state. | |
1026 | * | |
1027 | * Helper function for xas_next() which handles all the complex cases | |
1028 | * out of line. | |
1029 | */ | |
1030 | void *__xas_next(struct xa_state *xas) | |
1031 | { | |
1032 | void *entry; | |
1033 | ||
1034 | if (!xas_frozen(xas->xa_node)) | |
1035 | xas->xa_index++; | |
91abab83 MWO |
1036 | if (!xas->xa_node) |
1037 | return set_bounds(xas); | |
64d3e9a9 MW |
1038 | if (xas_not_node(xas->xa_node)) |
1039 | return xas_load(xas); | |
1040 | ||
1041 | if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node)) | |
1042 | xas->xa_offset++; | |
1043 | ||
1044 | while (xas->xa_offset == XA_CHUNK_SIZE) { | |
1045 | xas->xa_offset = xas->xa_node->offset + 1; | |
1046 | xas->xa_node = xa_parent(xas->xa, xas->xa_node); | |
1047 | if (!xas->xa_node) | |
1048 | return set_bounds(xas); | |
1049 | } | |
1050 | ||
1051 | for (;;) { | |
1052 | entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); | |
1053 | if (!xa_is_node(entry)) | |
1054 | return entry; | |
1055 | ||
1056 | xas->xa_node = xa_to_node(entry); | |
1057 | xas_set_offset(xas); | |
1058 | } | |
1059 | } | |
1060 | EXPORT_SYMBOL_GPL(__xas_next); | |
1061 | ||
b803b428 MW |
1062 | /** |
1063 | * xas_find() - Find the next present entry in the XArray. | |
1064 | * @xas: XArray operation state. | |
1065 | * @max: Highest index to return. | |
1066 | * | |
1067 | * If the @xas has not yet been walked to an entry, return the entry | |
1068 | * which has an index >= xas.xa_index. If it has been walked, the entry | |
1069 | * currently being pointed at has been processed, and so we move to the | |
1070 | * next entry. | |
1071 | * | |
1072 | * If no entry is found and the array is smaller than @max, the iterator | |
1073 | * is set to the smallest index not yet in the array. This allows @xas | |
1074 | * to be immediately passed to xas_store(). | |
1075 | * | |
1076 | * Return: The entry, if found, otherwise %NULL. | |
1077 | */ | |
1078 | void *xas_find(struct xa_state *xas, unsigned long max) | |
1079 | { | |
1080 | void *entry; | |
1081 | ||
1082 | if (xas_error(xas)) | |
1083 | return NULL; | |
1084 | ||
1085 | if (!xas->xa_node) { | |
1086 | xas->xa_index = 1; | |
1087 | return set_bounds(xas); | |
1088 | } else if (xas_top(xas->xa_node)) { | |
1089 | entry = xas_load(xas); | |
1090 | if (entry || xas_not_node(xas->xa_node)) | |
1091 | return entry; | |
1092 | } else if (!xas->xa_node->shift && | |
1093 | xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) { | |
1094 | xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1; | |
1095 | } | |
1096 | ||
1097 | xas_advance(xas); | |
1098 | ||
1099 | while (xas->xa_node && (xas->xa_index <= max)) { | |
1100 | if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) { | |
1101 | xas->xa_offset = xas->xa_node->offset + 1; | |
1102 | xas->xa_node = xa_parent(xas->xa, xas->xa_node); | |
1103 | continue; | |
1104 | } | |
1105 | ||
1106 | entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); | |
1107 | if (xa_is_node(entry)) { | |
1108 | xas->xa_node = xa_to_node(entry); | |
1109 | xas->xa_offset = 0; | |
1110 | continue; | |
1111 | } | |
1112 | if (entry && !xa_is_sibling(entry)) | |
1113 | return entry; | |
1114 | ||
1115 | xas_advance(xas); | |
1116 | } | |
1117 | ||
1118 | if (!xas->xa_node) | |
1119 | xas->xa_node = XAS_BOUNDS; | |
1120 | return NULL; | |
1121 | } | |
1122 | EXPORT_SYMBOL_GPL(xas_find); | |
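
/*
 * Illustrative sketch (not part of the original file): counting present
 * entries with repeated calls to xas_find() under RCU. This is roughly
 * what the xas_for_each() iterator expands to; example_count is a
 * hypothetical name.
 */
static inline unsigned long example_count(struct xarray *xa,
		unsigned long max)
{
	XA_STATE(xas, xa, 0);
	unsigned long count = 0;
	void *entry;

	rcu_read_lock();
	for (entry = xas_find(&xas, max); entry;
	     entry = xas_find(&xas, max)) {
		if (xas_retry(&xas, entry))
			continue;
		count++;
	}
	rcu_read_unlock();

	return count;
}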
1123 | ||
1124 | /** | |
1125 | * xas_find_marked() - Find the next marked entry in the XArray. | |
1126 | * @xas: XArray operation state. | |
1127 | * @max: Highest index to return. | |
1128 | * @mark: Mark number to search for. | |
1129 | * | |
1130 | * If the @xas has not yet been walked to an entry, return the marked entry | |
1131 | * which has an index >= xas.xa_index. If it has been walked, the entry | |
1132 | * currently being pointed at has been processed, and so we return the | |
1133 | * first marked entry with an index > xas.xa_index. | |
1134 | * | |
1135 | * If no marked entry is found and the array is smaller than @max, @xas is | |
1136 | * set to the bounds state and xas->xa_index is set to the smallest index | |
1137 | * not yet in the array. This allows @xas to be immediately passed to | |
1138 | * xas_store(). | |
1139 | * | |
1140 | * If no entry is found before @max is reached, @xas is set to the restart | |
1141 | * state. | |
1142 | * | |
1143 | * Return: The entry, if found, otherwise %NULL. | |
1144 | */ | |
1145 | void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark) | |
1146 | { | |
1147 | bool advance = true; | |
1148 | unsigned int offset; | |
1149 | void *entry; | |
1150 | ||
1151 | if (xas_error(xas)) | |
1152 | return NULL; | |
1153 | ||
1154 | if (!xas->xa_node) { | |
1155 | xas->xa_index = 1; | |
1156 | goto out; | |
1157 | } else if (xas_top(xas->xa_node)) { | |
1158 | advance = false; | |
1159 | entry = xa_head(xas->xa); | |
1160 | xas->xa_node = NULL; | |
1161 | if (xas->xa_index > max_index(entry)) | |
48483614 | 1162 | goto out; |
b803b428 MW |
1163 | if (!xa_is_node(entry)) { |
1164 | if (xa_marked(xas->xa, mark)) | |
1165 | return entry; | |
1166 | xas->xa_index = 1; | |
1167 | goto out; | |
1168 | } | |
1169 | xas->xa_node = xa_to_node(entry); | |
1170 | xas->xa_offset = xas->xa_index >> xas->xa_node->shift; | |
1171 | } | |
1172 | ||
1173 | while (xas->xa_index <= max) { | |
1174 | if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) { | |
1175 | xas->xa_offset = xas->xa_node->offset + 1; | |
1176 | xas->xa_node = xa_parent(xas->xa, xas->xa_node); | |
1177 | if (!xas->xa_node) | |
1178 | break; | |
1179 | advance = false; | |
1180 | continue; | |
1181 | } | |
1182 | ||
1183 | if (!advance) { | |
1184 | entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); | |
1185 | if (xa_is_sibling(entry)) { | |
1186 | xas->xa_offset = xa_to_sibling(entry); | |
1187 | xas_move_index(xas, xas->xa_offset); | |
1188 | } | |
1189 | } | |
1190 | ||
1191 | offset = xas_find_chunk(xas, advance, mark); | |
1192 | if (offset > xas->xa_offset) { | |
1193 | advance = false; | |
1194 | xas_move_index(xas, offset); | |
1195 | /* Mind the wrap */ | |
1196 | if ((xas->xa_index - 1) >= max) | |
1197 | goto max; | |
1198 | xas->xa_offset = offset; | |
1199 | if (offset == XA_CHUNK_SIZE) | |
1200 | continue; | |
1201 | } | |
1202 | ||
1203 | entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); | |
1204 | if (!xa_is_node(entry)) | |
1205 | return entry; | |
1206 | xas->xa_node = xa_to_node(entry); | |
1207 | xas_set_offset(xas); | |
1208 | } | |
1209 | ||
1210 | out: | |
48483614 | 1211 | if (xas->xa_index > max) |
b803b428 | 1212 | goto max; |
48483614 | 1213 | return set_bounds(xas); |
b803b428 MW |
1214 | max: |
1215 | xas->xa_node = XAS_RESTART; | |
1216 | return NULL; | |
1217 | } | |
1218 | EXPORT_SYMBOL_GPL(xas_find_marked); | |
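
/*
 * Illustrative sketch (not part of the original file): finding the first
 * entry carrying a given mark. example_first_marked is a hypothetical
 * name; ULONG_MAX bounds the search to the whole array.
 */
static inline void *example_first_marked(struct xarray *xa, xa_mark_t mark)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_find_marked(&xas, ULONG_MAX, mark);
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry;
}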
1219 | ||
4e99d4e9 MW |
1220 | /** |
1221 | * xas_find_conflict() - Find the next present entry in a range. | |
1222 | * @xas: XArray operation state. | |
1223 | * | |
1224 | * The @xas describes both a range and a position within that range. | |
1225 | * | |
1226 | * Context: Any context. Expects xa_lock to be held. | |
1227 | * Return: The next entry in the range covered by @xas or %NULL. | |
1228 | */ | |
1229 | void *xas_find_conflict(struct xa_state *xas) | |
1230 | { | |
1231 | void *curr; | |
1232 | ||
1233 | if (xas_error(xas)) | |
1234 | return NULL; | |
1235 | ||
1236 | if (!xas->xa_node) | |
1237 | return NULL; | |
1238 | ||
1239 | if (xas_top(xas->xa_node)) { | |
1240 | curr = xas_start(xas); | |
1241 | if (!curr) | |
1242 | return NULL; | |
1243 | while (xa_is_node(curr)) { | |
1244 | struct xa_node *node = xa_to_node(curr); | |
1245 | curr = xas_descend(xas, node); | |
1246 | } | |
1247 | if (curr) | |
1248 | return curr; | |
1249 | } | |
1250 | ||
1251 | if (xas->xa_node->shift > xas->xa_shift) | |
1252 | return NULL; | |
1253 | ||
1254 | for (;;) { | |
1255 | if (xas->xa_node->shift == xas->xa_shift) { | |
1256 | if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs) | |
1257 | break; | |
1258 | } else if (xas->xa_offset == XA_CHUNK_MASK) { | |
1259 | xas->xa_offset = xas->xa_node->offset; | |
1260 | xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node); | |
1261 | if (!xas->xa_node) | |
1262 | break; | |
1263 | continue; | |
1264 | } | |
1265 | curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset); | |
1266 | if (xa_is_sibling(curr)) | |
1267 | continue; | |
1268 | while (xa_is_node(curr)) { | |
1269 | xas->xa_node = xa_to_node(curr); | |
1270 | xas->xa_offset = 0; | |
1271 | curr = xa_entry_locked(xas->xa, xas->xa_node, 0); | |
1272 | } | |
1273 | if (curr) | |
1274 | return curr; | |
1275 | } | |
1276 | xas->xa_offset -= xas->xa_sibs; | |
1277 | return NULL; | |
1278 | } | |
1279 | EXPORT_SYMBOL_GPL(xas_find_conflict); | |
1280 | ||
ad3d6c72 MW |
1281 | /** |
1282 | * xa_load() - Load an entry from an XArray. | |
1283 | * @xa: XArray. | |
1284 | * @index: index into array. | |
1285 | * | |
1286 | * Context: Any context. Takes and releases the RCU lock. | |
1287 | * Return: The entry at @index in @xa. | |
1288 | */ | |
1289 | void *xa_load(struct xarray *xa, unsigned long index) | |
1290 | { | |
1291 | XA_STATE(xas, xa, index); | |
1292 | void *entry; | |
1293 | ||
1294 | rcu_read_lock(); | |
1295 | do { | |
1296 | entry = xas_load(&xas); | |
9f14d4f1 MW |
1297 | if (xa_is_zero(entry)) |
1298 | entry = NULL; | |
ad3d6c72 MW |
1299 | } while (xas_retry(&xas, entry)); |
1300 | rcu_read_unlock(); | |
1301 | ||
1302 | return entry; | |
1303 | } | |
1304 | EXPORT_SYMBOL(xa_load); | |
1305 | ||
58d6ea30 MW |
1306 | static void *xas_result(struct xa_state *xas, void *curr) |
1307 | { | |
9f14d4f1 MW |
1308 | if (xa_is_zero(curr)) |
1309 | return NULL; | |
58d6ea30 MW |
1310 | if (xas_error(xas)) |
1311 | curr = xas->xa_node; | |
1312 | return curr; | |
1313 | } | |
1314 | ||
1315 | /** | |
1316 | * __xa_erase() - Erase this entry from the XArray while locked. | |
1317 | * @xa: XArray. | |
1318 | * @index: Index into array. | |
1319 | * | |
809ab937 MW |
1320 | * After this function returns, loading from @index will return %NULL. |
1321 | * If the index is part of a multi-index entry, all indices will be erased | |
1322 | * and none of the entries will be part of a multi-index entry. | |
58d6ea30 | 1323 | * |
809ab937 MW |
1324 | * Context: Any context. Expects xa_lock to be held on entry. |
1325 | * Return: The entry which used to be at this index. | |
58d6ea30 MW |
1326 | */ |
1327 | void *__xa_erase(struct xarray *xa, unsigned long index) | |
1328 | { | |
1329 | XA_STATE(xas, xa, index); | |
1330 | return xas_result(&xas, xas_store(&xas, NULL)); | |
1331 | } | |
9ee5a3b7 | 1332 | EXPORT_SYMBOL(__xa_erase); |
58d6ea30 | 1333 | |
9c16bb88 MW |
1334 | /** |
1335 | * xa_erase() - Erase this entry from the XArray. | |
1336 | * @xa: XArray. | |
1337 | * @index: Index of entry. | |
1338 | * | |
809ab937 MW |
1339 | * After this function returns, loading from @index will return %NULL. |
1340 | * If the index is part of a multi-index entry, all indices will be erased | |
1341 | * and none of the entries will be part of a multi-index entry. | |
9c16bb88 MW |
1342 | * |
1343 | * Context: Any context. Takes and releases the xa_lock. | |
1344 | * Return: The entry which used to be at this index. | |
1345 | */ | |
1346 | void *xa_erase(struct xarray *xa, unsigned long index) | |
1347 | { | |
1348 | void *entry; | |
1349 | ||
1350 | xa_lock(xa); | |
1351 | entry = __xa_erase(xa, index); | |
1352 | xa_unlock(xa); | |
1353 | ||
1354 | return entry; | |
1355 | } | |
1356 | EXPORT_SYMBOL(xa_erase); | |
1357 | ||
58d6ea30 | 1358 | /** |
611f3186 | 1359 | * __xa_store() - Store this entry in the XArray. |
58d6ea30 MW |
1360 | * @xa: XArray. |
1361 | * @index: Index into array. | |
1362 | * @entry: New entry. | |
1363 | * @gfp: Memory allocation flags. | |
1364 | * | |
611f3186 MW |
1365 | * You must already be holding the xa_lock when calling this function. |
1366 | * It will drop the lock if needed to allocate memory, and then reacquire | |
1367 | * it afterwards. | |
58d6ea30 | 1368 | * |
611f3186 MW |
1369 | * Context: Any context. Expects xa_lock to be held on entry. May |
1370 | * release and reacquire xa_lock if @gfp flags permit. | |
1371 | * Return: The old entry at this index or xa_err() if an error happened. | |
58d6ea30 | 1372 | */ |
611f3186 | 1373 | void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) |
58d6ea30 MW |
1374 | { |
1375 | XA_STATE(xas, xa, index); | |
1376 | void *curr; | |
1377 | ||
76b4e529 | 1378 | if (WARN_ON_ONCE(xa_is_advanced(entry))) |
58d6ea30 | 1379 | return XA_ERROR(-EINVAL); |
d9c48043 MW |
1380 | if (xa_track_free(xa) && !entry) |
1381 | entry = XA_ZERO_ENTRY; | |
58d6ea30 MW |
1382 | |
1383 | do { | |
58d6ea30 | 1384 | curr = xas_store(&xas, entry); |
d9c48043 | 1385 | if (xa_track_free(xa)) |
371c752d | 1386 | xas_clear_mark(&xas, XA_FREE_MARK); |
611f3186 | 1387 | } while (__xas_nomem(&xas, gfp)); |
58d6ea30 MW |
1388 | |
1389 | return xas_result(&xas, curr); | |
1390 | } | |
611f3186 | 1391 | EXPORT_SYMBOL(__xa_store); |
58d6ea30 MW |
1392 | |
1393 | /** | |
611f3186 | 1394 | * xa_store() - Store this entry in the XArray. |
58d6ea30 MW |
1395 | * @xa: XArray. |
1396 | * @index: Index into array. | |
1397 | * @entry: New entry. | |
1398 | * @gfp: Memory allocation flags. | |
1399 | * | |
611f3186 MW |
1400 | * After this function returns, loads from this index will return @entry. |
1401 | * Storing into an existing multislot entry updates the entry of every index. | |
1402 | * The marks associated with @index are unaffected unless @entry is %NULL. | |
58d6ea30 | 1403 | * |
611f3186 MW |
1404 | * Context: Any context. Takes and releases the xa_lock. |
1405 | * May sleep if the @gfp flags permit. | |
1406 | * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry | |
1407 | * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation | |
1408 | * failed. | |
58d6ea30 | 1409 | */ |
611f3186 | 1410 | void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) |
58d6ea30 | 1411 | { |
58d6ea30 MW |
1412 | void *curr; |
1413 | ||
611f3186 MW |
1414 | xa_lock(xa); |
1415 | curr = __xa_store(xa, index, entry, gfp); | |
1416 | xa_unlock(xa); | |
58d6ea30 | 1417 | |
611f3186 | 1418 | return curr; |
58d6ea30 | 1419 | } |
611f3186 | 1420 | EXPORT_SYMBOL(xa_store); |

/**
 * __xa_cmpxchg() - Conditionally store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context. Expects xa_lock to be held on entry. May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return XA_ERROR(-EINVAL);

	do {
		curr = xas_load(&xas);
		if (curr == old) {
			xas_store(&xas, entry);
			if (xa_track_free(xa) && entry && !curr)
				xas_clear_mark(&xas, XA_FREE_MARK);
		}
	} while (__xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_cmpxchg);
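
/*
 * Illustrative sketch (not part of the original file): replacing an
 * entry only if it still holds the expected old value.
 * example_replace is a hypothetical name.
 */
static inline int example_replace(struct xarray *xa, unsigned long index,
		void *old, void *new)
{
	void *curr;

	xa_lock(xa);
	curr = __xa_cmpxchg(xa, index, old, new, GFP_KERNEL);
	xa_unlock(xa);

	if (xa_is_err(curr))
		return xa_err(curr);
	return curr == old ? 0 : -EBUSY;
}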
1459 | ||
b0606fed MW |
1460 | /** |
1461 | * __xa_insert() - Store this entry in the XArray if no entry is present. | |
1462 | * @xa: XArray. | |
1463 | * @index: Index into array. | |
1464 | * @entry: New entry. | |
1465 | * @gfp: Memory allocation flags. | |
1466 | * | |
1467 | * Inserting a NULL entry will store a reserved entry (like xa_reserve()) | |
1468 | * if no entry is present. Inserting will fail if a reserved entry is | |
1469 | * present, even though loading from this index will return NULL. | |
1470 | * | |
1471 | * Context: Any context. Expects xa_lock to be held on entry. May | |
1472 | * release and reacquire xa_lock if @gfp flags permit. | |
fd9dc93e | 1473 | * Return: 0 if the store succeeded. -EBUSY if another entry was present. |
b0606fed MW |
1474 | * -ENOMEM if memory could not be allocated. |
1475 | */ | |
1476 | int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) | |
1477 | { | |
1478 | XA_STATE(xas, xa, index); | |
1479 | void *curr; | |
1480 | ||
1481 | if (WARN_ON_ONCE(xa_is_advanced(entry))) | |
1482 | return -EINVAL; | |
1483 | if (!entry) | |
1484 | entry = XA_ZERO_ENTRY; | |
1485 | ||
1486 | do { | |
1487 | curr = xas_load(&xas); | |
1488 | if (!curr) { | |
1489 | xas_store(&xas, entry); | |
1490 | if (xa_track_free(xa)) | |
1491 | xas_clear_mark(&xas, XA_FREE_MARK); | |
1492 | } else { | |
fd9dc93e | 1493 | xas_set_err(&xas, -EBUSY); |
b0606fed MW |
1494 | } |
1495 | } while (__xas_nomem(&xas, gfp)); | |
1496 | ||
1497 | return xas_error(&xas); | |
1498 | } | |
1499 | EXPORT_SYMBOL(__xa_insert); | |
1500 | ||
0e9446c3 MW |
1501 | #ifdef CONFIG_XARRAY_MULTI |
1502 | static void xas_set_range(struct xa_state *xas, unsigned long first, | |
1503 | unsigned long last) | |
1504 | { | |
1505 | unsigned int shift = 0; | |
1506 | unsigned long sibs = last - first; | |
1507 | unsigned int offset = XA_CHUNK_MASK; | |
1508 | ||
1509 | xas_set(xas, first); | |
1510 | ||
1511 | while ((first & XA_CHUNK_MASK) == 0) { | |
1512 | if (sibs < XA_CHUNK_MASK) | |
1513 | break; | |
1514 | if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK)) | |
1515 | break; | |
1516 | shift += XA_CHUNK_SHIFT; | |
1517 | if (offset == XA_CHUNK_MASK) | |
1518 | offset = sibs & XA_CHUNK_MASK; | |
1519 | sibs >>= XA_CHUNK_SHIFT; | |
1520 | first >>= XA_CHUNK_SHIFT; | |
1521 | } | |
1522 | ||
1523 | offset = first & XA_CHUNK_MASK; | |
1524 | if (offset + sibs > XA_CHUNK_MASK) | |
1525 | sibs = XA_CHUNK_MASK - offset; | |
1526 | if ((((first + sibs + 1) << shift) - 1) > last) | |
1527 | sibs -= 1; | |
1528 | ||
1529 | xas->xa_shift = shift; | |
1530 | xas->xa_sibs = sibs; | |
1531 | } | |
1532 | ||
1533 | /** | |
1534 | * xa_store_range() - Store this entry at a range of indices in the XArray. | |
1535 | * @xa: XArray. | |
1536 | * @first: First index to affect. | |
1537 | * @last: Last index to affect. | |
1538 | * @entry: New entry. | |
1539 | * @gfp: Memory allocation flags. | |
1540 | * | |
1541 | * After this function returns, loads from any index between @first and @last, | |
1542 | * inclusive will return @entry. | |
1543 | * Storing into an existing multislot entry updates the entry of every index. | |
1544 | * The marks associated with @index are unaffected unless @entry is %NULL. | |
1545 | * | |
1546 | * Context: Process context. Takes and releases the xa_lock. May sleep | |
1547 | * if the @gfp flags permit. | |
1548 | * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in | |
1549 | * an XArray, or xa_err(-ENOMEM) if memory allocation failed. | |
1550 | */ | |
1551 | void *xa_store_range(struct xarray *xa, unsigned long first, | |
1552 | unsigned long last, void *entry, gfp_t gfp) | |
1553 | { | |
1554 | XA_STATE(xas, xa, 0); | |
1555 | ||
1556 | if (WARN_ON_ONCE(xa_is_internal(entry))) | |
1557 | return XA_ERROR(-EINVAL); | |
1558 | if (last < first) | |
1559 | return XA_ERROR(-EINVAL); | |
1560 | ||
1561 | do { | |
1562 | xas_lock(&xas); | |
1563 | if (entry) { | |
44a4a66b MW |
1564 | unsigned int order = BITS_PER_LONG; |
1565 | if (last + 1) | |
1566 | order = __ffs(last + 1); | |
0e9446c3 | 1567 | xas_set_order(&xas, last, order); |
76b4e529 | 1568 | xas_create(&xas, true); |
0e9446c3 MW |
1569 | if (xas_error(&xas)) |
1570 | goto unlock; | |
1571 | } | |
1572 | do { | |
1573 | xas_set_range(&xas, first, last); | |
1574 | xas_store(&xas, entry); | |
1575 | if (xas_error(&xas)) | |
1576 | goto unlock; | |
1577 | first += xas_size(&xas); | |
1578 | } while (first <= last); | |
1579 | unlock: | |
1580 | xas_unlock(&xas); | |
1581 | } while (xas_nomem(&xas, gfp)); | |
1582 | ||
1583 | return xas_result(&xas, NULL); | |
1584 | } | |
1585 | EXPORT_SYMBOL(xa_store_range); | |
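
/*
 * Illustrative sketch (not part of the original file): storing one entry
 * over the index range 64-127 so that a load from any of those indices
 * returns @item. example_store_range is a hypothetical name.
 */
static inline void *example_store_range(struct xarray *xa, void *item)
{
	return xa_store_range(xa, 64, 127, item, GFP_KERNEL);
}
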
1586 | #endif /* CONFIG_XARRAY_MULTI */ | |
1587 | ||
371c752d MW |
1588 | /** |
1589 | * __xa_alloc() - Find somewhere to store this entry in the XArray. | |
1590 | * @xa: XArray. | |
1591 | * @id: Pointer to ID. | |
a3e4d3f9 | 1592 | * @limit: Range for allocated ID. |
371c752d MW |
1593 | * @entry: New entry. |
1594 | * @gfp: Memory allocation flags. | |
1595 | * | |
a3e4d3f9 MW |
1596 | * Finds an empty entry in @xa between @limit.min and @limit.max, |
1597 | * stores the index into the @id pointer, then stores the entry at | |
1598 | * that index. A concurrent lookup will not see an uninitialised @id. | |
371c752d MW |
1599 | * |
1600 | * Context: Any context. Expects xa_lock to be held on entry. May | |
1601 | * release and reacquire xa_lock if @gfp flags permit. | |
a3e4d3f9 MW |
1602 | * Return: 0 on success, -ENOMEM if memory could not be allocated or |
1603 | * -EBUSY if there are no free entries in @limit. | |
371c752d | 1604 | */ |
a3e4d3f9 MW |
1605 | int __xa_alloc(struct xarray *xa, u32 *id, void *entry, |
1606 | struct xa_limit limit, gfp_t gfp) | |
371c752d MW |
1607 | { |
1608 | XA_STATE(xas, xa, 0); | |
371c752d | 1609 | |
76b4e529 | 1610 | if (WARN_ON_ONCE(xa_is_advanced(entry))) |
371c752d MW |
1611 | return -EINVAL; |
1612 | if (WARN_ON_ONCE(!xa_track_free(xa))) | |
1613 | return -EINVAL; | |
1614 | ||
1615 | if (!entry) | |
1616 | entry = XA_ZERO_ENTRY; | |
1617 | ||
1618 | do { | |
a3e4d3f9 MW |
1619 | xas.xa_index = limit.min; |
1620 | xas_find_marked(&xas, limit.max, XA_FREE_MARK); | |
371c752d | 1621 | if (xas.xa_node == XAS_RESTART) |
a3e4d3f9 MW |
1622 | xas_set_err(&xas, -EBUSY); |
1623 | else | |
1624 | *id = xas.xa_index; | |
371c752d MW |
1625 | xas_store(&xas, entry); |
1626 | xas_clear_mark(&xas, XA_FREE_MARK); | |
1627 | } while (__xas_nomem(&xas, gfp)); | |
1628 | ||
a3e4d3f9 | 1629 | return xas_error(&xas); |
371c752d MW |
1630 | } |
1631 | EXPORT_SYMBOL(__xa_alloc); | |
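/*
 * Example (editor's sketch, not part of the original file): allocating an
 * ID while already holding the xa_lock, as __xa_alloc() expects.
 * 'session_xa' and 'struct session' (with a u32 'id' member) are
 * hypothetical; the array must be declared with DEFINE_XARRAY_ALLOC() so
 * that free-entry tracking is enabled.
 *
 *	DEFINE_XARRAY_ALLOC(session_xa);
 *
 *	static int session_insert(struct session *s)
 *	{
 *		int err;
 *
 *		xa_lock(&session_xa);
 *		err = __xa_alloc(&session_xa, &s->id, s, xa_limit_32b,
 *				 GFP_KERNEL);
 *		xa_unlock(&session_xa);
 *		return err;
 *	}
 *
 * Writing straight into s->id is safe because a concurrent lookup never
 * sees an uninitialised ID.  Callers which do not already hold the lock
 * can use the xa_alloc() wrapper instead.
 */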
1632 | ||
2fa044e5 MW |
1633 | /** |
1634 | * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray. | |
1635 | * @xa: XArray. | |
1636 | * @id: Pointer to ID. | |
1637 | * @entry: New entry. | |
1638 | * @limit: Range of allocated ID. | |
1639 | * @next: Pointer to next ID to allocate. | |
1640 | * @gfp: Memory allocation flags. | |
1641 | * | |
1642 | * Finds an empty entry in @xa between @limit.min and @limit.max, | |
1643 | * stores the index into the @id pointer, then stores the entry at | |
1644 | * that index. A concurrent lookup will not see an uninitialised @id. | |
1645 | * The search for an empty entry will start at @next and will wrap | |
1646 | * around if necessary. | |
1647 | * | |
1648 | * Context: Any context. Expects xa_lock to be held on entry. May | |
1649 | * release and reacquire xa_lock if @gfp flags permit. | |
1650 | * Return: 0 if the allocation succeeded without wrapping, 1 if the | |
1651 | * allocation succeeded after wrapping, -ENOMEM if memory could not be | |
1652 | * allocated, or -EBUSY if there are no free entries in @limit. | |
1653 | */ | |
1654 | int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, | |
1655 | struct xa_limit limit, u32 *next, gfp_t gfp) | |
1656 | { | |
1657 | u32 min = limit.min; | |
1658 | int ret; | |
1659 | ||
1660 | limit.min = max(min, *next); | |
1661 | ret = __xa_alloc(xa, id, entry, limit, gfp); | |
1662 | if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) { | |
1663 | xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED; | |
1664 | ret = 1; | |
1665 | } | |
1666 | ||
1667 | if (ret < 0 && limit.min > min) { | |
1668 | limit.min = min; | |
1669 | ret = __xa_alloc(xa, id, entry, limit, gfp); | |
1670 | if (ret == 0) | |
1671 | ret = 1; | |
1672 | } | |
1673 | ||
1674 | if (ret >= 0) { | |
1675 | *next = *id + 1; | |
1676 | if (*next == 0) | |
1677 | xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED; | |
1678 | } | |
1679 | return ret; | |
1680 | } | |
1681 | EXPORT_SYMBOL(__xa_alloc_cyclic); | |
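/*
 * Example (editor's sketch, not part of the original file): cyclic
 * allocation with a caller-provided cursor, so that recently freed IDs
 * are not immediately reused.  'ctx_xa', 'next_ctx_id' and 'struct ctx'
 * are hypothetical names.
 *
 *	DEFINE_XARRAY_ALLOC(ctx_xa);
 *	static u32 next_ctx_id;
 *
 *	static int ctx_insert(struct ctx *ctx, u32 *id)
 *	{
 *		int err;
 *
 *		xa_lock(&ctx_xa);
 *		err = __xa_alloc_cyclic(&ctx_xa, id, ctx, xa_limit_32b,
 *					&next_ctx_id, GFP_KERNEL);
 *		xa_unlock(&ctx_xa);
 *		return err < 0 ? err : 0;
 *	}
 *
 * The return value 1 (success after wrapping) is folded into 0 here;
 * callers which care about wrap-around can propagate it instead.
 */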
1682 | ||
9b89a035 MW |
1683 | /** |
1684 | * __xa_set_mark() - Set this mark on this entry while locked. | |
1685 | * @xa: XArray. | |
1686 | * @index: Index of entry. | |
1687 | * @mark: Mark number. | |
1688 | * | |
804dfaf0 | 1689 | * Attempting to set a mark on a %NULL entry does not succeed. |
9b89a035 MW |
1690 | * |
1691 | * Context: Any context. Expects xa_lock to be held on entry. | |
1692 | */ | |
1693 | void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) | |
1694 | { | |
1695 | XA_STATE(xas, xa, index); | |
1696 | void *entry = xas_load(&xas); | |
1697 | ||
1698 | if (entry) | |
1699 | xas_set_mark(&xas, mark); | |
1700 | } | |
9ee5a3b7 | 1701 | EXPORT_SYMBOL(__xa_set_mark); |
9b89a035 MW |
1702 | |
1703 | /** | |
1704 | * __xa_clear_mark() - Clear this mark on this entry while locked. | |
1705 | * @xa: XArray. | |
1706 | * @index: Index of entry. | |
1707 | * @mark: Mark number. | |
1708 | * | |
1709 | * Context: Any context. Expects xa_lock to be held on entry. | |
1710 | */ | |
1711 | void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) | |
1712 | { | |
1713 | XA_STATE(xas, xa, index); | |
1714 | void *entry = xas_load(&xas); | |
1715 | ||
1716 | if (entry) | |
1717 | xas_clear_mark(&xas, mark); | |
1718 | } | |
9ee5a3b7 | 1719 | EXPORT_SYMBOL(__xa_clear_mark); |
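/*
 * Example (editor's sketch, not part of the original file): setting a
 * mark in the same critical section as the store, so other threads never
 * observe the entry without its mark.  'page_xa' is hypothetical and the
 * use of XA_MARK_0 as a "dirty" flag is illustrative; error checking is
 * elided for brevity.
 *
 *	xa_lock(&page_xa);
 *	__xa_store(&page_xa, index, page, GFP_ATOMIC);
 *	__xa_set_mark(&page_xa, index, XA_MARK_0);
 *	xa_unlock(&page_xa);
 */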
9b89a035 MW |
1720 | |
1721 | /** | |
1722 | * xa_get_mark() - Inquire whether this mark is set on this entry. | |
1723 | * @xa: XArray. | |
1724 | * @index: Index of entry. | |
1725 | * @mark: Mark number. | |
1726 | * | |
1727 | * This function uses the RCU read lock, so the result may be out of date | |
1728 | * by the time it returns. If you need the result to be stable, use a lock. | |
1729 | * | |
1730 | * Context: Any context. Takes and releases the RCU lock. | |
1731 | * Return: True if the entry at @index has this mark set, false if it doesn't. | |
1732 | */ | |
1733 | bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) | |
1734 | { | |
1735 | XA_STATE(xas, xa, index); | |
1736 | void *entry; | |
1737 | ||
1738 | rcu_read_lock(); | |
1739 | entry = xas_start(&xas); | |
1740 | while (xas_get_mark(&xas, mark)) { | |
1741 | if (!xa_is_node(entry)) | |
1742 | goto found; | |
1743 | entry = xas_descend(&xas, xa_to_node(entry)); | |
1744 | } | |
1745 | rcu_read_unlock(); | |
1746 | return false; | |
1747 | found: | |
1748 | rcu_read_unlock(); | |
1749 | return true; | |
1750 | } | |
1751 | EXPORT_SYMBOL(xa_get_mark); | |
1752 | ||
1753 | /** | |
1754 | * xa_set_mark() - Set this mark on this entry. | |
1755 | * @xa: XArray. | |
1756 | * @index: Index of entry. | |
1757 | * @mark: Mark number. | |
1758 | * | |
804dfaf0 | 1759 | * Attempting to set a mark on a %NULL entry does not succeed. |
9b89a035 MW |
1760 | * |
1761 | * Context: Process context. Takes and releases the xa_lock. | |
1762 | */ | |
1763 | void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) | |
1764 | { | |
1765 | xa_lock(xa); | |
1766 | __xa_set_mark(xa, index, mark); | |
1767 | xa_unlock(xa); | |
1768 | } | |
1769 | EXPORT_SYMBOL(xa_set_mark); | |
1770 | ||
1771 | /** | |
1772 | * xa_clear_mark() - Clear this mark on this entry. | |
1773 | * @xa: XArray. | |
1774 | * @index: Index of entry. | |
1775 | * @mark: Mark number. | |
1776 | * | |
1777 | * Clearing a mark always succeeds. | |
1778 | * | |
1779 | * Context: Process context. Takes and releases the xa_lock. | |
1780 | */ | |
1781 | void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) | |
1782 | { | |
1783 | xa_lock(xa); | |
1784 | __xa_clear_mark(xa, index, mark); | |
1785 | xa_unlock(xa); | |
1786 | } | |
1787 | EXPORT_SYMBOL(xa_clear_mark); | |
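/*
 * Example (editor's sketch, not part of the original file): using a mark
 * as a per-entry flag from process context.  'page_xa' and writeback()
 * are hypothetical; XA_MARK_0 as a "dirty" flag is illustrative.
 *
 *	xa_set_mark(&page_xa, index, XA_MARK_0);
 *
 *	if (xa_get_mark(&page_xa, index, XA_MARK_0)) {
 *		writeback(xa_load(&page_xa, index));
 *		xa_clear_mark(&page_xa, index, XA_MARK_0);
 *	}
 *
 * The xa_get_mark() result is only a snapshot; hold the xa_lock if the
 * test and the clear must be atomic with respect to other threads.
 */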
1788 | ||
b803b428 MW |
1789 | /** |
1790 | * xa_find() - Search the XArray for an entry. | |
1791 | * @xa: XArray. | |
1792 | * @indexp: Pointer to an index. | |
1793 | * @max: Maximum index to search to. | |
1794 | * @filter: Selection criterion. | |
1795 | * | |
1796 | * Finds the entry in @xa which matches the @filter, and has the lowest | |
1797 | * index that is at least @indexp and no more than @max. | |
1798 | * If an entry is found, @indexp is updated to be the index of the entry. | |
1799 | * This function is protected by the RCU read lock, so it may not find | |
1800 | * entries which are being simultaneously added. It will not return an | |
1801 | * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find(). | |
1802 | * | |
1803 | * Context: Any context. Takes and releases the RCU lock. | |
1804 | * Return: The entry, if found, otherwise %NULL. | |
1805 | */ | |
1806 | void *xa_find(struct xarray *xa, unsigned long *indexp, | |
1807 | unsigned long max, xa_mark_t filter) | |
1808 | { | |
1809 | XA_STATE(xas, xa, *indexp); | |
1810 | void *entry; | |
1811 | ||
1812 | rcu_read_lock(); | |
1813 | do { | |
1814 | if ((__force unsigned int)filter < XA_MAX_MARKS) | |
1815 | entry = xas_find_marked(&xas, max, filter); | |
1816 | else | |
1817 | entry = xas_find(&xas, max); | |
1818 | } while (xas_retry(&xas, entry)); | |
1819 | rcu_read_unlock(); | |
1820 | ||
1821 | if (entry) | |
1822 | *indexp = xas.xa_index; | |
1823 | return entry; | |
1824 | } | |
1825 | EXPORT_SYMBOL(xa_find); | |
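/*
 * Example (editor's sketch, not part of the original file): finding the
 * first marked entry at or above a starting index.  'session_xa' is a
 * hypothetical name.
 *
 *	unsigned long index = 0;
 *	struct session *s;
 *
 *	s = xa_find(&session_xa, &index, ULONG_MAX, XA_MARK_0);
 *	if (s)
 *		pr_debug("first marked session %px at %lu\n", s, index);
 *
 * Pass %XA_PRESENT as @filter to match any non-NULL entry.
 */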
1826 | ||
1827 | /** | |
1828 | * xa_find_after() - Search the XArray for a present entry. | |
1829 | * @xa: XArray. | |
1830 | * @indexp: Pointer to an index. | |
1831 | * @max: Maximum index to search to. | |
1832 | * @filter: Selection criterion. | |
1833 | * | |
1834 | * Finds the entry in @xa which matches the @filter and has the lowest | |
1835 | * index that is above @indexp and no more than @max. | |
1836 | * If an entry is found, @indexp is updated to be the index of the entry. | |
1837 | * This function is protected by the RCU read lock, so it may miss entries | |
1838 | * which are being simultaneously added. It will not return an | |
1839 | * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find(). | |
1840 | * | |
1841 | * Context: Any context. Takes and releases the RCU lock. | |
1842 | * Return: The entry, if found, otherwise %NULL. | |
1843 | */ | |
1844 | void *xa_find_after(struct xarray *xa, unsigned long *indexp, | |
1845 | unsigned long max, xa_mark_t filter) | |
1846 | { | |
1847 | XA_STATE(xas, xa, *indexp + 1); | |
1848 | void *entry; | |
1849 | ||
1850 | rcu_read_lock(); | |
1851 | for (;;) { | |
1852 | if ((__force unsigned int)filter < XA_MAX_MARKS) | |
1853 | entry = xas_find_marked(&xas, max, filter); | |
1854 | else | |
1855 | entry = xas_find(&xas, max); | |
8229706e MW |
1856 | if (xas.xa_node == XAS_BOUNDS) |
1857 | break; | |
b803b428 MW |
1858 | if (xas.xa_shift) { |
1859 | if (xas.xa_index & ((1UL << xas.xa_shift) - 1)) | |
1860 | continue; | |
1861 | } else { | |
1862 | if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK)) | |
1863 | continue; | |
1864 | } | |
1865 | if (!xas_retry(&xas, entry)) | |
1866 | break; | |
1867 | } | |
1868 | rcu_read_unlock(); | |
1869 | ||
1870 | if (entry) | |
1871 | *indexp = xas.xa_index; | |
1872 | return entry; | |
1873 | } | |
1874 | EXPORT_SYMBOL(xa_find_after); | |
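/*
 * Example (editor's sketch, not part of the original file): xa_find()
 * followed by xa_find_after() is the pattern behind the xa_for_each()
 * iterators.  'my_xa' and process() are hypothetical names.
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	for (entry = xa_find(&my_xa, &index, ULONG_MAX, XA_PRESENT);
 *	     entry;
 *	     entry = xa_find_after(&my_xa, &index, ULONG_MAX, XA_PRESENT))
 *		process(index, entry);
 */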
1875 | ||
80a0a1a9 MW |
1876 | static unsigned int xas_extract_present(struct xa_state *xas, void **dst, |
1877 | unsigned long max, unsigned int n) | |
1878 | { | |
1879 | void *entry; | |
1880 | unsigned int i = 0; | |
1881 | ||
1882 | rcu_read_lock(); | |
1883 | xas_for_each(xas, entry, max) { | |
1884 | if (xas_retry(xas, entry)) | |
1885 | continue; | |
1886 | dst[i++] = entry; | |
1887 | if (i == n) | |
1888 | break; | |
1889 | } | |
1890 | rcu_read_unlock(); | |
1891 | ||
1892 | return i; | |
1893 | } | |
1894 | ||
1895 | static unsigned int xas_extract_marked(struct xa_state *xas, void **dst, | |
1896 | unsigned long max, unsigned int n, xa_mark_t mark) | |
1897 | { | |
1898 | void *entry; | |
1899 | unsigned int i = 0; | |
1900 | ||
1901 | rcu_read_lock(); | |
1902 | xas_for_each_marked(xas, entry, max, mark) { | |
1903 | if (xas_retry(xas, entry)) | |
1904 | continue; | |
1905 | dst[i++] = entry; | |
1906 | if (i == n) | |
1907 | break; | |
1908 | } | |
1909 | rcu_read_unlock(); | |
1910 | ||
1911 | return i; | |
1912 | } | |
1913 | ||
1914 | /** | |
1915 | * xa_extract() - Copy selected entries from the XArray into a normal array. | |
1916 | * @xa: The source XArray to copy from. | |
1917 | * @dst: The buffer to copy entries into. | |
1918 | * @start: The first index in the XArray eligible to be selected. | |
1919 | * @max: The last index in the XArray eligible to be selected. | |
1920 | * @n: The maximum number of entries to copy. | |
1921 | * @filter: Selection criterion. | |
1922 | * | |
1923 | * Copies up to @n entries that match @filter from the XArray. The | |
1924 | * copied entries will have indices between @start and @max, inclusive. | |
1925 | * | |
1926 | * The @filter may be an XArray mark value, in which case entries which are | |
1927 | * marked with that mark will be copied. It may also be %XA_PRESENT, in | |
804dfaf0 | 1928 | * which case all entries which are not %NULL will be copied. |
80a0a1a9 MW |
1929 | * |
1930 | * The entries returned may not represent a snapshot of the XArray at a | |
1931 | * moment in time. For example, if another thread stores to index 5, then | |
1932 | * index 10, calling xa_extract() may return the old contents of index 5 | |
1933 | * and the new contents of index 10. Indices not modified while this | |
1934 | * function is running will not be skipped. | |
1935 | * | |
1936 | * If you need stronger guarantees, holding the xa_lock across calls to this | |
1937 | * function will prevent concurrent modification. | |
1938 | * | |
1939 | * Context: Any context. Takes and releases the RCU lock. | |
1940 | * Return: The number of entries copied. | |
1941 | */ | |
1942 | unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start, | |
1943 | unsigned long max, unsigned int n, xa_mark_t filter) | |
1944 | { | |
1945 | XA_STATE(xas, xa, start); | |
1946 | ||
1947 | if (!n) | |
1948 | return 0; | |
1949 | ||
1950 | if ((__force unsigned int)filter < XA_MAX_MARKS) | |
1951 | return xas_extract_marked(&xas, dst, max, n, filter); | |
1952 | return xas_extract_present(&xas, dst, max, n); | |
1953 | } | |
1954 | EXPORT_SYMBOL(xa_extract); | |
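/*
 * Example (editor's sketch, not part of the original file): batching up
 * to sixteen present entries into an ordinary array.  'my_xa' and
 * consume() are hypothetical names.
 *
 *	void *batch[16];
 *	unsigned int i, count;
 *
 *	count = xa_extract(&my_xa, batch, 0, ULONG_MAX, 16, XA_PRESENT);
 *	for (i = 0; i < count; i++)
 *		consume(batch[i]);
 *
 * Only the entries are copied, not their indices; use the xa_for_each()
 * iterators if the indices are needed.
 */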
1955 | ||
687149fc MW |
1956 | /** |
1957 | * xa_destroy() - Free all internal data structures. | |
1958 | * @xa: XArray. | |
1959 | * | |
1960 | * After calling this function, the XArray is empty and has freed all memory | |
1961 | * allocated for its internal data structures. You are responsible for | |
1962 | * freeing the objects referenced by the XArray. | |
1963 | * | |
1964 | * Context: Any context. Takes and releases the xa_lock, interrupt-safe. | |
1965 | */ | |
1966 | void xa_destroy(struct xarray *xa) | |
1967 | { | |
1968 | XA_STATE(xas, xa, 0); | |
1969 | unsigned long flags; | |
1970 | void *entry; | |
1971 | ||
1972 | xas.xa_node = NULL; | |
1973 | xas_lock_irqsave(&xas, flags); | |
1974 | entry = xa_head_locked(xa); | |
1975 | RCU_INIT_POINTER(xa->xa_head, NULL); | |
1976 | xas_init_marks(&xas); | |
3ccaf57a MW |
1977 | if (xa_zero_busy(xa)) |
1978 | xa_mark_clear(xa, XA_FREE_MARK); | |
687149fc MW |
1979 | /* lockdep checks we're still holding the lock in xas_free_nodes() */ |
1980 | if (xa_is_node(entry)) | |
1981 | xas_free_nodes(&xas, xa_to_node(entry)); | |
1982 | xas_unlock_irqrestore(&xas, flags); | |
1983 | } | |
1984 | EXPORT_SYMBOL(xa_destroy); | |
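/*
 * Example (editor's sketch, not part of the original file): tearing down
 * an XArray of allocated objects.  xa_destroy() frees only the internal
 * nodes, so the referenced objects must be freed first.  'session_xa' is
 * a hypothetical name.
 *
 *	unsigned long index;
 *	struct session *s;
 *
 *	xa_for_each(&session_xa, index, s)
 *		kfree(s);
 *	xa_destroy(&session_xa);
 */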
1985 | ||
ad3d6c72 MW |
1986 | #ifdef XA_DEBUG |
1987 | void xa_dump_node(const struct xa_node *node) | |
1988 | { | |
1989 | unsigned int i, j; | |
1990 | ||
1991 | if (!node) | |
1992 | return; | |
1993 | if ((unsigned long)node & 3) { | |
1994 | pr_cont("node %px\n", node); | |
1995 | return; | |
1996 | } | |
1997 | ||
1998 | pr_cont("node %px %s %d parent %px shift %d count %d values %d " | |
1999 | "array %px list %px %px marks", | |
2000 | node, node->parent ? "offset" : "max", node->offset, | |
2001 | node->parent, node->shift, node->count, node->nr_values, | |
2002 | node->array, node->private_list.prev, node->private_list.next); | |
2003 | for (i = 0; i < XA_MAX_MARKS; i++) | |
2004 | for (j = 0; j < XA_MARK_LONGS; j++) | |
2005 | pr_cont(" %lx", node->marks[i][j]); | |
2006 | pr_cont("\n"); | |
2007 | } | |
2008 | ||
2009 | void xa_dump_index(unsigned long index, unsigned int shift) | |
2010 | { | |
2011 | if (!shift) | |
2012 | pr_info("%lu: ", index); | |
2013 | else if (shift >= BITS_PER_LONG) | |
2014 | pr_info("0-%lu: ", ~0UL); | |
2015 | else | |
2016 | pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1)); | |
2017 | } | |
2018 | ||
2019 | void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift) | |
2020 | { | |
2021 | if (!entry) | |
2022 | return; | |
2023 | ||
2024 | xa_dump_index(index, shift); | |
2025 | ||
2026 | if (xa_is_node(entry)) { | |
2027 | if (shift == 0) { | |
2028 | pr_cont("%px\n", entry); | |
2029 | } else { | |
2030 | unsigned long i; | |
2031 | struct xa_node *node = xa_to_node(entry); | |
2032 | xa_dump_node(node); | |
2033 | for (i = 0; i < XA_CHUNK_SIZE; i++) | |
2034 | xa_dump_entry(node->slots[i], | |
2035 | index + (i << node->shift), node->shift); | |
2036 | } | |
2037 | } else if (xa_is_value(entry)) | |
2038 | pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry), | |
2039 | xa_to_value(entry), entry); | |
2040 | else if (!xa_is_internal(entry)) | |
2041 | pr_cont("%px\n", entry); | |
2042 | else if (xa_is_retry(entry)) | |
2043 | pr_cont("retry (%ld)\n", xa_to_internal(entry)); | |
2044 | else if (xa_is_sibling(entry)) | |
2045 | pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry)); | |
9f14d4f1 MW |
2046 | else if (xa_is_zero(entry)) |
2047 | pr_cont("zero (%ld)\n", xa_to_internal(entry)); | |
ad3d6c72 MW |
2048 | else |
2049 | pr_cont("UNKNOWN ENTRY (%px)\n", entry); | |
2050 | } | |
2051 | ||
2052 | void xa_dump(const struct xarray *xa) | |
2053 | { | |
2054 | void *entry = xa->xa_head; | |
2055 | unsigned int shift = 0; | |
2056 | ||
2057 | pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry, | |
9b89a035 MW |
2058 | xa->xa_flags, xa_marked(xa, XA_MARK_0), |
2059 | xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2)); | |
ad3d6c72 MW |
2060 | if (xa_is_node(entry)) |
2061 | shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT; | |
2062 | xa_dump_entry(entry, 0, shift); | |
2063 | } | |
2064 | #endif |