xarray: Add MAINTAINERS entry
[linux-2.6-block.git] / lib / test_xarray.c
CommitLineData
ad3d6c72
MW
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * test_xarray.c: Test the XArray API
4 * Copyright (c) 2017-2018 Microsoft Corporation
5 * Author: Matthew Wilcox <willy@infradead.org>
6 */
7
8#include <linux/xarray.h>
9#include <linux/module.h>
10
/* Running totals maintained by the XA_BUG_ON() macro below. */
static unsigned int tests_run;
static unsigned int tests_passed;

#ifndef XA_DEBUG
# ifdef __KERNEL__
/* Stub: non-XA_DEBUG kernel builds provide no xa_dump() implementation. */
void xa_dump(const struct xarray *xa) { }
# endif
#undef XA_BUG_ON
/*
 * Test-suite assertion: every evaluation counts as a test run; a failure
 * prints the location, dumps the array and the stack, but does not stop
 * the suite, so the remaining checks still execute.
 */
#define XA_BUG_ON(xa, x) do {					\
	tests_run++;						\
	if (x) {						\
		printk("BUG at %s:%d\n", __func__, __LINE__);	\
		xa_dump(xa);					\
		dump_stack();					\
	} else {						\
		tests_passed++;					\
	}							\
} while (0)
#endif
30
/*
 * Store the canonical test entry for @index: a value entry encoding the
 * index itself (clamped to LONG_MAX so it fits in a value entry).
 */
static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_store(xa, index, xa_mk_value(index & LONG_MAX), gfp);
}
35
/*
 * Erase @index, asserting that the erased entry was the canonical one
 * created by xa_store_index() and that the slot is empty afterwards.
 */
static void xa_erase_index(struct xarray *xa, unsigned long index)
{
	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_value(index & LONG_MAX));
	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
}
41
42/*
43 * If anyone needs this, please move it to xarray.c. We have no current
44 * users outside the test suite because all current multislot users want
45 * to use the advanced API.
46 */
47static void *xa_store_order(struct xarray *xa, unsigned long index,
48 unsigned order, void *entry, gfp_t gfp)
49{
50 XA_STATE_ORDER(xas, xa, index, order);
51 void *curr;
52
53 do {
54 xas_lock(&xas);
55 curr = xas_store(&xas, entry);
56 xas_unlock(&xas);
57 } while (xas_nomem(&xas, gfp));
58
59 return curr;
60}
61
/* Check that xa_err() correctly decodes the API's return values. */
static noinline void check_xa_err(struct xarray *xa)
{
	/* Successful stores and erases yield no error. */
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
#ifndef __KERNEL__
	/* The kernel does not fail GFP_NOWAIT allocations */
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
#endif
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
	/* Overwriting an existing entry is not an error either. */
	XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
// kills the test-suite :-(
// XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
}
77
b803b428
MW
/*
 * Check retry-entry handling: xas_reload() observes a retry entry after a
 * concurrent-style erase, xas_retry() restarts the walk only for genuine
 * retry entries, and iteration copes with deliberately planted
 * XA_RETRY_ENTRYs.
 */
static noinline void check_xas_retry(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, 1, GFP_KERNEL);

	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
	/* Erasing under the iterator leaves a retry entry behind. */
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
	/* xas_retry() must not restart for NULL or plain value entries. */
	XA_BUG_ON(xa, xas_retry(&xas, NULL));
	XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
	xas_reset(&xas);
	/* A reset walk restarts and finds the surviving entry again. */
	XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_node != NULL);

	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
	xas.xa_node = XAS_RESTART;
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	rcu_read_unlock();

	/* Make sure we can iterate through retry entries */
	xas_lock(&xas);
	xas_set(&xas, 0);
	xas_store(&xas, XA_RETRY_ENTRY);
	xas_set(&xas, 1);
	xas_store(&xas, XA_RETRY_ENTRY);

	xas_set(&xas, 0);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_store(&xas, xa_mk_value(xas.xa_index));
	}
	xas_unlock(&xas);

	xa_erase_index(xa, 0);
	xa_erase_index(xa, 1);
}
119
ad3d6c72
MW
120static noinline void check_xa_load(struct xarray *xa)
121{
122 unsigned long i, j;
123
124 for (i = 0; i < 1024; i++) {
125 for (j = 0; j < 1024; j++) {
126 void *entry = xa_load(xa, j);
127 if (j < i)
128 XA_BUG_ON(xa, xa_to_value(entry) != j);
129 else
130 XA_BUG_ON(xa, entry);
131 }
132 XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
133 }
134
135 for (i = 0; i < 1024; i++) {
136 for (j = 0; j < 1024; j++) {
137 void *entry = xa_load(xa, j);
138 if (j >= i)
139 XA_BUG_ON(xa, xa_to_value(entry) != j);
140 else
141 XA_BUG_ON(xa, entry);
142 }
143 xa_erase_index(xa, i);
144 }
145 XA_BUG_ON(xa, !xa_empty(xa));
146}
147
9b89a035
MW
/*
 * Check mark semantics at one (4-aligned) index: NULL slots cannot carry
 * marks, stored entries can, erasing clears marks, and a multi-index
 * entry stored over marked entries inherits the union of their marks.
 */
static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;

	/* NULL elements have no marks set */
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/* Storing a pointer will not make a mark appear */
	XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	/* Setting one mark will not set another mark */
	XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));

	/* Storing NULL clears marks, and they can't be set again */
	xa_erase_index(xa, index);
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/*
	 * Storing a multi-index entry over entries with marks gives the
	 * entire entry the union of the marks
	 */
	BUG_ON((index % 4) != 0);
	for (order = 2; order < max_order; order++) {
		unsigned long base = round_down(index, 1UL << order);
		unsigned long next = base + (1UL << order);
		unsigned long i;

		/* Two marked entries inside the range, one just outside. */
		XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
		xa_set_mark(xa, index + 1, XA_MARK_0);
		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
		xa_set_mark(xa, index + 2, XA_MARK_1);
		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
		xa_store_order(xa, index, order, xa_mk_value(index),
				GFP_KERNEL);
		for (i = base; i < next; i++) {
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_1));
			XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2));
		}
		/* The entry outside the multi-index range is unaffected. */
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
		xa_erase_index(xa, index);
		xa_erase_index(xa, next);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}
206
207static noinline void check_xa_mark(struct xarray *xa)
208{
209 unsigned long index;
210
211 for (index = 0; index < 16384; index += 4)
212 check_xa_mark_1(xa, index);
213}
214
58d6ea30
MW
/*
 * Check that removing the entry at index 1 shrinks a two-entry tree back
 * to a single entry at the head, and that the discarded node is filled
 * with retry entries so concurrent walkers restart.
 */
static noinline void check_xa_shrink(struct xarray *xa)
{
	XA_STATE(xas, xa, 1);
	struct xa_node *node;

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	/*
	 * Check that erasing the entry at 1 shrinks the tree and properly
	 * marks the node as being deleted.
	 */
	xas_lock(&xas);
	XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
	node = xas.xa_node;
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
	/* The xa_state is now out of bounds; the old node holds retries. */
	XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
	XA_BUG_ON(xa, xas_load(&xas) != NULL);
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));
}
242
41aec91f
MW
/* Check xa_cmpxchg() and xa_insert() conditional-store semantics. */
static noinline void check_cmpxchg(struct xarray *xa)
{
	void *FIVE = xa_mk_value(5);
	void *SIX = xa_mk_value(6);
	void *LOTS = xa_mk_value(12345678);

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
	/* Inserting over an occupied slot fails with -EEXIST. */
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EEXIST);
	/* A mismatched 'old' leaves the entry untouched but returns it... */
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
	/* ...while a match replaces it. */
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
	/* On an empty slot, only NULL as 'old' matches. */
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
	xa_erase_index(xa, 12345678);
	xa_erase_index(xa, 5);
	XA_BUG_ON(xa, !xa_empty(xa));
}
261
9f14d4f1
MW
/*
 * Check reserved entries: they make the array non-empty yet load as
 * NULL, can be released or overwritten by a store, look like NULL to
 * cmpxchg, and are skipped when iterating over present entries.
 */
static noinline void check_reserve(struct xarray *xa)
{
	void *entry;
	unsigned long index = 0;

	/* An array with a reserved entry is not empty */
	XA_BUG_ON(xa, !xa_empty(xa));
	xa_reserve(xa, 12345678, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_load(xa, 12345678));
	xa_release(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Releasing a used entry does nothing */
	xa_reserve(xa, 12345678, GFP_KERNEL);
	/* GFP_NOWAIT suffices here: the reservation pre-allocated memory. */
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* cmpxchg sees a reserved entry as NULL */
	xa_reserve(xa, 12345678, GFP_KERNEL);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, NULL, xa_mk_value(12345678),
				GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Can iterate through a reserved entry */
	xa_store_index(xa, 5, GFP_KERNEL);
	xa_reserve(xa, 6, GFP_KERNEL);
	xa_store_index(xa, 7, GFP_KERNEL);

	xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
		XA_BUG_ON(xa, index != 5 && index != 7);
	}
	xa_destroy(xa);
}
300
b803b428
MW
/*
 * Store runs of consecutive entries (plus one at ULONG_MAX, immediately
 * removed) and then erase them from inside an xas_for_each() walk,
 * checking that iteration copes with entries vanishing under it.
 */
static noinline void check_xas_erase(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned long i, j;

	for (i = 0; i < 200; i++) {
		for (j = i; j < 2 * i + 17; j++) {
			xas_set(&xas, j);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_value(j));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));
		}

		/* Briefly occupy ULONG_MAX to force a maximum-depth tree. */
		xas_set(&xas, ULONG_MAX);
		do {
			xas_lock(&xas);
			xas_store(&xas, xa_mk_value(0));
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));

		xas_lock(&xas);
		xas_store(&xas, NULL);

		/* Erase each entry as the iteration reaches it. */
		xas_set(&xas, 0);
		j = i;
		xas_for_each(&xas, entry, ULONG_MAX) {
			XA_BUG_ON(xa, entry != xa_mk_value(j));
			xas_store(&xas, NULL);
			j++;
		}
		xas_unlock(&xas);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
}
338
58d6ea30
MW
/*
 * Check multi-index entries: a single store covers every index in its
 * range, neighbouring stores leave it intact, re-storing at a larger
 * order widens it, and a NULL multi-index store erases the whole range.
 * Also cross-checks node->count and node->nr_values bookkeeping.
 */
static noinline void check_multi_store(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long i, j, k;
	unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;

	/* Loading from any position returns the same value */
	xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Storing adjacent to the value does not alter the value */
	xa_store(xa, 3, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Overwriting multiple indexes works */
	xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
	rcu_read_unlock();

	/* We can erase multiple values with a single store */
	xa_store_order(xa, 0, 63, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Even when the first slot is empty but the others aren't */
	xa_store_index(xa, 1, GFP_KERNEL);
	xa_store_index(xa, 2, GFP_KERNEL);
	xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Every pairing of orders: the second store wins everywhere. */
	for (i = 0; i < max_order; i++) {
		for (j = 0; j < max_order; j++) {
			xa_store_order(xa, 0, i, xa_mk_value(i), GFP_KERNEL);
			xa_store_order(xa, 0, j, xa_mk_value(j), GFP_KERNEL);

			for (k = 0; k < max_order; k++) {
				void *entry = xa_load(xa, (1UL << k) - 1);
				if ((i < k) && (j < k))
					XA_BUG_ON(xa, entry != NULL);
				else
					XA_BUG_ON(xa, entry != xa_mk_value(j));
			}

			xa_erase(xa, 0);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}
#endif
}
406
4e99d4e9
MW
/*
 * Store a multi-index entry of @order at @start, using
 * xas_for_each_conflict() to walk the conflicting entries first; assert
 * exactly @present conflicts were seen and the new entry covers the
 * whole range afterwards.
 */
static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
			unsigned int order, unsigned int present)
{
	XA_STATE_ORDER(xas, xa, start, order);
	void *entry;
	unsigned int count = 0;

retry:
	xas_lock(&xas);
	xas_for_each_conflict(&xas, entry) {
		XA_BUG_ON(xa, !xa_is_value(entry));
		XA_BUG_ON(xa, entry < xa_mk_value(start));
		XA_BUG_ON(xa, entry > xa_mk_value(start + (1UL << order) - 1));
		count++;
	}
	xas_store(&xas, xa_mk_value(start));
	xas_unlock(&xas);
	if (xas_nomem(&xas, GFP_KERNEL)) {
		/* The retry re-walks the conflicts; start counting afresh. */
		count = 0;
		goto retry;
	}
	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, count != present);
	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_value(start));
	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
			xa_mk_value(start));
	xa_erase_index(xa, start);
}
435
/*
 * Drive __check_store_iter() with 0, 1, or many pre-existing entries
 * inside (and at the edges of) each order's range.
 */
static noinline void check_store_iter(struct xarray *xa)
{
	unsigned int i, j;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;

	for (i = 0; i < max_order; i++) {
		unsigned int min = 1 << i;
		unsigned int max = (2 << i) - 1;
		/* No conflicts when the range is empty. */
		__check_store_iter(xa, 0, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
		__check_store_iter(xa, min, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));

		/* One conflict at the first or last index of the range. */
		xa_store_index(xa, min, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_index(xa, max, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));

		/* Every index of the range occupied. */
		for (j = 0; j < min; j++)
			xa_store_index(xa, j, GFP_KERNEL);
		__check_store_iter(xa, 0, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
		for (j = 0; j < min; j++)
			xa_store_index(xa, min + j, GFP_KERNEL);
		__check_store_iter(xa, min, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#ifdef CONFIG_XARRAY_MULTI
	/* An entry just outside the range must not count as a conflict. */
	xa_store_index(xa, 63, GFP_KERNEL);
	xa_store_index(xa, 65, GFP_KERNEL);
	__check_store_iter(xa, 64, 2, 1);
	xa_erase_index(xa, 63);
#endif
	XA_BUG_ON(xa, !xa_empty(xa));
}
473
b803b428
MW
/*
 * Check xa_find()/xa_find_after() against a multi-index entry: a search
 * starting anywhere inside the entry's range finds it, and
 * xa_find_after() steps past it to the next entry.
 */
static noinline void check_multi_find(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long index;

	xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL);
	XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL);

	index = 0;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(12));
	XA_BUG_ON(xa, index != 12);
	index = 13;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(12));
	/* The reported index lands somewhere inside the entry's range. */
	XA_BUG_ON(xa, (index < 12) || (index >= 16));
	XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(16));
	XA_BUG_ON(xa, index != 16);

	xa_erase_index(xa, 12);
	xa_erase_index(xa, 16);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}
499
/*
 * Check that erasing a multi-index entry from inside an xas_for_each()
 * walk that starts partway through that entry's range does not confuse
 * the iterator.
 */
static noinline void check_multi_find_2(struct xarray *xa)
{
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
	unsigned int i, j;
	void *entry;

	for (i = 0; i < max_order; i++) {
		unsigned long index = 1UL << i;
		for (j = 0; j < index; j++) {
			/* Start the walk at an offset inside the entry. */
			XA_STATE(xas, xa, j + index);
			xa_store_index(xa, index - 1, GFP_KERNEL);
			xa_store_order(xa, index, i, xa_mk_value(index),
					GFP_KERNEL);
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX) {
				xa_erase_index(xa, index);
			}
			rcu_read_unlock();
			xa_erase_index(xa, index - 1);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}
}
523
/*
 * Check xa_find() (both XA_PRESENT and mark-based searches) with every
 * pair of occupied indices (j < i) in 0..99, starting the search from
 * every index 0..99; then run the multi-index find checks.
 */
static noinline void check_find(struct xarray *xa)
{
	unsigned long i, j, k;

	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Check xa_find with all pairs between 0 and 99 inclusive,
	 * starting at every index between 0 and 99
	 */
	for (i = 0; i < 100; i++) {
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
		xa_set_mark(xa, i, XA_MARK_0);
		for (j = 0; j < i; j++) {
			XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
					NULL);
			xa_set_mark(xa, j, XA_MARK_0);
			for (k = 0; k < 100; k++) {
				unsigned long index = k;
				void *entry = xa_find(xa, &index, ULONG_MAX,
						XA_PRESENT);
				/* The first hit at or after k is j, then i. */
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);

				/* The marked search must agree. */
				index = k;
				entry = xa_find(xa, &index, ULONG_MAX,
						XA_MARK_0);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);
			}
			xa_erase_index(xa, j);
			XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
		}
		xa_erase_index(xa, i);
		XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
	check_multi_find(xa);
	check_multi_find_2(xa);
}
573
64d3e9a9
MW
/*
 * Check xas_next()/xas_prev() stepping over a sparse array containing
 * entries only at 0 and @idx, including wrap-around at ULONG_MAX.
 */
static noinline void check_move_small(struct xarray *xa, unsigned long idx)
{
	XA_STATE(xas, xa, 0);
	unsigned long i;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, idx, GFP_KERNEL);

	rcu_read_lock();
	/* Walk forward well past the last entry. */
	for (i = 0; i < idx * 4; i++) {
		void *entry = xas_next(&xas);
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_value(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	}
	xas_next(&xas);
	XA_BUG_ON(xa, xas.xa_index != i);

	/* Walk back down to index 0. */
	do {
		void *entry = xas_prev(&xas);
		i--;
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_value(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	} while (i > 0);

	/* Stepping off either end wraps between 0 and ULONG_MAX. */
	xas_set(&xas, ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != 0);
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	rcu_read_unlock();

	xa_erase_index(xa, 0);
	xa_erase_index(xa, idx);
	XA_BUG_ON(xa, !xa_empty(xa));
}
621
/*
 * Check xas_prev()/xas_next() across a large dense array, then again
 * after punching a hole in the middle, and finally over small sparse
 * arrays at power-of-two (and nearby) spacings.
 */
static noinline void check_move(struct xarray *xa)
{
	XA_STATE(xas, xa, (1 << 16) - 1);
	unsigned long i;

	for (i = 0; i < (1 << 16); i++)
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);

	/* Walk backwards from the top; every index is occupied. */
	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		XA_BUG_ON(xa, entry != xa_mk_value(i));
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	/* Stepping below 0 wraps to ULONG_MAX. */
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		XA_BUG_ON(xa, entry != xa_mk_value(i));
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	/* Punch a hole in the middle and repeat both walks. */
	for (i = (1 << 8); i < (1 << 15); i++)
		xa_erase_index(xa, i);

	i = xas.xa_index;

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_value(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_value(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	xa_destroy(xa);

	for (i = 0; i < 16; i++)
		check_move_small(xa, 1UL << i);

	for (i = 2; i < 16; i++)
		check_move_small(xa, (1UL << i) - 1);
}
687
2264f513
MW
/*
 * Use xas_create_range() to store (1 << order) consecutive value entries
 * covering the range a single multi-index entry of @order would span,
 * asserting the range was previously empty.
 */
static noinline void xa_store_many_order(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i = 0;

	do {
		xas_lock(&xas);
		XA_BUG_ON(xa, xas_find_conflict(&xas));
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1U << order); i++) {
			XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(index + i)));
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));
}
710
711static noinline void check_create_range_1(struct xarray *xa,
712 unsigned long index, unsigned order)
713{
714 unsigned long i;
715
716 xa_store_many_order(xa, index, order);
717 for (i = index; i < index + (1UL << order); i++)
718 xa_erase_index(xa, i);
719 XA_BUG_ON(xa, !xa_empty(xa));
720}
721
722static noinline void check_create_range_2(struct xarray *xa, unsigned order)
723{
724 unsigned long i;
725 unsigned long nr = 1UL << order;
726
727 for (i = 0; i < nr * nr; i += nr)
728 xa_store_many_order(xa, i, order);
729 for (i = 0; i < nr * nr; i++)
730 xa_erase_index(xa, i);
731 XA_BUG_ON(xa, !xa_empty(xa));
732}
733
/* xas_create_range() must preserve a pre-existing error on the xa_state. */
static noinline void check_create_range_3(void)
{
	XA_STATE(xas, NULL, 0);
	xas_set_err(&xas, -EEXIST);
	xas_create_range(&xas);
	XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
}
741
/*
 * Like xa_store_many_order() but with one entry pre-existing inside the
 * range: xas_store() must return the old entry for that index and NULL
 * for every other slot in the range.
 */
static noinline void check_create_range_4(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	/* XA_STATE_ORDER rounds xa_index down to the start of the range. */
	unsigned long base = xas.xa_index;
	unsigned long i = 0;

	xa_store_index(xa, index, GFP_KERNEL);
	do {
		xas_lock(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1UL << order); i++) {
			void *old = xas_store(&xas, xa_mk_value(base + i));
			if (xas.xa_index == index)
				XA_BUG_ON(xa, old != xa_mk_value(base + i));
			else
				XA_BUG_ON(xa, old != NULL);
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));

	for (i = base; i < base + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}
773
/*
 * Drive the xas_create_range() checks at every order, both aligned and
 * deliberately unaligned start indices.
 */
static noinline void check_create_range(struct xarray *xa)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;

	for (order = 0; order < max_order; order++) {
		/* Aligned starts at several multiples of the range size. */
		check_create_range_1(xa, 0, order);
		check_create_range_1(xa, 1U << order, order);
		check_create_range_1(xa, 2U << order, order);
		check_create_range_1(xa, 3U << order, order);
		check_create_range_1(xa, 1U << 24, order);
		/* Keep the quadratic check_create_range_2() affordable. */
		if (order < 10)
			check_create_range_2(xa, order);

		check_create_range_4(xa, 0, order);
		check_create_range_4(xa, 1U << order, order);
		check_create_range_4(xa, 2U << order, order);
		check_create_range_4(xa, 3U << order, order);
		check_create_range_4(xa, 1U << 24, order);

		/* Unaligned pre-existing entries inside the range. */
		check_create_range_4(xa, 1, order);
		check_create_range_4(xa, (1U << order) + 1, order);
		check_create_range_4(xa, (2U << order) + 1, order);
		check_create_range_4(xa, (2U << order) - 1, order);
		check_create_range_4(xa, (3U << order) + 1, order);
		check_create_range_4(xa, (3U << order) - 1, order);
		check_create_range_4(xa, (1U << 24) + 1, order);
	}

	check_create_range_3();
}
805
687149fc
MW
/*
 * Check xa_destroy() on an empty array, arrays with a single entry
 * (including at ULONG_MAX), and an array with a multi-index entry.
 */
static noinline void check_destroy(struct xarray *xa)
{
	unsigned long index;

	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an empty array is a no-op */
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an array with a single entry */
	for (index = 0; index < 1000; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		XA_BUG_ON(xa, xa_empty(xa));
		xa_destroy(xa);
		XA_BUG_ON(xa, !xa_empty(xa));
	}

	/* Destroying an array with a single entry at ULONG_MAX */
	xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

#ifdef CONFIG_XARRAY_MULTI
	/* Destroying an array with a multi-index entry */
	xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}
838
/* The single XArray shared by every check; each test leaves it empty. */
static DEFINE_XARRAY(array);

/* Module entry point: run every check and report pass/fail totals. */
static int xarray_checks(void)
{
	check_xa_err(&array);
	check_xas_retry(&array);
	check_xa_load(&array);
	check_xa_mark(&array);
	check_xa_shrink(&array);
	check_xas_erase(&array);
	check_cmpxchg(&array);
	check_reserve(&array);
	check_multi_store(&array);
	check_find(&array);
	check_destroy(&array);
	check_move(&array);
	check_create_range(&array);
	check_store_iter(&array);

	printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
	/* Fail module load if any assertion failed, so harnesses notice. */
	return (tests_run == tests_passed) ? 0 : -EINVAL;
}
861
/* Nothing to tear down: every check leaves the shared array empty. */
static void xarray_exit(void)
{
}

module_init(xarray_checks);
module_exit(xarray_exit);
MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
MODULE_LICENSE("GPL");