// SPDX-License-Identifier: GPL-2.0
/*
 * Regression1
 * Description:
 * Salman Qazi describes the following radix-tree bug:
 *
 * In the following case, we can get a deadlock:
 *
 * 0. The radix tree contains two items, one has the index 0.
 * 1. The reader (in this case find_get_pages) takes the rcu_read_lock.
 * 2. The reader acquires slot(s) for item(s) including the index 0 item.
 * 3. The non-zero index item is deleted, and as a consequence the other item
 *    is moved to the root of the tree. The place where it used to be is queued
 *    for deletion after the readers finish.
 * 3b. The zero item is deleted, removing it from the direct slot, it remains in
 *     the rcu-delayed indirect node.
 * 4. The reader looks at the index 0 slot, and finds that the page has 0 ref
 *    count
 * 5. The reader looks at it again, hoping that the item will either be freed
 *    or the ref count will increase. This never happens, as the slot it is
 *    looking at will never be updated. Also, this slot can never be reclaimed
 *    because the reader is holding rcu_read_lock and is in an infinite loop.
 *
 * The fix is to re-use the same "indirect" pointer case that requires a slot
 * lookup retry into a general "retry the lookup" bit.
 *
 * Running:
 * This test should run to completion in a few seconds. The above bug would
 * cause it to hang indefinitely.
 *
 * Upstream commit:
 * Not yet
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <stdlib.h>
#include <pthread.h>
#include <stdio.h>
#include <assert.h>

#include "regression.h"

static RADIX_TREE(mt_tree, GFP_KERNEL);
/* Minimal stand-in for the kernel's struct page, tracked in mt_tree. */
struct page {
	pthread_mutex_t lock;	/* serialises access to count */
	struct rcu_head rcu;	/* for RCU-deferred free via call_rcu() */
	int count;		/* pseudo refcount: 1 while live, 0 once deleted */
	unsigned long index;	/* index this page was allocated for */
};
54
a332125f 55static struct page *page_alloc(int index)
1366c37e
MW
56{
57 struct page *p;
58 p = malloc(sizeof(struct page));
59 p->count = 1;
a332125f 60 p->index = index;
1366c37e
MW
61 pthread_mutex_init(&p->lock, NULL);
62
63 return p;
64}
65
/*
 * RCU callback: actually free a page once all pre-existing readers are
 * done with it.  The page must already have been dropped to count == 0
 * by the deleter before page_free() queued this callback.
 */
static void page_rcu_free(struct rcu_head *rcu)
{
	struct page *p = container_of(rcu, struct page, rcu);
	assert(!p->count);
	pthread_mutex_destroy(&p->lock);
	free(p);
}
73
/* Defer freeing @p until after any concurrent RCU readers have finished. */
static void page_free(struct page *p)
{
	call_rcu(&p->rcu, page_rcu_free);
}
78
79static unsigned find_get_pages(unsigned long start,
80 unsigned int nr_pages, struct page **pages)
81{
a332125f
MW
82 XA_STATE(xas, &mt_tree, start);
83 struct page *page;
84 unsigned int ret = 0;
1366c37e
MW
85
86 rcu_read_lock();
a332125f
MW
87 xas_for_each(&xas, page, ULONG_MAX) {
88 if (xas_retry(&xas, page))
1366c37e
MW
89 continue;
90
1366c37e 91 pthread_mutex_lock(&page->lock);
a332125f
MW
92 if (!page->count)
93 goto unlock;
94
1366c37e
MW
95 /* don't actually update page refcount */
96 pthread_mutex_unlock(&page->lock);
97
98 /* Has the page moved? */
a332125f
MW
99 if (unlikely(page != xas_reload(&xas)))
100 goto put_page;
1366c37e
MW
101
102 pages[ret] = page;
103 ret++;
a332125f
MW
104 continue;
105unlock:
106 pthread_mutex_unlock(&page->lock);
107put_page:
108 xas_reset(&xas);
1366c37e
MW
109 }
110 rcu_read_unlock();
111 return ret;
112}
113
114static pthread_barrier_t worker_barrier;
115
/*
 * Worker thread body.  All threads meet at worker_barrier; the single
 * thread that receives PTHREAD_BARRIER_SERIAL_THREAD becomes the
 * writer, every other thread becomes a reader.
 *
 * Writer: repeatedly inserts pages at indices 0 and 1, then deletes
 * index 1 (shrinking the tree back to a root-only entry — the
 * transition described in the file header) and index 0, dropping each
 * page's refcount under its lock before the RCU-deferred free.
 *
 * Reader: hammers find_get_pages() over the same tree; with the bug
 * present it would spin forever on a stale slot.
 */
static void *regression1_fn(void *arg)
{
	rcu_register_thread();

	if (pthread_barrier_wait(&worker_barrier) ==
			PTHREAD_BARRIER_SERIAL_THREAD) {
		int j;

		for (j = 0; j < 1000000; j++) {
			struct page *p;

			p = page_alloc(0);
			xa_lock(&mt_tree);
			radix_tree_insert(&mt_tree, 0, p);
			xa_unlock(&mt_tree);

			p = page_alloc(1);
			xa_lock(&mt_tree);
			radix_tree_insert(&mt_tree, 1, p);
			xa_unlock(&mt_tree);

			/* Delete index 1 first: forces the root-shrink case. */
			xa_lock(&mt_tree);
			p = radix_tree_delete(&mt_tree, 1);
			pthread_mutex_lock(&p->lock);
			p->count--;
			pthread_mutex_unlock(&p->lock);
			xa_unlock(&mt_tree);
			page_free(p);

			xa_lock(&mt_tree);
			p = radix_tree_delete(&mt_tree, 0);
			pthread_mutex_lock(&p->lock);
			p->count--;
			pthread_mutex_unlock(&p->lock);
			xa_unlock(&mt_tree);
			page_free(p);
		}
	} else {
		int j;

		for (j = 0; j < 100000000; j++) {
			struct page *pages[10];

			find_get_pages(0, 10, pages);
		}
	}

	rcu_unregister_thread();

	return NULL;
}
167
168static pthread_t *threads;
169void regression1_test(void)
170{
171 int nr_threads;
172 int i;
173 long arg;
174
175 /* Regression #1 */
73bc029b 176 printv(1, "running regression test 1, should finish in under a minute\n");
1366c37e
MW
177 nr_threads = 2;
178 pthread_barrier_init(&worker_barrier, NULL, nr_threads);
179
cac7ea57 180 threads = malloc(nr_threads * sizeof(*threads));
1366c37e
MW
181
182 for (i = 0; i < nr_threads; i++) {
183 arg = i;
184 if (pthread_create(&threads[i], NULL, regression1_fn, (void *)arg)) {
185 perror("pthread_create");
186 exit(1);
187 }
188 }
189
190 for (i = 0; i < nr_threads; i++) {
191 if (pthread_join(threads[i], NULL)) {
192 perror("pthread_join");
193 exit(1);
194 }
195 }
196
197 free(threads);
198
73bc029b 199 printv(1, "regression test 1, done\n");
1366c37e 200}