/*
 * Module-based API test facility for ww_mutexes
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 */

#include <linux/kernel.h>

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>

static DEFINE_WD_CLASS(ww_class);
static struct workqueue_struct *wq;

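/*
 * Overview: a ww_mutex "transaction" acquires a set of locks under one
 * ww_acquire_ctx.  When two contexts collide, the younger one is backed
 * out with -EDEADLK: it must drop every lock it holds, reacquire the
 * contended lock with ww_mutex_lock_slow(), and then retry the rest.
 * The tests below cover basic mutual exclusion, AA and ABBA deadlock
 * detection, cyclic deadlock resolution and multi-worker stress.
 * Loading the module runs the whole suite; module init fails if any
 * test does.
 */
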
struct test_mutex {
	struct work_struct work;
	struct ww_mutex mutex;
	struct completion ready, go, done;
	unsigned int flags;
};

#define TEST_MTX_SPIN BIT(0)
#define TEST_MTX_TRY BIT(1)
#define TEST_MTX_CTX BIT(2)
#define __TEST_MTX_LAST BIT(3)

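/*
 * Worker side of the mutual exclusion test: signal readiness, wait for
 * the go signal, then take the mutex (spinning on trylock if
 * TEST_MTX_TRY is set) and flag completion.
 */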
static void test_mutex_work(struct work_struct *work)
{
	struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

	complete(&mtx->ready);
	wait_for_completion(&mtx->go);

	if (mtx->flags & TEST_MTX_TRY) {
		while (!ww_mutex_trylock(&mtx->mutex))
			cond_resched();
	} else {
		ww_mutex_lock(&mtx->mutex, NULL);
	}
	complete(&mtx->done);
	ww_mutex_unlock(&mtx->mutex);
}

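/*
 * Hold the mutex while the worker tries to take it.  If the worker
 * completes &mtx.done while we still hold the lock, mutual exclusion
 * was violated and the test fails with -EINVAL.
 */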
static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
	struct test_mutex mtx;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mtx.mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
	init_completion(&mtx.ready);
	init_completion(&mtx.go);
	init_completion(&mtx.done);
	mtx.flags = flags;

	schedule_work(&mtx.work);

	wait_for_completion(&mtx.ready);
	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
	complete(&mtx.go);
	if (flags & TEST_MTX_SPIN) {
		unsigned long timeout = jiffies + TIMEOUT;

		ret = 0;
		do {
			if (completion_done(&mtx.done)) {
				ret = -EINVAL;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, timeout));
	} else {
		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
	}
	ww_mutex_unlock(&mtx.mutex);
	ww_acquire_fini(&ctx);

	if (ret) {
		pr_err("%s(flags=%x): mutual exclusion failure\n",
		       __func__, flags);
		ret = -EINVAL;
	}

	flush_work(&mtx.work);
	destroy_work_on_stack(&mtx.work);
	return ret;
#undef TIMEOUT
}

static int test_mutex(void)
{
	int ret;
	int i;

	for (i = 0; i < __TEST_MTX_LAST; i++) {
		ret = __test_mutex(i);
		if (ret)
			return ret;
	}

	return 0;
}

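/*
 * AA deadlock: acquiring the same mutex twice within one acquire
 * context must be refused, with trylock returning false and a second
 * ww_mutex_lock() returning -EALREADY.
 */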
static int test_aa(void)
{
	struct ww_mutex mutex;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	ww_mutex_lock(&mutex, &ctx);

	if (ww_mutex_trylock(&mutex)) {
		pr_err("%s: trylocked itself!\n", __func__);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = ww_mutex_lock(&mutex, &ctx);
	if (ret != -EALREADY) {
		pr_err("%s: missed deadlock for recursing, ret=%d\n",
		       __func__, ret);
		if (!ret)
			ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = 0;
out:
	ww_mutex_unlock(&mutex);
	ww_acquire_fini(&ctx);
	return ret;
}

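/*
 * ABBA inversion: this thread takes A then B while the worker takes
 * B then A.  Without backoff (resolve=false) one side must observe
 * -EDEADLK; with backoff the loser releases its lock, reacquires it
 * with ww_mutex_lock_slow() and both sides must then succeed.
 */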
struct test_abba {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex b_mutex;
	struct completion a_ready;
	struct completion b_ready;
	bool resolve;
	int result;
};

static void test_abba_work(struct work_struct *work)
{
	struct test_abba *abba = container_of(work, typeof(*abba), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&abba->b_mutex, &ctx);

	complete(&abba->b_ready);
	wait_for_completion(&abba->a_ready);

	err = ww_mutex_lock(&abba->a_mutex, &ctx);
	if (abba->resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba->b_mutex);
		ww_mutex_lock_slow(&abba->a_mutex, &ctx);
		err = ww_mutex_lock(&abba->b_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba->a_mutex);
	ww_mutex_unlock(&abba->b_mutex);
	ww_acquire_fini(&ctx);

	abba->result = err;
}

static int test_abba(bool resolve)
{
	struct test_abba abba;
	struct ww_acquire_ctx ctx;
	int err, ret;

	ww_mutex_init(&abba.a_mutex, &ww_class);
	ww_mutex_init(&abba.b_mutex, &ww_class);
	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
	init_completion(&abba.a_ready);
	init_completion(&abba.b_ready);
	abba.resolve = resolve;

	schedule_work(&abba.work);

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&abba.a_mutex, &ctx);

	complete(&abba.a_ready);
	wait_for_completion(&abba.b_ready);

	err = ww_mutex_lock(&abba.b_mutex, &ctx);
	if (resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba.a_mutex);
		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
		err = ww_mutex_lock(&abba.a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba.b_mutex);
	ww_mutex_unlock(&abba.a_mutex);
	ww_acquire_fini(&ctx);

	flush_work(&abba.work);
	destroy_work_on_stack(&abba.work);

	ret = 0;
	if (resolve) {
		if (err || abba.result) {
			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	} else {
		if (err != -EDEADLK && abba.result != -EDEADLK) {
			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	}
	return ret;
}

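/*
 * Cyclic deadlock: each of nthreads workers holds its own a_mutex and
 * then blocks on its neighbour's, closing a ring of lock dependencies
 * that only resolves if some worker is backed off with -EDEADLK.
 */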
struct test_cycle {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex *b_mutex;
	struct completion *a_signal;
	struct completion b_signal;
	int result;
};

static void test_cycle_work(struct work_struct *work)
{
	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
	struct ww_acquire_ctx ctx;
	int err, erra = 0;

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&cycle->a_mutex, &ctx);

	complete(cycle->a_signal);
	wait_for_completion(&cycle->b_signal);

	err = ww_mutex_lock(cycle->b_mutex, &ctx);
	if (err == -EDEADLK) {
		err = 0;
		ww_mutex_unlock(&cycle->a_mutex);
		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
		erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(cycle->b_mutex);
	if (!erra)
		ww_mutex_unlock(&cycle->a_mutex);
	ww_acquire_fini(&ctx);

	cycle->result = err ?: erra;
}

static int __test_cycle(unsigned int nthreads)
{
	struct test_cycle *cycles;
	unsigned int n, last = nthreads - 1;
	int ret;

	cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
	if (!cycles)
		return -ENOMEM;

	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		ww_mutex_init(&cycle->a_mutex, &ww_class);
		if (n == last)
			cycle->b_mutex = &cycles[0].a_mutex;
		else
			cycle->b_mutex = &cycles[n + 1].a_mutex;

		if (n == 0)
			cycle->a_signal = &cycles[last].b_signal;
		else
			cycle->a_signal = &cycles[n - 1].b_signal;
		init_completion(&cycle->b_signal);

		INIT_WORK(&cycle->work, test_cycle_work);
		cycle->result = 0;
	}

	for (n = 0; n < nthreads; n++)
		queue_work(wq, &cycles[n].work);

	flush_workqueue(wq);

	ret = 0;
	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		if (!cycle->result)
			continue;

		pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
		       n, nthreads, cycle->result);
		ret = -EINVAL;
		break;
	}

	for (n = 0; n < nthreads; n++)
		ww_mutex_destroy(&cycles[n].a_mutex);
	kfree(cycles);
	return ret;
}

static int test_cycle(unsigned int ncpus)
{
	unsigned int n;
	int ret;

	for (n = 2; n <= ncpus + 1; n++) {
		ret = __test_cycle(n);
		if (ret)
			return ret;
	}

	return 0;
}

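/*
 * Stress tests: several workers hammer a shared array of ww_mutexes
 * until stress->timeout, acquiring them in a fixed random order, in a
 * continually reshuffled order, or one at a time.
 */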
struct stress {
	struct work_struct work;
	struct ww_mutex *locks;
	unsigned long timeout;
	int nlocks;
};

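/*
 * Return a kmalloc'ed random permutation of 0..count-1, or NULL on
 * allocation failure.  The caller frees it.
 */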
static int *get_random_order(int count)
{
	int *order;
	int n, r, tmp;

	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
	if (!order)
		return order;

	for (n = 0; n < count; n++)
		order[n] = n;

	/* Fisher-Yates shuffle; run down to n == 1 for an unbiased result. */
	for (n = count - 1; n > 0; n--) {
		r = get_random_int() % (n + 1);
		if (r != n) {
			tmp = order[n];
			order[n] = order[r];
			order[r] = tmp;
		}
	}

	return order;
}

static void dummy_load(struct stress *stress)
{
	usleep_range(1000, 2000);
}

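/*
 * Acquire all locks in one fixed random order under a fresh acquire
 * context, backing off and retrying from the contended lock on
 * -EDEADLK.
 */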
static void stress_inorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *locks = stress->locks;
	struct ww_acquire_ctx ctx;
	int *order;

	order = get_random_order(nlocks);
	if (!order)
		return;

	do {
		int contended = -1;
		int n, err;

		ww_acquire_init(&ctx, &ww_class);
retry:
		err = 0;
		for (n = 0; n < nlocks; n++) {
			if (n == contended)
				continue;

			err = ww_mutex_lock(&locks[order[n]], &ctx);
			if (err < 0)
				break;
		}
		if (!err)
			dummy_load(stress);

		if (contended > n)
			ww_mutex_unlock(&locks[order[contended]]);
		contended = n;
		while (n--)
			ww_mutex_unlock(&locks[order[n]]);

		if (err == -EDEADLK) {
			ww_mutex_lock_slow(&locks[order[contended]], &ctx);
			goto retry;
		}

		if (err) {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

	kfree(order);
	kfree(stress);
}

struct reorder_lock {
	struct list_head link;
	struct ww_mutex *lock;
};

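/*
 * Like the inorder test, but each -EDEADLK moves the contended lock to
 * the head of the list, so the acquisition order mutates over time.
 */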
static void stress_reorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	LIST_HEAD(locks);
	struct ww_acquire_ctx ctx;
	struct reorder_lock *ll, *ln;
	int *order;
	int n, err;

	order = get_random_order(stress->nlocks);
	if (!order)
		return;

	for (n = 0; n < stress->nlocks; n++) {
		ll = kmalloc(sizeof(*ll), GFP_KERNEL);
		if (!ll)
			goto out;

		ll->lock = &stress->locks[order[n]];
		list_add(&ll->link, &locks);
	}
	kfree(order);
	order = NULL;

	do {
		ww_acquire_init(&ctx, &ww_class);

		list_for_each_entry(ll, &locks, link) {
			err = ww_mutex_lock(ll->lock, &ctx);
			if (!err)
				continue;

			ln = ll;
			list_for_each_entry_continue_reverse(ln, &locks, link)
				ww_mutex_unlock(ln->lock);

			if (err != -EDEADLK) {
				pr_err_once("stress (%s) failed with %d\n",
					    __func__, err);
				break;
			}

			ww_mutex_lock_slow(ll->lock, &ctx);
			list_move(&ll->link, &locks); /* restarts iteration */
		}

		dummy_load(stress);
		list_for_each_entry(ll, &locks, link)
			ww_mutex_unlock(ll->lock);

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

out:
	list_for_each_entry_safe(ll, ln, &locks, link)
		kfree(ll);
	kfree(order);
	kfree(stress);
}

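/*
 * Lock and unlock a single randomly chosen mutex without an acquire
 * context, exercising the uncontended path.
 */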
static void stress_one_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *lock = stress->locks + (get_random_int() % nlocks);
	int err;

	do {
		err = ww_mutex_lock(lock, NULL);
		if (!err) {
			dummy_load(stress);
			ww_mutex_unlock(lock);
		} else {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));

	kfree(stress);
}

#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)

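/*
 * Spawn nthreads workers over a shared set of nlocks mutexes, cycling
 * through the worker types selected by @flags.  Each worker runs until
 * its 2 second timeout expires and frees its own struct stress.
 */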
static int stress(int nlocks, int nthreads, unsigned int flags)
{
	struct ww_mutex *locks;
	int n;

	locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
	if (!locks)
		return -ENOMEM;

	for (n = 0; n < nlocks; n++)
		ww_mutex_init(&locks[n], &ww_class);

	for (n = 0; nthreads; n++) {
		struct stress *stress;
		void (*fn)(struct work_struct *work);

		fn = NULL;
		switch (n & 3) {
		case 0:
			if (flags & STRESS_INORDER)
				fn = stress_inorder_work;
			break;
		case 1:
			if (flags & STRESS_REORDER)
				fn = stress_reorder_work;
			break;
		case 2:
			if (flags & STRESS_ONE)
				fn = stress_one_work;
			break;
		}

		if (!fn)
			continue;

		stress = kmalloc(sizeof(*stress), GFP_KERNEL);
		if (!stress)
			break;

		INIT_WORK(&stress->work, fn);
		stress->locks = locks;
		stress->nlocks = nlocks;
		stress->timeout = jiffies + 2*HZ;

		queue_work(wq, &stress->work);
		nthreads--;
	}

	flush_workqueue(wq);

	for (n = 0; n < nlocks; n++)
		ww_mutex_destroy(&locks[n]);
	kfree(locks);

	return 0;
}

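/*
 * Run the tests in order of increasing complexity at module load; any
 * failure aborts the load with the failing test's error code.
 */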
static int __init test_ww_mutex_init(void)
{
	int ncpus = num_online_cpus();
	int ret;

	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

	ret = test_mutex();
	if (ret)
		return ret;

	ret = test_aa();
	if (ret)
		return ret;

	ret = test_abba(false);
	if (ret)
		return ret;

	ret = test_abba(true);
	if (ret)
		return ret;

	ret = test_cycle(ncpus);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_INORDER);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_REORDER);
	if (ret)
		return ret;

	ret = stress(4095, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
	if (ret)
		return ret;

	return 0;
}

static void __exit test_ww_mutex_exit(void)
{
	destroy_workqueue(wq);
}

module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");