// SPDX-License-Identifier: GPL-2.0+
/*
 * Module-based torture test facility for locking
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */
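
/*
 * Typical usage (an illustrative sketch; parameters are defined below, and
 * Documentation/locking/locktorture.rst has the full list):
 *
 *	modprobe locktorture torture_type=mutex_lock nwriters_stress=4 \
 *		stat_interval=30
 *
 * Statistics are then printk()ed every stat_interval seconds, and an
 * "End of test" SUCCESS/FAILURE verdict is printed when the test stops.
 */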

#define pr_fmt(fmt) fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/reboot.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

torture_param(int, nwriters_stress, -1,
	     "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
	     "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	     "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, rt_boost, 2,
	     "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens.");
torture_param(int, verbose, 1,
	     "Enable verbose debugging printk()s");
torture_param(int, nested_locks, 0, "Number of nested locks (max = 8)");
/* Going much higher trips "BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!" errors */
#define MAX_NESTED_LOCKS 8
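
/*
 * For example, nested_locks=4 makes each writer iteration acquire a random
 * subset of four nested locks (selected by a per-iteration bitmask) in
 * addition to the main lock, exercising deeper lock chains; see
 * lock_torture_writer() below.
 */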

static char *torture_type = IS_ENABLED(CONFIG_PREEMPT_RT) ? "raw_spin_lock" : "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
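
/*
 * Shared state for detecting exclusion failures: a writer WARNs (and counts
 * a failure) if lock_is_write_held is already set when it acquires the lock,
 * and readers and writers cross-check each other via lock_is_read_held.
 * last_lock_release is recorded at each write-side unlock, presumably as a
 * debugging breadcrumb (e.g. for inspecting a stalled test from a dump).
 */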
static bool lock_is_write_held;
static atomic_t lock_is_read_held;
static unsigned long last_lock_release;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	void (*exit)(void);
	int (*nested_lock)(int tid, u32 lockset);
	int (*writelock)(int tid);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(int tid);
	void (*nested_unlock)(int tid, u32 lockset);
	int (*readlock)(int tid);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(int tid);

	unsigned long flags;	/* for irq spinlocks */
	const char *name;
};

struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	bool init_called;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};
/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(int tid __maybe_unused)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_lock_busted_write_unlock(int tid __maybe_unused)
{
	/* BUGGY, do not use in real life!!! */
}

static void __torture_rt_boost(struct torture_random_state *trsp)
{
	const unsigned int factor = rt_boost_factor;

	if (!rt_task(current)) {
		/*
		 * Boost priority once every rt_boost_factor operations. When
		 * the task tries to take the lock, the rtmutex code will
		 * account for the new priority and do any corresponding
		 * pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			sched_set_fifo(current);
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another 10 * rt_boost_factor
		 * operations, then be restored to its original priority, and
		 * so on.
		 *
		 * When @trsp is nil, force-reset the task's priority so that
		 * the kthread can be stopped cleanly.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			sched_set_normal(current, 0);
		} else /* common case, do nothing */
			return;
	}
}

static void torture_rt_boost(struct torture_random_state *trsp)
{
	if (rt_boost != 2)
		return;

	__torture_rt_boost(trsp);
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "lock_busted"
};
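
/*
 * Note that lock_busted provides no mutual exclusion whatsoever: selecting
 * torture_type=lock_busted is a self-test that the failure detection (the
 * WARNs and n_lock_fail counts in the writer and reader kthreads below)
 * actually fires.
 */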

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock"
};

static int torture_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock_irq"
};

static DEFINE_RAW_SPINLOCK(torture_raw_spinlock);

static int torture_raw_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
	raw_spin_lock(&torture_raw_spinlock);
	return 0;
}

static void torture_raw_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
	raw_spin_unlock(&torture_raw_spinlock);
}

static struct lock_torture_ops raw_spin_lock_ops = {
	.writelock	= torture_raw_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_raw_spin_lock_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "raw_spin_lock"
};

static int torture_raw_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&torture_raw_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_raw_spin_lock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
	raw_spin_unlock_irqrestore(&torture_raw_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops raw_spin_lock_irq_ops = {
	.writelock	= torture_raw_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_raw_spin_lock_write_unlock_irq,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "raw_spin_lock_irq"
};

static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock	= torture_rwlock_read_lock,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock,
	.name		= "rw_lock"
};

static int torture_rwlock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock	= torture_rwlock_read_lock_irq,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock_irq,
	.name		= "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);
static struct mutex torture_nested_mutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_mutex_keys[MAX_NESTED_LOCKS];

static void torture_mutex_init(void)
{
	int i;

	for (i = 0; i < MAX_NESTED_LOCKS; i++)
		__mutex_init(&torture_nested_mutexes[i], __func__,
			     &nested_mutex_keys[i]);
}

static int torture_mutex_nested_lock(int tid __maybe_unused,
				     u32 lockset)
{
	int i;

	for (i = 0; i < nested_locks; i++)
		if (lockset & (1 << i))
			mutex_lock(&torture_nested_mutexes[i]);
	return 0;
}

static int torture_mutex_lock(int tid __maybe_unused)
__acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);
	else
		mdelay(longdelay_ms / 5);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_mutex_unlock(int tid __maybe_unused)
__releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static void torture_mutex_nested_unlock(int tid __maybe_unused,
					u32 lockset)
{
	int i;

	for (i = nested_locks - 1; i >= 0; i--)
		if (lockset & (1 << i))
			mutex_unlock(&torture_nested_mutexes[i]);
}

static struct lock_torture_ops mutex_lock_ops = {
	.init		= torture_mutex_init,
	.nested_lock	= torture_mutex_nested_lock,
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_mutex_unlock,
	.nested_unlock	= torture_mutex_nested_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "mutex_lock"
};

#include <linux/ww_mutex.h>
/*
 * The torture ww_mutexes should belong to the same lock class as
 * torture_ww_class to avoid lockdep problems. The ww_mutex_init()
 * function is called during initialization to ensure that.
 */
static DEFINE_WD_CLASS(torture_ww_class);
static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2;
static struct ww_acquire_ctx *ww_acquire_ctxs;

static void torture_ww_mutex_init(void)
{
	ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class);

	ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress,
					sizeof(*ww_acquire_ctxs),
					GFP_KERNEL);
	if (!ww_acquire_ctxs)
		VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory");
}

static void torture_ww_mutex_exit(void)
{
	kfree(ww_acquire_ctxs);
}

static int torture_ww_mutex_lock(int tid)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (WARN_ON(err != -EDEADLK))
			return err;

		ww_mutex_lock_slow(ll->lock, ctx);
		list_move(&ll->link, &list);
	}

	return 0;
}
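
/*
 * The loop above is the standard wait/wound deadlock-avoidance dance:
 * if ww_mutex_lock() fails with -EDEADLK, release every mutex already
 * held, take the contended one with ww_mutex_lock_slow(), move it to
 * the head of the list, and retry the remainder in the new order.
 */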

static void torture_ww_mutex_unlock(int tid)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
	ww_acquire_fini(ctx);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
	.init		= torture_ww_mutex_init,
	.exit		= torture_ww_mutex_exit,
	.writelock	= torture_ww_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_ww_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "ww_mutex_lock"
};

#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);
static struct rt_mutex torture_nested_rtmutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_rtmutex_keys[MAX_NESTED_LOCKS];

static void torture_rtmutex_init(void)
{
	int i;

	for (i = 0; i < MAX_NESTED_LOCKS; i++)
		__rt_mutex_init(&torture_nested_rtmutexes[i], __func__,
				&nested_rtmutex_keys[i]);
}

static int torture_rtmutex_nested_lock(int tid __maybe_unused,
				       u32 lockset)
{
	int i;

	for (i = 0; i < nested_locks; i++)
		if (lockset & (1 << i))
			rt_mutex_lock(&torture_nested_rtmutexes[i]);
	return 0;
}

static int torture_rtmutex_lock(int tid __maybe_unused)
__acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rtmutex_unlock(int tid __maybe_unused)
__releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static void torture_rt_boost_rtmutex(struct torture_random_state *trsp)
{
	if (!rt_boost)
		return;

	__torture_rt_boost(trsp);
}

static void torture_rtmutex_nested_unlock(int tid __maybe_unused,
					  u32 lockset)
{
	int i;

	for (i = nested_locks - 1; i >= 0; i--)
		if (lockset & (1 << i))
			rt_mutex_unlock(&torture_nested_rtmutexes[i]);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.init		= torture_rtmutex_init,
	.nested_lock	= torture_rtmutex_nested_lock,
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
	.task_boost	= torture_rt_boost_rtmutex,
	.writeunlock	= torture_rtmutex_unlock,
	.nested_unlock	= torture_rtmutex_nested_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "rtmutex_lock"
};
#endif

static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);
	else
		mdelay(longdelay_ms / 10);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_write(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 2);
	else
		mdelay(longdelay_ms / 2);
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_read(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_rwsem_up_write,
	.readlock	= torture_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_rwsem_up_read,
	.name		= "rwsem_lock"
};

#include <linux/percpu-rwsem.h>
static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static void torture_percpu_rwsem_exit(void)
{
	percpu_free_rwsem(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_write(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init		= torture_percpu_rwsem_init,
	.exit		= torture_percpu_rwsem_exit,
	.writelock	= torture_percpu_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_percpu_rwsem_up_write,
	.readlock	= torture_percpu_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_percpu_rwsem_up_read,
	.name		= "percpu_rwsem_lock"
};
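
/*
 * A new lock flavor follows the same recipe as the ops vectors above.
 * A minimal sketch (the "foo_lock" names are hypothetical, purely for
 * illustration):
 *
 *	static DEFINE_SPINLOCK(torture_foo_lock);
 *
 *	static int torture_foo_write_lock(int tid __maybe_unused)
 *	{
 *		spin_lock(&torture_foo_lock);
 *		return 0;
 *	}
 *
 *	static void torture_foo_write_unlock(int tid __maybe_unused)
 *	{
 *		spin_unlock(&torture_foo_lock);
 *	}
 *
 *	static struct lock_torture_ops foo_lock_ops = {
 *		.writelock	= torture_foo_write_lock,
 *		.write_delay	= torture_spin_lock_write_delay,
 *		.task_boost	= torture_rt_boost,
 *		.writeunlock	= torture_foo_write_unlock,
 *		.name		= "foo_lock"
 *	};
 *
 * The new &foo_lock_ops then needs an entry in the torture_ops[] array in
 * lock_torture_init() below so that torture_type=foo_lock can select it.
 */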

/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_stress_stats *lwsp = arg;
	int tid = lwsp - cxt.lwsa;
	DEFINE_TORTURE_RANDOM(rand);
	u32 lockset_mask;
	bool skip_main_lock;

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		lockset_mask = torture_random(&rand);
		/*
		 * When using nested_locks, we want to occasionally
		 * skip the main lock so we can avoid always serializing
		 * the lock chains on that central lock. By skipping the
		 * main lock occasionally, we can create different
		 * contention patterns (allowing for multiple disjoint
		 * blocked trees).
		 */
		skip_main_lock = (nested_locks &&
				  !(torture_random(&rand) % 100));

		cxt.cur_ops->task_boost(&rand);
		if (cxt.cur_ops->nested_lock)
			cxt.cur_ops->nested_lock(tid, lockset_mask);

		if (!skip_main_lock) {
			cxt.cur_ops->writelock(tid);
			if (WARN_ON_ONCE(lock_is_write_held))
				lwsp->n_lock_fail++;
			lock_is_write_held = true;
			if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
				lwsp->n_lock_fail++; /* rare, but... */

			lwsp->n_lock_acquired++;
		}
		cxt.cur_ops->write_delay(&rand);
		if (!skip_main_lock) {
			lock_is_write_held = false;
			WRITE_ONCE(last_lock_release, jiffies);
			cxt.cur_ops->writeunlock(tid);
		}
		if (cxt.cur_ops->nested_unlock)
			cxt.cur_ops->nested_unlock(tid, lockset_mask);

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the lock, checking that no writer holds it at the same time.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	int tid = lrsp - cxt.lrsa;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock(tid);
		atomic_inc(&lock_is_read_held);
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		atomic_dec(&lock_is_read_held);
		cxt.cur_ops->readunlock(tid);

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	long cur;
	bool fail = false;
	int i, n_stress;
	long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (data_race(statp[i].n_lock_fail))
			fail = true;
		cur = data_race(statp[i].n_lock_acquired);
		sum += cur;
		if (max < cur)
			max = cur;
		if (min > cur)
			min = cur;
	}
	page += sprintf(page,
			"%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min,
			!onoff_interval && max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}

/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d nested_locks=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]" : "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress,
		 nested_locks, stat_interval, verbose, shuffle_interval,
		 stutter, shutdown_secs, onoff_interval, onoff_holdoff);
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module.
	 * However, cxt.cur_ops->init() may have been invoked, so besides
	 * performing the underlying torture-specific cleanups, cur_ops->exit()
	 * will be invoked if needed.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	cxt.lwsa = NULL;
	kfree(cxt.lrsa);
	cxt.lrsa = NULL;

end:
	if (cxt.init_called) {
		if (cxt.cur_ops->exit)
			cxt.cur_ops->exit();
		cxt.init_called = false;
	}
	torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&raw_spin_lock_ops, &raw_spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress == 0 &&
	    (!cxt.cur_ops->readlock || nreaders_stress == 0)) {
		pr_alert("lock-torture: must run at least one locking thread\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

	if (cxt.cur_ops->init) {
		cxt.cur_ops->init();
		cxt.init_called = true;
	}

#ifdef CONFIG_DEBUG_MUTEXES
	if (str_has_prefix(torture_type, "mutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (str_has_prefix(torture_type, "rtmutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((str_has_prefix(torture_type, "spin")) ||
	    (str_has_prefix(torture_type, "rw_lock")))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */
	if (nwriters_stress) {
		lock_is_write_held = false;
		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
					 sizeof(*cxt.lwsa),
					 GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealwriters_stress; i++) {
			cxt.lwsa[i].n_lock_fail = 0;
			cxt.lwsa[i].n_lock_acquired = 0;
		}
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default, distribute the number of readers and
			 * writers evenly.  We still run the same number of
			 * threads as the writer-only locks default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		if (nreaders_stress) {
			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
						 sizeof(*cxt.lrsa),
						 GFP_KERNEL);
			if (cxt.lrsa == NULL) {
				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
				firsterr = -ENOMEM;
				kfree(cxt.lwsa);
				cxt.lwsa = NULL;
				goto unwind;
			}

			for (i = 0; i < cxt.nrealreaders_stress; i++) {
				cxt.lrsa[i].n_lock_fail = 0;
				cxt.lrsa[i].n_lock_acquired = 0;
			}
		}
	}

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ, NULL);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter, stutter);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	if (nwriters_stress) {
		writer_tasks = kcalloc(cxt.nrealwriters_stress,
				       sizeof(writer_tasks[0]),
				       GFP_KERNEL);
		if (writer_tasks == NULL) {
			TOROUT_ERRSTRING("writer_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/* cap nested_locks to MAX_NESTED_LOCKS */
	if (nested_locks > MAX_NESTED_LOCKS)
		nested_locks = MAX_NESTED_LOCKS;

	if (cxt.cur_ops->readlock) {
		reader_tasks = kcalloc(cxt.nrealreaders_stress,
				       sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers
	 * a slight advantage by creating their kthreads first. This can be
	 * modified for very specific needs, or we could even let the user
	 * choose the policy, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
						  writer_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_LOCK_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);
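
/*
 * An illustrative way to run this test under qemu, assuming the rcutorture
 * scripting in tools/testing/selftests/rcutorture (scenario names such as
 * LOCK01 live under configs/lock there):
 *
 *	tools/testing/selftests/rcutorture/bin/kvm.sh --torture lock \
 *		--configs LOCK01 --duration 10
 */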