// SPDX-License-Identifier: GPL-2.0+
/*
 * Module-based torture test facility for locking
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */
#define pr_fmt(fmt) fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/reboot.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
torture_param(int, acq_writer_lim, 0, "Write-acquisition time limit (jiffies).");
torture_param(int, call_rcu_chains, 0, "Self-propagate call_rcu() chains during test (0=disable).");
torture_param(int, long_hold, 100, "Do occasional long hold of lock (ms), 0=disable");
torture_param(int, nested_locks, 0, "Number of nested locks (max = 8)");
torture_param(int, nreaders_stress, -1, "Number of read-locking stress-test threads");
torture_param(int, nwriters_stress, -1, "Number of write-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (s), 0=disable");
torture_param(int, rt_boost, 2,
	      "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens.");
torture_param(int, shuffle_interval, 3, "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_fifo, 0, "Run writers at sched_set_fifo() priority");

/* Going much higher trips "BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!" errors */
#define MAX_NESTED_LOCKS 8
static char *torture_type = IS_ENABLED(CONFIG_PREEMPT_RT) ? "raw_spin_lock" : "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
static cpumask_var_t bind_readers; // Bind the readers to the specified set of CPUs.
static cpumask_var_t bind_writers; // Bind the writers to the specified set of CPUs.

// Parse a cpumask kernel parameter.  If there are more users later on,
// this might need to go to a more central location.
static int param_set_cpumask(const char *val, const struct kernel_param *kp)
{
	cpumask_var_t *cm_bind = kp->arg;
	int ret;
	char *s;

	if (!alloc_cpumask_var(cm_bind, GFP_KERNEL)) {
		s = "Out of memory";
		ret = -ENOMEM;
		goto out_err;
	}
	ret = cpulist_parse(val, *cm_bind);
	if (!ret)
		return ret;
	s = "Cannot parse";
out_err:
	pr_warn("%s: %s, all CPUs set\n", kp->name, s);
	cpumask_setall(*cm_bind);
	return ret;
}
// Output a cpumask kernel parameter.
static int param_get_cpumask(char *buffer, const struct kernel_param *kp)
{
	cpumask_var_t *cm_bind = kp->arg;

	return sprintf(buffer, "%*pbl", cpumask_pr_args(*cm_bind));
}

static bool cpumask_nonempty(cpumask_var_t mask)
{
	return cpumask_available(mask) && !cpumask_empty(mask);
}
static const struct kernel_param_ops lt_bind_ops = {
	.set = param_set_cpumask,
	.get = param_get_cpumask,
};

module_param_cb(bind_readers, &lt_bind_ops, &bind_readers, 0644);
module_param_cb(bind_writers, &lt_bind_ops, &bind_writers, 0644);
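
/*
 * Example (hypothetical values): bind readers to CPUs 0-3 and writers
 * to CPU 7, using the same cpulist syntax that cpulist_parse() accepts:
 *
 *	modprobe locktorture bind_readers=0-3 bind_writers=7
 */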
long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static atomic_t lock_is_read_held;
static unsigned long last_lock_release;
struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

struct call_rcu_chain {
	struct rcu_head crc_rh;
	bool crc_stop;
};
struct call_rcu_chain *call_rcu_chain_list;

/* Forward reference. */
static void lock_torture_cleanup(void);
/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	void (*exit)(void);
	int (*nested_lock)(int tid, u32 lockset);
	int (*writelock)(int tid);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(int tid);
	void (*nested_unlock)(int tid, u32 lockset);
	int (*readlock)(int tid);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(int tid);

	unsigned long flags; /* for irq spinlocks */
	const char *name;
};
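
/*
 * A new lock type is tortured by defining a lock_torture_ops instance
 * like the ones below and adding it to the torture_ops[] array in
 * lock_torture_init().  The writelock, write_delay, task_boost,
 * writeunlock, and name fields are used unconditionally; the remaining
 * hooks may be left NULL for write-only lock types.
 */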
struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	bool init_called;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};

static struct lock_torture_cxt cxt = { 0, 0, false, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};
/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(int tid __maybe_unused)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention.  */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold * 10);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_lock_busted_write_unlock(int tid __maybe_unused)
{
	/* BUGGY, do not use in real life!!! */
}
static void __torture_rt_boost(struct torture_random_state *trsp)
{
	const unsigned int factor = rt_boost_factor;

	if (!rt_task(current)) {
		/*
		 * Boost priority once every nrealwriters_stress *
		 * rt_boost_factor operations on average. When the task
		 * tries to take the lock, the rtmutex will account for
		 * the new priority, and do any corresponding pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			sched_set_fifo(current);
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another
		 * nrealwriters_stress * rt_boost_factor * 2 operations
		 * on average, then be restored to its original prio,
		 * and so forth.
		 *
		 * When @trsp is nil, we want to force-reset the task for
		 * stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			sched_set_normal(current, 0);
		} else /* common case, do nothing */
			return;
	}
}
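
/*
 * Worked example of the above (hypothetical values): with the default
 * rt_boost_factor of 50 and nrealwriters_stress of 4, a non-RT writer
 * is boosted on average once per 50 * 4 = 200 operations, and a boosted
 * writer is deboosted on average once per 50 * 4 * 2 = 400 operations.
 */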
static void torture_rt_boost(struct torture_random_state *trsp)
{
	if (rt_boost != 2)
		return;

	__torture_rt_boost(trsp);
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_lock_busted_write_unlock,
	.name		= "lock_busted"
};
static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}
static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	unsigned long j;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold))) {
		j = jiffies;
		mdelay(long_hold);
		pr_alert("%s: delay = %lu jiffies.\n", __func__, jiffies - j);
	}
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 200 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_spin_lock_write_unlock,
	.name		= "spin_lock"
};
static int torture_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}
static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.name		= "spin_lock_irq"
};
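
/*
 * Stashing the irqsave flags in the shared cxt.cur_ops->flags field is
 * safe only because the flags are written and read while the lock is
 * held, so at most one writer touches the field at any given time.
 */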
static DEFINE_RAW_SPINLOCK(torture_raw_spinlock);

static int torture_raw_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
	raw_spin_lock(&torture_raw_spinlock);
	return 0;
}

static void torture_raw_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
	raw_spin_unlock(&torture_raw_spinlock);
}

static struct lock_torture_ops raw_spin_lock_ops = {
	.writelock	= torture_raw_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_raw_spin_lock_write_unlock,
	.name		= "raw_spin_lock"
};
static int torture_raw_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&torture_raw_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_raw_spin_lock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
	raw_spin_unlock_irqrestore(&torture_raw_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops raw_spin_lock_irq_ops = {
	.writelock	= torture_raw_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_raw_spin_lock_write_unlock_irq,
	.name		= "raw_spin_lock_irq"
};
static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealreaders_stress * 2000 * long_hold)))
		mdelay(long_hold);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}
static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock	= torture_rwlock_read_lock,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock,
	.name		= "rw_lock"
};
static int torture_rwlock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}
static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock	= torture_rwlock_read_lock_irq,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock_irq,
	.name		= "rw_lock_irq"
};
static DEFINE_MUTEX(torture_mutex);
static struct mutex torture_nested_mutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_mutex_keys[MAX_NESTED_LOCKS];

static void torture_mutex_init(void)
{
	int i;

	for (i = 0; i < MAX_NESTED_LOCKS; i++)
		__mutex_init(&torture_nested_mutexes[i], __func__,
			     &nested_mutex_keys[i]);
}

static int torture_mutex_nested_lock(int tid __maybe_unused,
				     u32 lockset)
{
	int i;

	for (i = 0; i < nested_locks; i++)
		if (lockset & (1 << i))
			mutex_lock(&torture_nested_mutexes[i]);
	return 0;
}
static int torture_mutex_lock(int tid __maybe_unused)
__acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention.  */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold * 5);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_mutex_unlock(int tid __maybe_unused)
__releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}
static void torture_mutex_nested_unlock(int tid __maybe_unused,
					u32 lockset)
{
	int i;

	for (i = nested_locks - 1; i >= 0; i--)
		if (lockset & (1 << i))
			mutex_unlock(&torture_nested_mutexes[i]);
}
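
/*
 * Worked example of the lockset mask (hypothetical values): with
 * nested_locks=3 and lockset=0x5, torture_mutex_nested_lock() acquires
 * nested mutexes 0 and 2 in ascending index order, and
 * torture_mutex_nested_unlock() releases them in descending order,
 * preserving a single consistent locking order across all writers.
 */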
static struct lock_torture_ops mutex_lock_ops = {
	.init		= torture_mutex_init,
	.nested_lock	= torture_mutex_nested_lock,
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_mutex_unlock,
	.nested_unlock	= torture_mutex_nested_unlock,
	.name		= "mutex_lock"
};
#include <linux/ww_mutex.h>

/*
 * The torture ww_mutexes should belong to the same lock class as
 * torture_ww_class to avoid lockdep problems. The ww_mutex_init()
 * function is called for initialization to ensure that.
 */
static DEFINE_WD_CLASS(torture_ww_class);
static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2;
static struct ww_acquire_ctx *ww_acquire_ctxs;
static void torture_ww_mutex_init(void)
{
	ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class);

	ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress,
					sizeof(*ww_acquire_ctxs),
					GFP_KERNEL);
	if (!ww_acquire_ctxs)
		VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory");
}

static void torture_ww_mutex_exit(void)
{
	kfree(ww_acquire_ctxs);
}
static int torture_ww_mutex_lock(int tid)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		ww_mutex_lock_slow(ll->lock, ctx);
		list_move(&ll->link, &list);
	}

	return 0;
}
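
/*
 * The loop above exercises the wound/wait protocol: on -EDEADLK, every
 * lock acquired so far is dropped, the contended lock is then taken
 * with ww_mutex_lock_slow(), and moving it to the head of the list
 * makes the retry pass acquire it first.
 */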
static void torture_ww_mutex_unlock(int tid)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
	ww_acquire_fini(ctx);
}
static struct lock_torture_ops ww_mutex_lock_ops = {
	.init		= torture_ww_mutex_init,
	.exit		= torture_ww_mutex_exit,
	.writelock	= torture_ww_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_ww_mutex_unlock,
	.name		= "ww_mutex_lock"
};
#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);
static struct rt_mutex torture_nested_rtmutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_rtmutex_keys[MAX_NESTED_LOCKS];

static void torture_rtmutex_init(void)
{
	int i;

	for (i = 0; i < MAX_NESTED_LOCKS; i++)
		__rt_mutex_init(&torture_nested_rtmutexes[i], __func__,
				&nested_rtmutex_keys[i]);
}

static int torture_rtmutex_nested_lock(int tid __maybe_unused,
				       u32 lockset)
{
	int i;

	for (i = 0; i < nested_locks; i++)
		if (lockset & (1 << i))
			rt_mutex_lock(&torture_nested_rtmutexes[i]);
	return 0;
}
static int torture_rtmutex_lock(int tid __maybe_unused)
__acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 200 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_rtmutex_unlock(int tid __maybe_unused)
__releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static void torture_rt_boost_rtmutex(struct torture_random_state *trsp)
{
	if (!rt_boost)
		return;

	__torture_rt_boost(trsp);
}

static void torture_rtmutex_nested_unlock(int tid __maybe_unused,
					  u32 lockset)
{
	int i;

	for (i = nested_locks - 1; i >= 0; i--)
		if (lockset & (1 << i))
			rt_mutex_unlock(&torture_nested_rtmutexes[i]);
}
static struct lock_torture_ops rtmutex_lock_ops = {
	.init		= torture_rtmutex_init,
	.nested_lock	= torture_rtmutex_nested_lock,
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
	.task_boost	= torture_rt_boost_rtmutex,
	.writeunlock	= torture_rtmutex_unlock,
	.nested_unlock	= torture_rtmutex_nested_unlock,
	.name		= "rtmutex_lock"
};
#endif
static DECLARE_RWSEM(torture_rwsem);

static int torture_rwsem_down_write(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention.  */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold * 10);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rwsem_up_write(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention.  */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealreaders_stress * 2000 * long_hold)))
		mdelay(long_hold * 2);
	else
		mdelay(long_hold / 2);
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rwsem_up_read(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}
static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_rwsem_up_write,
	.readlock	= torture_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_rwsem_up_read,
	.name		= "rwsem_lock"
};
#include <linux/percpu-rwsem.h>
static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static void torture_percpu_rwsem_exit(void)
{
	percpu_free_rwsem(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_write(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}
static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init		= torture_percpu_rwsem_init,
	.exit		= torture_percpu_rwsem_exit,
	.writelock	= torture_percpu_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_percpu_rwsem_up_write,
	.readlock	= torture_percpu_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_percpu_rwsem_up_read,
	.name		= "percpu_rwsem_lock"
};
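
/*
 * Note that percpu_down_write() must wait for all readers, using an RCU
 * grace period on the slow path, so write acquisitions in this test are
 * expected to be far more expensive than read acquisitions.
 */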
/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	unsigned long j;
	unsigned long j1;
	u32 lockset_mask;
	struct lock_stress_stats *lwsp = arg;
	DEFINE_TORTURE_RANDOM(rand);
	bool skip_main_lock;
	int tid = lwsp - cxt.lwsa;

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	if (!rt_task(current))
		set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		lockset_mask = torture_random(&rand);
		/*
		 * When using nested_locks, we want to occasionally
		 * skip the main lock so we can avoid always serializing
		 * the lock chains on that central lock. By skipping the
		 * main lock occasionally, we can create different
		 * contention patterns (allowing for multiple disjoint
		 * blocked trees).
		 */
		skip_main_lock = (nested_locks &&
				  !(torture_random(&rand) % 100));

		cxt.cur_ops->task_boost(&rand);
		if (cxt.cur_ops->nested_lock)
			cxt.cur_ops->nested_lock(tid, lockset_mask);

		if (!skip_main_lock) {
			if (acq_writer_lim > 0)
				j = jiffies;
			cxt.cur_ops->writelock(tid);
			if (WARN_ON_ONCE(lock_is_write_held))
				lwsp->n_lock_fail++;
			lock_is_write_held = true;
			if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
				lwsp->n_lock_fail++; /* rare, but... */
			if (acq_writer_lim > 0) {
				j1 = jiffies;
				WARN_ONCE(time_after(j1, j + acq_writer_lim),
					  "%s: Lock acquisition took %lu jiffies.\n",
					  __func__, j1 - j);
			}
			lwsp->n_lock_acquired++;

			cxt.cur_ops->write_delay(&rand);

			lock_is_write_held = false;
			WRITE_ONCE(last_lock_release, jiffies);
			cxt.cur_ops->writeunlock(tid);
		}
		if (cxt.cur_ops->nested_unlock)
			cxt.cur_ops->nested_unlock(tid, lockset_mask);

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}
/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the lock, checking that it is never concurrently write-held.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	int tid = lrsp - cxt.lrsa;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock(tid);
		atomic_inc(&lock_is_read_held);
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		atomic_dec(&lock_is_read_held);
		cxt.cur_ops->readunlock(tid);

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}
/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	long cur;
	bool fail = false;
	int i, n_stress;
	long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (data_race(statp[i].n_lock_fail))
			fail = true;
		cur = data_race(statp[i].n_lock_acquired);
		sum += cur;
		if (max < cur)
			max = cur;
		if (min > cur)
			min = cur;
	}
	page += sprintf(page,
			"%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min,
			!onoff_interval && max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}
/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}
/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}
static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	static cpumask_t cpumask_all;
	cpumask_t *rcmp = cpumask_nonempty(bind_readers) ? bind_readers : &cpumask_all;
	cpumask_t *wcmp = cpumask_nonempty(bind_writers) ? bind_writers : &cpumask_all;

	cpumask_setall(&cpumask_all);
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: acq_writer_lim=%d bind_readers=%*pbl bind_writers=%*pbl call_rcu_chains=%d long_hold=%d nested_locks=%d nreaders_stress=%d nwriters_stress=%d onoff_holdoff=%d onoff_interval=%d rt_boost=%d rt_boost_factor=%d shuffle_interval=%d shutdown_secs=%d stat_interval=%d stutter=%d verbose=%d writer_fifo=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 acq_writer_lim, cpumask_pr_args(rcmp), cpumask_pr_args(wcmp),
		 call_rcu_chains, long_hold, nested_locks, cxt.nrealreaders_stress,
		 cxt.nrealwriters_stress, onoff_holdoff, onoff_interval, rt_boost,
		 rt_boost_factor, shuffle_interval, shutdown_secs, stat_interval, stutter,
		 verbose, writer_fifo);
}
// If requested, maintain call_rcu() chains to keep a grace period always
// in flight.  These increase the probability of getting an RCU CPU stall
// warning and associated diagnostics when a locking primitive stalls.

static void call_rcu_chain_cb(struct rcu_head *rhp)
{
	struct call_rcu_chain *crcp = container_of(rhp, struct call_rcu_chain, crc_rh);

	if (!smp_load_acquire(&crcp->crc_stop)) {
		(void)start_poll_synchronize_rcu(); // Start one grace period...
		call_rcu(&crcp->crc_rh, call_rcu_chain_cb); // ... and later start another.
	}
}
// Start the requested number of call_rcu() chains.
static int call_rcu_chain_init(void)
{
	int i;

	if (call_rcu_chains <= 0)
		return 0;
	call_rcu_chain_list = kcalloc(call_rcu_chains, sizeof(*call_rcu_chain_list), GFP_KERNEL);
	if (!call_rcu_chain_list)
		return -ENOMEM;
	for (i = 0; i < call_rcu_chains; i++) {
		call_rcu_chain_list[i].crc_stop = false;
		call_rcu(&call_rcu_chain_list[i].crc_rh, call_rcu_chain_cb);
	}
	return 0;
}
// Stop all of the call_rcu() chains.
static void call_rcu_chain_cleanup(void)
{
	int i;

	if (!call_rcu_chain_list)
		return;
	for (i = 0; i < call_rcu_chains; i++)
		smp_store_release(&call_rcu_chain_list[i].crc_stop, true);
	rcu_barrier(); // Wait for any still-queued callbacks to be invoked.
	kfree(call_rcu_chain_list);
	call_rcu_chain_list = NULL;
}
static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module.
	 * However, cxt->cur_ops.init() may have been invoked, so besides
	 * performing the underlying torture-specific cleanups,
	 * cur_ops.exit() will be invoked if needed.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer, writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	cxt.lwsa = NULL;
	kfree(cxt.lrsa);
	cxt.lrsa = NULL;

	call_rcu_chain_cleanup();

end:
	if (cxt.init_called) {
		if (cxt.cur_ops->exit)
			cxt.cur_ops->exit();
		cxt.init_called = false;
	}
	torture_cleanup_end();
}
static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&raw_spin_lock_ops, &raw_spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}
	if (nwriters_stress == 0 &&
	    (!cxt.cur_ops->readlock || nreaders_stress == 0)) {
		pr_alert("lock-torture: must run at least one locking thread\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

	if (cxt.cur_ops->init) {
		cxt.cur_ops->init();
		cxt.init_called = true;
	}
#ifdef CONFIG_DEBUG_MUTEXES
	if (str_has_prefix(torture_type, "mutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (str_has_prefix(torture_type, "rtmutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((str_has_prefix(torture_type, "spin")) ||
	    (str_has_prefix(torture_type, "rw_lock")))
		cxt.debug_lock = true;
#endif
	/* Initialize the statistics so that each run gets its own numbers. */
	if (nwriters_stress) {
		lock_is_write_held = false;
		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
					 sizeof(*cxt.lwsa),
					 GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealwriters_stress; i++) {
			cxt.lwsa[i].n_lock_fail = 0;
			cxt.lwsa[i].n_lock_acquired = 0;
		}
	}
	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default distribute evenly the number of
			 * readers and writers. We still run the same number
			 * of threads as the writer-only locks default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		if (nreaders_stress) {
			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
						 sizeof(*cxt.lrsa),
						 GFP_KERNEL);
			if (cxt.lrsa == NULL) {
				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
				firsterr = -ENOMEM;
				kfree(cxt.lwsa);
				cxt.lwsa = NULL;
				goto unwind;
			}

			for (i = 0; i < cxt.nrealreaders_stress; i++) {
				cxt.lrsa[i].n_lock_fail = 0;
				cxt.lrsa[i].n_lock_acquired = 0;
			}
		}
	}
	firsterr = call_rcu_chain_init();
	if (torture_init_error(firsterr))
		goto unwind;

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");
	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ, NULL);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter, stutter);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (nwriters_stress) {
		writer_tasks = kcalloc(cxt.nrealwriters_stress,
				       sizeof(writer_tasks[0]),
				       GFP_KERNEL);
		if (writer_tasks == NULL) {
			TOROUT_ERRSTRING("writer_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/* cap nested_locks to MAX_NESTED_LOCKS */
	if (nested_locks > MAX_NESTED_LOCKS)
		nested_locks = MAX_NESTED_LOCKS;

	if (cxt.cur_ops->readlock) {
		reader_tasks = kcalloc(cxt.nrealreaders_stress,
				       sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers a
	 * slight advantage, by creating their kthreads first. This can be
	 * modified for very specific needs, or we could even let the user
	 * choose the policy, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread_cb(lock_torture_writer, &cxt.lwsa[i],
						     writer_tasks[i],
						     writer_fifo ? sched_set_fifo : NULL);
		if (torture_init_error(firsterr))
			goto unwind;
		if (cpumask_nonempty(bind_writers))
			torture_sched_setaffinity(writer_tasks[i]->pid, bind_writers);

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (torture_init_error(firsterr))
			goto unwind;
		if (cpumask_nonempty(bind_readers))
			torture_sched_setaffinity(reader_tasks[j]->pid, bind_readers);
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_LOCK_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);