Merge branch 'rcu/next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck...
author Ingo Molnar <mingo@kernel.org>
Thu, 9 Oct 2014 06:39:25 +0000 (08:39 +0200)
committer Ingo Molnar <mingo@kernel.org>
Thu, 9 Oct 2014 06:39:25 +0000 (08:39 +0200)
Pull additional commits for locktorture, from Paul E. McKenney.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Documentation/locking/locktorture.txt
kernel/locking/locktorture.c
kernel/workqueue.c
tools/testing/selftests/rcutorture/configs/lock/CFLIST
tools/testing/selftests/rcutorture/configs/lock/LOCK04 [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/lock/LOCK04.boot [new file with mode: 0644]

index be715015e0f76db5312ae4c5c4e29bf0f63d83a6..619f2bb136a545f1932609f3b93b898145f6a5cc 100644 (file)
@@ -45,6 +45,11 @@ torture_type   Type of lock to torture. By default, only spinlocks will
                     o "spin_lock_irq": spin_lock_irq() and spin_unlock_irq()
                                        pairs.
 
+                    o "rw_lock": read/write lock() and unlock() rwlock pairs.
+
+                    o "rw_lock_irq": read/write lock_irq() and unlock_irq()
+                                     rwlock pairs.
+
                     o "mutex_lock": mutex_lock() and mutex_unlock() pairs.
 
                     o "rwsem_lock": read/write down() and up() semaphore pairs.
index 540d5dfe11126719a98a73a18dee00192b1b0a08..ec8cce259779061dd863e6a48ff1dcfa1a7a131c 100644 (file)
  * Author: Paul E. McKenney <paulmck@us.ibm.com>
  *     Based on kernel/rcu/torture.c.
  */
-#include <linux/types.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/module.h>
 #include <linux/kthread.h>
-#include <linux/err.h>
 #include <linux/spinlock.h>
+#include <linux/rwlock.h>
 #include <linux/mutex.h>
+#include <linux/rwsem.h>
 #include <linux/smp.h>
 #include <linux/interrupt.h>
 #include <linux/sched.h>
 #include <linux/atomic.h>
-#include <linux/bitops.h>
-#include <linux/completion.h>
 #include <linux/moduleparam.h>
-#include <linux/percpu.h>
-#include <linux/notifier.h>
-#include <linux/reboot.h>
-#include <linux/freezer.h>
-#include <linux/cpu.h>
 #include <linux/delay.h>
-#include <linux/stat.h>
 #include <linux/slab.h>
-#include <linux/trace_clock.h>
-#include <asm/byteorder.h>
 #include <linux/torture.h>
 
 MODULE_LICENSE("GPL");
@@ -204,7 +193,7 @@ static struct lock_torture_ops spin_lock_ops = {
 };
 
 static int torture_spin_lock_write_lock_irq(void)
-__acquires(torture_spinlock_irq)
+__acquires(torture_spinlock)
 {
        unsigned long flags;
 
@@ -229,6 +218,110 @@ static struct lock_torture_ops spin_lock_irq_ops = {
        .name           = "spin_lock_irq"
 };
 
+static DEFINE_RWLOCK(torture_rwlock);
+
+static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
+{
+       write_lock(&torture_rwlock);
+       return 0;
+}
+
+static void torture_rwlock_write_delay(struct torture_random_state *trsp)
+{
+       const unsigned long shortdelay_us = 2;
+       const unsigned long longdelay_ms = 100;
+
+       /* We want a short delay mostly to emulate likely code, and
+        * we want a long delay occasionally to force massive contention.
+        */
+       if (!(torture_random(trsp) %
+             (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
+               mdelay(longdelay_ms);
+       else
+               udelay(shortdelay_us);
+}
+
+static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
+{
+       write_unlock(&torture_rwlock);
+}
+
+static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
+{
+       read_lock(&torture_rwlock);
+       return 0;
+}
+
+static void torture_rwlock_read_delay(struct torture_random_state *trsp)
+{
+       const unsigned long shortdelay_us = 10;
+       const unsigned long longdelay_ms = 100;
+
+       /* We want a short delay mostly to emulate likely code, and
+        * we want a long delay occasionally to force massive contention.
+        */
+       if (!(torture_random(trsp) %
+             (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
+               mdelay(longdelay_ms);
+       else
+               udelay(shortdelay_us);
+}
+
+static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
+{
+       read_unlock(&torture_rwlock);
+}
+
+static struct lock_torture_ops rw_lock_ops = {
+       .writelock      = torture_rwlock_write_lock,
+       .write_delay    = torture_rwlock_write_delay,
+       .writeunlock    = torture_rwlock_write_unlock,
+       .readlock       = torture_rwlock_read_lock,
+       .read_delay     = torture_rwlock_read_delay,
+       .readunlock     = torture_rwlock_read_unlock,
+       .name           = "rw_lock"
+};
+
+static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
+{
+       unsigned long flags;
+
+       write_lock_irqsave(&torture_rwlock, flags);
+       cxt.cur_ops->flags = flags;
+       return 0;
+}
+
+static void torture_rwlock_write_unlock_irq(void)
+__releases(torture_rwlock)
+{
+       write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
+}
+
+static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
+{
+       unsigned long flags;
+
+       read_lock_irqsave(&torture_rwlock, flags);
+       cxt.cur_ops->flags = flags;
+       return 0;
+}
+
+static void torture_rwlock_read_unlock_irq(void)
+__releases(torture_rwlock)
+{
+       read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
+}
+
+static struct lock_torture_ops rw_lock_irq_ops = {
+       .writelock      = torture_rwlock_write_lock_irq,
+       .write_delay    = torture_rwlock_write_delay,
+       .writeunlock    = torture_rwlock_write_unlock_irq,
+       .readlock       = torture_rwlock_read_lock_irq,
+       .read_delay     = torture_rwlock_read_delay,
+       .readunlock     = torture_rwlock_read_unlock_irq,
+       .name           = "rw_lock_irq"
+};
+
 static DEFINE_MUTEX(torture_mutex);
 
 static int torture_mutex_lock(void) __acquires(torture_mutex)
@@ -348,14 +441,19 @@ static int lock_torture_writer(void *arg)
        do {
                if ((torture_random(&rand) & 0xfffff) == 0)
                        schedule_timeout_uninterruptible(1);
+
                cxt.cur_ops->writelock();
                if (WARN_ON_ONCE(lock_is_write_held))
                        lwsp->n_lock_fail++;
                lock_is_write_held = 1;
+               if (WARN_ON_ONCE(lock_is_read_held))
+                       lwsp->n_lock_fail++; /* rare, but... */
+
                lwsp->n_lock_acquired++;
                cxt.cur_ops->write_delay(&rand);
                lock_is_write_held = 0;
                cxt.cur_ops->writeunlock();
+
                stutter_wait("lock_torture_writer");
        } while (!torture_must_stop());
        torture_kthread_stopping("lock_torture_writer");
@@ -377,12 +475,17 @@ static int lock_torture_reader(void *arg)
        do {
                if ((torture_random(&rand) & 0xfffff) == 0)
                        schedule_timeout_uninterruptible(1);
+
                cxt.cur_ops->readlock();
                lock_is_read_held = 1;
+               if (WARN_ON_ONCE(lock_is_write_held))
+                       lrsp->n_lock_fail++; /* rare, but... */
+
                lrsp->n_lock_acquired++;
                cxt.cur_ops->read_delay(&rand);
                lock_is_read_held = 0;
                cxt.cur_ops->readunlock();
+
                stutter_wait("lock_torture_reader");
        } while (!torture_must_stop());
        torture_kthread_stopping("lock_torture_reader");
@@ -535,8 +638,11 @@ static int __init lock_torture_init(void)
        int i, j;
        int firsterr = 0;
        static struct lock_torture_ops *torture_ops[] = {
-               &lock_busted_ops, &spin_lock_ops, &spin_lock_irq_ops,
-               &mutex_lock_ops, &rwsem_lock_ops,
+               &lock_busted_ops,
+               &spin_lock_ops, &spin_lock_irq_ops,
+               &rw_lock_ops, &rw_lock_irq_ops,
+               &mutex_lock_ops,
+               &rwsem_lock_ops,
        };
 
        if (!torture_init_begin(torture_type, verbose, &torture_runnable))
@@ -571,7 +677,8 @@ static int __init lock_torture_init(void)
                cxt.debug_lock = true;
 #endif
 #ifdef CONFIG_DEBUG_SPINLOCK
-       if (strncmp(torture_type, "spin", 4) == 0)
+       if ((strncmp(torture_type, "spin", 4) == 0) ||
+           (strncmp(torture_type, "rw_lock", 7) == 0))
                cxt.debug_lock = true;
 #endif
 
index 5dbe22aa3efd48f92b39e6ccd25b63880dc1015b..09b685daee3d8c18e2004a2333583e1c7e00f706 100644 (file)
@@ -2043,9 +2043,10 @@ __acquires(&pool->lock)
         * kernels, where a requeueing work item waiting for something to
         * happen could deadlock with stop_machine as such work item could
         * indefinitely requeue itself while all other CPUs are trapped in
-        * stop_machine.
+        * stop_machine. At the same time, report a quiescent RCU state so
+        * the same condition doesn't freeze RCU.
         */
-       cond_resched();
+       cond_resched_rcu_qs();
 
        spin_lock_irq(&pool->lock);
 
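Editorial note, for illustration only (not part of the patch): the self-requeueing pattern the comment refers to might look like the hypothetical work item below; on a !CONFIG_PREEMPT kernel such a loop could previously keep a CPU from ever passing through an RCU quiescent state:

    #include <linux/workqueue.h>

    static void requeue_fn(struct work_struct *work);
    static DECLARE_WORK(requeue_work, requeue_fn);

    /* Hypothetical work item that immediately queues itself again. */
    static void requeue_fn(struct work_struct *work)
    {
            /* ... do a small amount of work ... */
            schedule_work(&requeue_work);
    }
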
index 6108137da770296dc5e9acd7a6ac412f9820cf76..6910b73707617cc2ddf855be82e488420a35bed1 100644 (file)
@@ -1,3 +1,4 @@
 LOCK01
 LOCK02
 LOCK03
+LOCK04
\ No newline at end of file
diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK04 b/tools/testing/selftests/rcutorture/configs/lock/LOCK04
new file mode 100644 (file)
index 0000000..1d1da14
--- /dev/null
@@ -0,0 +1,6 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK04.boot b/tools/testing/selftests/rcutorture/configs/lock/LOCK04.boot
new file mode 100644 (file)
index 0000000..48c04fe
--- /dev/null
@@ -0,0 +1 @@
+locktorture.torture_type=rw_lock
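
Editorial note: the new LOCK04 scenario would presumably be run through the usual rcutorture scripting, e.g. (assuming the standard kvm.sh options):

    tools/testing/selftests/rcutorture/bin/kvm.sh --torture lock --configs LOCK04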