locking/lockdep: Simplify mark_held_locks()
authorFrederic Weisbecker <frederic@kernel.org>
Fri, 28 Dec 2018 05:02:00 +0000 (06:02 +0100)
committerIngo Molnar <mingo@kernel.org>
Mon, 21 Jan 2019 10:18:54 +0000 (11:18 +0100)
The enum mark_type appears a bit artificial here. We can directly pass
the base enum lock_usage_bit value to mark_held_locks(). All we need
then is to add the READ offset to that base bit for each read-held
lock. This makes the code clearer.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Link: https://lkml.kernel.org/r/1545973321-24422-2-git-send-email-frederic@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/locking/lockdep.c

index e805fe3bf87f6621da08ef510ae7e3646d84e27e..1dcd8341e35b4cc7d0425650e32592dcf7d6d875 100644 (file)
@@ -2709,35 +2709,28 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this,
        return 1;
 }
 
-enum mark_type {
-#define LOCKDEP_STATE(__STATE) __STATE,
-#include "lockdep_states.h"
-#undef LOCKDEP_STATE
-};
-
 /*
  * Mark all held locks with a usage bit:
  */
 static int
-mark_held_locks(struct task_struct *curr, enum mark_type mark)
+mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit)
 {
-       enum lock_usage_bit usage_bit;
        struct held_lock *hlock;
        int i;
 
        for (i = 0; i < curr->lockdep_depth; i++) {
+               enum lock_usage_bit hlock_bit = base_bit;
                hlock = curr->held_locks + i;
 
-               usage_bit = 2 + (mark << 2); /* ENABLED */
                if (hlock->read)
-                       usage_bit += 1; /* READ */
+                       hlock_bit += 1; /* READ */
 
-               BUG_ON(usage_bit >= LOCK_USAGE_STATES);
+               BUG_ON(hlock_bit >= LOCK_USAGE_STATES);
 
                if (!hlock->check)
                        continue;
 
-               if (!mark_lock(curr, hlock, usage_bit))
+               if (!mark_lock(curr, hlock, hlock_bit))
                        return 0;
        }
 
@@ -2758,7 +2751,7 @@ static void __trace_hardirqs_on_caller(unsigned long ip)
         * We are going to turn hardirqs on, so set the
         * usage bit for all held locks:
         */
-       if (!mark_held_locks(curr, HARDIRQ))
+       if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ))
                return;
        /*
         * If we have softirqs enabled, then set the usage
@@ -2766,7 +2759,7 @@ static void __trace_hardirqs_on_caller(unsigned long ip)
         * this bit from being set before)
         */
        if (curr->softirqs_enabled)
-               if (!mark_held_locks(curr, SOFTIRQ))
+               if (!mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ))
                        return;
 
        curr->hardirq_enable_ip = ip;
@@ -2880,7 +2873,7 @@ void trace_softirqs_on(unsigned long ip)
         * enabled too:
         */
        if (curr->hardirqs_enabled)
-               mark_held_locks(curr, SOFTIRQ);
+               mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
        current->lockdep_recursion = 0;
 }