locking/local_lock: Add local nested BH locking infrastructure.
author	Sebastian Andrzej Siewior <bigeasy@linutronix.de>
	Thu, 20 Jun 2024 13:21:52 +0000 (15:21 +0200)
committer	Jakub Kicinski <kuba@kernel.org>
	Mon, 24 Jun 2024 23:41:22 +0000 (16:41 -0700)
Add local_lock_nested_bh() locking. It is based on local_lock_t and the
naming follows the preempt_disable_nested() example.

For !PREEMPT_RT + !LOCKDEP it is a per-CPU annotation for locking
assumptions based on local_bh_disable(). The macro is optimized away
during compilation.
For !PREEMPT_RT + LOCKDEP, local_lock_nested_bh() reduces to the usual
lock-acquire plus lockdep_assert_in_softirq(), ensuring that BH is
disabled.

For PREEMPT_RT local_lock_nested_bh() acquires the specified per-CPU
lock. It does not disable CPU migration because it relies on
local_bh_disable() disabling CPU migration.
With LOCKDEP it performs the usual lockdep checks as with !PREEMPT_RT.
Due to include hell, the softirq check has been moved to spinlock.c.

The intention is to use this locking in places where locking of a per-CPU
variable relies on BH being disabled. Instead of treating disabled
bottom halves as a big per-CPU lock, PREEMPT_RT can use this to reduce
the locking scope to what actually needs protecting.
A side effect is that it also documents the protection scope of the
per-CPU variables.
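
As a sketch of the intended usage pattern (hypothetical names, not part
of this patch): a per-CPU variable that is currently serialized only by
local_bh_disable() gets a nested-BH lock marking the actual critical
section:

    struct bh_stats {
            local_lock_t lock;      /* protects 'count' in BH context */
            unsigned long count;
    };
    static DEFINE_PER_CPU(struct bh_stats, bh_stats) = {
            .lock = INIT_LOCAL_LOCK(lock),
    };

    static void bh_stats_inc(void)
    {
            /* caller already runs with BH disabled, e.g. in softirq */
            local_lock_nested_bh(&bh_stats.lock);
            this_cpu_inc(bh_stats.count);
            local_unlock_nested_bh(&bh_stats.lock);
    }

On !PREEMPT_RT this compiles away (or becomes a lockdep annotation); on
PREEMPT_RT it takes the per-CPU spinlock for just this scope.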

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://patch.msgid.link/20240620132727.660738-3-bigeasy@linutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/linux/local_lock.h
include/linux/local_lock_internal.h
include/linux/lockdep.h
kernel/locking/spinlock.c

index 82366a37f4474bbed690479ad2a9e18a3a92791e..091dc0b6bdfb9f4721f94d19828a38fbfa59346c 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -62,4 +62,14 @@ DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu,
                    local_unlock_irqrestore(_T->lock, _T->flags),
                    unsigned long flags)
 
+#define local_lock_nested_bh(_lock)                            \
+       __local_lock_nested_bh(_lock)
+
+#define local_unlock_nested_bh(_lock)                          \
+       __local_unlock_nested_bh(_lock)
+
+DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*,
+            local_lock_nested_bh(_T),
+            local_unlock_nested_bh(_T))
+
 #endif
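
The DEFINE_GUARD() wrapper added above also permits a scoped form; a
minimal sketch, reusing the hypothetical bh_stats example from the
commit message:

    static void bh_stats_inc_scoped(void)
    {
            /* unlocked automatically when the scope is left */
            guard(local_lock_nested_bh)(&bh_stats.lock);
            this_cpu_inc(bh_stats.count);
    }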
index 975e33b793a774ecc72580c46c0123da725273f1..8dd71fbbb6d2b6748969438c4642f7d970834871 100644 (file)
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -62,6 +62,17 @@ do {                                                         \
        local_lock_debug_init(lock);                            \
 } while (0)
 
+#define __spinlock_nested_bh_init(lock)                                \
+do {                                                           \
+       static struct lock_class_key __key;                     \
+                                                               \
+       debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
+       lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,  \
+                             0, LD_WAIT_CONFIG, LD_WAIT_INV,   \
+                             LD_LOCK_NORMAL);                  \
+       local_lock_debug_init(lock);                            \
+} while (0)
+
 #define __local_lock(lock)                                     \
        do {                                                    \
                preempt_disable();                              \
@@ -98,6 +109,15 @@ do {                                                                \
                local_irq_restore(flags);                       \
        } while (0)
 
+#define __local_lock_nested_bh(lock)                           \
+       do {                                                    \
+               lockdep_assert_in_softirq();                    \
+               local_lock_acquire(this_cpu_ptr(lock)); \
+       } while (0)
+
+#define __local_unlock_nested_bh(lock)                         \
+       local_lock_release(this_cpu_ptr(lock))
+
 #else /* !CONFIG_PREEMPT_RT */
 
 /*
@@ -138,4 +158,15 @@ typedef spinlock_t local_lock_t;
 
 #define __local_unlock_irqrestore(lock, flags) __local_unlock(lock)
 
+#define __local_lock_nested_bh(lock)                           \
+do {                                                           \
+       lockdep_assert_in_softirq_func();                       \
+       spin_lock(this_cpu_ptr(lock));                          \
+} while (0)
+
+#define __local_unlock_nested_bh(lock)                         \
+do {                                                           \
+       spin_unlock(this_cpu_ptr((lock)));                      \
+} while (0)
+
 #endif /* CONFIG_PREEMPT_RT */
index 08b0d1d9d78b76355e76c5751a094b197a01b096..3f5a551579cc9aa043a1347f3a1b28e0ab1d6985 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -600,6 +600,8 @@ do {                                                                        \
                     (!in_softirq() || in_irq() || in_nmi()));          \
 } while (0)
 
+extern void lockdep_assert_in_softirq_func(void);
+
 #else
 # define might_lock(lock) do { } while (0)
 # define might_lock_read(lock) do { } while (0)
@@ -613,6 +615,7 @@ do {                                                                        \
 # define lockdep_assert_preemption_enabled() do { } while (0)
 # define lockdep_assert_preemption_disabled() do { } while (0)
 # define lockdep_assert_in_softirq() do { } while (0)
+# define lockdep_assert_in_softirq_func() do { } while (0)
 #endif
 
 #ifdef CONFIG_PROVE_RAW_LOCK_NESTING
index 8475a0794f8c5ad26b90b9109b7c2880d7629da4..438c6086d540ecc4cac819091c03ea9ec5f829da 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -413,3 +413,11 @@ notrace int in_lock_functions(unsigned long addr)
        && addr < (unsigned long)__lock_text_end;
 }
 EXPORT_SYMBOL(in_lock_functions);
+
+#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_PREEMPT_RT)
+void notrace lockdep_assert_in_softirq_func(void)
+{
+       lockdep_assert_in_softirq();
+}
+EXPORT_SYMBOL(lockdep_assert_in_softirq_func);
+#endif