lockdep: spin_lock_nest_lock()
author Peter Zijlstra <a.p.zijlstra@chello.nl>
Mon, 11 Aug 2008 07:30:24 +0000 (09:30 +0200)
committer Ingo Molnar <mingo@elte.hu>
Mon, 11 Aug 2008 07:30:24 +0000 (09:30 +0200)
Expose the new lock-protection lock, spin_lock_nest_lock().

This can be used to annotate places where we take multiple locks of the
same class and avoid deadlocks by always taking another (top-level) lock
first.

NOTE: we're still bound to the MAX_LOCK_DEPTH (48) limit.
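
As an illustration, a minimal sketch of the intended usage (hypothetical
structures and names, not part of this patch): the child locks all share
one lock class, but because the top-level parent lock is held first,
lockdep accepts the nesting instead of reporting a possible deadlock.

	/* hypothetical example: one parent lock protecting same-class child locks */
	struct child {
		spinlock_t lock;
	};

	struct parent {
		spinlock_t lock;		/* top-level protection lock */
		struct child kids[8];
	};

	static void lock_all_children(struct parent *p)
	{
		int i;

		spin_lock(&p->lock);		/* take the nest lock first */
		for (i = 0; i < 8; i++)
			spin_lock_nest_lock(&p->kids[i].lock, &p->lock);

		/* ... operate on all children under their locks ... */

		for (i = 7; i >= 0; i--)
			spin_unlock(&p->kids[i].lock);
		spin_unlock(&p->lock);
	}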

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/linux/lockdep.h
include/linux/spinlock.h
include/linux/spinlock_api_smp.h
kernel/spinlock.c

index 93a8cc02a033cd36c4f1c92e66852a8e2e924bf8..4452c04a7f6e419ec6b0f887ec1770d5a7fbb24a 100644 (file)
@@ -410,8 +410,10 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
 #  define spin_acquire(l, s, t, i)             lock_acquire(l, s, t, 0, 2, NULL, i)
+#  define spin_acquire_nest(l, s, t, n, i)     lock_acquire(l, s, t, 0, 2, n, i)
 # else
 #  define spin_acquire(l, s, t, i)             lock_acquire(l, s, t, 0, 1, NULL, i)
+#  define spin_acquire_nest(l, s, t, n, i)     lock_acquire(l, s, t, 0, 1, NULL, i)
 # endif
 # define spin_release(l, n, i)                 lock_release(l, n, i)
 #else
index 61e5610ad165592c0605eb68330ff50d9e0b77f8..e0c0fccced46c4b09d654121423f76357407bc95 100644 (file)
@@ -183,8 +183,14 @@ do {                                                               \
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
+# define spin_lock_nest_lock(lock, nest_lock)                          \
+        do {                                                           \
+                typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
+                _spin_lock_nest_lock(lock, &(nest_lock)->dep_map);     \
+        } while (0)
 #else
 # define spin_lock_nested(lock, subclass) _spin_lock(lock)
+# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
 #endif
 
 #define write_lock(lock)               _write_lock(lock)
index 8a2307ce729687f25a954969ae14df0b31748915..d79845d034b530372ff0049655179525a45dffc0 100644 (file)
@@ -22,6 +22,8 @@ int in_lock_functions(unsigned long addr);
 void __lockfunc _spin_lock(spinlock_t *lock)           __acquires(lock);
 void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
                                                        __acquires(lock);
+void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
+                                                       __acquires(lock);
 void __lockfunc _read_lock(rwlock_t *lock)             __acquires(lock);
 void __lockfunc _write_lock(rwlock_t *lock)            __acquires(lock);
 void __lockfunc _spin_lock_bh(spinlock_t *lock)                __acquires(lock);
index a1fb54c93cdd2381f23573748852a4a105e8ddd3..44baeea94ab906b06ff87d6bf936f8a19d71915d 100644 (file)
@@ -292,6 +292,7 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
 }
 
 EXPORT_SYMBOL(_spin_lock_nested);
+
 unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
 {
        unsigned long flags;
@@ -314,6 +315,16 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas
 
 EXPORT_SYMBOL(_spin_lock_irqsave_nested);
 
+void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
+                                    struct lockdep_map *nest_lock)
+{
+       preempt_disable();
+       spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
+       LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+
+EXPORT_SYMBOL(_spin_lock_nest_lock);
+
 #endif
 
 void __lockfunc _spin_unlock(spinlock_t *lock)