/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LOCAL_LOCK_H
# error "Do not include directly, include linux/local_lock.h"
#endif

#include <linux/percpu-defs.h>
#include <linux/lockdep.h>

#ifndef CONFIG_PREEMPT_RT

typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
	struct task_struct	*owner;
#endif
} local_lock_t;

/* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */
typedef struct {
	local_lock_t	llock;
	u8		acquired;
} local_trylock_t;

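/*
 * Illustrative sketch (not part of this header): per-CPU state that is
 * also poked from contexts where a plain local_lock() is not allowed
 * would use the trylock-capable type. The names below (netstat_lock,
 * count) are made up for the example:
 *
 *	static DEFINE_PER_CPU(local_trylock_t, netstat_lock) =
 *		INIT_LOCAL_TRYLOCK(netstat_lock);
 *
 *	if (local_trylock(&netstat_lock)) {
 *		__this_cpu_inc(count);
 *		local_unlock(&netstat_lock);
 *	}
 */
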
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCAL_LOCK_DEBUG_INIT(lockname)		\
	.dep_map = {					\
		.name = #lockname,			\
		.wait_type_inner = LD_WAIT_CONFIG,	\
		.lock_type = LD_LOCK_PERCPU,		\
	},						\
	.owner = NULL,

# define LOCAL_TRYLOCK_DEBUG_INIT(lockname)		\
	.llock = { LOCAL_LOCK_DEBUG_INIT((lockname).llock) },

static inline void local_lock_acquire(local_lock_t *l)
{
	lock_map_acquire(&l->dep_map);
	DEBUG_LOCKS_WARN_ON(l->owner);
	l->owner = current;
}

static inline void local_trylock_acquire(local_lock_t *l)
{
	lock_map_acquire_try(&l->dep_map);
	DEBUG_LOCKS_WARN_ON(l->owner);
	l->owner = current;
}

static inline void local_lock_release(local_lock_t *l)
{
	DEBUG_LOCKS_WARN_ON(l->owner != current);
	l->owner = NULL;
	lock_map_release(&l->dep_map);
}

static inline void local_lock_debug_init(local_lock_t *l)
{
	l->owner = NULL;
}
#else /* CONFIG_DEBUG_LOCK_ALLOC */
# define LOCAL_LOCK_DEBUG_INIT(lockname)
# define LOCAL_TRYLOCK_DEBUG_INIT(lockname)
static inline void local_lock_acquire(local_lock_t *l) { }
static inline void local_trylock_acquire(local_lock_t *l) { }
static inline void local_lock_release(local_lock_t *l) { }
static inline void local_lock_debug_init(local_lock_t *l) { }
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */

#define INIT_LOCAL_LOCK(lockname)	{ LOCAL_LOCK_DEBUG_INIT(lockname) }
#define INIT_LOCAL_TRYLOCK(lockname)	{ LOCAL_TRYLOCK_DEBUG_INIT(lockname) }

#define __local_lock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
	lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,  \
			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
			      LD_LOCK_PERCPU);			\
	local_lock_debug_init(lock);				\
} while (0)

#define __local_trylock_init(lock)	__local_lock_init(lock.llock)

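/*
 * Sketch of run-time initialization (illustrative; "pcpu_stats" and
 * "lock" are made-up names): locks embedded in dynamically allocated
 * per-CPU data cannot use the static initializers above and go through
 * local_lock_init() / local_trylock_init() instead, which set up the
 * lockdep map with one lock class key per call site:
 *
 *	struct pcpu_stats __percpu *stats = alloc_percpu(struct pcpu_stats);
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		local_lock_init(&per_cpu_ptr(stats, cpu)->lock);
 */
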
#define __spinlock_nested_bh_init(lock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
	lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,  \
			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
			      LD_LOCK_NORMAL);			\
	local_lock_debug_init(lock);				\
} while (0)

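/*
 * The acquire/release helpers below accept both lock types. Since
 * local_trylock_t embeds local_lock_t as its first member, the pointer
 * casts are valid, and _Generic() on the per-CPU pointer type compiles
 * in the extra "acquired" bookkeeping only for local_trylock_t users.
 */
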
#define __local_lock_acquire(lock)					\
	do {								\
		local_trylock_t *tl;					\
		local_lock_t *l;					\
									\
		l = (local_lock_t *)this_cpu_ptr(lock);			\
		tl = (local_trylock_t *)l;				\
		_Generic((lock),					\
			 __percpu local_trylock_t *: ({			\
				lockdep_assert(tl->acquired == 0);	\
				WRITE_ONCE(tl->acquired, 1);		\
			 }),						\
			 __percpu local_lock_t *: (void)0);		\
		local_lock_acquire(l);					\
	} while (0)

#define __local_lock(lock)					\
	do {							\
		preempt_disable();				\
		__local_lock_acquire(lock);			\
	} while (0)

#define __local_lock_irq(lock)					\
	do {							\
		local_irq_disable();				\
		__local_lock_acquire(lock);			\
	} while (0)

#define __local_lock_irqsave(lock, flags)			\
	do {							\
		local_irq_save(flags);				\
		__local_lock_acquire(lock);			\
	} while (0)

#define __local_trylock(lock)					\
	({							\
		local_trylock_t *tl;				\
								\
		preempt_disable();				\
		tl = this_cpu_ptr(lock);			\
		if (READ_ONCE(tl->acquired)) {			\
			preempt_enable();			\
			tl = NULL;				\
		} else {					\
			WRITE_ONCE(tl->acquired, 1);		\
			local_trylock_acquire(			\
				(local_lock_t *)tl);		\
		}						\
		!!tl;						\
	})

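/*
 * Note the ordering in the success path: "acquired" is read and set
 * while preemption (or IRQs, in the _irqsave variant) is already
 * disabled, so an interrupt arriving in between observes acquired == 1
 * and its own trylock fails instead of deadlocking. The statement
 * expression evaluates to !!tl, i.e. true on success, false when the
 * lock is already held on this CPU.
 */
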
#define __local_trylock_irqsave(lock, flags)			\
	({							\
		local_trylock_t *tl;				\
								\
		local_irq_save(flags);				\
		tl = this_cpu_ptr(lock);			\
		if (READ_ONCE(tl->acquired)) {			\
			local_irq_restore(flags);		\
			tl = NULL;				\
		} else {					\
			WRITE_ONCE(tl->acquired, 1);		\
			local_trylock_acquire(			\
				(local_lock_t *)tl);		\
		}						\
		!!tl;						\
	})

#define __local_lock_release(lock)					\
	do {								\
		local_trylock_t *tl;					\
		local_lock_t *l;					\
									\
		l = (local_lock_t *)this_cpu_ptr(lock);			\
		tl = (local_trylock_t *)l;				\
		local_lock_release(l);					\
		_Generic((lock),					\
			 __percpu local_trylock_t *: ({			\
				lockdep_assert(tl->acquired == 1);	\
				WRITE_ONCE(tl->acquired, 0);		\
			 }),						\
			 __percpu local_lock_t *: (void)0);		\
	} while (0)

#define __local_unlock(lock)					\
	do {							\
		__local_lock_release(lock);			\
		preempt_enable();				\
	} while (0)

#define __local_unlock_irq(lock)				\
	do {							\
		__local_lock_release(lock);			\
		local_irq_enable();				\
	} while (0)

#define __local_unlock_irqrestore(lock, flags)			\
	do {							\
		__local_lock_release(lock);			\
		local_irq_restore(flags);			\
	} while (0)

#define __local_lock_nested_bh(lock)				\
	do {							\
		lockdep_assert_in_softirq();			\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)

#define __local_unlock_nested_bh(lock)				\
	local_lock_release(this_cpu_ptr(lock))

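/*
 * Illustrative sketch for the nested-BH variants ("softnet_lock" is a
 * made-up name): they only add lockdep annotation on top of an already
 * BH-disabled section, e.g. softirq handling, without another
 * preempt_disable():
 *
 *	static DEFINE_PER_CPU(local_lock_t, softnet_lock) =
 *		INIT_LOCAL_LOCK(softnet_lock);
 *
 *	local_lock_nested_bh(&softnet_lock);
 *	... touch per-CPU state owned by softirq ...
 *	local_unlock_nested_bh(&softnet_lock);
 */
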
#else /* !CONFIG_PREEMPT_RT */

/*
 * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
 * critical section while staying preemptible.
 */
typedef spinlock_t local_lock_t;
typedef spinlock_t local_trylock_t;

#define INIT_LOCAL_LOCK(lockname)	__LOCAL_SPIN_LOCK_UNLOCKED((lockname))
#define INIT_LOCAL_TRYLOCK(lockname)	__LOCAL_SPIN_LOCK_UNLOCKED((lockname))

#define __local_lock_init(l)					\
	do {							\
		local_spin_lock_init((l));			\
	} while (0)

#define __local_trylock_init(l)			__local_lock_init(l)

#define __local_lock(__lock)					\
	do {							\
		migrate_disable();				\
		spin_lock(this_cpu_ptr((__lock)));		\
	} while (0)

#define __local_lock_irq(lock)			__local_lock(lock)

#define __local_lock_irqsave(lock, flags)			\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = 0;					\
		__local_lock(lock);				\
	} while (0)

#define __local_unlock(__lock)					\
	do {							\
		spin_unlock(this_cpu_ptr((__lock)));		\
		migrate_enable();				\
	} while (0)

#define __local_unlock_irq(lock)		__local_unlock(lock)

#define __local_unlock_irqrestore(lock, flags)	__local_unlock(lock)

#define __local_lock_nested_bh(lock)				\
	do {							\
		lockdep_assert_in_softirq_func();		\
		spin_lock(this_cpu_ptr(lock));			\
	} while (0)

#define __local_unlock_nested_bh(lock)				\
	do {							\
		spin_unlock(this_cpu_ptr((lock)));		\
	} while (0)

#define __local_trylock(lock)					\
	({							\
		int __locked;					\
								\
		if (in_nmi() | in_hardirq()) {			\
			__locked = 0;				\
		} else {					\
			migrate_disable();			\
			__locked = spin_trylock(this_cpu_ptr((lock)));	\
			if (!__locked)				\
				migrate_enable();		\
		}						\
		__locked;					\
	})

#define __local_trylock_irqsave(lock, flags)			\
	({							\
		typecheck(unsigned long, flags);		\
		flags = 0;					\
		__local_trylock(lock);				\
	})

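/*
 * On PREEMPT_RT the trylock must fail outright in NMI and hardirq
 * context: the underlying rtmutex-based spinlock is a sleeping lock
 * and may not be acquired, even opportunistically, from those
 * contexts. The flags argument is a dummy (set to 0) since interrupts
 * are never actually disabled on RT; __local_unlock_irqrestore()
 * ignores it accordingly.
 */
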
#endif /* CONFIG_PREEMPT_RT */