/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types_raw.h:
 *                        the raw types and initializers
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types_raw.h:
 *                        the raw RT types and initializers
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */
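/*
 * A minimal usage sketch (the lock and function names below are
 * illustrative, not part of this header): whatever the build, callers
 * include only <linux/spinlock.h> and use the spin_*() APIs it builds;
 * the sub-headers above are implementation details:
 *
 *      #include <linux/spinlock.h>
 *
 *      static DEFINE_SPINLOCK(my_lock);
 *
 *      static void my_critical_section(void)
 *      {
 *              spin_lock(&my_lock);
 *              ... data protected by my_lock ...
 *              spin_unlock(&my_lock);
 *      }
 */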

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>

/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

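/*
 * Usage sketch for LOCK_SECTION_START/END (modelled loosely on the old
 * x86 semaphore fastpath; the asm body and symbol names are illustrative
 * only): the slowpath between the two macros is emitted into the
 * ".text..lock.<file>" subsection, keeping it out of the hot
 * instruction stream:
 *
 *      asm volatile(LOCK_PREFIX "decl %0\n\t"
 *                   "js 2f\n"
 *                   "1:\n"
 *                   LOCK_SECTION_START("")
 *                   "2:\tcall my_slowpath\n\t"
 *                   "jmp 1b\n"
 *                   LOCK_SECTION_END
 *                   : "+m" (v->counter) : : "memory", "cc");
 */
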
#define __lockfunc __section(".spinlock.text")

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                   struct lock_class_key *key, short inner);

# define raw_spin_lock_init(lock)                                       \
do {                                                                    \
        static struct lock_class_key __key;                             \
                                                                        \
        __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN);      \
} while (0)

#else
# define raw_spin_lock_init(lock)                               \
        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

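/*
 * Initialization sketch (the structure and function names are
 * illustrative): a raw_spinlock_t embedded in a runtime-allocated
 * object must be initialized before first use so that lockdep gets a
 * valid lock class:
 *
 *      struct my_dev {
 *              raw_spinlock_t lock;
 *      };
 *
 *      static void my_dev_setup(struct my_dev *dev)
 *      {
 *              raw_spin_lock_init(&dev->lock);
 *      }
 */
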
#define raw_spin_is_locked(lock)        arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)     arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)     (((void)(lock), 0))
#endif /*arch_spin_is_contended*/

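/*
 * A common (sketched) use of the contention check is voluntary lock
 * breaking in long-running loops, as spin_needbreak() does; the loop
 * and helper names below are illustrative only:
 *
 *      while (more_work(obj)) {
 *              do_one_step(obj);
 *              if (raw_spin_is_contended(&obj->lock)) {
 *                      raw_spin_unlock(&obj->lock);
 *                      cpu_relax();
 *                      raw_spin_lock(&obj->lock);
 *              }
 *      }
 */
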
/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
 * between program-order earlier lock acquisitions and program-order later
 * memory accesses.
 *
 * This guarantees that the following two properties hold:
 *
 *   1) Given the snippet:
 *
 *        { X = 0;  Y = 0; }
 *
 *        CPU0                          CPU1
 *
 *        WRITE_ONCE(X, 1);             WRITE_ONCE(Y, 1);
 *        spin_lock(S);                 smp_mb();
 *        smp_mb__after_spinlock();     r1 = READ_ONCE(X);
 *        r0 = READ_ONCE(Y);
 *        spin_unlock(S);
 *
 *      it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
 *      and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
 *      preceding the call to smp_mb__after_spinlock() in __schedule() and in
 *      try_to_wake_up().
 *
 *   2) Given the snippet:
 *
 *  { X = 0;  Y = 0; }
 *
 *  CPU0                CPU1                            CPU2
 *
 *  spin_lock(S);       spin_lock(S);                   r1 = READ_ONCE(Y);
 *  WRITE_ONCE(X, 1);   smp_mb__after_spinlock();       smp_rmb();
 *  spin_unlock(S);     r0 = READ_ONCE(X);              r2 = READ_ONCE(X);
 *                      WRITE_ONCE(Y, 1);
 *                      spin_unlock(S);
 *
 *      it is forbidden that CPU0's critical section executes before CPU1's
 *      critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
 *      and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
 *      preceding the calls to smp_rmb() in try_to_wake_up() for similar
 *      snippets but "projected" onto two CPUs.
 *
 * Property (2) upgrades the lock to an RCsc lock.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()        kcsan_mb()
#endif

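/*
 * Usage sketch (condensed from the try_to_wake_up() pattern; the field
 * and state names here are illustrative): the barrier orders the lock
 * acquisition against the later load, pairing with a full barrier on
 * the sleeping side:
 *
 *      raw_spin_lock_irqsave(&task->pi_lock, flags);
 *      smp_mb__after_spinlock();       (pairs with smp_mb() in the sleeper)
 *      if (READ_ONCE(task->state) != expected_state)
 *              goto out;
 */
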
#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock(&lock->raw_lock);
        mmiowb_spin_lock();
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        int ret = arch_spin_trylock(&(lock)->raw_lock);

        if (ret)
                mmiowb_spin_lock();

        return ret;
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
        mmiowb_spin_unlock();
        arch_spin_unlock(&lock->raw_lock);
        __release(lock);
}
#endif

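/*
 * Why do_raw_spin_lock()/do_raw_spin_unlock() call the mmiowb_spin_*()
 * hooks above (a sketch; the device and register names are
 * hypothetical): on architectures with weakly ordered MMIO, writes from
 * two CPUs serialized by a lock could still reach the device out of
 * lock order without the unlock-time mmiowb:
 *
 *      spin_lock(&hw->lock);
 *      writel(val, hw->regs + DATA_REG);
 *      spin_unlock(&hw->lock);         (the mmiowb ensures the DATA_REG
 *                                       write reaches the device before
 *                                       any writel() by the next holder)
 */
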
/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION are set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)  __cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)     _raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)                       \
         do {                                                           \
                 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
                 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
         } while (0)
#else
/*
 * Always evaluate the 'subclass' argument to avoid that the compiler
 * warns about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)           \
        _raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)       _raw_spin_lock(lock)
#endif

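/*
 * Nested-locking sketch (double_lock-style; the structure and field
 * names are illustrative): when two locks of the same lock class must
 * be held at once, the second acquisition needs a distinct subclass so
 * that lockdep does not report a false deadlock:
 *
 *      static void my_double_lock(struct my_obj *a, struct my_obj *b)
 *      {
 *              raw_spin_lock(&a->lock);
 *              raw_spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *      }
 */
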
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _raw_spin_lock_irqsave(lock);   \
        } while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave_nested(lock, subclass);  \
        } while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        do {                                                    \
                typecheck(unsigned long, flags);                \
                flags = _raw_spin_lock_irqsave(lock);           \
        } while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _raw_spin_lock_irqsave(lock, flags);    \
        } while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        raw_spin_lock_irqsave(lock, flags)

#endif

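/*
 * Usage sketch (my_lock is illustrative): 'flags' must be an unsigned
 * long in the caller's scope; the typecheck() above turns a misdeclared
 * flags variable into a build error instead of silent corruption of the
 * saved interrupt state:
 *
 *      unsigned long flags;
 *
 *      raw_spin_lock_irqsave(&my_lock, flags);
 *      ... critical section, hardirqs disabled ...
 *      raw_spin_unlock_irqrestore(&my_lock, flags);
 */
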
#define raw_spin_lock_irq(lock)         _raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)          _raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)           _raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)       _raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)         \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _raw_spin_unlock_irqrestore(lock, flags); \
        } while (0)
#define raw_spin_unlock_bh(lock)        _raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
        __cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
        local_irq_disable(); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})

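/*
 * Trylock sketch (illustrative): the trylock variants return 1 on
 * success and 0 on failure, and -- as the macros above show -- restore
 * the interrupt state themselves on failure, so a fallback path needs
 * no extra cleanup:
 *
 *      unsigned long flags;
 *
 *      if (raw_spin_trylock_irqsave(&my_lock, flags)) {
 *              ... fast path ...
 *              raw_spin_unlock_irqrestore(&my_lock, flags);
 *      } else {
 *              ... defer or retry ...
 *      }
 */
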
#ifndef CONFIG_PREEMPT_RT
/* Include rwlock functions for !RT */
#include <linux/rwlock.h>
#endif

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/* Non PREEMPT_RT kernel, map to raw spinlocks: */
#ifndef CONFIG_PREEMPT_RT

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
        return &lock->rlock;
}

#ifdef CONFIG_DEBUG_SPINLOCK

# define spin_lock_init(lock)                                   \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __raw_spin_lock_init(spinlock_check(lock),              \
                             #lock, &__key, LD_WAIT_CONFIG);    \
} while (0)

#else

# define spin_lock_init(_lock)                  \
do {                                            \
        spinlock_check(_lock);                  \
        *(_lock) = __SPIN_LOCK_UNLOCKED(_lock); \
} while (0)

#endif

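/*
 * Initialization sketch (my_ctx is an illustrative structure):
 * statically allocated locks can use DEFINE_SPINLOCK(); a lock embedded
 * in a runtime-allocated object must go through spin_lock_init():
 *
 *      struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *
 *      if (!ctx)
 *              return -ENOMEM;
 *      spin_lock_init(&ctx->lock);
 */
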
static __always_inline void spin_lock(spinlock_t *lock)
{
        raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
        raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
        return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)                        \
do {                                                            \
        raw_spin_lock_nested(spinlock_check(lock), subclass);   \
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)                            \
do {                                                                    \
        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
        raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)                          \
do {                                                            \
        raw_spin_lock_irqsave(spinlock_check(lock), flags);     \
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)                 \
do {                                                                    \
        raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
        raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
        raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
        raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
        return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
        return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)                       \
({                                                              \
        raw_spin_trylock_irqsave(spinlock_check(lock), flags);  \
})

/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering
 * guarantees; it could be used for debugging purposes or, when
 * additional synchronization is needed, accompanied with other
 * constructs (memory barriers) enforcing the synchronization.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 *
 * Note that the function only tells you that the spinlock is
 * seen to be locked, not that it is locked on your CPU.
 *
 * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
 * the return value is always 0 (see include/linux/spinlock_up.h).
 * Therefore you should not rely heavily on the return value.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
        return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
        return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)        assert_raw_spin_locked(&(lock)->rlock)
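
/*
 * Sketch (illustrative names): assert_spin_locked() documents and
 * checks a locking precondition inside a helper that must only be
 * called with the lock already held:
 *
 *      static void my_update_locked(struct my_ctx *ctx)
 *      {
 *              assert_spin_locked(&ctx->lock);
 *              ctx->counter++;
 *      }
 */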

#else  /* !CONFIG_PREEMPT_RT */
# include <linux/spinlock_rt.h>
#endif /* CONFIG_PREEMPT_RT */

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
                                        unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
                __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))

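/*
 * Sketch of the classic use (refcounted-object teardown; the object and
 * list names are illustrative): drop a reference and, only when it hits
 * zero, take the list lock to unlink and free the object -- avoiding
 * the lock entirely on the common non-final put:
 *
 *      if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *              list_del(&obj->node);
 *              spin_unlock(&obj_list_lock);
 *              kfree(obj);
 *      }
 */
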
int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
                             size_t max_size, unsigned int cpu_mult,
                             gfp_t gfp, const char *name,
                             struct lock_class_key *key);

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
        ({                                                                   \
                static struct lock_class_key key;                            \
                int ret;                                                     \
                                                                             \
                ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,   \
                                               cpu_mult, gfp, #locks, &key); \
                ret;                                                         \
        })

void free_bucket_spinlocks(spinlock_t *locks);
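
/*
 * Usage sketch (the sizes and variable names are illustrative): hashed
 * bucket locks protect a hash table with one lock per group of buckets
 * instead of one global lock; alloc_bucket_spinlocks() returns 0 on
 * success and fills in the mask used to pick a bucket's lock:
 *
 *      spinlock_t *locks;
 *      unsigned int lock_mask;
 *
 *      if (alloc_bucket_spinlocks(&locks, &lock_mask, 1024, 0, GFP_KERNEL))
 *              return -ENOMEM;
 *      spin_lock(&locks[hash & lock_mask]);
 *      ... operate on the hash bucket ...
 *      spin_unlock(&locks[hash & lock_mask]);
 *      free_bucket_spinlocks(locks);
 */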

#endif /* __LINUX_SPINLOCK_H */