/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H
#define __LINUX_INSIDE_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types_raw.h:
 *                        the raw types and initializers
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *  (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types_raw.h:
 *                        the raw RT types and initializers
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *  (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>

/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)		\
	".subsection 1\n\t"			\
	extra					\
	".ifndef " LOCK_SECTION_NAME "\n\t"	\
	LOCK_SECTION_NAME ":\n\t"		\
	".endif\n"

#define LOCK_SECTION_END			\
	".previous\n\t"

#define __lockfunc __section(".spinlock.text")

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key, short inner);

# define raw_spin_lock_init(lock)					\
do {									\
	static struct lock_class_key __key;				\
									\
	__raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN);	\
} while (0)

#else
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
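
/*
 * Example (illustrative sketch, not part of this header's API surface;
 * 'struct my_device' and its 'hw_lock' field are made up): a
 * raw_spinlock_t must be initialized before first use, either
 * statically via DEFINE_RAW_SPINLOCK() or at runtime via
 * raw_spin_lock_init():
 *
 *	static DEFINE_RAW_SPINLOCK(global_hw_lock);
 *
 *	struct my_device {
 *		raw_spinlock_t hw_lock;
 *	};
 *
 *	static void my_device_setup(struct my_device *dev)
 *	{
 *		raw_spin_lock_init(&dev->hw_lock);
 *	}
 */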

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /* arch_spin_is_contended */

/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
 * between program-order earlier lock acquisitions and program-order later
 * memory accesses.
 *
 * This guarantees that the following two properties hold:
 *
 *   1) Given the snippet:
 *
 *	  { X = 0;  Y = 0; }
 *
 *	  CPU0				CPU1
 *
 *	  WRITE_ONCE(X, 1);		WRITE_ONCE(Y, 1);
 *	  spin_lock(S);			smp_mb();
 *	  smp_mb__after_spinlock();	r1 = READ_ONCE(X);
 *	  r0 = READ_ONCE(Y);
 *	  spin_unlock(S);
 *
 *      it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
 *      and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
 *      preceding the call to smp_mb__after_spinlock() in __schedule() and in
 *      try_to_wake_up().
 *
 *   2) Given the snippet:
 *
 *	  { X = 0;  Y = 0; }
 *
 *	  CPU0			CPU1				CPU2
 *
 *	  spin_lock(S);		spin_lock(S);			r1 = READ_ONCE(Y);
 *	  WRITE_ONCE(X, 1);	smp_mb__after_spinlock();	smp_rmb();
 *	  spin_unlock(S);	r0 = READ_ONCE(X);		r2 = READ_ONCE(X);
 *				WRITE_ONCE(Y, 1);
 *				spin_unlock(S);
 *
 *      it is forbidden that CPU0's critical section executes before CPU1's
 *      critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
 *      and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
 *      preceding the calls to smp_rmb() in try_to_wake_up() for similar
 *      snippets but "projected" onto two CPUs.
 *
 *   Property (2) upgrades the lock to an RCsc lock.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()	kcsan_mb()
#endif
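
/*
 * Illustrative sketch (lock and variable names are assumed, mirroring
 * snippet (1) above): a full barrier is obtained by placing
 * smp_mb__after_spinlock() directly after the lock acquisition, so the
 * program-order earlier store is ordered against the later load:
 *
 *	WRITE_ONCE(x, 1);
 *	spin_lock(&s);			// ACQUIRE ordering only
 *	smp_mb__after_spinlock();	// upgrade to a full smp_mb()
 *	r0 = READ_ONCE(y);
 *	spin_unlock(&s);
 */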

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);		/* sparse annotation only */
	arch_spin_lock(&lock->raw_lock);
	mmiowb_spin_lock();		/* start tracking MMIO writes */
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&(lock)->raw_lock);

	if (ret)
		mmiowb_spin_lock();

	return ret;
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	mmiowb_spin_unlock();		/* order pending MMIO writes before release */
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);		/* sparse annotation only */
}
#endif
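
/*
 * Illustrative sketch (the device, register offset and lock names are
 * made up): the mmiowb tracking above ensures that an MMIO write issued
 * inside the critical section is not reordered past the unlock, as
 * observed by the device, on architectures that need mmiowb():
 *
 *	raw_spin_lock(&dev->hw_lock);
 *	writel(val, dev->regs + MY_DOORBELL);
 *	raw_spin_unlock(&dev->hw_lock);
 */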

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION is set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument to avoid that the compiler
 * warns about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = _raw_spin_lock_irqsave(lock);		\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)			\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})
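
/*
 * Example (illustrative sketch; 'my_lock' and the surrounding code are
 * assumed): 'flags' must be a local 'unsigned long', and the
 * save/restore pair must run on the same CPU with interrupts left
 * disabled for the whole critical section:
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave(&my_lock, flags);
 *	// critical section, IRQs off
 *	raw_spin_unlock_irqrestore(&my_lock, flags);
 *
 *	if (raw_spin_trylock_irqsave(&my_lock, flags)) {
 *		// lock taken, IRQ state saved
 *		raw_spin_unlock_irqrestore(&my_lock, flags);
 *	}
 */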

#ifndef CONFIG_PREEMPT_RT
/* Include rwlock functions for !RT */
#include <linux/rwlock.h>
#endif

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/* Non PREEMPT_RT kernel, map to raw spinlocks: */
#ifndef CONFIG_PREEMPT_RT

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#ifdef CONFIG_DEBUG_SPINLOCK

# define spin_lock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init(spinlock_check(lock),		\
			     #lock, &__key, LD_WAIT_CONFIG);	\
} while (0)

#else

# define spin_lock_init(_lock)			\
do {						\
	spinlock_check(_lock);			\
	*(_lock) = __SPIN_LOCK_UNLOCKED(_lock);	\
} while (0)

#endif
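
/*
 * Example (illustrative sketch; 'struct foo' and its fields are made
 * up): a spinlock_t is initialized statically with DEFINE_SPINLOCK()
 * or dynamically with spin_lock_init(), then used through the
 * spin_lock()/spin_unlock() wrappers defined below:
 *
 *	static DEFINE_SPINLOCK(foo_list_lock);
 *
 *	struct foo {
 *		spinlock_t lock;
 *		int count;
 *	};
 *
 *	static void foo_init(struct foo *f)
 *	{
 *		spin_lock_init(&f->lock);
 *	}
 *
 *	static void foo_inc(struct foo *f)
 *	{
 *		spin_lock(&f->lock);
 *		f->count++;
 *		spin_unlock(&f->lock);
 *	}
 */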

static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)
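
/*
 * Example (illustrative sketch; the 'parent'/'child' objects are
 * assumed): spin_lock_nested() tells lockdep that taking two locks of
 * the same lock class in a fixed order is intentional, e.g. when
 * locking a parent and then a child object of the same type:
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	spin_unlock(&child->lock);
 *	spin_unlock(&parent->lock);
 */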

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags);	\
})

/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering
 * guarantees; it could be used for debugging purposes or, when
 * additional synchronization is needed, accompanied with other
 * constructs (memory barriers) enforcing the synchronization.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 *
 * Note that the function only tells you that the spinlock is
 * seen to be locked, not that it is locked on your CPU.
 *
 * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
 * the return value is always 0 (see include/linux/spinlock_up.h).
 * Therefore you should not rely heavily on the return value.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)

#else  /* CONFIG_PREEMPT_RT */
# include <linux/spinlock_rt.h>
#endif /* CONFIG_PREEMPT_RT */

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
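
/*
 * Example (illustrative sketch; 'struct obj', 'obj_list_lock' and
 * obj_free() are made up): the classic refcount-put pattern, where the
 * list lock is only taken for the final reference drop:
 *
 *	if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		spin_unlock(&obj_list_lock);
 *		obj_free(obj);
 *	}
 */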

extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
					unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
		__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))

extern int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock);
#define atomic_dec_and_raw_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_raw_lock(atomic, lock))

extern int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
					    unsigned long *flags);
#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) \
		__cond_lock(lock, _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags)))

int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
			     size_t max_size, unsigned int cpu_mult,
			     gfp_t gfp, const char *name,
			     struct lock_class_key *key);

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
	({								     \
		static struct lock_class_key key;			     \
		int ret;						     \
									     \
		ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,   \
					       cpu_mult, gfp, #locks, &key); \
		ret;							     \
	})

void free_bucket_spinlocks(spinlock_t *locks);
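
/*
 * Example (illustrative sketch; the hash-table names are assumed): a
 * caller allocates an array of bucket locks and picks one by masking a
 * hash value with the returned lock_mask:
 *
 *	spinlock_t *bucket_locks;
 *	unsigned int lock_mask;
 *	int err;
 *
 *	err = alloc_bucket_spinlocks(&bucket_locks, &lock_mask,
 *				     1024, 0, GFP_KERNEL);
 *	if (!err) {
 *		spin_lock(&bucket_locks[hash & lock_mask]);
 *		...
 *		spin_unlock(&bucket_locks[hash & lock_mask]);
 *		free_bucket_spinlocks(bucket_locks);
 *	}
 */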

#undef __LINUX_INSIDE_SPINLOCK_H
#endif /* __LINUX_SPINLOCK_H */