Commit | Line | Data |
---|---|---|
b2441318 | 1 | // SPDX-License-Identifier: GPL-2.0 |
1da177e4 LT |
2 | /* |
3 | * Copyright (2004) Linus Torvalds | |
4 | * | |
5 | * Author: Zwane Mwaikambo <zwane@fsmlabs.com> | |
6 | * | |
fb1c8f93 IM |
7 | * Copyright (2004, 2005) Ingo Molnar |
8 | * | |
9 | * This file contains the spinlock/rwlock implementations for the | |
10 | * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them) | |
0cb91a22 AK |
11 | * |
12 | * Note that some architectures have special knowledge about the | |
13 | * stack frames of these functions in their profile_pc. If you | |
14 | * change anything significant here that could change the stack | |
15 | * frame contact the architecture maintainers. | |
1da177e4 LT |
16 | */ |
17 | ||
1da177e4 LT |
18 | #include <linux/linkage.h> |
19 | #include <linux/preempt.h> | |
20 | #include <linux/spinlock.h> | |
21 | #include <linux/interrupt.h> | |
8a25d5de | 22 | #include <linux/debug_locks.h> |
9984de1a | 23 | #include <linux/export.h> |
1da177e4 | 24 | |
d1be6a28 WD |
#ifdef CONFIG_MMIOWB
#ifndef arch_mmiowb_state
/*
 * Generic per-CPU mmiowb state, used unless the architecture provides
 * its own storage by defining arch_mmiowb_state.
 */
DEFINE_PER_CPU(struct mmiowb_state, __mmiowb_state);
EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
#endif
#endif
31 | ||
8e13c7b7 TG |
32 | /* |
33 | * If lockdep is enabled then we use the non-preemption spin-ops | |
34 | * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are | |
35 | * not re-enabled during lock-acquire (which the preempt-spin-ops do): | |
36 | */ | |
37 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) | |
38 | /* | |
39 | * The __lock_function inlines are taken from | |
f791dd25 CJ |
40 | * spinlock : include/linux/spinlock_api_smp.h |
41 | * rwlock : include/linux/rwlock_api_smp.h | |
8e13c7b7 TG |
42 | */ |
43 | #else | |
c14c338c WD |
44 | |
/*
 * Some architectures can relax in favour of the CPU owning the lock.
 * These fall back to a plain cpu_relax() busy-wait hint when the
 * architecture does not supply its own relax primitive.
 */
#ifndef arch_read_relax
# define arch_read_relax(l)	cpu_relax()
#endif
#ifndef arch_write_relax
# define arch_write_relax(l)	cpu_relax()
#endif
#ifndef arch_spin_relax
# define arch_spin_relax(l)	cpu_relax()
#endif
57 | ||
8e13c7b7 TG |
/*
 * We build the __lock_function inlines here. They are too large for
 * inlining all over the place, but here is only one user per function
 * which embeds them into the calling _lock_function below.
 *
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptible if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * BUILD_LOCK_OPS(op, locktype) expands to four functions:
 *
 *   __raw_##op##_lock()         - spin with preemption re-enabled
 *                                 between trylock attempts
 *   __raw_##op##_lock_irqsave() - ditto, with interrupts re-enabled
 *                                 between attempts; returns saved flags
 *   __raw_##op##_lock_irq()     - irqsave variant, discarding the flags
 *   __raw_##op##_lock_bh()      - irqsave variant plus softirq disable
 */
#define BUILD_LOCK_OPS(op, locktype)					\
void __lockfunc __raw_##op##_lock(locktype##_t *lock)			\
{									\
	for (;;) {							\
		preempt_disable();					\
		if (likely(do_raw_##op##_trylock(lock)))		\
			break;						\
		/* failed: re-enable preemption while we spin-wait */	\
		preempt_enable();					\
									\
		arch_##op##_relax(&lock->raw_lock);			\
	}								\
}									\
									\
unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	for (;;) {							\
		preempt_disable();					\
		local_irq_save(flags);					\
		if (likely(do_raw_##op##_trylock(lock)))		\
			break;						\
		/* failed: restore irqs/preemption while spinning */	\
		local_irq_restore(flags);				\
		preempt_enable();					\
									\
		arch_##op##_relax(&lock->raw_lock);			\
	}								\
									\
	return flags;							\
}									\
									\
void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock)		\
{									\
	_raw_##op##_lock_irqsave(lock);					\
}									\
									\
void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)		\
{									\
	unsigned long flags;						\
									\
	/*							*/	\
	/* Careful: we must exclude softirqs too, hence the	*/	\
	/* irq-disabling. We use the generic preemption-aware	*/	\
	/* function:						*/	\
	/**/								\
	flags = _raw_##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\
116 | ||
/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         __[spin|read|write]_lock()
 *         __[spin|read|write]_lock_irq()
 *         __[spin|read|write]_lock_irqsave()
 *         __[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, raw_spinlock);

#ifndef CONFIG_PREEMPT_RT
/* The rwlock spin-ops are not built for PREEMPT_RT. */
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
#endif
8e13c7b7 TG |
132 | |
133 | #endif | |
134 | ||
/*
 * Out-of-line versions of the spinlock API. Each one is compiled only
 * when the matching CONFIG_INLINE_SPIN_* option is not set (the plain
 * unlock is controlled by CONFIG_UNINLINE_SPIN_UNLOCK instead);
 * otherwise callers use the __raw_* inlines from
 * include/linux/spinlock_api_smp.h directly.
 */
#ifndef CONFIG_INLINE_SPIN_TRYLOCK
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
{
	return __raw_spin_trylock(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock);
#endif

#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
{
	return __raw_spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock_bh);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
	__raw_spin_lock(lock);
}
EXPORT_SYMBOL(_raw_spin_lock);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
	return __raw_spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
{
	__raw_spin_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_BH
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
{
	__raw_spin_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_bh);
#endif

#ifdef CONFIG_UNINLINE_SPIN_UNLOCK
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
	__raw_spin_unlock(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
	__raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
{
	__raw_spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
{
	__raw_spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif
1da177e4 | 214 | |
8282947f TG |
215 | #ifndef CONFIG_PREEMPT_RT |
216 | ||
/*
 * Out-of-line versions of the rwlock read-side API, compiled only when
 * the matching CONFIG_INLINE_READ_* option is not set; otherwise
 * callers use the __raw_* inlines from include/linux/rwlock_api_smp.h
 * directly.
 */
#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
	return __raw_read_trylock(lock);
}
EXPORT_SYMBOL(_raw_read_trylock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK
void __lockfunc _raw_read_lock(rwlock_t *lock)
{
	__raw_read_lock(lock);
}
EXPORT_SYMBOL(_raw_read_lock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
{
	return __raw_read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQ
void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
{
	__raw_read_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irq);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_BH
void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
{
	__raw_read_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_lock_bh);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK
void __lockfunc _raw_read_unlock(rwlock_t *lock)
{
	__raw_read_unlock(lock);
}
EXPORT_SYMBOL(_raw_read_unlock);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__raw_read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
{
	__raw_read_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_irq);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_BH
void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
{
	__raw_read_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_bh);
#endif
1da177e4 | 288 | |
/*
 * Out-of-line versions of the rwlock write-side API, compiled only when
 * the matching CONFIG_INLINE_WRITE_* option is not set; otherwise
 * callers use the __raw_* inlines from include/linux/rwlock_api_smp.h
 * directly.
 */
#ifndef CONFIG_INLINE_WRITE_TRYLOCK
int __lockfunc _raw_write_trylock(rwlock_t *lock)
{
	return __raw_write_trylock(lock);
}
EXPORT_SYMBOL(_raw_write_trylock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK
void __lockfunc _raw_write_lock(rwlock_t *lock)
{
	__raw_write_lock(lock);
}
EXPORT_SYMBOL(_raw_write_lock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
{
	return __raw_write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
{
	__raw_write_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_BH
void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
{
	__raw_write_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_lock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK
void __lockfunc _raw_write_unlock(rwlock_t *lock)
{
	__raw_write_unlock(lock);
}
EXPORT_SYMBOL(_raw_write_unlock);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__raw_write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
{
	__raw_write_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
{
	__raw_write_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif
1da177e4 | 360 | |
8282947f TG |
361 | #endif /* !CONFIG_PREEMPT_RT */ |
362 | ||
b7b40ade TG |
363 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
364 | ||
9c1721aa | 365 | void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) |
1da177e4 | 366 | { |
b7b40ade TG |
367 | preempt_disable(); |
368 | spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); | |
9828ea9d | 369 | LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); |
1da177e4 | 370 | } |
9c1721aa | 371 | EXPORT_SYMBOL(_raw_spin_lock_nested); |
b7b40ade | 372 | |
9c1721aa | 373 | unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, |
b7b40ade TG |
374 | int subclass) |
375 | { | |
376 | unsigned long flags; | |
377 | ||
378 | local_irq_save(flags); | |
379 | preempt_disable(); | |
380 | spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); | |
9828ea9d TG |
381 | LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock, |
382 | do_raw_spin_lock_flags, &flags); | |
b7b40ade TG |
383 | return flags; |
384 | } | |
9c1721aa | 385 | EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested); |
b7b40ade | 386 | |
9c1721aa | 387 | void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock, |
b7b40ade TG |
388 | struct lockdep_map *nest_lock) |
389 | { | |
390 | preempt_disable(); | |
391 | spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_); | |
9828ea9d | 392 | LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); |
b7b40ade | 393 | } |
9c1721aa | 394 | EXPORT_SYMBOL(_raw_spin_lock_nest_lock); |
b7b40ade | 395 | |
892a7c67 | 396 | #endif |
1da177e4 | 397 | |
0764d23c | 398 | notrace int in_lock_functions(unsigned long addr) |
1da177e4 LT |
399 | { |
400 | /* Linker adds these: start and end of __lockfunc functions */ | |
401 | extern char __lock_text_start[], __lock_text_end[]; | |
402 | ||
403 | return addr >= (unsigned long)__lock_text_start | |
404 | && addr < (unsigned long)__lock_text_end; | |
405 | } | |
406 | EXPORT_SYMBOL(in_lock_functions); |