// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifndef CONFIG_PREEMPT_RT
#include "mutex.h"

#ifdef CONFIG_DEBUG_MUTEXES
# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
#else
# define MUTEX_WARN_ON(cond)
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	raw_spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

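/*
 * Illustrative sketch (not part of the kernel sources): typical dynamic
 * initialization of a mutex embedded in a larger object. mutex_init() is
 * the <linux/mutex.h> wrapper around __mutex_init() that supplies the
 * lockdep class key; `struct my_dev` and my_dev_setup() are hypothetical.
 *
 *	struct my_dev {
 *		struct mutex lock;
 *		int state;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *dev)
 *	{
 *		mutex_init(&dev->lock);
 *		dev->state = 0;
 *	}
 *
 * Statically allocated mutexes can use DEFINE_MUTEX(name) instead; neither
 * form may be applied to a mutex that is currently held.
 */
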
/*
 * @owner: contains a 'struct task_struct *' pointing to the current lock
 * owner, NULL means not owned. Since task_struct pointers are aligned at
 * at least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07

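/*
 * Worked example of the encoding above (values illustrative only): with
 * the owning task_struct at 0xffff888012345600, a non-empty wait list and
 * a pending handoff, lock->owner contains
 *
 *	0xffff888012345600 | MUTEX_FLAG_WAITERS | MUTEX_FLAG_HANDOFF
 *	== 0xffff888012345603
 *
 * Masking with ~MUTEX_FLAGS recovers the task pointer and masking with
 * MUTEX_FLAGS recovers the state bits, which is exactly what the helpers
 * below do.
 */
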
/*
 * Internal helper function; C doesn't allow us to hide it :/
 *
 * DO NOT USE (outside of mutex code).
 */
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
}

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/*
 * Returns: __mutex_owner(lock) on failure or NULL on success.
 */
static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (flags & MUTEX_FLAG_PICKUP) {
				if (task != curr)
					break;
				flags &= ~MUTEX_FLAG_PICKUP;
			} else if (handoff) {
				if (flags & MUTEX_FLAG_HANDOFF)
					break;
				flags |= MUTEX_FLAG_HANDOFF;
			} else {
				break;
			}
		} else {
			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
			task = curr;
		}

		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
			if (task == curr)
				return NULL;
			break;
		}
	}

	return __owner_task(owner);
}

/*
 * Trylock or set HANDOFF
 */
static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
{
	return !__mutex_trylock_common(lock, handoff);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_common(lock, false);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	list_del(&waiter->list);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_remove_waiter(lock, waiter, current);
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock; the
 * __mutex_trylock() provides matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long new;

		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
			break;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
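
/*
 * Illustrative sketch only: the canonical pairing around a critical
 * section. `counter_lock` and `counter` are hypothetical names.
 *
 *	static DEFINE_MUTEX(counter_lock);
 *	static unsigned long counter;
 *
 *	static void counter_inc(void)
 *	{
 *		mutex_lock(&counter_lock);
 *		counter++;
 *		mutex_unlock(&counter_lock);
 *	}
 */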
#endif

#include "ww_mutex.h"

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	return __mutex_trylock_common(lock, false);
}

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set, the contents are undefined; only
	 * by acquiring wait_lock is there a guarantee that
	 * they are not invalid when reading.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	lockdep_assert_preemption_disabled();

	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. And we already
		 * disabled preemption, which is equivalent to an RCU
		 * read-side critical section in the optimistic spinning
		 * code. Thus the task_struct won't go away during the
		 * spinning period.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted to detect lock holder preemption issues.
		 */
		if (!owner_on_cpu(owner) || need_resched()) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	lockdep_assert_preemption_disabled();

	if (need_resched())
		return 0;

	/*
	 * We already disabled preemption, which is equivalent to an RCU
	 * read-side critical section in the optimistic spinning code.
	 * Thus the task_struct won't go away during the spinning period.
	 */
	owner = __mutex_owner(lock);
	if (owner)
		retval = owner_on_cpu(owner);

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	__ww_mutex_unlock(lock);
	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);

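/*
 * Illustrative sketch of the wait/wound acquire pattern these primitives
 * support; `my_class`, grab_two() and the omitted error handling are
 * hypothetical. On -EDEADLK the caller must drop the locks it holds and
 * block on the contended lock with ww_mutex_lock_slow() before retrying,
 * so that the older context eventually wins:
 *
 *	static DEFINE_WW_CLASS(my_class);
 *
 *	static void grab_two(struct ww_mutex *a, struct ww_mutex *b)
 *	{
 *		struct ww_acquire_ctx ctx;
 *
 *		ww_acquire_init(&ctx, &my_class);
 *		ww_mutex_lock(a, &ctx);
 *		for (;;) {
 *			if (ww_mutex_lock(b, &ctx) != -EDEADLK)
 *				break;
 *			ww_mutex_unlock(a);
 *			ww_mutex_lock_slow(b, &ctx);
 *			swap(a, b);
 *		}
 *		ww_acquire_done(&ctx);
 *
 *		... work, then ww_mutex_unlock() both and ww_acquire_fini(&ctx) ...
 *	}
 */
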
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	struct ww_mutex *ww;
	int ret;

	if (!use_ww_ctx)
		ww_ctx = NULL;

	might_sleep();

	MUTEX_WARN_ON(lock->magic != lock);

	ww = container_of(lock, struct ww_mutex, base);
	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here, since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		trace_contention_end(lock, 0);
		preempt_enable();
		return 0;
	}

	raw_spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	waiter.task = current;
	if (use_ww_ctx)
		waiter.ww_ctx = ww_ctx;

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_kill;
	}

	set_current_state(state);
	trace_contention_begin(lock, LCB_F_MUTEX);
	for (;;) {
		bool first;

		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us; do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		raw_spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		first = __mutex_waiter_is_first(lock, &waiter);

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock_or_handoff(lock, first))
			break;

		if (first) {
			trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
			if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
				break;
			trace_contention_begin(lock, LCB_F_MUTEX);
		}

		raw_spin_lock(&lock->wait_lock);
	}
	raw_spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);

	if (ww_ctx) {
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx);
	}

	__mutex_remove_waiter(lock, &waiter);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	trace_contention_end(lock, 0);

	if (ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	raw_spin_unlock(&lock->wait_lock);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	__mutex_remove_waiter(lock, &waiter);
err_early_kill:
	trace_contention_end(lock, ret);
	raw_spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
}

/**
 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
 * @ww: mutex to lock
 * @ww_ctx: optional w/w acquire context
 *
 * Trylocks a mutex with the optional acquire context; no deadlock detection is
 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
 *
 * Unlike ww_mutex_lock, no deadlock handling is performed. However, if a @ctx is
 * specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
 *
 * A mutex acquired with this function must be released with ww_mutex_unlock.
 */
int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx)
		return mutex_trylock(&ww->base);

	MUTEX_WARN_ON(ww->base.magic != &ww->base);

	/*
	 * Reset the wounded flag after a kill. No other process can
	 * race and wound us here, since they can't have a valid owner
	 * pointer if we don't have any locks held.
	 */
	if (ww_ctx->acquired == 0)
		ww_ctx->wounded = 0;

	if (__mutex_trylock(&ww->base)) {
		ww_mutex_set_context_fastpath(ww, ww_ctx);
		mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);
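
/*
 * Illustrative only: an opportunistic user, e.g. a hypothetical eviction
 * scan that already holds locks under @ctx and would rather skip an object
 * than risk a wound/backoff cycle:
 *
 *	if (!ww_mutex_trylock(&obj->lock, ctx))
 *		continue;	(skip this object; no sleeping, no -EDEADLK)
 */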

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
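
/*
 * Illustrative sketch: locking two instances of one lock class, e.g. a
 * hypothetical parent/child object pair. Without the subclass annotation,
 * lockdep would flag the second acquisition as a recursive deadlock even
 * though the locks are distinct:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */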

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, _RET_IP_, ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, _RET_IP_, ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;

	mutex_release(&lock->dep_map, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}
	}

	raw_spin_lock(&lock->wait_lock);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	raw_spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
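
/*
 * Illustrative only: the usual syscall/ioctl-path caller, propagating a
 * restartable error instead of sleeping uninterruptibly (`dev` is a
 * hypothetical device structure):
 *
 *	if (mutex_lock_interruptible(&dev->lock))
 *		return -ERESTARTSYS;
 *	...
 *	mutex_unlock(&dev->lock);
 */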

/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked;

	MUTEX_WARN_ON(lock->magic != lock);

	locked = __mutex_trylock(lock);
	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
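
/*
 * Illustrative only: the common "do the work only if nobody else is"
 * pattern, with a hypothetical `cleanup_lock`. The result is used
 * directly; failure here means contention, not an error:
 *
 *	if (mutex_trylock(&cleanup_lock)) {
 *		do_cleanup();
 *		mutex_unlock(&cleanup_lock);
 *	}
 */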

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#endif /* !CONFIG_PREEMPT_RT */

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return true and hold the lock if we dec to 0, return false otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
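
/*
 * Illustrative only: classic refcounted-teardown usage, with a hypothetical
 * object whose registration list is protected by `list_lock`. The lock is
 * only taken on the final reference drop:
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcnt, &list_lock)) {
 *		list_del(&obj->node);
 *		mutex_unlock(&list_lock);
 *		kfree(obj);
 *	}
 */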