// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 *  David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifndef CONFIG_PREEMPT_RT
#include "mutex.h"

#ifdef CONFIG_DEBUG_MUTEXES
# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
#else
# define MUTEX_WARN_ON(cond)
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	raw_spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

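/*
 * Illustrative sketch (not part of the original file): mutexes are created
 * through the mutex_init() wrapper or defined statically, never by memset()
 * or open-coded initialization. The names below are hypothetical:
 *
 *	static DEFINE_MUTEX(fw_lock);		(static definition)
 *
 *	struct scan_ctl {
 *		struct mutex lock;
 *	};
 *
 *	mutex_init(&ctl->lock);			(runtime initialization)
 */
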
/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned at
 * least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07

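/*
 * Illustrative sketch (not part of the original file): with the flag bits
 * above, an owner word of, say, 0xffff888012345603 decodes as the
 * task_struct pointer 0xffff888012345600 with MUTEX_FLAG_WAITERS and
 * MUTEX_FLAG_HANDOFF set. The helpers below split the word accordingly:
 *
 *	owner = atomic_long_read(&lock->owner);
 *	task  = (struct task_struct *)(owner & ~MUTEX_FLAGS);
 *	flags = owner & MUTEX_FLAGS;
 */
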
/*
 * Internal helper function; C doesn't allow us to hide it :/
 *
 * DO NOT USE (outside of mutex code).
 */
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
}

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/*
 * Returns: __mutex_owner(lock) on failure or NULL on success.
 */
static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (flags & MUTEX_FLAG_PICKUP) {
				if (task != curr)
					break;
				flags &= ~MUTEX_FLAG_PICKUP;
			} else if (handoff) {
				if (flags & MUTEX_FLAG_HANDOFF)
					break;
				flags |= MUTEX_FLAG_HANDOFF;
			} else {
				break;
			}
		} else {
			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
			task = curr;
		}

		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
			if (task == curr)
				return NULL;
			break;
		}
	}

	return __owner_task(owner);
}

/*
 * Trylock or set HANDOFF.
 */
static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
{
	return !__mutex_trylock_common(lock, handoff);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_common(lock, false);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	list_del(&waiter->list);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_remove_waiter(lock, waiter, current);
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock; the
 * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long new;

		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
			break;
	}
}

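/*
 * Illustrative sketch (not part of the original file) of the handoff
 * protocol between an unlocking owner and the top waiter, as implemented
 * by the functions above:
 *
 *	waiter:	__mutex_trylock_or_handoff(lock, true)
 *			-> fails, sets MUTEX_FLAG_HANDOFF on the owner word
 *	owner:	mutex_unlock()
 *			-> sees HANDOFF, keeps the owner field set and calls
 *			   __mutex_handoff(lock, top_waiter)
 *			-> owner word becomes top_waiter | PICKUP (| WAITERS)
 *	waiter:	__mutex_trylock() sees PICKUP with task == current
 *			-> clears PICKUP and now owns the lock
 */
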
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
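
/*
 * Illustrative sketch (not part of the original file): the canonical
 * critical section, assuming a hypothetical structure embedding a mutex:
 *
 *	struct cache {
 *		struct mutex lock;
 *		unsigned long hits;
 *	};
 *
 *	static void cache_hit(struct cache *c)
 *	{
 *		mutex_lock(&c->lock);
 *		c->hits++;
 *		mutex_unlock(&c->lock);
 *	}
 */
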
#include "ww_mutex.h"

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	return __mutex_trylock_common(lock, false);
}

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set the contents are undefined; only by
	 * acquiring wait_lock is there a guarantee that they are
	 * valid when read.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted() to detect lock holder preemption.
		 */
		if (!owner->on_cpu || need_resched() ||
		    vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);

	/*
	 * To guard against lock holder preemption, skip spinning if the
	 * owning task is not running on a CPU or its CPU is preempted.
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using an MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take the OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take an
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;


fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	__ww_mutex_unlock(lock);
	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
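
/*
 * Illustrative sketch (not part of the original file): the usual two-lock
 * wait/wound acquire pattern, assuming a hypothetical ww_class `my_class`
 * and objects embedding a ww_mutex `lock` (see
 * Documentation/locking/ww-mutex-design.rst for the full backoff loop):
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &my_class);
 *
 *	ret = ww_mutex_lock(&o1->lock, &ctx);
 *	if (ret)
 *		goto backoff;
 *	ret = ww_mutex_lock(&o2->lock, &ctx);
 *	if (ret) {
 *		ww_mutex_unlock(&o1->lock);
 *		goto backoff;	(on -EDEADLK: ww_mutex_lock_slow() the
 *				 contended lock, then retry in stamp order)
 *	}
 *	ww_acquire_done(&ctx);
 *	... use o1 and o2 ...
 *	ww_mutex_unlock(&o1->lock);
 *	ww_mutex_unlock(&o2->lock);
 *	ww_acquire_fini(&ctx);
 */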

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	struct ww_mutex *ww;
	int ret;

	if (!use_ww_ctx)
		ww_ctx = NULL;

	might_sleep();

	MUTEX_WARN_ON(lock->magic != lock);

	ww = container_of(lock, struct ww_mutex, base);
	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	raw_spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	waiter.task = current;
	if (use_ww_ctx)
		waiter.ww_ctx = ww_ctx;

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_kill;
	}

	set_current_state(state);
	for (;;) {
		bool first;

		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		raw_spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		first = __mutex_waiter_is_first(lock, &waiter);

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock_or_handoff(lock, first) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
			break;

		raw_spin_lock(&lock->wait_lock);
	}
	raw_spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);

	if (ww_ctx) {
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx);
	}

	__mutex_remove_waiter(lock, &waiter);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	raw_spin_unlock(&lock->wait_lock);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	__mutex_remove_waiter(lock, &waiter);
err_early_kill:
	raw_spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
}

/**
 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
 * @ww: mutex to lock
 * @ww_ctx: optional w/w acquire context
 *
 * Trylocks a mutex with the optional acquire context; no deadlock detection is
 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
 *
 * Unlike ww_mutex_lock, no deadlock handling is performed. However, if @ww_ctx
 * is specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
 *
 * A mutex acquired with this function must be released with ww_mutex_unlock.
 */
int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx)
		return mutex_trylock(&ww->base);

	MUTEX_WARN_ON(ww->base.magic != &ww->base);

	/*
	 * Reset the wounded flag after a kill. No other process can
	 * race and wound us here, since they can't have a valid owner
	 * pointer if we don't have any locks held.
	 */
	if (ww_ctx->acquired == 0)
		ww_ctx->wounded = 0;

	if (__mutex_trylock(&ww->base)) {
		ww_mutex_set_context_fastpath(ww, ww_ctx);
		mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);
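
/*
 * Illustrative sketch (not part of the original file): opportunistically
 * grabbing an extra object inside an eviction loop, where a blocking
 * ww_mutex_lock() could deadlock against locks already held under the
 * hypothetical context `ctx`:
 *
 *	if (ww_mutex_trylock(&obj->lock, &ctx)) {
 *		evict(obj);
 *		ww_mutex_unlock(&obj->lock);
 *	}
 */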

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;	/* back the interval off by 3.5x */

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, _RET_IP_, ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, _RET_IP_, ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

883 | /* |
884 | * Release the lock, slowpath: | |
885 | */ | |
3ca0ff57 | 886 | static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip) |
6053ee3b | 887 | { |
9d659ae1 | 888 | struct task_struct *next = NULL; |
194a6b5b | 889 | DEFINE_WAKE_Q(wake_q); |
b9c16a0e | 890 | unsigned long owner; |
6053ee3b | 891 | |
5facae4f | 892 | mutex_release(&lock->dep_map, ip); |
3ca0ff57 | 893 | |
6053ee3b | 894 | /* |
9d659ae1 PZ |
895 | * Release the lock before (potentially) taking the spinlock such that |
896 | * other contenders can get on with things ASAP. | |
897 | * | |
898 | * Except when HANDOFF, in that case we must not clear the owner field, | |
899 | * but instead set it to the top waiter. | |
6053ee3b | 900 | */ |
9d659ae1 PZ |
901 | owner = atomic_long_read(&lock->owner); |
902 | for (;;) { | |
e6b4457b PZ |
903 | MUTEX_WARN_ON(__owner_task(owner) != current); |
904 | MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP); | |
9d659ae1 PZ |
905 | |
906 | if (owner & MUTEX_FLAG_HANDOFF) | |
907 | break; | |
908 | ||
ab4e4d9f | 909 | if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) { |
9d659ae1 PZ |
910 | if (owner & MUTEX_FLAG_WAITERS) |
911 | break; | |
912 | ||
913 | return; | |
914 | } | |
9d659ae1 | 915 | } |
6053ee3b | 916 | |
ebf4c55c | 917 | raw_spin_lock(&lock->wait_lock); |
1d8fe7dc | 918 | debug_mutex_unlock(lock); |
6053ee3b IM |
919 | if (!list_empty(&lock->wait_list)) { |
920 | /* get the first entry from the wait-list: */ | |
921 | struct mutex_waiter *waiter = | |
9d659ae1 PZ |
922 | list_first_entry(&lock->wait_list, |
923 | struct mutex_waiter, list); | |
924 | ||
925 | next = waiter->task; | |
6053ee3b IM |
926 | |
927 | debug_mutex_wake_waiter(lock, waiter); | |
9d659ae1 | 928 | wake_q_add(&wake_q, next); |
6053ee3b IM |
929 | } |
930 | ||
9d659ae1 PZ |
931 | if (owner & MUTEX_FLAG_HANDOFF) |
932 | __mutex_handoff(lock, next); | |
933 | ||
ebf4c55c | 934 | raw_spin_unlock(&lock->wait_lock); |
9d659ae1 | 935 | |
1329ce6f | 936 | wake_up_q(&wake_q); |
6053ee3b IM |
937 | } |
938 | ||
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
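
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * propagating the interruption back toward userspace, assuming a
 * hypothetical device structure:
 *
 *	if (mutex_lock_interruptible(&dev->lock))
 *		return -ERESTARTSYS;
 *	...
 *	mutex_unlock(&dev->lock);
 */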

/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked;

	MUTEX_WARN_ON(lock->magic != lock);

	locked = __mutex_trylock(lock);
	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
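
/*
 * Illustrative sketch (not part of the original file): opportunistic work
 * that is simply skipped on contention, assuming a hypothetical
 * garbage-collection helper:
 *
 *	if (mutex_trylock(&pool->gc_lock)) {
 *		pool_gc(pool);
 *		mutex_unlock(&pool->gc_lock);
 *	}
 */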

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#endif /* !CONFIG_PREEMPT_RT */

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return 1 and hold the lock if the decrement hits 0; return 0 otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
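
/*
 * Illustrative sketch (not part of the original file): the typical
 * last-reference teardown pattern, assuming a hypothetical refcounted
 * object kept on a list protected by a registry mutex:
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcnt, &registry_lock)) {
 *		list_del(&obj->node);	(now invisible to new lookups)
 *		mutex_unlock(&registry_lock);
 *		kfree(obj);
 *	}
 */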