/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/*
 * A negative mutex count indicates that waiters are sleeping waiting for the
 * mutex.
 */
#define	MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	lock->spin_mlock = NULL;
#endif

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif
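
/*
 * Illustrative usage (annotation, not part of the original file): the
 * canonical pattern is a statically defined mutex guarding shared data.
 * The names 'demo_lock' and 'demo_count' below are made up.
 *
 *	static DEFINE_MUTEX(demo_lock);
 *	static int demo_count;
 *
 *	static void demo_inc(void)
 *	{
 *		mutex_lock(&demo_lock);
 *		demo_count++;
 *		mutex_unlock(&demo_lock);
 *	}
 */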

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * In order to avoid a stampede of mutex spinners from acquiring the mutex
 * more or less simultaneously, the spinners need to acquire a MCS lock
 * first before spinning on the owner field.
 *
 * We don't inline mspin_lock() so that perf can correctly account for the
 * time spent in this lock function.
 */
struct mspin_node {
	struct mspin_node *next;
	int		   locked;	/* 1 if lock acquired */
};
#define	MLOCK(mutex)	((struct mspin_node **)&((mutex)->spin_mlock))

static noinline
void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/* Lock acquired */
		node->locked = 1;
		return;
	}
	ACCESS_ONCE(prev->next) = node;
	smp_wmb();
	/* Wait until the lock holder passes the lock down */
	while (!ACCESS_ONCE(node->locked))
		arch_mutex_cpu_relax();
}

static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *next = ACCESS_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (cmpxchg(lock, node, NULL) == node)
			return;
		/* Wait until the next pointer is set */
		while (!(next = ACCESS_ONCE(node->next)))
			arch_mutex_cpu_relax();
	}
	ACCESS_ONCE(next->locked) = 1;
	smp_wmb();
}

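/*
 * Illustrative pairing (annotation, not part of the original file): each
 * spinner queues with an on-stack node, so no allocation is needed, and
 * the unlock must pass the same node that was used to lock:
 *
 *	struct mspin_node node;
 *
 *	mspin_lock(MLOCK(lock), &node);
 *	... spin on lock->owner ...
 *	mspin_unlock(MLOCK(lock), &node);
 */
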
/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu, dereference _after_ checking
	 * lock->owner still matches owner, if that fails, owner might
	 * point to free()d memory, if it still matches, the rcu_read_lock()
	 * ensures the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		arch_mutex_cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * We break out of the loop above on need_resched() and when the
	 * owner changed, which is a sign for heavy contention. Return
	 * success only when lock->owner is NULL.
	 */
	return lock->owner == NULL;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	int retval = 1;

	rcu_read_lock();
	if (lock->owner)
		retval = lock->owner->on_cpu;
	rcu_read_unlock();
	/*
	 * If lock->owner is not set, the mutex owner may have just acquired
	 * it and not set the owner yet, or the mutex has been released.
	 */
	return retval;
}
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(&lock->base);
#endif
	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);

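/*
 * Illustrative wound/wait usage (annotation, not part of the original
 * file): several ww_mutexes are taken under one acquire context, and
 * -EDEADLK from a lock attempt means "back off, unlock everything held
 * so far, and retry". The names 'demo_class', 'a' and 'b' are made up,
 * and the retry loop is elided.
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &demo_class);
 *	ww_mutex_lock(&a->lock, &ctx);
 *	if (ww_mutex_lock(&b->lock, &ctx) == -EDEADLK) {
 *		ww_mutex_unlock(&a->lock);
 *		... retry, locking the contended mutex first ...
 *	}
 *	ww_acquire_done(&ctx);
 *	... use a and b ...
 *	ww_mutex_unlock(&a->lock);
 *	ww_mutex_unlock(&b->lock);
 *	ww_acquire_fini(&ctx);
 */
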
static inline int __sched
__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (unlikely(ctx == hold_ctx))
		return -EALREADY;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

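/*
 * Worked example (annotation, not from the original source): stamps come
 * from a monotonically increasing per-class counter, so an older context
 * has a smaller stamp. With unsigned arithmetic,
 * ctx->stamp - hold_ctx->stamp <= LONG_MAX holds when ctx is as old as
 * or younger than the holder, even across counter wraparound; e.g.
 * hold_ctx->stamp == 5 and ctx->stamp == 9 gives 4 <= LONG_MAX, so the
 * younger ctx backs off with -EDEADLK.
 */
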
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

/*
 * After acquiring lock with fastpath or when we lost out in contested
 * slowpath, set ctx and wake up any waiters so they can recheck.
 *
 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
 * as the fastpath and opportunistic spinning are disabled in that case.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire wait_lock, add themselves
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up
	 */
	if (likely(atomic_read(&lock->base.count) == 0))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	int ret;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 *
	 * The mutex spinners are queued up using MCS lock so that only one
	 * spinner can compete for the mutex. However, if mutex spinning isn't
	 * going to happen, there is no point in going through the lock/unlock
	 * overhead.
	 */
	if (!mutex_can_spin_on_owner(lock))
		goto slowpath;

	for (;;) {
		struct task_struct *owner;
		struct mspin_node  node;

		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set the contents are undefined, only
			 * by acquiring wait_lock there is a guarantee that
			 * they are not invalid when reading.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (ACCESS_ONCE(ww->ctx))
				break;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		mspin_lock(MLOCK(lock), &node);
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner)) {
			mspin_unlock(MLOCK(lock), &node);
			break;
		}

		if ((atomic_read(&lock->count) == 1) &&
		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
			lock_acquired(&lock->dep_map, ip);
			if (!__builtin_constant_p(ww_ctx == NULL)) {
				struct ww_mutex *ww;
				ww = container_of(lock, struct ww_mutex, base);

				ww_mutex_set_context_fastpath(ww, ww_ctx);
			}

			mutex_set_owner(lock);
			mspin_unlock(MLOCK(lock), &node);
			preempt_enable();
			return 0;
		}
		mspin_unlock(MLOCK(lock), &node);

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
slowpath:
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (MUTEX_SHOW_NO_WAITER(lock) &&
		    (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
			ret = __mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	if (!__builtin_constant_p(ww_ctx == NULL)) {
		struct ww_mutex *ww = container_of(lock,
						   struct ww_mutex,
						   base);
		struct mutex_waiter *cur;

		/*
		 * This branch gets optimized out for the common case,
		 * and is only important for ww_mutex_lock.
		 */

		ww_mutex_lock_acquired(ww, ww_ctx);
		ww->ctx = ww_ctx;

		/*
		 * Give any possible sleeping processes the chance to wake up,
		 * so they can recheck if they have to back off.
		 */
		list_for_each_entry(cur, &lock->wait_list, list) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}
	}

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;

err:
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

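/*
 * Illustrative nesting (annotation, not part of the original file): when
 * two locks of the same lockdep class must be held at once, the second
 * acquisition needs a distinct subclass so lockdep does not report it as
 * recursive locking. 'parent' and 'child' are made-up names.
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */
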
static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx);

	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

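/*
 * Illustrative caller (annotation, not part of the original file): an
 * interruptible acquisition must check the return value and back out on
 * a signal. 'demo_lock' is a made-up name.
 *
 *	if (mutex_lock_interruptible(&demo_lock))
 *		return -ERESTARTSYS;
 *	... critical section ...
 *	mutex_unlock(&demo_lock);
 */
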
int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

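/*
 * Illustrative caller (annotation, not part of the original file): the
 * killable variant only wakes for fatal signals, which suits paths that
 * must not be disturbed by ordinary signals. 'demo_lock' is made up.
 *
 *	if (mutex_lock_killable(&demo_lock))
 *		return -EINTR;	(only a fatal signal gets us here)
 *	...
 *	mutex_unlock(&demo_lock);
 */
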
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx);
}

#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
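
/*
 * Illustrative caller (annotation, not part of the original file): note
 * the convention is inverted relative to down_trylock() - nonzero means
 * the lock was acquired. 'demo_lock' is a made-up name.
 *
 *	if (mutex_trylock(&demo_lock)) {
 *		... critical section ...
 *		mutex_unlock(&demo_lock);
 *	} else {
 *		... contended, fall back or retry later ...
 *	}
 */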

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
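
/*
 * Illustrative caller (annotation, not part of the original file): the
 * typical use is dropping a reference count where only the final put
 * must run teardown under the lock. 'demo' with fields 'refs' and
 * 'lock' is made up.
 *
 *	if (atomic_dec_and_mutex_lock(&demo->refs, &demo->lock)) {
 *		... last reference went away: tear down under the lock ...
 *		mutex_unlock(&demo->lock);
 *	}
 */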