/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
        atomic_set(&lock->count, 1);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
        mutex_clear_owner(lock);

        debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
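
/*
 * A minimal usage sketch: callers normally reach __mutex_init() through the
 * mutex_init() wrapper, or declare the lock statically with DEFINE_MUTEX().
 * The my_driver_data structure and my_probe() function below are hypothetical,
 * for illustration only:
 *
 *	static DEFINE_MUTEX(my_global_lock);
 *
 *	struct my_driver_data {
 *		struct mutex lock;
 *		int users;
 *	};
 *
 *	static int my_probe(struct my_driver_data *data)
 *	{
 *		mutex_init(&data->lock);
 *		data->users = 0;
 *		return 0;
 *	}
 */
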
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
        might_sleep();
        /*
         * The locking fastpath is the 1->0 transition from
         * 'unlocked' into 'locked' state.
         */
        __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
        mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
#ifndef CONFIG_DEBUG_MUTEXES
        /*
         * When debugging is enabled we must not clear the owner before its
         * time: the slow path will always be taken, and it clears the owner
         * field after verifying that it was indeed current.
         */
        mutex_clear_owner(lock);
#endif
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
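
/*
 * A minimal usage sketch of the lock/unlock pair; the my_counter structure
 * and my_counter_add() helper are hypothetical:
 *
 *	struct my_counter {
 *		struct mutex lock;
 *		unsigned long value;
 *	};
 *
 *	static void my_counter_add(struct my_counter *c, unsigned long n)
 *	{
 *		mutex_lock(&c->lock);
 *		c->value += n;
 *		mutex_unlock(&c->lock);
 *	}
 *
 * The same task that called mutex_lock() must call mutex_unlock(), and
 * neither call may be made from interrupt context.
 */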

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    struct lockdep_map *nest_lock, unsigned long ip)
{
        struct task_struct *task = current;
        struct mutex_waiter waiter;
        unsigned long flags;

        preempt_disable();
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        /*
         * Optimistic spinning.
         *
         * We try to spin for acquisition when we find that there are no
         * pending waiters and the lock owner is currently running on a
         * (different) CPU.
         *
         * The rationale is that if the lock owner is running, it is likely to
         * release the lock soon.
         *
         * Since this needs the lock owner, and this mutex implementation
         * doesn't track the owner atomically in the lock field, we need to
         * track it non-atomically.
         *
         * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
         * to serialize everything.
         */

        for (;;) {
                struct task_struct *owner;

                /*
                 * If there's an owner, wait for it to either
                 * release the lock or go to sleep.
                 */
                owner = ACCESS_ONCE(lock->owner);
                if (owner && !mutex_spin_on_owner(lock, owner))
                        break;

                if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
                        lock_acquired(&lock->dep_map, ip);
                        mutex_set_owner(lock);
                        preempt_enable();
                        return 0;
                }

                /*
                 * When there's no owner, we might have preempted the owner
                 * between it acquiring the lock and setting the owner field.
                 * If we're an RT task, that will live-lock because we won't
                 * let the owner complete.
                 */
                if (!owner && (need_resched() || rt_task(task)))
                        break;

                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
                arch_mutex_cpu_relax();
        }
#endif
        spin_lock_mutex(&lock->wait_lock, flags);

        debug_mutex_lock_common(lock, &waiter);
        debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
        waiter.task = task;

        if (atomic_xchg(&lock->count, -1) == 1)
                goto done;

        lock_contended(&lock->dep_map, ip);

        for (;;) {
                /*
                 * Let's try to take the lock again - this is needed even if
                 * we get here for the first time (shortly after failing to
                 * acquire the lock), to make sure that we get a wakeup once
                 * it's unlocked. Later on, if we sleep, this is the
                 * operation that gives us the lock. We xchg it to -1, so
                 * that when we release the lock, we properly wake up the
                 * other waiters:
                 */
                if (atomic_xchg(&lock->count, -1) == 1)
                        break;

                /*
                 * got a signal? (This code gets eliminated in the
                 * TASK_UNINTERRUPTIBLE case.)
                 */
                if (unlikely(signal_pending_state(state, task))) {
                        mutex_remove_waiter(lock, &waiter,
                                            task_thread_info(task));
                        mutex_release(&lock->dep_map, 1, ip);
                        spin_unlock_mutex(&lock->wait_lock, flags);

                        debug_mutex_free_waiter(&waiter);
                        preempt_enable();
                        return -EINTR;
                }
                __set_task_state(task, state);

                /* didn't get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
                spin_lock_mutex(&lock->wait_lock, flags);
        }

done:
        lock_acquired(&lock->dep_map, ip);
        /* got the lock - rejoice! */
        mutex_remove_waiter(lock, &waiter, current_thread_info());
        mutex_set_owner(lock);

        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        debug_mutex_free_waiter(&waiter);
        preempt_enable();

        return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
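
/*
 * A minimal sketch of subclass annotation: when two mutexes of the same lock
 * class must be held at once, the inner one is taken with mutex_lock_nested()
 * so that lockdep does not report a false deadlock. The my_node structure and
 * my_move() helper are hypothetical, and the parent/child locking order is
 * assumed to be guaranteed elsewhere:
 *
 *	struct my_node {
 *		struct mutex lock;
 *		struct my_node *parent;
 *	};
 *
 *	static void my_move(struct my_node *parent, struct my_node *child)
 *	{
 *		mutex_lock(&parent->lock);
 *		mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *		child->parent = parent;
 *		mutex_unlock(&child->lock);
 *		mutex_unlock(&parent->lock);
 *	}
 */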

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
                                   subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;

        spin_lock_mutex(&lock->wait_lock, flags);
        mutex_release(&lock->dep_map, nested, _RET_IP_);
        debug_mutex_unlock(lock);

        /*
         * some architectures leave the lock unlocked in the fastpath failure
         * case, others need to leave it locked. In the latter case we have to
         * unlock it here
         */
        if (__mutex_slowpath_needs_to_unlock())
                atomic_set(&lock->count, 1);

        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
                                list_entry(lock->wait_list.next,
                                           struct mutex_waiter, list);

                debug_mutex_wake_waiter(lock, waiter);

                wake_up_process(waiter->task);
        }

        spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
        __mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_interruptible_slowpath);
        if (!ret)
                mutex_set_owner(lock);

        return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);
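
/*
 * A minimal sketch of the interruptible variant; the my_dev structure and
 * the my_read()/my_do_read() helpers are hypothetical. The point is that the
 * return value must be checked, since the lock is not held when -EINTR comes
 * back:
 *
 *	static ssize_t my_read(struct my_dev *dev, char __user *buf, size_t len)
 *	{
 *		ssize_t ret;
 *
 *		if (mutex_lock_interruptible(&dev->lock))
 *			return -ERESTARTSYS;
 *		ret = my_do_read(dev, buf, len);
 *		mutex_unlock(&dev->lock);
 *		return ret;
 *	}
 */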

int __sched mutex_lock_killable(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_killable_slowpath);
        if (!ret)
                mutex_set_owner(lock);

        return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);
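
/*
 * mutex_lock_killable() behaves like mutex_lock_interruptible(), but only a
 * fatal signal (one that is about to kill the task) interrupts the sleep, so
 * -EINTR is much rarer. A minimal sketch, with a hypothetical my_dev
 * structure:
 *
 *	if (mutex_lock_killable(&dev->lock))
 *		return -EINTR;
 *	...
 *	mutex_unlock(&dev->lock);
 */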

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;
        int prev;

        spin_lock_mutex(&lock->wait_lock, flags);

        prev = atomic_xchg(&lock->count, -1);
        if (likely(prev == 1)) {
                mutex_set_owner(lock);
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
        }

        /* Set it back to 0 if there are no waiters: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
        int ret;

        ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
        if (ret)
                mutex_set_owner(lock);

        return ret;
}
EXPORT_SYMBOL(mutex_trylock);
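
/*
 * A minimal sketch of the trylock convention: the return value is 1 on
 * success and 0 on contention, i.e. the opposite of what down_trylock()
 * returns. The my_dev structure and the my_poll_work()/my_update_stats()
 * helpers are hypothetical; when the lock is contended the work is simply
 * skipped:
 *
 *	static void my_poll_work(struct my_dev *dev)
 *	{
 *		if (!mutex_trylock(&dev->lock))
 *			return;
 *		my_update_stats(dev);
 *		mutex_unlock(&dev->lock);
 *	}
 */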

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic count which we are to decrement
 * @lock: the mutex to return holding if we dec to 0
 *
 * Returns true and holds the lock if the decrement hit 0, returns false
 * (without the lock held) otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
        /* dec if we can't possibly hit 0 */
        if (atomic_add_unless(cnt, -1, 1))
                return 0;
        /* we might hit 0, so take the lock */
        mutex_lock(lock);
        if (!atomic_dec_and_test(cnt)) {
                /* when we actually did the dec, we didn't hit 0 */
                mutex_unlock(lock);
                return 0;
        }
        /* we hit 0, and we hold the lock */
        return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
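
/*
 * A minimal sketch of the intended pattern: dropping the last reference is
 * made atomic with respect to a teardown path that runs under the mutex.
 * The my_obj structure, my_put() helper and my_list_lock are hypothetical:
 *
 *	static DEFINE_MUTEX(my_list_lock);
 *
 *	static void my_put(struct my_obj *obj)
 *	{
 *		if (!atomic_dec_and_mutex_lock(&obj->refcount, &my_list_lock))
 *			return;
 *		list_del(&obj->node);
 *		mutex_unlock(&my_list_lock);
 *		kfree(obj);
 *	}
 */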