// SPDX-License-Identifier: GPL-2.0-only

#include <linux/spinlock.h>
#include <linux/export.h>

#define RT_MUTEX_BUILD_MUTEX
#include "rtmutex.c"

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

/*
 * Debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
                                                  unsigned int state,
                                                  unsigned int subclass)
{
        int ret;

        might_sleep();
        mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        ret = __rt_mutex_lock(&lock->rtmutex, state);
        if (ret)
                mutex_release(&lock->dep_map, _RET_IP_);
        return ret;
}

void rt_mutex_base_init(struct rt_mutex_base *rtb)
{
        __rt_mutex_base_init(rtb);
}
EXPORT_SYMBOL(rt_mutex_base_init);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rt_mutex_lock_nested - lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 * @subclass:	the lockdep subclass
 */
void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
{
        __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
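
/*
 * Illustrative sketch (hypothetical caller, not part of this file): with
 * lockdep enabled, code that legitimately nests two rt_mutexes of the same
 * lock class passes a distinct subclass for the inner lock to avoid a false
 * positive.  The locks 'outer' and 'inner' are assumptions for illustration:
 *
 *	rt_mutex_lock(&outer);
 *	rt_mutex_lock_nested(&inner, SINGLE_DEPTH_NESTING);
 *	...
 *	rt_mutex_unlock(&inner);
 *	rt_mutex_unlock(&outer);
 */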

#else /* !CONFIG_DEBUG_LOCK_ALLOC */

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
        __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
#endif

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:	the rt_mutex to be locked
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
        return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
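
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * return value must be checked, because a pending signal aborts the
 * acquisition and the lock is then not held:
 *
 *	ret = rt_mutex_lock_interruptible(&lock);
 *	if (ret)
 *		return ret;		// -EINTR, lock was not taken
 *	...
 *	rt_mutex_unlock(&lock);
 */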

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * This function can only be called in thread context. It's safe to call it
 * from atomic regions, but not from hard or soft interrupt context.
 *
 * Returns:
 *  1 on success
 *  0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
        int ret;

        if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
                return 0;

        ret = __rt_mutex_trylock(&lock->rtmutex);
        if (ret)
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

        return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
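
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * non-zero return means the lock is now held and must be released:
 *
 *	if (rt_mutex_trylock(&lock)) {
 *		...			// fast path, lock held
 *		rt_mutex_unlock(&lock);
 *	} else {
 *		...			// contended, fall back or retry later
 *	}
 */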

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock:	the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
        mutex_release(&lock->dep_map, _RET_IP_);
        __rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/*
 * Futex variants, must not use fastpath.
 */
int __sched rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
        return rt_mutex_slowtrylock(lock);
}

int __sched __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
        return __rt_mutex_slowtrylock(lock);
}

/**
 * __rt_mutex_futex_unlock - Futex variant which, since futex variants
 * do not use the fast-path, can be simple and will not need to retry.
 *
 * @lock:	The rt_mutex to be unlocked
 * @wqh:	The wake queue head from which to get the next lock waiter
 */
bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
                                     struct rt_wake_q_head *wqh)
{
        lockdep_assert_held(&lock->wait_lock);

        debug_rt_mutex_unlock(lock);

        if (!rt_mutex_has_waiters(lock)) {
                lock->owner = NULL;
                return false; /* done */
        }

        /*
         * We've already deboosted, mark_wakeup_next_waiter() will
         * keep preemption disabled when we drop the wait_lock, to
         * avoid inversion prior to the wakeup.  preempt_disable()
         * therein pairs with rt_mutex_postunlock().
         */
        mark_wakeup_next_waiter(wqh, lock);

        return true; /* call postunlock() */
}

void __sched rt_mutex_futex_unlock(struct rt_mutex_base *lock)
{
        DEFINE_RT_WAKE_Q(wqh);
        unsigned long flags;
        bool postunlock;

        raw_spin_lock_irqsave(&lock->wait_lock, flags);
        postunlock = __rt_mutex_futex_unlock(lock, &wqh);
        raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

        if (postunlock)
                rt_mutex_postunlock(&wqh);
}
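
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a caller
 * that must drop further locks before issuing the wakeup uses the
 * __rt_mutex_futex_unlock() variant directly and defers the wakeup:
 *
 *	raw_spin_lock_irq(&lock->wait_lock);
 *	postunlock = __rt_mutex_futex_unlock(lock, &wqh);
 *	raw_spin_unlock_irq(&lock->wait_lock);
 *	...				// drop other locks, e.g. a futex hash bucket lock
 *	if (postunlock)
 *		rt_mutex_postunlock(&wqh);
 */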

/**
 * __rt_mutex_init - initialize the rt_mutex
 *
 * @lock:	The rt_mutex to be initialized
 * @name:	The lock name used for debugging
 * @key:	The lock class key used for debugging
 *
 * Initialize the rt_mutex to unlocked state.
 *
 * Initializing a locked rt_mutex is not allowed.
 */
void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
                             struct lock_class_key *key)
{
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        __rt_mutex_base_init(&lock->rtmutex);
        lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
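
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * __rt_mutex_init() is normally reached through the rt_mutex_init() wrapper,
 * which supplies the lock name and a static lock class key for the caller:
 *
 *	struct rt_mutex my_lock;
 *
 *	rt_mutex_init(&my_lock);
 *	rt_mutex_lock(&my_lock);
 *	...
 *	rt_mutex_unlock(&my_lock);
 */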

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. The caller has to do the serializing itself.
 *
 * Special API call for PI-futex support. This initializes the rtmutex and
 * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
 * possible at this point because the pi_state which contains the rtmutex
 * is not yet visible to other tasks.
 */
void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
                                        struct task_struct *proxy_owner)
{
        static struct lock_class_key pi_futex_key;

        __rt_mutex_base_init(lock);
        /*
         * On PREEMPT_RT the futex hashbucket spinlock becomes 'sleeping'
         * and rtmutex based. That causes a lockdep false positive, because
         * some of the futex functions invoke spin_unlock(&hb->lock) with
         * the wait_lock of the rtmutex associated to the pi_futex held.
         * spin_unlock() in turn takes wait_lock of the rtmutex on which
         * the spinlock is based, which makes lockdep notice a lock
         * recursion. Give the futex/rtmutex wait_lock a separate key.
         */
        lockdep_set_class(&lock->wait_lock, &pi_futex_key);
        rt_mutex_set_owner(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be unlocked
 *
 * No locking. The caller has to do the serializing itself.
 *
 * Special API call for PI-futex support. This just cleans up the rtmutex
 * (debugging) state. Concurrent operations on this rt_mutex are not
 * possible because it belongs to the pi_state which is about to be freed
 * and it is no longer visible to other tasks.
 */
void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
{
        debug_rt_mutex_proxy_unlock(lock);
        rt_mutex_set_owner(lock, NULL);
}

/**
 * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:	the rt_mutex to take
 * @waiter:	the pre-initialized rt_mutex_waiter
 * @task:	the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: does _NOT_ remove the @waiter on failure; must either call
 * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */
int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
                                        struct rt_mutex_waiter *waiter,
                                        struct task_struct *task)
{
        int ret;

        lockdep_assert_held(&lock->wait_lock);

        if (try_to_take_rt_mutex(lock, task, NULL))
                return 1;

        /* We enforce deadlock detection for futexes */
        ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL,
                                      RT_MUTEX_FULL_CHAINWALK);

        if (ret && !rt_mutex_owner(lock)) {
                /*
                 * Reset the return value. We might have
                 * returned with -EDEADLK and the owner
                 * released the lock while we were walking the
                 * pi chain.  Let the waiter sort it out.
                 */
                ret = 0;
        }

        return ret;
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:	the rt_mutex to take
 * @waiter:	the pre-initialized rt_mutex_waiter
 * @task:	the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
 * on failure.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */
int __sched rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
                                      struct rt_mutex_waiter *waiter,
                                      struct task_struct *task)
{
        int ret;

        raw_spin_lock_irq(&lock->wait_lock);
        ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
        if (unlikely(ret))
                remove_waiter(lock, waiter);
        raw_spin_unlock_irq(&lock->wait_lock);

        return ret;
}

/**
 * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
 * @lock:	the rt_mutex we were woken on
 * @to:		the timeout, null if none. hrtimer should already have
 *		been started.
 * @waiter:	the pre-initialized rt_mutex_waiter
 *
 * Wait for the lock acquisition started on our behalf by
 * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
 * rt_mutex_cleanup_proxy_lock().
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT
 *
 * Special API call for PI-futex support.
 */
int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
                                     struct hrtimer_sleeper *to,
                                     struct rt_mutex_waiter *waiter)
{
        int ret;

        raw_spin_lock_irq(&lock->wait_lock);
        /* sleep on the mutex */
        set_current_state(TASK_INTERRUPTIBLE);
        ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter);
        /*
         * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
         * have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);
        raw_spin_unlock_irq(&lock->wait_lock);

        return ret;
}

/**
 * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
 * @lock:	the rt_mutex we were woken on
 * @waiter:	the pre-initialized rt_mutex_waiter
 *
 * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
 * rt_mutex_wait_proxy_lock().
 *
 * Unless we acquired the lock, we're still enqueued on the wait-list and can
 * in fact still be granted ownership until we're removed. Therefore we can
 * find that we are in fact the owner and must disregard the
 * rt_mutex_wait_proxy_lock() failure.
 *
 * Returns:
 *  true  - did the cleanup, we are done.
 *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
 *          caller should disregard its return value.
 *
 * Special API call for PI-futex support.
 */
bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
                                         struct rt_mutex_waiter *waiter)
{
        bool cleanup = false;

        raw_spin_lock_irq(&lock->wait_lock);
        /*
         * Do an unconditional try-lock, this deals with the lock stealing
         * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
         * sets a NULL owner.
         *
         * We're not interested in the return value, because the subsequent
         * test on rt_mutex_owner() will infer that. If the trylock succeeded,
         * we will own the lock and it will have removed the waiter. If we
         * failed the trylock, we're still not owner and we need to remove
         * ourselves.
         */
        try_to_take_rt_mutex(lock, current, waiter);
        /*
         * Unless we're the owner, we're still enqueued on the wait_list.
         * So check if we became owner, if not, take us off the wait_list.
         */
        if (rt_mutex_owner(lock) != current) {
                remove_waiter(lock, waiter);
                cleanup = true;
        }
        /*
         * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
         * have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        raw_spin_unlock_irq(&lock->wait_lock);

        return cleanup;
}
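
/*
 * Illustrative sketch of the proxy-lock protocol (hypothetical callers, not
 * part of this file): the PI-futex code starts the acquisition on behalf of
 * the waiter, which later waits and, on failure, cleans up:
 *
 *	// in the task requeueing/preparing @task:
 *	ret = rt_mutex_start_proxy_lock(lock, &waiter, task);
 *	if (ret == 1)
 *		...			// lock acquired for @task, wake it up
 *
 *	// later, in @task itself:
 *	ret = rt_mutex_wait_proxy_lock(lock, to, &waiter);
 *	if (ret && !rt_mutex_cleanup_proxy_lock(lock, &waiter))
 *		ret = 0;		// became owner after all, ignore the failure
 */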

/*
 * Recheck the pi chain, in case we got a priority setting change.
 *
 * Called from sched_setscheduler().
 */
void __sched rt_mutex_adjust_pi(struct task_struct *task)
{
        struct rt_mutex_waiter *waiter;
        struct rt_mutex_base *next_lock;
        unsigned long flags;

        raw_spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                return;
        }
        next_lock = waiter->lock;
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(task);

        rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
                                   next_lock, NULL, task);
}

/*
 * Performs the wakeup of the top-waiter and re-enables preemption.
 */
void __sched rt_mutex_postunlock(struct rt_wake_q_head *wqh)
{
        rt_mutex_wake_up_q(wqh);
}

#ifdef CONFIG_DEBUG_RT_MUTEXES
void rt_mutex_debug_task_free(struct task_struct *task)
{
        DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
        DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
}
#endif

#ifdef CONFIG_PREEMPT_RT
/* Mutexes */
void __mutex_rt_init(struct mutex *mutex, const char *name,
                     struct lock_class_key *key)
{
        debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
        lockdep_init_map_wait(&mutex->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL(__mutex_rt_init);

static __always_inline int __mutex_lock_common(struct mutex *lock,
                                               unsigned int state,
                                               unsigned int subclass,
                                               struct lockdep_map *nest_lock,
                                               unsigned long ip)
{
        int ret;

        might_sleep();
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
        ret = __rt_mutex_lock(&lock->rtmutex, state);
        if (ret)
                mutex_release(&lock->dep_map, ip);
        else
                lock_acquired(&lock->dep_map, ip);
        return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched _mutex_lock_nest_lock(struct mutex *lock,
                                   struct lockdep_map *nest_lock)
{
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest_lock, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched mutex_lock_interruptible_nested(struct mutex *lock,
                                            unsigned int subclass)
{
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

int __sched mutex_lock_killable_nested(struct mutex *lock,
                                       unsigned int subclass)
{
        return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

void __sched mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
        int token;

        might_sleep();

        token = io_schedule_prepare();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
        io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

#else /* CONFIG_DEBUG_LOCK_ALLOC */

void __sched mutex_lock(struct mutex *lock)
{
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock);

int __sched mutex_lock_interruptible(struct mutex *lock)
{
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
        return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_killable);

void __sched mutex_lock_io(struct mutex *lock)
{
        int token = io_schedule_prepare();

        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
        io_schedule_finish(token);
}
EXPORT_SYMBOL(mutex_lock_io);
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */

int __sched mutex_trylock(struct mutex *lock)
{
        int ret;

        if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
                return 0;

        ret = __rt_mutex_trylock(&lock->rtmutex);
        if (ret)
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

        return ret;
}
EXPORT_SYMBOL(mutex_trylock);

void __sched mutex_unlock(struct mutex *lock)
{
        mutex_release(&lock->dep_map, _RET_IP_);
        __rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL(mutex_unlock);
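
/*
 * Illustrative sketch (hypothetical caller, not part of this file): on
 * PREEMPT_RT the regular mutex API above keeps its usual semantics while
 * being backed by the rtmutex, so callers stay unchanged.  'dev->lock' is an
 * assumed example mutex:
 *
 *	if (mutex_lock_interruptible(&dev->lock))
 *		return -ERESTARTSYS;
 *	...
 *	mutex_unlock(&dev->lock);
 */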

#endif /* CONFIG_PREEMPT_RT */