// SPDX-License-Identifier: GPL-2.0-only
/*
 * rtmutex API
 */
#include <linux/spinlock.h>
#include <linux/export.h>

#define RT_MUTEX_BUILD_MUTEX
#include "rtmutex.c"

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;
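
/*
 * Note: with sysctl support enabled, this limit is runtime-tunable via
 * /proc/sys/kernel/max_lock_depth.
 */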

/*
 * Debug-aware fast / slowpath lock, trylock and unlock
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
						  unsigned int state,
						  unsigned int subclass)
{
	int ret;

	might_sleep();
	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	ret = __rt_mutex_lock(&lock->rtmutex, state);
	if (ret)
		mutex_release(&lock->dep_map, _RET_IP_);
	return ret;
}

void rt_mutex_base_init(struct rt_mutex_base *rtb)
{
	__rt_mutex_base_init(rtb);
}
EXPORT_SYMBOL(rt_mutex_base_init);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rt_mutex_lock_nested - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 * @subclass: the lockdep subclass
 */
void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);

#else /* !CONFIG_DEBUG_LOCK_ALLOC */

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
#endif
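
/*
 * Minimal usage sketch ('my_lock' and 'my_count' are hypothetical):
 *
 *	static DEFINE_RT_MUTEX(my_lock);
 *	static int my_count;
 *
 *	void my_count_inc(void)
 *	{
 *		rt_mutex_lock(&my_lock);
 *		my_count++;
 *		rt_mutex_unlock(&my_lock);
 *	}
 */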

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly
 *
 * @lock: the rt_mutex to be locked
 *
 * Returns:
 * 0 on success
 * -EINTR when interrupted by a signal
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
	return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
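
/*
 * Minimal sketch of interruptible acquisition, propagating -EINTR to
 * the caller ('my_lock' and do_locked_work() are hypothetical):
 *
 *	int ret = rt_mutex_lock_interruptible(&my_lock);
 *
 *	if (ret)
 *		return ret;
 *	do_locked_work();
 *	rt_mutex_unlock(&my_lock);
 *	return 0;
 */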

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 *
 * This function can only be called in thread context. It's safe to call it
 * from atomic regions, but not from hard or soft interrupt context.
 *
 * Returns:
 * 1 on success
 * 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	int ret;

	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
		return 0;

	ret = __rt_mutex_trylock(&lock->rtmutex);
	if (ret)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
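
/*
 * Minimal trylock sketch for a preemptible, non-interrupt context
 * ('my_lock' and do_opportunistic_work() are hypothetical):
 *
 *	if (rt_mutex_trylock(&my_lock)) {
 *		do_opportunistic_work();
 *		rt_mutex_unlock(&my_lock);
 *	}
 */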

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	mutex_release(&lock->dep_map, _RET_IP_);
	__rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/*
 * Futex variants, must not use fastpath.
 */
int __sched rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
	return rt_mutex_slowtrylock(lock);
}

int __sched __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
	return __rt_mutex_slowtrylock(lock);
}

/**
 * __rt_mutex_futex_unlock - Futex variant which, since futex variants
 *			     do not use the fast-path, can be simple and
 *			     does not need to retry.
 *
 * @lock: The rt_mutex to be unlocked
 * @wqh: The wake queue head from which to get the next lock waiter
 */
bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
				     struct rt_wake_q_head *wqh)
{
	lockdep_assert_held(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		return false; /* done */
	}

	/*
	 * We've already deboosted, mark_wakeup_next_waiter() will
	 * retain preempt_disabled when we drop the wait_lock, to
	 * avoid inversion prior to the wakeup. preempt_disable()
	 * therein pairs with rt_mutex_postunlock().
	 */
	mark_wakeup_next_waiter(wqh, lock);

	return true; /* call postunlock() */
}

void __sched rt_mutex_futex_unlock(struct rt_mutex_base *lock)
{
	DEFINE_RT_WAKE_Q(wqh);
	unsigned long flags;
	bool postunlock;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	postunlock = __rt_mutex_futex_unlock(lock, &wqh);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	if (postunlock)
		rt_mutex_postunlock(&wqh);
}

/**
 * __rt_mutex_init - initialize the rt_mutex
 *
 * @lock: The rt_mutex to be initialized
 * @name: The lock name used for debugging
 * @key: The lock class key used for debugging
 *
 * Initialize the rt_mutex to the unlocked state.
 *
 * Initializing a locked rt_mutex is not allowed.
 */
void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
			     struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	__rt_mutex_base_init(&lock->rtmutex);
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
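
/*
 * Callers normally go through the rt_mutex_init() wrapper macro, which
 * supplies the lock name and a static lock class key. Minimal sketch
 * ('my_lock' is hypothetical):
 *
 *	static struct rt_mutex my_lock;
 *
 *	rt_mutex_init(&my_lock);
 */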

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock: the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. The caller has to do the serializing itself.
 *
 * Special API call for PI-futex support. This initializes the rtmutex and
 * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
 * possible at this point because the pi_state which contains the rtmutex
 * is not yet visible to other tasks.
 */
void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
					struct task_struct *proxy_owner)
{
	static struct lock_class_key pi_futex_key;

	__rt_mutex_base_init(lock);
	/*
	 * On PREEMPT_RT the futex hashbucket spinlock becomes 'sleeping'
	 * and rtmutex based. That causes a lockdep false positive, because
	 * some of the futex functions invoke spin_unlock(&hb->lock) with
	 * the wait_lock of the rtmutex associated to the pi_futex held.
	 * spin_unlock() in turn takes wait_lock of the rtmutex on which
	 * the spinlock is based, which makes lockdep notice a lock
	 * recursion. Give the futex/rtmutex wait_lock a separate key.
	 */
	lockdep_set_class(&lock->wait_lock, &pi_futex_key);
	rt_mutex_set_owner(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock: the rt_mutex to be unlocked
 *
 * No locking. The caller has to do the serializing itself.
 *
 * Special API call for PI-futex support. This just cleans up the rtmutex
 * (debugging) state. Concurrent operations on this rt_mutex are not
 * possible because it belongs to the pi_state which is about to be freed
 * and it is no longer visible to other tasks.
 */
void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
}

/**
 * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock: the rt_mutex to take
 * @waiter: the pre-initialized rt_mutex_waiter
 * @task: the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: does _NOT_ remove the @waiter on failure; must either call
 * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
 *
 * Returns:
 * 0 - task blocked on lock
 * 1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */
int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
					struct rt_mutex_waiter *waiter,
					struct task_struct *task)
{
	int ret;

	lockdep_assert_held(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL))
		return 1;

	/* We enforce deadlock detection for futexes */
	ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL,
				      RT_MUTEX_FULL_CHAINWALK);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain. Let the waiter sort it out.
		 */
		ret = 0;
	}

	return ret;
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock: the rt_mutex to take
 * @waiter: the pre-initialized rt_mutex_waiter
 * @task: the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
 * on failure.
 *
 * Returns:
 * 0 - task blocked on lock
 * 1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */
int __sched rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				      struct rt_mutex_waiter *waiter,
				      struct task_struct *task)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
	if (unlikely(ret))
		remove_waiter(lock, waiter);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}

/**
 * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
 * @lock: the rt_mutex we were woken on
 * @to: the timeout, NULL if none. The hrtimer should already have
 *	been started.
 * @waiter: the pre-initialized rt_mutex_waiter
 *
 * Wait for the lock acquisition started on our behalf by
 * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
 * rt_mutex_cleanup_proxy_lock().
 *
 * Returns:
 * 0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT
 *
 * Special API call for PI-futex support.
 */
int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
				     struct hrtimer_sleeper *to,
				     struct rt_mutex_waiter *waiter)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	/* sleep on the mutex */
	set_current_state(TASK_INTERRUPTIBLE);
	ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter);
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}

/**
 * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
 * @lock: the rt_mutex we were woken on
 * @waiter: the pre-initialized rt_mutex_waiter
 *
 * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
 * rt_mutex_wait_proxy_lock().
 *
 * Unless we acquired the lock, we're still enqueued on the wait-list and can
 * in fact still be granted ownership until we're removed. Therefore we can
 * find we are in fact the owner and must disregard the
 * rt_mutex_wait_proxy_lock() failure.
 *
 * Returns:
 * true - we did the cleanup and are done
 * false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
 *	   the caller should disregard its return value
 *
 * Special API call for PI-futex support.
 */
bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
					 struct rt_mutex_waiter *waiter)
{
	bool cleanup = false;

	raw_spin_lock_irq(&lock->wait_lock);
	/*
	 * Do an unconditional try-lock, this deals with the lock stealing
	 * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
	 * sets a NULL owner.
	 *
	 * We're not interested in the return value, because the subsequent
	 * test on rt_mutex_owner() will infer that. If the trylock succeeded,
	 * we will own the lock and it will have removed the waiter. If we
	 * failed the trylock, we're still not owner and we need to remove
	 * ourselves.
	 */
	try_to_take_rt_mutex(lock, current, waiter);
	/*
	 * Unless we're the owner, we're still enqueued on the wait_list.
	 * So check if we became owner, if not, take us off the wait_list.
	 */
	if (rt_mutex_owner(lock) != current) {
		remove_waiter(lock, waiter);
		cleanup = true;
	}
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irq(&lock->wait_lock);

	return cleanup;
}
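
/*
 * Sketch of the proxy-lock life cycle as used by the PI-futex code,
 * simplified and with error handling elided; handle_failure() is
 * hypothetical:
 *
 *	ret = rt_mutex_start_proxy_lock(lock, &waiter, task);	(waker)
 *	...
 *	ret = rt_mutex_wait_proxy_lock(lock, to, &waiter);	(waiter)
 *	if (ret && rt_mutex_cleanup_proxy_lock(lock, &waiter))
 *		handle_failure(ret);
 */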

/*
 * Recheck the pi chain, in case our priority changed.
 *
 * Called from sched_setscheduler().
 */
void __sched rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	struct rt_mutex_base *next_lock;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}
	next_lock = waiter->lock;
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);

	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
				   next_lock, NULL, task);
}

/*
 * Performs the wakeup of the top-waiter and re-enables preemption.
 */
void __sched rt_mutex_postunlock(struct rt_wake_q_head *wqh)
{
	rt_mutex_wake_up_q(wqh);
}

#ifdef CONFIG_DEBUG_RT_MUTEXES
void rt_mutex_debug_task_free(struct task_struct *task)
{
	DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
	DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
}
#endif

#ifdef CONFIG_PREEMPT_RT
/* Mutexes */
void __mutex_rt_init(struct mutex *mutex, const char *name,
		     struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
	lockdep_init_map_wait(&mutex->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL(__mutex_rt_init);

static __always_inline int __mutex_lock_common(struct mutex *lock,
					       unsigned int state,
					       unsigned int subclass,
					       struct lockdep_map *nest_lock,
					       unsigned long ip)
{
	int ret;

	might_sleep();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
	ret = __rt_mutex_lock(&lock->rtmutex, state);
	if (ret)
		mutex_release(&lock->dep_map, ip);
	else
		lock_acquired(&lock->dep_map, ip);
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched _mutex_lock_nest_lock(struct mutex *lock,
				   struct lockdep_map *nest_lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest_lock, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched mutex_lock_interruptible_nested(struct mutex *lock,
					    unsigned int subclass)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

int __sched mutex_lock_killable_nested(struct mutex *lock,
				       unsigned int subclass)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

void __sched mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

#else /* CONFIG_DEBUG_LOCK_ALLOC */

void __sched mutex_lock(struct mutex *lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock);

int __sched mutex_lock_interruptible(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_killable);

void __sched mutex_lock_io(struct mutex *lock)
{
	int token = io_schedule_prepare();

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
	io_schedule_finish(token);
}
EXPORT_SYMBOL(mutex_lock_io);
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */

int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
		return 0;

	ret = __rt_mutex_trylock(&lock->rtmutex);
	if (ret)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);

void __sched mutex_unlock(struct mutex *lock)
{
	mutex_release(&lock->dep_map, _RET_IP_);
	__rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL(mutex_unlock);
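
/*
 * On PREEMPT_RT the mutex API above keeps its usual shape while being
 * rtmutex-based underneath, so generic callers stay unchanged. Minimal
 * sketch ('my_mutex' and do_sleepable_work() are hypothetical):
 *
 *	static DEFINE_MUTEX(my_mutex);
 *
 *	mutex_lock(&my_mutex);
 *	do_sleepable_work();
 *	mutex_unlock(&my_mutex);
 */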

#endif /* CONFIG_PREEMPT_RT */