Commit | Line | Data |
---|---|---|
457c8996 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
23f78d4a IM |
2 | /* |
3 | * RT-Mutexes: simple blocking mutual exclusion locks with PI support | |
4 | * | |
5 | * started by Ingo Molnar and Thomas Gleixner. | |
6 | * | |
7 | * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | |
8 | * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com> | |
9 | * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt | |
10 | * Copyright (C) 2006 Esben Nielsen | |
992caf7f SR |
11 | * Adaptive Spinlocks: |
12 | * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich, | |
13 | * and Peter Morreale, | |
14 | * Adaptive Spinlocks simplification: | |
15 | * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com> | |
d07fe82c | 16 | * |
387b1468 | 17 | * See Documentation/locking/rt-mutex-design.rst for details. |
23f78d4a | 18 | */ |
531ae4b0 TG |
19 | #include <linux/sched.h> |
20 | #include <linux/sched/debug.h> | |
21 | #include <linux/sched/deadline.h> | |
174cd4b1 | 22 | #include <linux/sched/signal.h> |
8bd75c77 | 23 | #include <linux/sched/rt.h> |
84f001e1 | 24 | #include <linux/sched/wake_q.h> |
add46132 | 25 | #include <linux/ww_mutex.h> |
23f78d4a | 26 | |
ee042be1 NK |
27 | #include <trace/events/lock.h> |
28 | ||
23f78d4a | 29 | #include "rtmutex_common.h" |
b76b44fb | 30 | #include "lock_events.h" |
23f78d4a | 31 | |
add46132 PZ |
32 | #ifndef WW_RT |
33 | # define build_ww_mutex() (false) | |
34 | # define ww_container_of(rtm) NULL | |
35 | ||
36 | static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter, | |
37 | struct rt_mutex *lock, | |
894d1b3d PZ |
38 | struct ww_acquire_ctx *ww_ctx, |
39 | struct wake_q_head *wake_q) | |
add46132 PZ |
40 | { |
41 | return 0; | |
42 | } | |
43 | ||
44 | static inline void __ww_mutex_check_waiters(struct rt_mutex *lock, | |
894d1b3d PZ |
45 | struct ww_acquire_ctx *ww_ctx, |
46 | struct wake_q_head *wake_q) | |
add46132 PZ |
47 | { |
48 | } | |
49 | ||
50 | static inline void ww_mutex_lock_acquired(struct ww_mutex *lock, | |
51 | struct ww_acquire_ctx *ww_ctx) | |
52 | { | |
53 | } | |
54 | ||
55 | static inline int __ww_mutex_check_kill(struct rt_mutex *lock, | |
56 | struct rt_mutex_waiter *waiter, | |
57 | struct ww_acquire_ctx *ww_ctx) | |
58 | { | |
59 | return 0; | |
60 | } | |
61 | ||
62 | #else | |
63 | # define build_ww_mutex() (true) | |
64 | # define ww_container_of(rtm) container_of(rtm, struct ww_mutex, base) | |
65 | # include "ww_mutex.h" | |
66 | #endif | |
67 | ||
23f78d4a IM |
68 | /* |
69 | * lock->owner state tracking: | |
70 | * | |
8161239a LJ |
71 | * lock->owner holds the task_struct pointer of the owner. Bit 0 |
72 | * is used to keep track of the "lock has waiters" state. | |
23f78d4a | 73 | * |
8161239a LJ |
74 | * owner bit0 |
75 | * NULL 0 lock is free (fast acquire possible) | |
76 | * NULL 1 lock is free and has waiters and the top waiter | |
77 | * is going to take the lock* | |
78 | * taskpointer 0 lock is held (fast release possible) | |
79 | * taskpointer 1 lock is held and has waiters** | |
23f78d4a IM |
80 | * |
81 | * The fast atomic compare exchange based acquire and release is only | |
8161239a LJ |
82 | * possible when bit 0 of lock->owner is 0. |
83 | * | |
84 | * (*) It can also be a transitional state when grabbing the lock | |
85 | * while ->wait_lock is held. To prevent any fast path cmpxchg to the lock, | |
86 | * we need to set bit 0 before looking at the lock, and the owner may be | |
87 | * NULL during this small window, hence this can be a transitional state. | |
23f78d4a | 88 | * |
8161239a LJ |
89 | * (**) There is a small time when bit 0 is set but there are no |
90 | * waiters. This can happen when grabbing the lock in the slow path. | |
91 | * To prevent a cmpxchg of the owner releasing the lock, we need to | |
92 | * set this bit before looking at the lock. | |
23f78d4a IM |
93 | */ |
94 | ||
1c0908d8 MG |
95 | static __always_inline struct task_struct * |
96 | rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner) | |
23f78d4a | 97 | { |
8161239a | 98 | unsigned long val = (unsigned long)owner; |
23f78d4a IM |
99 | |
100 | if (rt_mutex_has_waiters(lock)) | |
101 | val |= RT_MUTEX_HAS_WAITERS; | |
102 | ||
1c0908d8 MG |
103 | return (struct task_struct *)val; |
104 | } | |
105 | ||
106 | static __always_inline void | |
107 | rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner) | |
108 | { | |
109 | /* | |
110 | * lock->wait_lock is held but explicit acquire semantics are needed | |
111 | * for a new lock owner so WRITE_ONCE is insufficient. | |
112 | */ | |
113 | xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner)); | |
114 | } | |
115 | ||
116 | static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock) | |
117 | { | |
118 | /* lock->wait_lock is held so the unlock provides release semantics. */ | |
119 | WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL)); | |
23f78d4a IM |
120 | } |
121 | ||
830e6acc | 122 | static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock) |
23f78d4a IM |
123 | { |
124 | lock->owner = (struct task_struct *) | |
125 | ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS); | |
126 | } | |
127 | ||
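Bit 0 of the owner pointer is free because task_struct pointers are word aligned; that is what makes the encoding above possible. The following stand-alone C sketch models the same pointer-tagging idea with invented names (an illustration only, not the kernel helpers above):

/* Illustrative only: user-space sketch of the bit-0 tagging scheme.
 * Pointers are assumed to be at least 2-byte aligned so bit 0 is free
 * to carry the "lock has waiters" flag.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HAS_WAITERS	1UL

struct task { char name[16]; };

static uintptr_t encode_owner(struct task *owner, bool has_waiters)
{
	uintptr_t val = (uintptr_t)owner;

	if (has_waiters)
		val |= HAS_WAITERS;
	return val;
}

static struct task *decode_owner(uintptr_t val)
{
	return (struct task *)(val & ~HAS_WAITERS);
}

int main(void)
{
	struct task t = { "T1" };
	uintptr_t owner = encode_owner(&t, true);

	assert(decode_owner(owner) == &t);	/* owner pointer survives tagging */
	assert(owner & HAS_WAITERS);		/* waiters flag rides in bit 0 */
	printf("owner=%s waiters=%lu\n", decode_owner(owner)->name,
	       (unsigned long)(owner & HAS_WAITERS));
	return 0;
}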
1c0908d8 MG |
128 | static __always_inline void |
129 | fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock) | |
23f78d4a | 130 | { |
dbb26055 TG |
131 | unsigned long owner, *p = (unsigned long *) &lock->owner; |
132 | ||
133 | if (rt_mutex_has_waiters(lock)) | |
134 | return; | |
135 | ||
136 | /* | |
137 | * The rbtree has no waiters enqueued, now make sure that the | |
138 | * lock->owner still has the waiters bit set, otherwise the | |
139 | * following can happen: | |
140 | * | |
141 | * CPU 0 CPU 1 CPU2 | |
142 | * l->owner=T1 | |
143 | * rt_mutex_lock(l) | |
144 | * lock(l->lock) | |
145 | * l->owner = T1 | HAS_WAITERS; | |
146 | * enqueue(T2) | |
147 | * boost() | |
148 | * unlock(l->lock) | |
149 | * block() | |
150 | * | |
151 | * rt_mutex_lock(l) | |
152 | * lock(l->lock) | |
153 | * l->owner = T1 | HAS_WAITERS; | |
154 | * enqueue(T3) | |
155 | * boost() | |
156 | * unlock(l->lock) | |
157 | * block() | |
158 | * signal(->T2) signal(->T3) | |
159 | * lock(l->lock) | |
160 | * dequeue(T2) | |
161 | * deboost() | |
162 | * unlock(l->lock) | |
163 | * lock(l->lock) | |
164 | * dequeue(T3) | |
165 | * ==> wait list is empty | |
166 | * deboost() | |
167 | * unlock(l->lock) | |
168 | * lock(l->lock) | |
169 | * fixup_rt_mutex_waiters() | |
170 | * if (wait_list_empty(l)) { | |
171 | * l->owner = owner | |
172 | * owner = l->owner & ~HAS_WAITERS; | |
173 | * ==> l->owner = T1 | |
174 | * } | |
175 | * lock(l->lock) | |
176 | * rt_mutex_unlock(l) fixup_rt_mutex_waiters() | |
177 | * if (wait_list_empty(l)) { | |
178 | * owner = l->owner & ~HAS_WAITERS; | |
179 | * cmpxchg(l->owner, T1, NULL) | |
180 | * ===> Success (l->owner = NULL) | |
181 | * | |
182 | * l->owner = owner | |
183 | * ==> l->owner = T1 | |
184 | * } | |
185 | * | |
186 | * With the check for the waiter bit in place T3 on CPU2 will not | |
187 | * overwrite. All tasks fiddling with the waiters bit are | |
188 | * serialized by l->lock, so nothing else can modify the waiters | |
189 | * bit. If the bit is set then nothing can change l->owner either | |
190 | * so the simple RMW is safe. The cmpxchg() will simply fail if it | |
191 | * happens in the middle of the RMW because the waiters bit is | |
192 | * still set. | |
193 | */ | |
194 | owner = READ_ONCE(*p); | |
1c0908d8 MG |
195 | if (owner & RT_MUTEX_HAS_WAITERS) { |
196 | /* | |
197 | * See rt_mutex_set_owner() and rt_mutex_clear_owner() on | |
198 | * why xchg_acquire() is used for updating owner for | |
199 | * locking and WRITE_ONCE() for unlocking. | |
200 | * | |
201 | * WRITE_ONCE() would work for the acquire case too, but | |
202 | * in case that the lock acquisition failed it might | |
203 | * force other lockers into the slow path unnecessarily. | |
204 | */ | |
205 | if (acquire_lock) | |
206 | xchg_acquire(p, owner & ~RT_MUTEX_HAS_WAITERS); | |
207 | else | |
208 | WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS); | |
209 | } | |
23f78d4a IM |
210 | } |
211 | ||
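A minimal user-space sketch of the memory-ordering distinction made in fixup_rt_mutex_waiters() above, using C11 atomics and invented names: clearing the waiters bit needs acquire semantics only when the caller is about to become the owner; otherwise a plain store under wait_lock suffices. The rbtree check of the real code is not modeled here.

/* Illustrative only: acquire exchange vs. plain store when clearing a flag. */
#include <stdatomic.h>
#include <stdbool.h>

#define HAS_WAITERS	1UL

static void fixup_waiters_bit(_Atomic unsigned long *owner, bool acquire_lock)
{
	unsigned long val = atomic_load_explicit(owner, memory_order_relaxed);

	if (!(val & HAS_WAITERS))
		return;

	if (acquire_lock)	/* becoming the owner: needs acquire ordering */
		atomic_exchange_explicit(owner, val & ~HAS_WAITERS,
					 memory_order_acquire);
	else			/* still serialized by the wait lock: plain store */
		atomic_store_explicit(owner, val & ~HAS_WAITERS,
				      memory_order_relaxed);
}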
bd197234 | 212 | /* |
cede8841 SAS |
213 | * We can speed up the acquire/release, if there's no debugging state to be |
214 | * set up. | |
bd197234 | 215 | */ |
cede8841 | 216 | #ifndef CONFIG_DEBUG_RT_MUTEXES |
830e6acc | 217 | static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock, |
78515930 SAS |
218 | struct task_struct *old, |
219 | struct task_struct *new) | |
220 | { | |
709e0b62 | 221 | return try_cmpxchg_acquire(&lock->owner, &old, new); |
78515930 SAS |
222 | } |
223 | ||
af9f0063 SAS |
224 | static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock) |
225 | { | |
226 | return rt_mutex_cmpxchg_acquire(lock, NULL, current); | |
227 | } | |
228 | ||
830e6acc | 229 | static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock, |
78515930 SAS |
230 | struct task_struct *old, |
231 | struct task_struct *new) | |
232 | { | |
709e0b62 | 233 | return try_cmpxchg_release(&lock->owner, &old, new); |
78515930 | 234 | } |
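For reference, the uncontended fast path sketched with C11 atomics in user space: acquiring the lock is a single compare-and-exchange of the owner field from NULL to the caller with acquire ordering, and releasing is the reverse exchange with release ordering. Names are invented; this models the idea rather than the helpers above. Any other state (an owner already set, or the waiters bit set) makes the cmpxchg fail and sends the caller to the slow path.

/* Illustrative only: cmpxchg-based fast path acquire/release. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct fast_lock { _Atomic(void *) owner; };

static bool fast_lock_try_acquire(struct fast_lock *l, void *self)
{
	void *expected = NULL;

	/* Lock is free iff owner == NULL; claim it with acquire semantics. */
	return atomic_compare_exchange_strong_explicit(&l->owner, &expected,
						       self,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static bool fast_lock_try_release(struct fast_lock *l, void *self)
{
	void *expected = self;

	/* Release only succeeds while we are still the untagged owner. */
	return atomic_compare_exchange_strong_explicit(&l->owner, &expected,
						       NULL,
						       memory_order_release,
						       memory_order_relaxed);
}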
700318d1 DB |
235 | |
236 | /* | |
237 | * Callers must hold the ->wait_lock -- which is the whole purpose as we force | |
238 | * all future threads that attempt to [Rmw] the lock to the slowpath. As such | |
239 | * relaxed semantics suffice. | |
240 | */ | |
830e6acc | 241 | static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock) |
bd197234 | 242 | { |
ce3576eb UB |
243 | unsigned long *p = (unsigned long *) &lock->owner; |
244 | unsigned long owner, new; | |
bd197234 | 245 | |
ce3576eb | 246 | owner = READ_ONCE(*p); |
bd197234 | 247 | do { |
ce3576eb UB |
248 | new = owner | RT_MUTEX_HAS_WAITERS; |
249 | } while (!try_cmpxchg_relaxed(p, &owner, new)); | |
1c0908d8 MG |
250 | |
251 | /* | |
252 | * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE | |
253 | * operations in the event of contention. Ensure the successful | |
254 | * cmpxchg is visible. | |
255 | */ | |
256 | smp_mb__after_atomic(); | |
bd197234 | 257 | } |
27e35715 TG |
258 | |
259 | /* | |
260 | * Safe fastpath aware unlock: | |
261 | * 1) Clear the waiters bit | |
262 | * 2) Drop lock->wait_lock | |
263 | * 3) Try to unlock the lock with cmpxchg | |
264 | */ | |
830e6acc | 265 | static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock, |
d7a2edb8 | 266 | unsigned long flags) |
27e35715 TG |
267 | __releases(lock->wait_lock) |
268 | { | |
269 | struct task_struct *owner = rt_mutex_owner(lock); | |
270 | ||
271 | clear_rt_mutex_waiters(lock); | |
b4abf910 | 272 | raw_spin_unlock_irqrestore(&lock->wait_lock, flags); |
27e35715 TG |
273 | /* |
274 | * If a new waiter comes in between the unlock and the cmpxchg | |
275 | * we have two situations: | |
276 | * | |
277 | * unlock(wait_lock); | |
278 | * lock(wait_lock); | |
279 | * cmpxchg(p, owner, 0) == owner | |
280 | * mark_rt_mutex_waiters(lock); | |
281 | * acquire(lock); | |
282 | * or: | |
283 | * | |
284 | * unlock(wait_lock); | |
285 | * lock(wait_lock); | |
286 | * mark_rt_mutex_waiters(lock); | |
287 | * | |
288 | * cmpxchg(p, owner, 0) != owner | |
289 | * enqueue_waiter(); | |
290 | * unlock(wait_lock); | |
291 | * lock(wait_lock); | |
292 | * wake waiter(); | |
293 | * unlock(wait_lock); | |
294 | * lock(wait_lock); | |
295 | * acquire(lock); | |
296 | */ | |
700318d1 | 297 | return rt_mutex_cmpxchg_release(lock, owner, NULL); |
27e35715 TG |
298 | } |
299 | ||
bd197234 | 300 | #else |
830e6acc | 301 | static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock, |
78515930 SAS |
302 | struct task_struct *old, |
303 | struct task_struct *new) | |
304 | { | |
305 | return false; | |
306 | ||
307 | } | |
308 | ||
af9f0063 SAS |
309 | static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock); |
310 | ||
311 | static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock) | |
312 | { | |
313 | /* | |
314 | * With debug enabled, the cmpxchg based trylock above will always fail. |
315 | * | |
316 | * Avoid unconditionally taking the slow path by using | |
317 | * rt_mutex_slowtrylock() which is covered by the debug code and can |
318 | * acquire a non-contended rtmutex. | |
319 | */ | |
320 | return rt_mutex_slowtrylock(lock); | |
321 | } | |
322 | ||
830e6acc | 323 | static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock, |
78515930 SAS |
324 | struct task_struct *old, |
325 | struct task_struct *new) | |
326 | { | |
327 | return false; | |
328 | } | |
700318d1 | 329 | |
830e6acc | 330 | static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock) |
bd197234 TG |
331 | { |
332 | lock->owner = (struct task_struct *) | |
333 | ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); | |
334 | } | |
27e35715 TG |
335 | |
336 | /* | |
337 | * Simple slow path only version: lock->owner is protected by lock->wait_lock. | |
338 | */ | |
830e6acc | 339 | static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock, |
d7a2edb8 | 340 | unsigned long flags) |
27e35715 TG |
341 | __releases(lock->wait_lock) |
342 | { | |
343 | lock->owner = NULL; | |
b4abf910 | 344 | raw_spin_unlock_irqrestore(&lock->wait_lock, flags); |
27e35715 TG |
345 | return true; |
346 | } | |
bd197234 TG |
347 | #endif |
348 | ||
715f7f9e PZ |
349 | static __always_inline int __waiter_prio(struct task_struct *task) |
350 | { | |
351 | int prio = task->prio; | |
352 | ||
ae04f69d | 353 | if (!rt_or_dl_prio(prio)) |
715f7f9e PZ |
354 | return DEFAULT_PRIO; |
355 | ||
356 | return prio; | |
357 | } | |
358 | ||
f7853c34 PZ |
359 | /* |
360 | * Update the waiter->tree copy of the sort keys. | |
361 | */ | |
715f7f9e PZ |
362 | static __always_inline void |
363 | waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task) | |
364 | { | |
f7853c34 PZ |
365 | lockdep_assert_held(&waiter->lock->wait_lock); |
366 | lockdep_assert(RB_EMPTY_NODE(&waiter->tree.entry)); | |
367 | ||
368 | waiter->tree.prio = __waiter_prio(task); | |
369 | waiter->tree.deadline = task->dl.deadline; | |
370 | } | |
371 | ||
372 | /* | |
373 | * Update the waiter->pi_tree copy of the sort keys (from the tree copy). | |
374 | */ | |
375 | static __always_inline void | |
376 | waiter_clone_prio(struct rt_mutex_waiter *waiter, struct task_struct *task) | |
377 | { | |
378 | lockdep_assert_held(&waiter->lock->wait_lock); | |
379 | lockdep_assert_held(&task->pi_lock); | |
380 | lockdep_assert(RB_EMPTY_NODE(&waiter->pi_tree.entry)); | |
381 | ||
382 | waiter->pi_tree.prio = waiter->tree.prio; | |
383 | waiter->pi_tree.deadline = waiter->tree.deadline; | |
715f7f9e PZ |
384 | } |
385 | ||
19830e55 | 386 | /* |
f7853c34 | 387 | * Only use with rt_waiter_node_{less,equal}() |
19830e55 | 388 | */ |
f7853c34 PZ |
389 | #define task_to_waiter_node(p) \ |
390 | &(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline } | |
19830e55 | 391 | #define task_to_waiter(p) \ |
f7853c34 | 392 | &(struct rt_mutex_waiter){ .tree = *task_to_waiter_node(p) } |
19830e55 | 393 | |
f7853c34 PZ |
394 | static __always_inline int rt_waiter_node_less(struct rt_waiter_node *left, |
395 | struct rt_waiter_node *right) | |
fb00aca4 | 396 | { |
2d3d891d | 397 | if (left->prio < right->prio) |
fb00aca4 PZ |
398 | return 1; |
399 | ||
400 | /* | |
2d3d891d DF |
401 | * If both waiters have dl_prio(), we check the deadlines of the |
402 | * associated tasks. | |
403 | * If left waiter has a dl_prio(), and we didn't return 1 above, | |
404 | * then right waiter has a dl_prio() too. | |
fb00aca4 | 405 | */ |
2d3d891d | 406 | if (dl_prio(left->prio)) |
e0aad5b4 | 407 | return dl_time_before(left->deadline, right->deadline); |
fb00aca4 PZ |
408 | |
409 | return 0; | |
410 | } | |
411 | ||
f7853c34 PZ |
412 | static __always_inline int rt_waiter_node_equal(struct rt_waiter_node *left, |
413 | struct rt_waiter_node *right) | |
19830e55 PZ |
414 | { |
415 | if (left->prio != right->prio) | |
416 | return 0; | |
417 | ||
418 | /* | |
419 | * If both waiters have dl_prio(), we check the deadlines of the | |
420 | * associated tasks. | |
421 | * If left waiter has a dl_prio(), and we didn't return 0 above, | |
422 | * then right waiter has a dl_prio() too. | |
423 | */ | |
424 | if (dl_prio(left->prio)) | |
425 | return left->deadline == right->deadline; | |
426 | ||
427 | return 1; | |
428 | } | |
429 | ||
48eb3f4f GH |
430 | static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter, |
431 | struct rt_mutex_waiter *top_waiter) | |
432 | { | |
f7853c34 | 433 | if (rt_waiter_node_less(&waiter->tree, &top_waiter->tree)) |
48eb3f4f GH |
434 | return true; |
435 | ||
436 | #ifdef RT_MUTEX_BUILD_SPINLOCKS | |
437 | /* | |
438 | * Note that RT tasks are excluded from same priority (lateral) | |
439 | * steals to prevent the introduction of an unbounded latency. | |
440 | */ | |
ae04f69d | 441 | if (rt_or_dl_prio(waiter->tree.prio)) |
48eb3f4f GH |
442 | return false; |
443 | ||
f7853c34 | 444 | return rt_waiter_node_equal(&waiter->tree, &top_waiter->tree); |
48eb3f4f GH |
445 | #else |
446 | return false; | |
447 | #endif | |
448 | } | |
449 | ||
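A stand-alone model of the ordering that rt_waiter_node_less() and rt_waiter_node_equal() implement: a numerically lower prio wins, and when both waiters are deadline-class the earlier absolute deadline wins. The DL_PRIO marker and all names below are invented for the sketch; the kernel uses dl_prio() and dl_time_before() instead.

/* Illustrative only: (prio, deadline) waiter ordering in user space. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DL_PRIO		(-1)	/* pretend deadline tasks report prio -1 */

struct waiter_key {
	int prio;		/* lower value = higher priority */
	uint64_t deadline;	/* only meaningful when prio == DL_PRIO */
};

static bool waiter_less(const struct waiter_key *l, const struct waiter_key *r)
{
	if (l->prio < r->prio)
		return true;
	if (l->prio == DL_PRIO && r->prio == DL_PRIO)
		return (int64_t)(l->deadline - r->deadline) < 0;
	return false;
}

int main(void)
{
	struct waiter_key a = { .prio = DL_PRIO, .deadline = 100 };
	struct waiter_key b = { .prio = DL_PRIO, .deadline = 200 };
	struct waiter_key c = { .prio = 10 };

	/* a<b=1 (earlier deadline), a<c=1 (deadline beats RT), c<a=0 */
	printf("a<b=%d a<c=%d c<a=%d\n",
	       waiter_less(&a, &b), waiter_less(&a, &c), waiter_less(&c, &a));
	return 0;
}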
5a798725 | 450 | #define __node_2_waiter(node) \ |
f7853c34 | 451 | rb_entry((node), struct rt_mutex_waiter, tree.entry) |
5a798725 | 452 | |
d7a2edb8 | 453 | static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b) |
5a798725 | 454 | { |
add46132 PZ |
455 | struct rt_mutex_waiter *aw = __node_2_waiter(a); |
456 | struct rt_mutex_waiter *bw = __node_2_waiter(b); | |
457 | ||
f7853c34 | 458 | if (rt_waiter_node_less(&aw->tree, &bw->tree)) |
add46132 PZ |
459 | return 1; |
460 | ||
461 | if (!build_ww_mutex()) | |
462 | return 0; | |
463 | ||
f7853c34 | 464 | if (rt_waiter_node_less(&bw->tree, &aw->tree)) |
add46132 PZ |
465 | return 0; |
466 | ||
467 | /* NOTE: relies on waiter->ww_ctx being set before insertion */ | |
468 | if (aw->ww_ctx) { | |
469 | if (!bw->ww_ctx) | |
470 | return 1; | |
471 | ||
472 | return (signed long)(aw->ww_ctx->stamp - | |
473 | bw->ww_ctx->stamp) < 0; | |
474 | } | |
475 | ||
476 | return 0; | |
5a798725 PZ |
477 | } |
478 | ||
d7a2edb8 | 479 | static __always_inline void |
830e6acc | 480 | rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) |
fb00aca4 | 481 | { |
f7853c34 PZ |
482 | lockdep_assert_held(&lock->wait_lock); |
483 | ||
484 | rb_add_cached(&waiter->tree.entry, &lock->waiters, __waiter_less); | |
fb00aca4 PZ |
485 | } |
486 | ||
d7a2edb8 | 487 | static __always_inline void |
830e6acc | 488 | rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) |
fb00aca4 | 489 | { |
f7853c34 PZ |
490 | lockdep_assert_held(&lock->wait_lock); |
491 | ||
492 | if (RB_EMPTY_NODE(&waiter->tree.entry)) | |
fb00aca4 PZ |
493 | return; |
494 | ||
f7853c34 PZ |
495 | rb_erase_cached(&waiter->tree.entry, &lock->waiters); |
496 | RB_CLEAR_NODE(&waiter->tree.entry); | |
fb00aca4 PZ |
497 | } |
498 | ||
f7853c34 PZ |
499 | #define __node_2_rt_node(node) \ |
500 | rb_entry((node), struct rt_waiter_node, entry) | |
5a798725 | 501 | |
f7853c34 | 502 | static __always_inline bool __pi_waiter_less(struct rb_node *a, const struct rb_node *b) |
5a798725 | 503 | { |
f7853c34 | 504 | return rt_waiter_node_less(__node_2_rt_node(a), __node_2_rt_node(b)); |
5a798725 PZ |
505 | } |
506 | ||
d7a2edb8 | 507 | static __always_inline void |
fb00aca4 PZ |
508 | rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) |
509 | { | |
f7853c34 PZ |
510 | lockdep_assert_held(&task->pi_lock); |
511 | ||
512 | rb_add_cached(&waiter->pi_tree.entry, &task->pi_waiters, __pi_waiter_less); | |
fb00aca4 PZ |
513 | } |
514 | ||
d7a2edb8 | 515 | static __always_inline void |
fb00aca4 PZ |
516 | rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) |
517 | { | |
f7853c34 PZ |
518 | lockdep_assert_held(&task->pi_lock); |
519 | ||
520 | if (RB_EMPTY_NODE(&waiter->pi_tree.entry)) | |
fb00aca4 PZ |
521 | return; |
522 | ||
f7853c34 PZ |
523 | rb_erase_cached(&waiter->pi_tree.entry, &task->pi_waiters); |
524 | RB_CLEAR_NODE(&waiter->pi_tree.entry); | |
fb00aca4 PZ |
525 | } |
526 | ||
f7853c34 PZ |
527 | static __always_inline void rt_mutex_adjust_prio(struct rt_mutex_base *lock, |
528 | struct task_struct *p) | |
c365c292 | 529 | { |
acd58620 | 530 | struct task_struct *pi_task = NULL; |
e96a7705 | 531 | |
f7853c34 PZ |
532 | lockdep_assert_held(&lock->wait_lock); |
533 | lockdep_assert(rt_mutex_owner(lock) == p); | |
acd58620 | 534 | lockdep_assert_held(&p->pi_lock); |
c365c292 | 535 | |
acd58620 PZ |
536 | if (task_has_pi_waiters(p)) |
537 | pi_task = task_top_pi_waiter(p)->task; | |
c365c292 | 538 | |
acd58620 | 539 | rt_mutex_setprio(p, pi_task); |
23f78d4a IM |
540 | } |
541 | ||
b576e640 | 542 | /* RT mutex specific wake_q wrappers */ |
9321f815 TG |
543 | static __always_inline void rt_mutex_wake_q_add_task(struct rt_wake_q_head *wqh, |
544 | struct task_struct *task, | |
545 | unsigned int wake_state) | |
b576e640 | 546 | { |
9321f815 | 547 | if (IS_ENABLED(CONFIG_PREEMPT_RT) && wake_state == TASK_RTLOCK_WAIT) { |
456cfbc6 TG |
548 | if (IS_ENABLED(CONFIG_PROVE_LOCKING)) |
549 | WARN_ON_ONCE(wqh->rtlock_task); | |
9321f815 TG |
550 | get_task_struct(task); |
551 | wqh->rtlock_task = task; | |
456cfbc6 | 552 | } else { |
9321f815 | 553 | wake_q_add(&wqh->head, task); |
456cfbc6 | 554 | } |
b576e640 TG |
555 | } |
556 | ||
9321f815 TG |
557 | static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh, |
558 | struct rt_mutex_waiter *w) | |
559 | { | |
560 | rt_mutex_wake_q_add_task(wqh, w->task, w->wake_state); | |
561 | } | |
562 | ||
b576e640 TG |
563 | static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh) |
564 | { | |
456cfbc6 TG |
565 | if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) { |
566 | wake_up_state(wqh->rtlock_task, TASK_RTLOCK_WAIT); | |
567 | put_task_struct(wqh->rtlock_task); | |
568 | wqh->rtlock_task = NULL; | |
569 | } | |
570 | ||
571 | if (!wake_q_empty(&wqh->head)) | |
572 | wake_up_q(&wqh->head); | |
b576e640 TG |
573 | |
574 | /* Pairs with preempt_disable() in mark_wakeup_next_waiter() */ | |
575 | preempt_enable(); | |
576 | } | |
577 | ||
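The wrappers above only collect wakeups; they are issued by rt_mutex_wake_up_q() after wait_lock has been dropped, so a freshly woken task does not immediately collide with the lock we still hold. A toy model of that deferred-wakeup pattern (invented names, no real scheduling):

/* Illustrative only: queue wakeups under the lock, issue them after unlock. */
#include <stdio.h>

struct toy_wake_q {
	const char *pending[8];
	int count;
};

static void toy_wake_q_add(struct toy_wake_q *wqh, const char *task)
{
	if (wqh->count < 8)
		wqh->pending[wqh->count++] = task;
}

static void toy_wake_up_q(struct toy_wake_q *wqh)
{
	for (int i = 0; i < wqh->count; i++)
		printf("waking %s\n", wqh->pending[i]);	/* wake_up_process() in the kernel */
	wqh->count = 0;
}

int main(void)
{
	struct toy_wake_q wqh = { .count = 0 };

	/* ... wait_lock held: pick the next waiter ... */
	toy_wake_q_add(&wqh, "top-waiter");
	/* ... wait_lock dropped ... */
	toy_wake_up_q(&wqh);
	return 0;
}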
8930ed80 TG |
578 | /* |
579 | * Deadlock detection is conditional: | |
580 | * | |
581 | * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted | |
582 | * if the detect argument is RT_MUTEX_FULL_CHAINWALK. |
583 | * | |
584 | * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always | |
585 | * conducted independent of the detect argument. | |
586 | * | |
587 | * If the waiter argument is NULL this indicates the deboost path and | |
588 | * deadlock detection is disabled independent of the detect argument | |
589 | * and the config settings. | |
590 | */ | |
d7a2edb8 TG |
591 | static __always_inline bool |
592 | rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, | |
593 | enum rtmutex_chainwalk chwalk) | |
8930ed80 | 594 | { |
07d25971 | 595 | if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES)) |
f7efc479 TG |
596 | return waiter != NULL; |
597 | return chwalk == RT_MUTEX_FULL_CHAINWALK; | |
8930ed80 TG |
598 | } |
599 | ||
830e6acc | 600 | static __always_inline struct rt_mutex_base *task_blocked_on_lock(struct task_struct *p) |
82084984 TG |
601 | { |
602 | return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; | |
603 | } | |
604 | ||
23f78d4a IM |
605 | /* |
606 | * Adjust the priority chain. Also used for deadlock detection. | |
607 | * Decreases task's usage by one - may thus free the task. | |
0c106173 | 608 | * |
82084984 TG |
609 | * @task: the task owning the mutex (owner) for which a chain walk is |
610 | * probably needed | |
e6beaa36 | 611 | * @chwalk: do we have to carry out deadlock detection? |
82084984 TG |
612 | * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck |
613 | * things for a task that has just got its priority adjusted, and | |
614 | * is waiting on a mutex) | |
615 | * @next_lock: the mutex on which the owner of @orig_lock was blocked before | |
616 | * we dropped its pi_lock. Is never dereferenced, only used for | |
617 | * comparison to detect lock chain changes. | |
0c106173 | 618 | * @orig_waiter: rt_mutex_waiter struct for the task that has just donated |
82084984 TG |
619 | * its priority to the mutex owner (can be NULL in the case |
620 | * depicted above or if the top waiter has gone away and we are |
621 | * actually deboosting the owner) | |
622 | * @top_task: the current top waiter | |
0c106173 | 623 | * |
23f78d4a | 624 | * Returns 0 or -EDEADLK. |
3eb65aea TG |
625 | * |
626 | * Chain walk basics and protection scope | |
627 | * | |
628 | * [R] refcount on task | |
f7853c34 | 629 | * [Pn] task->pi_lock held |
3eb65aea TG |
630 | * [L] rtmutex->wait_lock held |
631 | * | |
f7853c34 PZ |
632 | * Normal locking order: |
633 | * | |
634 | * rtmutex->wait_lock | |
635 | * task->pi_lock | |
636 | * | |
3eb65aea TG |
637 | * Step Description Protected by |
638 | * function arguments: | |
639 | * @task [R] | |
640 | * @orig_lock if != NULL @top_task is blocked on it | |
641 | * @next_lock Unprotected. Cannot be | |
642 | * dereferenced. Only used for | |
643 | * comparison. | |
644 | * @orig_waiter if != NULL @top_task is blocked on it | |
645 | * @top_task current, or in case of proxy | |
646 | * locking protected by calling | |
647 | * code | |
648 | * again: | |
649 | * loop_sanity_check(); | |
650 | * retry: | |
f7853c34 PZ |
651 | * [1] lock(task->pi_lock); [R] acquire [P1] |
652 | * [2] waiter = task->pi_blocked_on; [P1] | |
653 | * [3] check_exit_conditions_1(); [P1] | |
654 | * [4] lock = waiter->lock; [P1] | |
655 | * [5] if (!try_lock(lock->wait_lock)) { [P1] try to acquire [L] | |
656 | * unlock(task->pi_lock); release [P1] | |
3eb65aea TG |
657 | * goto retry; |
658 | * } | |
f7853c34 PZ |
659 | * [6] check_exit_conditions_2(); [P1] + [L] |
660 | * [7] requeue_lock_waiter(lock, waiter); [P1] + [L] | |
661 | * [8] unlock(task->pi_lock); release [P1] | |
3eb65aea TG |
662 | * put_task_struct(task); release [R] |
663 | * [9] check_exit_conditions_3(); [L] | |
664 | * [10] task = owner(lock); [L] | |
665 | * get_task_struct(task); [L] acquire [R] | |
f7853c34 PZ |
666 | * lock(task->pi_lock); [L] acquire [P2] |
667 | * [11] requeue_pi_waiter(tsk, waiters(lock));[P2] + [L] | |
668 | * [12] check_exit_conditions_4(); [P2] + [L] | |
669 | * [13] unlock(task->pi_lock); release [P2] | |
3eb65aea TG |
670 | * unlock(lock->wait_lock); release [L] |
671 | * goto again; | |
f7853c34 PZ |
672 | * |
673 | * Where P1 is the blocking task and P2 is the lock owner; going up one step | |
674 | * the owner becomes the next blocked task, etc. |
675 | * | |
676 | * | |
23f78d4a | 677 | */ |
d7a2edb8 TG |
678 | static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task, |
679 | enum rtmutex_chainwalk chwalk, | |
830e6acc PZ |
680 | struct rt_mutex_base *orig_lock, |
681 | struct rt_mutex_base *next_lock, | |
d7a2edb8 TG |
682 | struct rt_mutex_waiter *orig_waiter, |
683 | struct task_struct *top_task) | |
23f78d4a | 684 | { |
23f78d4a | 685 | struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; |
a57594a1 | 686 | struct rt_mutex_waiter *prerequeue_top_waiter; |
8930ed80 | 687 | int ret = 0, depth = 0; |
830e6acc | 688 | struct rt_mutex_base *lock; |
8930ed80 | 689 | bool detect_deadlock; |
67792e2c | 690 | bool requeue = true; |
23f78d4a | 691 | |
8930ed80 | 692 | detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk); |
23f78d4a IM |
693 | |
694 | /* | |
695 | * The (de)boosting is a step by step approach with a lot of | |
696 | * pitfalls. We want this to be preemptible and we want to hold a |
697 | * maximum of two locks per step. So we have to check | |
698 | * carefully whether things change under us. | |
699 | */ | |
700 | again: | |
3eb65aea TG |
701 | /* |
702 | * We limit the lock chain length for each invocation. | |
703 | */ | |
23f78d4a IM |
704 | if (++depth > max_lock_depth) { |
705 | static int prev_max; | |
706 | ||
707 | /* | |
708 | * Print this only once. If the admin changes the limit, | |
709 | * print a new message when reaching the limit again. | |
710 | */ | |
711 | if (prev_max != max_lock_depth) { | |
712 | prev_max = max_lock_depth; | |
713 | printk(KERN_WARNING "Maximum lock depth %d reached " | |
714 | "task: %s (%d)\n", max_lock_depth, | |
ba25f9dc | 715 | top_task->comm, task_pid_nr(top_task)); |
23f78d4a IM |
716 | } |
717 | put_task_struct(task); | |
718 | ||
3d5c9340 | 719 | return -EDEADLK; |
23f78d4a | 720 | } |
3eb65aea TG |
721 | |
722 | /* | |
723 | * We are fully preemptible here and only hold the refcount on | |
724 | * @task. So everything can have changed under us since the | |
725 | * caller or our own code below (goto retry/again) dropped all | |
726 | * locks. | |
727 | */ | |
23f78d4a IM |
728 | retry: |
729 | /* | |
3eb65aea | 730 | * [1] Task cannot go away as we did a get_task() before ! |
23f78d4a | 731 | */ |
b4abf910 | 732 | raw_spin_lock_irq(&task->pi_lock); |
23f78d4a | 733 | |
3eb65aea TG |
734 | /* |
735 | * [2] Get the waiter on which @task is blocked on. | |
736 | */ | |
23f78d4a | 737 | waiter = task->pi_blocked_on; |
3eb65aea TG |
738 | |
739 | /* | |
740 | * [3] check_exit_conditions_1() protected by task->pi_lock. | |
741 | */ | |
742 | ||
23f78d4a IM |
743 | /* |
744 | * Check whether the end of the boosting chain has been | |
745 | * reached or the state of the chain has changed while we | |
746 | * dropped the locks. | |
747 | */ | |
8161239a | 748 | if (!waiter) |
23f78d4a IM |
749 | goto out_unlock_pi; |
750 | ||
1a539a87 TG |
751 | /* |
752 | * Check the orig_waiter state. After we dropped the locks, | |
8161239a | 753 | * the previous owner of the lock might have released the lock. |
1a539a87 | 754 | */ |
8161239a | 755 | if (orig_waiter && !rt_mutex_owner(orig_lock)) |
1a539a87 TG |
756 | goto out_unlock_pi; |
757 | ||
82084984 TG |
758 | /* |
759 | * We dropped all locks after taking a refcount on @task, so | |
760 | * the task might have moved on in the lock chain or even left | |
761 | * the chain completely and blocks now on an unrelated lock or | |
762 | * on @orig_lock. | |
763 | * | |
764 | * We stored the lock on which @task was blocked in @next_lock, | |
765 | * so we can detect the chain change. | |
766 | */ | |
767 | if (next_lock != waiter->lock) | |
768 | goto out_unlock_pi; | |
769 | ||
6467822b PZ |
770 | /* |
771 | * There could be 'spurious' loops in the lock graph due to ww_mutex, | |
772 | * consider: | |
773 | * | |
774 | * P1: A, ww_A, ww_B | |
775 | * P2: ww_B, ww_A | |
776 | * P3: A | |
777 | * | |
778 | * P3 should not return -EDEADLK because it gets trapped in the cycle | |
779 | * created by P1 and P2 (which will resolve -- and runs into | |
780 | * max_lock_depth above). Therefore disable detect_deadlock such that | |
781 | * the below termination condition can trigger once all relevant tasks | |
782 | * are boosted. | |
783 | * | |
784 | * Even when we start with ww_mutex we can disable deadlock detection, | |
785 | * since we would suppress a ww_mutex induced deadlock at [6] anyway. |
786 | * Suppressing it here however is not sufficient since we might still |
787 | * hit [6] due to adjustment driven iteration. | |
788 | * | |
789 | * NOTE: if someone were to create a deadlock between 2 ww_classes we'd | |
790 | * utterly fail to report it; lockdep should. | |
791 | */ | |
792 | if (IS_ENABLED(CONFIG_PREEMPT_RT) && waiter->ww_ctx && detect_deadlock) | |
793 | detect_deadlock = false; | |
794 | ||
1a539a87 TG |
795 | /* |
796 | * Drop out, when the task has no waiters. Note, | |
797 | * top_waiter can be NULL, when we are in the deboosting | |
798 | * mode! | |
799 | */ | |
397335f0 TG |
800 | if (top_waiter) { |
801 | if (!task_has_pi_waiters(task)) | |
802 | goto out_unlock_pi; | |
803 | /* | |
804 | * If deadlock detection is off, we stop here if we | |
67792e2c TG |
805 | * are not the top pi waiter of the task. If deadlock |
806 | * detection is enabled we continue, but stop the | |
807 | * requeueing in the chain walk. | |
397335f0 | 808 | */ |
67792e2c TG |
809 | if (top_waiter != task_top_pi_waiter(task)) { |
810 | if (!detect_deadlock) | |
811 | goto out_unlock_pi; | |
812 | else | |
813 | requeue = false; | |
814 | } | |
397335f0 | 815 | } |
23f78d4a IM |
816 | |
817 | /* | |
67792e2c TG |
818 | * If the waiter priority is the same as the task priority |
819 | * then there is no further priority adjustment necessary. If | |
820 | * deadlock detection is off, we stop the chain walk. If it is |
821 | * enabled we continue, but stop the requeueing in the chain |
822 | * walk. | |
23f78d4a | 823 | */ |
f7853c34 | 824 | if (rt_waiter_node_equal(&waiter->tree, task_to_waiter_node(task))) { |
67792e2c TG |
825 | if (!detect_deadlock) |
826 | goto out_unlock_pi; | |
827 | else | |
828 | requeue = false; | |
829 | } | |
23f78d4a | 830 | |
3eb65aea | 831 | /* |
f7853c34 PZ |
832 | * [4] Get the next lock; per holding task->pi_lock we can't unblock |
833 | * and guarantee @lock's existence. | |
3eb65aea | 834 | */ |
23f78d4a | 835 | lock = waiter->lock; |
3eb65aea TG |
836 | /* |
837 | * [5] We need to trylock here as we are holding task->pi_lock, | |
838 | * which is the reverse lock order versus the other rtmutex | |
839 | * operations. | |
f7853c34 PZ |
840 | * |
841 | * Per the above, holding task->pi_lock guarantees lock exists, so | |
842 | * inverting this lock order is infeasible from a life-time | |
843 | * perspective. | |
3eb65aea | 844 | */ |
d209d74d | 845 | if (!raw_spin_trylock(&lock->wait_lock)) { |
b4abf910 | 846 | raw_spin_unlock_irq(&task->pi_lock); |
23f78d4a IM |
847 | cpu_relax(); |
848 | goto retry; | |
849 | } | |
850 | ||
397335f0 | 851 | /* |
3eb65aea TG |
852 | * [6] check_exit_conditions_2() protected by task->pi_lock and |
853 | * lock->wait_lock. | |
854 | * | |
397335f0 TG |
855 | * Deadlock detection. If the lock is the same as the original |
856 | * lock which caused us to walk the lock chain or if the | |
857 | * current lock is owned by the task which initiated the chain | |
858 | * walk, we detected a deadlock. | |
859 | */ | |
95e02ca9 | 860 | if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { |
3d5c9340 | 861 | ret = -EDEADLK; |
a055fcc1 PZ |
862 | |
863 | /* | |
864 | * When the deadlock is due to ww_mutex; also see above. Don't | |
865 | * report the deadlock and instead let the ww_mutex wound/die | |
866 | * logic pick which of the contending threads gets -EDEADLK. | |
867 | * | |
868 | * NOTE: assumes the cycle only contains a single ww_class; any | |
869 | * other configuration and we fail to report; also, see | |
870 | * lockdep. | |
871 | */ | |
e5480572 | 872 | if (IS_ENABLED(CONFIG_PREEMPT_RT) && orig_waiter && orig_waiter->ww_ctx) |
a055fcc1 PZ |
873 | ret = 0; |
874 | ||
875 | raw_spin_unlock(&lock->wait_lock); | |
23f78d4a IM |
876 | goto out_unlock_pi; |
877 | } | |
878 | ||
67792e2c TG |
879 | /* |
880 | * If we just follow the lock chain for deadlock detection, no | |
881 | * need to do all the requeue operations. To avoid a truckload | |
882 | * of conditionals around the various places below, just do the | |
883 | * minimum chain walk checks. | |
884 | */ | |
885 | if (!requeue) { | |
886 | /* | |
887 | * No requeue[7] here. Just release @task [8] | |
888 | */ | |
b4abf910 | 889 | raw_spin_unlock(&task->pi_lock); |
67792e2c TG |
890 | put_task_struct(task); |
891 | ||
892 | /* | |
893 | * [9] check_exit_conditions_3 protected by lock->wait_lock. | |
894 | * If there is no owner of the lock, end of chain. | |
895 | */ | |
896 | if (!rt_mutex_owner(lock)) { | |
b4abf910 | 897 | raw_spin_unlock_irq(&lock->wait_lock); |
67792e2c TG |
898 | return 0; |
899 | } | |
900 | ||
901 | /* [10] Grab the next task, i.e. owner of @lock */ | |
7b3c92b8 | 902 | task = get_task_struct(rt_mutex_owner(lock)); |
b4abf910 | 903 | raw_spin_lock(&task->pi_lock); |
67792e2c TG |
904 | |
905 | /* | |
906 | * No requeue [11] here. We just do deadlock detection. | |
907 | * | |
908 | * [12] Store whether owner is blocked | |
909 | * itself. Decision is made after dropping the locks | |
910 | */ | |
911 | next_lock = task_blocked_on_lock(task); | |
912 | /* | |
913 | * Get the top waiter for the next iteration | |
914 | */ | |
915 | top_waiter = rt_mutex_top_waiter(lock); | |
916 | ||
917 | /* [13] Drop locks */ | |
b4abf910 TG |
918 | raw_spin_unlock(&task->pi_lock); |
919 | raw_spin_unlock_irq(&lock->wait_lock); | |
67792e2c TG |
920 | |
921 | /* If owner is not blocked, end of chain. */ | |
922 | if (!next_lock) | |
923 | goto out_put_task; | |
924 | goto again; | |
925 | } | |
926 | ||
a57594a1 TG |
927 | /* |
928 | * Store the current top waiter before doing the requeue | |
929 | * operation on @lock. We need it for the boost/deboost | |
930 | * decision below. | |
931 | */ | |
932 | prerequeue_top_waiter = rt_mutex_top_waiter(lock); | |
23f78d4a | 933 | |
9f40a51a | 934 | /* [7] Requeue the waiter in the lock waiter tree. */ |
fb00aca4 | 935 | rt_mutex_dequeue(lock, waiter); |
e0aad5b4 PZ |
936 | |
937 | /* | |
938 | * Update the waiter prio fields now that we're dequeued. | |
939 | * | |
940 | * These values can have changed through either: | |
941 | * | |
942 | * sys_sched_set_scheduler() / sys_sched_setattr() | |
943 | * | |
944 | * or | |
945 | * | |
946 | * DL CBS enforcement advancing the effective deadline. | |
e0aad5b4 | 947 | */ |
715f7f9e | 948 | waiter_update_prio(waiter, task); |
e0aad5b4 | 949 | |
fb00aca4 | 950 | rt_mutex_enqueue(lock, waiter); |
23f78d4a | 951 | |
f7853c34 PZ |
952 | /* |
953 | * [8] Release the (blocking) task in preparation for | |
954 | * taking the owner task in [10]. | |
955 | * | |
956 | * Since we hold lock->wait_lock, task cannot unblock, even if we |
957 | * release task->pi_lock. | |
958 | */ | |
b4abf910 | 959 | raw_spin_unlock(&task->pi_lock); |
2ffa5a5c TG |
960 | put_task_struct(task); |
961 | ||
a57594a1 | 962 | /* |
3eb65aea TG |
963 | * [9] check_exit_conditions_3 protected by lock->wait_lock. |
964 | * | |
a57594a1 TG |
965 | * We must abort the chain walk if there is no lock owner even |
966 | * in the dead lock detection case, as we have nothing to | |
967 | * follow here. This is the end of the chain we are walking. | |
968 | */ | |
8161239a LJ |
969 | if (!rt_mutex_owner(lock)) { |
970 | /* | |
3eb65aea TG |
971 | * If the requeue [7] above changed the top waiter, |
972 | * then we need to wake the new top waiter up to try | |
973 | * to get the lock. | |
8161239a | 974 | */ |
db370a8b WLC |
975 | top_waiter = rt_mutex_top_waiter(lock); |
976 | if (prerequeue_top_waiter != top_waiter) | |
977 | wake_up_state(top_waiter->task, top_waiter->wake_state); | |
b4abf910 | 978 | raw_spin_unlock_irq(&lock->wait_lock); |
2ffa5a5c | 979 | return 0; |
8161239a | 980 | } |
23f78d4a | 981 | |
f7853c34 PZ |
982 | /* |
983 | * [10] Grab the next task, i.e. the owner of @lock | |
984 | * | |
985 | * Per holding lock->wait_lock and checking for !owner above, there | |
986 | * must be an owner and it cannot go away. | |
987 | */ | |
7b3c92b8 | 988 | task = get_task_struct(rt_mutex_owner(lock)); |
b4abf910 | 989 | raw_spin_lock(&task->pi_lock); |
23f78d4a | 990 | |
3eb65aea | 991 | /* [11] requeue the pi waiters if necessary */ |
23f78d4a | 992 | if (waiter == rt_mutex_top_waiter(lock)) { |
a57594a1 TG |
993 | /* |
994 | * The waiter became the new top (highest priority) | |
995 | * waiter on the lock. Replace the previous top waiter | |
9f40a51a | 996 | * in the owner tasks pi waiters tree with this waiter |
a57594a1 TG |
997 | * and adjust the priority of the owner. |
998 | */ | |
999 | rt_mutex_dequeue_pi(task, prerequeue_top_waiter); | |
f7853c34 | 1000 | waiter_clone_prio(waiter, task); |
fb00aca4 | 1001 | rt_mutex_enqueue_pi(task, waiter); |
f7853c34 | 1002 | rt_mutex_adjust_prio(lock, task); |
23f78d4a | 1003 | |
a57594a1 TG |
1004 | } else if (prerequeue_top_waiter == waiter) { |
1005 | /* | |
1006 | * The waiter was the top waiter on the lock, but is | |
e2db7592 | 1007 | * no longer the top priority waiter. Replace waiter in |
9f40a51a | 1008 | * the owner tasks pi waiters tree with the new top |
a57594a1 TG |
1009 | * (highest priority) waiter and adjust the priority |
1010 | * of the owner. | |
1011 | * The new top waiter is stored in @waiter so that | |
1012 | * @waiter == @top_waiter evaluates to true below and | |
1013 | * we continue to deboost the rest of the chain. | |
1014 | */ | |
fb00aca4 | 1015 | rt_mutex_dequeue_pi(task, waiter); |
23f78d4a | 1016 | waiter = rt_mutex_top_waiter(lock); |
f7853c34 | 1017 | waiter_clone_prio(waiter, task); |
fb00aca4 | 1018 | rt_mutex_enqueue_pi(task, waiter); |
f7853c34 | 1019 | rt_mutex_adjust_prio(lock, task); |
a57594a1 TG |
1020 | } else { |
1021 | /* | |
1022 | * Nothing changed. No need to do any priority | |
1023 | * adjustment. | |
1024 | */ | |
23f78d4a IM |
1025 | } |
1026 | ||
82084984 | 1027 | /* |
3eb65aea TG |
1028 | * [12] check_exit_conditions_4() protected by task->pi_lock |
1029 | * and lock->wait_lock. The actual decisions are made after we | |
1030 | * dropped the locks. | |
1031 | * | |
82084984 TG |
1032 | * Check whether the task which owns the current lock is pi |
1033 | * blocked itself. If yes we store a pointer to the lock for | |
1034 | * the lock chain change detection above. After we dropped | |
1035 | * task->pi_lock next_lock cannot be dereferenced anymore. | |
1036 | */ | |
1037 | next_lock = task_blocked_on_lock(task); | |
a57594a1 TG |
1038 | /* |
1039 | * Store the top waiter of @lock for the end of chain walk | |
1040 | * decision below. | |
1041 | */ | |
23f78d4a | 1042 | top_waiter = rt_mutex_top_waiter(lock); |
3eb65aea TG |
1043 | |
1044 | /* [13] Drop the locks */ | |
b4abf910 TG |
1045 | raw_spin_unlock(&task->pi_lock); |
1046 | raw_spin_unlock_irq(&lock->wait_lock); | |
23f78d4a | 1047 | |
82084984 | 1048 | /* |
3eb65aea TG |
1049 | * Make the actual exit decisions [12], based on the stored |
1050 | * values. | |
1051 | * | |
82084984 TG |
1052 | * We reached the end of the lock chain. Stop right here. No |
1053 | * point to go back just to figure that out. | |
1054 | */ | |
1055 | if (!next_lock) | |
1056 | goto out_put_task; | |
1057 | ||
a57594a1 TG |
1058 | /* |
1059 | * If the current waiter is not the top waiter on the lock, | |
1060 | * then we can stop the chain walk here if we are not in full | |
1061 | * deadlock detection mode. | |
1062 | */ | |
23f78d4a IM |
1063 | if (!detect_deadlock && waiter != top_waiter) |
1064 | goto out_put_task; | |
1065 | ||
1066 | goto again; | |
1067 | ||
1068 | out_unlock_pi: | |
b4abf910 | 1069 | raw_spin_unlock_irq(&task->pi_lock); |
23f78d4a IM |
1070 | out_put_task: |
1071 | put_task_struct(task); | |
36c8b586 | 1072 | |
23f78d4a IM |
1073 | return ret; |
1074 | } | |
1075 | ||
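As a reading aid for the function above, here is a drastically simplified, single-threaded model of what the boost steps achieve end to end: each owner along the chain inherits the priority of the top waiter of the lock it holds. All names and fields are invented, and the locking, requeueing, deadlock detection and chain-length handling of the real code are deliberately omitted.

/* Illustrative only: priority propagation along a lock chain, no locking. */
#include <stdio.h>

struct toy_lock;

struct toy_task {
	const char *name;
	int prio;			/* lower value = higher priority */
	struct toy_lock *blocked_on;	/* lock this task waits for, or NULL */
};

struct toy_lock {
	struct toy_task *owner;
	struct toy_task *top_waiter;	/* highest-priority waiter */
};

static void toy_adjust_prio_chain(struct toy_task *task)
{
	int depth = 0;

	/* Boost each owner in turn to its top waiter's priority. */
	while (task->blocked_on && depth++ < 32) {
		struct toy_lock *lock = task->blocked_on;
		struct toy_task *owner = lock->owner;

		if (!owner)
			break;
		if (lock->top_waiter && lock->top_waiter->prio < owner->prio)
			owner->prio = lock->top_waiter->prio;
		task = owner;		/* continue up the chain */
	}
}

int main(void)
{
	struct toy_lock l1 = { 0 }, l2 = { 0 };
	struct toy_task low  = { "low",  30, NULL };
	struct toy_task mid  = { "mid",  20, &l1 };
	struct toy_task high = { "high", 10, &l2 };

	l1.owner = &low;  l1.top_waiter = &mid;
	l2.owner = &mid;  l2.top_waiter = &high;

	toy_adjust_prio_chain(&high);	/* high blocks on l2: boost mid, then low */
	printf("low=%d mid=%d\n", low.prio, mid.prio);	/* both boosted to 10 */
	return 0;
}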
23f78d4a IM |
1076 | /* |
1077 | * Try to take an rt-mutex | |
1078 | * | |
b4abf910 | 1079 | * Must be called with lock->wait_lock held and interrupts disabled |
8161239a | 1080 | * |
358c331f TG |
1081 | * @lock: The lock to be acquired. |
1082 | * @task: The task which wants to acquire the lock | |
9f40a51a | 1083 | * @waiter: The waiter that is queued to the lock's wait tree if the |
358c331f | 1084 | * callsite called task_blocked_on_lock(), otherwise NULL |
23f78d4a | 1085 | */ |
d7a2edb8 | 1086 | static int __sched |
830e6acc | 1087 | try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task, |
d7a2edb8 | 1088 | struct rt_mutex_waiter *waiter) |
23f78d4a | 1089 | { |
e0aad5b4 PZ |
1090 | lockdep_assert_held(&lock->wait_lock); |
1091 | ||
23f78d4a | 1092 | /* |
358c331f TG |
1093 | * Before testing whether we can acquire @lock, we set the |
1094 | * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all | |
1095 | * other tasks which try to modify @lock into the slow path | |
1096 | * and they serialize on @lock->wait_lock. | |
23f78d4a | 1097 | * |
358c331f TG |
1098 | * The RT_MUTEX_HAS_WAITERS bit can have a transitional state |
1099 | * as explained at the top of this file if and only if: | |
23f78d4a | 1100 | * |
358c331f TG |
1101 | * - There is a lock owner. The caller must fixup the |
1102 | * transient state if it does a trylock or leaves the lock | |
1103 | * function due to a signal or timeout. | |
1104 | * | |
1105 | * - @task acquires the lock and there are no other | |
1106 | * waiters. This is undone in rt_mutex_set_owner(@task) at | |
1107 | * the end of this function. | |
23f78d4a IM |
1108 | */ |
1109 | mark_rt_mutex_waiters(lock); | |
1110 | ||
358c331f TG |
1111 | /* |
1112 | * If @lock has an owner, give up. | |
1113 | */ | |
8161239a | 1114 | if (rt_mutex_owner(lock)) |
23f78d4a IM |
1115 | return 0; |
1116 | ||
8161239a | 1117 | /* |
358c331f | 1118 | * If @waiter != NULL, @task has already enqueued the waiter |
9f40a51a | 1119 | * into @lock waiter tree. If @waiter == NULL then this is a |
358c331f | 1120 | * trylock attempt. |
8161239a | 1121 | */ |
358c331f | 1122 | if (waiter) { |
48eb3f4f | 1123 | struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock); |
8161239a | 1124 | |
358c331f | 1125 | /* |
48eb3f4f GH |
1126 | * If waiter is the highest priority waiter of @lock, |
1127 | * or allowed to steal it, take it over. | |
358c331f | 1128 | */ |
48eb3f4f GH |
1129 | if (waiter == top_waiter || rt_mutex_steal(waiter, top_waiter)) { |
1130 | /* | |
1131 | * We can acquire the lock. Remove the waiter from the | |
1132 | * lock waiters tree. | |
1133 | */ | |
1134 | rt_mutex_dequeue(lock, waiter); | |
1135 | } else { | |
1136 | return 0; | |
1137 | } | |
358c331f | 1138 | } else { |
8161239a | 1139 | /* |
358c331f TG |
1140 | * If the lock has waiters already we check whether @task is |
1141 | * eligible to take over the lock. | |
1142 | * | |
1143 | * If there are no other waiters, @task can acquire | |
1144 | * the lock. @task->pi_blocked_on is NULL, so it does | |
1145 | * not need to be dequeued. | |
8161239a LJ |
1146 | */ |
1147 | if (rt_mutex_has_waiters(lock)) { | |
48eb3f4f GH |
1148 | /* Check whether the trylock can steal it. */ |
1149 | if (!rt_mutex_steal(task_to_waiter(task), | |
1150 | rt_mutex_top_waiter(lock))) | |
358c331f TG |
1151 | return 0; |
1152 | ||
1153 | /* | |
1154 | * The current top waiter stays enqueued. We | |
1155 | * don't have to change anything in the lock | |
1156 | * waiters order. | |
1157 | */ | |
1158 | } else { | |
1159 | /* | |
1160 | * No waiters. Take the lock without the | |
1161 | * pi_lock dance. @task->pi_blocked_on is NULL |
1162 | * and we have no waiters to enqueue in @task | |
9f40a51a | 1163 | * pi waiters tree. |
358c331f TG |
1164 | */ |
1165 | goto takeit; | |
8161239a | 1166 | } |
8161239a LJ |
1167 | } |
1168 | ||
358c331f TG |
1169 | /* |
1170 | * Clear @task->pi_blocked_on. Requires protection by | |
1171 | * @task->pi_lock. Redundant operation for the @waiter == NULL | |
1172 | * case, but conditionals are more expensive than a redundant | |
1173 | * store. | |
1174 | */ | |
b4abf910 | 1175 | raw_spin_lock(&task->pi_lock); |
358c331f TG |
1176 | task->pi_blocked_on = NULL; |
1177 | /* | |
1178 | * Finish the lock acquisition. @task is the new owner. If | |
1179 | * other waiters exist we have to insert the highest priority | |
9f40a51a | 1180 | * waiter into @task->pi_waiters tree. |
358c331f TG |
1181 | */ |
1182 | if (rt_mutex_has_waiters(lock)) | |
1183 | rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock)); | |
b4abf910 | 1184 | raw_spin_unlock(&task->pi_lock); |
358c331f TG |
1185 | |
1186 | takeit: | |
358c331f TG |
1187 | /* |
1188 | * This either preserves the RT_MUTEX_HAS_WAITERS bit if there | |
1189 | * are still waiters or clears it. | |
1190 | */ | |
8161239a | 1191 | rt_mutex_set_owner(lock, task); |
23f78d4a | 1192 | |
23f78d4a IM |
1193 | return 1; |
1194 | } | |
1195 | ||
1196 | /* | |
1197 | * Task blocks on lock. | |
1198 | * | |
1199 | * Prepare waiter and propagate pi chain | |
1200 | * | |
b4abf910 | 1201 | * This must be called with lock->wait_lock held and interrupts disabled |
23f78d4a | 1202 | */ |
830e6acc | 1203 | static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock, |
d7a2edb8 TG |
1204 | struct rt_mutex_waiter *waiter, |
1205 | struct task_struct *task, | |
add46132 | 1206 | struct ww_acquire_ctx *ww_ctx, |
894d1b3d PZ |
1207 | enum rtmutex_chainwalk chwalk, |
1208 | struct wake_q_head *wake_q) | |
23f78d4a | 1209 | { |
36c8b586 | 1210 | struct task_struct *owner = rt_mutex_owner(lock); |
23f78d4a | 1211 | struct rt_mutex_waiter *top_waiter = waiter; |
830e6acc | 1212 | struct rt_mutex_base *next_lock; |
db630637 | 1213 | int chain_walk = 0, res; |
23f78d4a | 1214 | |
e0aad5b4 PZ |
1215 | lockdep_assert_held(&lock->wait_lock); |
1216 | ||
397335f0 TG |
1217 | /* |
1218 | * Early deadlock detection. We really don't want the task to | |
1219 | * enqueue on itself just to untangle the mess later. It's not | |
1220 | * only an optimization. We drop the locks, so another waiter | |
1221 | * can come in before the chain walk detects the deadlock. So | |
1222 | * the other will detect the deadlock and return -EDEADLOCK, | |
1223 | * which is wrong, as the other waiter is not in a deadlock | |
1224 | * situation. | |
02ea9fc9 PZ |
1225 | * |
1226 | * Except for ww_mutex, in that case the chain walk must already deal | |
1227 | * with spurious cycles, see the comments at [3] and [6]. | |
397335f0 | 1228 | */ |
02ea9fc9 | 1229 | if (owner == task && !(build_ww_mutex() && ww_ctx)) |
397335f0 TG |
1230 | return -EDEADLK; |
1231 | ||
b4abf910 | 1232 | raw_spin_lock(&task->pi_lock); |
8dac456a | 1233 | waiter->task = task; |
23f78d4a | 1234 | waiter->lock = lock; |
715f7f9e | 1235 | waiter_update_prio(waiter, task); |
f7853c34 | 1236 | waiter_clone_prio(waiter, task); |
23f78d4a IM |
1237 | |
1238 | /* Get the top priority waiter on the lock */ | |
1239 | if (rt_mutex_has_waiters(lock)) | |
1240 | top_waiter = rt_mutex_top_waiter(lock); | |
fb00aca4 | 1241 | rt_mutex_enqueue(lock, waiter); |
23f78d4a | 1242 | |
8dac456a | 1243 | task->pi_blocked_on = waiter; |
23f78d4a | 1244 | |
b4abf910 | 1245 | raw_spin_unlock(&task->pi_lock); |
23f78d4a | 1246 | |
add46132 PZ |
1247 | if (build_ww_mutex() && ww_ctx) { |
1248 | struct rt_mutex *rtm; | |
1249 | ||
1250 | /* Check whether the waiter should back out immediately */ | |
1251 | rtm = container_of(lock, struct rt_mutex, rtmutex); | |
894d1b3d | 1252 | res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx, wake_q); |
37e8abff TG |
1253 | if (res) { |
1254 | raw_spin_lock(&task->pi_lock); | |
1255 | rt_mutex_dequeue(lock, waiter); | |
1256 | task->pi_blocked_on = NULL; | |
1257 | raw_spin_unlock(&task->pi_lock); | |
add46132 | 1258 | return res; |
37e8abff | 1259 | } |
add46132 PZ |
1260 | } |
1261 | ||
8161239a LJ |
1262 | if (!owner) |
1263 | return 0; | |
1264 | ||
b4abf910 | 1265 | raw_spin_lock(&owner->pi_lock); |
23f78d4a | 1266 | if (waiter == rt_mutex_top_waiter(lock)) { |
fb00aca4 PZ |
1267 | rt_mutex_dequeue_pi(owner, top_waiter); |
1268 | rt_mutex_enqueue_pi(owner, waiter); | |
23f78d4a | 1269 | |
f7853c34 | 1270 | rt_mutex_adjust_prio(lock, owner); |
db630637 SR |
1271 | if (owner->pi_blocked_on) |
1272 | chain_walk = 1; | |
8930ed80 | 1273 | } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) { |
db630637 | 1274 | chain_walk = 1; |
82084984 | 1275 | } |
db630637 | 1276 | |
82084984 TG |
1277 | /* Store the lock on which owner is blocked or NULL */ |
1278 | next_lock = task_blocked_on_lock(owner); | |
1279 | ||
b4abf910 | 1280 | raw_spin_unlock(&owner->pi_lock); |
82084984 TG |
1281 | /* |
1282 | * Even if full deadlock detection is on, if the owner is not | |
1283 | * blocked itself, we can avoid finding this out in the chain | |
1284 | * walk. | |
1285 | */ | |
1286 | if (!chain_walk || !next_lock) | |
23f78d4a IM |
1287 | return 0; |
1288 | ||
db630637 SR |
1289 | /* |
1290 | * The owner can't disappear while holding a lock, | |
1291 | * so the owner struct is protected by wait_lock. | |
1292 | * Gets dropped in rt_mutex_adjust_prio_chain()! | |
1293 | */ | |
1294 | get_task_struct(owner); | |
1295 | ||
abfdccd6 | 1296 | raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q); |
23f78d4a | 1297 | |
8930ed80 | 1298 | res = rt_mutex_adjust_prio_chain(owner, chwalk, lock, |
82084984 | 1299 | next_lock, waiter, task); |
23f78d4a | 1300 | |
b4abf910 | 1301 | raw_spin_lock_irq(&lock->wait_lock); |
23f78d4a IM |
1302 | |
1303 | return res; | |
1304 | } | |
1305 | ||
1306 | /* | |
9f40a51a | 1307 | * Remove the top waiter from the current tasks pi waiter tree and |
45ab4eff | 1308 | * queue it up. |
23f78d4a | 1309 | * |
b4abf910 | 1310 | * Called with lock->wait_lock held and interrupts disabled. |
23f78d4a | 1311 | */ |
7980aa39 | 1312 | static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh, |
830e6acc | 1313 | struct rt_mutex_base *lock) |
23f78d4a IM |
1314 | { |
1315 | struct rt_mutex_waiter *waiter; | |
23f78d4a | 1316 | |
f7853c34 PZ |
1317 | lockdep_assert_held(&lock->wait_lock); |
1318 | ||
b4abf910 | 1319 | raw_spin_lock(¤t->pi_lock); |
23f78d4a IM |
1320 | |
1321 | waiter = rt_mutex_top_waiter(lock); | |
23f78d4a IM |
1322 | |
1323 | /* | |
acd58620 PZ |
1324 | * Remove it from current->pi_waiters and deboost. |
1325 | * | |
1326 | * We must in fact deboost here in order to ensure we call | |
1327 | * rt_mutex_setprio() to update p->pi_top_task before the | |
1328 | * task unblocks. | |
23f78d4a | 1329 | */ |
fb00aca4 | 1330 | rt_mutex_dequeue_pi(current, waiter); |
f7853c34 | 1331 | rt_mutex_adjust_prio(lock, current); |
23f78d4a | 1332 | |
27e35715 TG |
1333 | /* |
1334 | * As we are waking up the top waiter, and the waiter stays | |
1335 | * queued on the lock until it gets the lock, this lock | |
1336 | * obviously has waiters. Just set the bit here and this has | |
1337 | * the added benefit of forcing all new tasks into the | |
1338 | * slow path making sure no task of lower priority than | |
1339 | * the top waiter can steal this lock. | |
1340 | */ | |
1341 | lock->owner = (void *) RT_MUTEX_HAS_WAITERS; | |
23f78d4a | 1342 | |
acd58620 PZ |
1343 | /* |
1344 | * We deboosted before waking the top waiter task such that we don't | |
1345 | * run two tasks with the 'same' priority (and ensure the | |
1346 | * p->pi_top_task pointer points to a blocked task). This however can | |
1347 | * lead to priority inversion if we would get preempted after the | |
1348 | * deboost but before waking our donor task, hence the preempt_disable() | |
1349 | * before unlock. | |
1350 | * | |
7980aa39 | 1351 | * Pairs with preempt_enable() in rt_mutex_wake_up_q(); |
acd58620 PZ |
1352 | */ |
1353 | preempt_disable(); | |
7980aa39 | 1354 | rt_mutex_wake_q_add(wqh, waiter); |
acd58620 | 1355 | raw_spin_unlock(¤t->pi_lock); |
23f78d4a IM |
1356 | } |
1357 | ||
e17ba59b TG |
1358 | static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock) |
1359 | { | |
1360 | int ret = try_to_take_rt_mutex(lock, current, NULL); | |
1361 | ||
1362 | /* | |
1363 | * try_to_take_rt_mutex() sets the lock waiters bit | |
1364 | * unconditionally. Clean this up. | |
1365 | */ | |
1c0908d8 | 1366 | fixup_rt_mutex_waiters(lock, true); |
e17ba59b TG |
1367 | |
1368 | return ret; | |
1369 | } | |
1370 | ||
1371 | /* | |
1372 | * Slow path try-lock function: | |
1373 | */ | |
1374 | static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock) | |
1375 | { | |
1376 | unsigned long flags; | |
1377 | int ret; | |
1378 | ||
1379 | /* | |
1380 | * If the lock already has an owner we fail to get the lock. | |
1381 | * This can be done without taking the @lock->wait_lock as | |
1382 | * it is only being read, and this is a trylock anyway. | |
1383 | */ | |
1384 | if (rt_mutex_owner(lock)) | |
1385 | return 0; | |
1386 | ||
1387 | /* | |
1388 | * The mutex has currently no owner. Lock the wait lock and try to | |
1389 | * acquire the lock. We use irqsave here to support early boot calls. | |
1390 | */ | |
1391 | raw_spin_lock_irqsave(&lock->wait_lock, flags); | |
1392 | ||
1393 | ret = __rt_mutex_slowtrylock(lock); | |
1394 | ||
1395 | raw_spin_unlock_irqrestore(&lock->wait_lock, flags); | |
1396 | ||
1397 | return ret; | |
1398 | } | |
1399 | ||
1400 | static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock) | |
1401 | { | |
1402 | if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) | |
1403 | return 1; | |
1404 | ||
1405 | return rt_mutex_slowtrylock(lock); | |
1406 | } | |
1407 | ||
1408 | /* | |
1409 | * Slow path to release a rt-mutex. | |
1410 | */ | |
1411 | static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock) | |
1412 | { | |
1413 | DEFINE_RT_WAKE_Q(wqh); | |
1414 | unsigned long flags; | |
1415 | ||
1416 | /* irqsave required to support early boot calls */ | |
1417 | raw_spin_lock_irqsave(&lock->wait_lock, flags); | |
1418 | ||
1419 | debug_rt_mutex_unlock(lock); | |
1420 | ||
1421 | /* | |
1422 | * We must be careful here if the fast path is enabled. If we | |
1423 | * have no waiters queued we cannot set owner to NULL here | |
1424 | * because of: | |
1425 | * | |
1426 | * foo->lock->owner = NULL; | |
1427 | * rtmutex_lock(foo->lock); <- fast path | |
1428 | * free = atomic_dec_and_test(foo->refcnt); | |
1429 | * rtmutex_unlock(foo->lock); <- fast path | |
1430 | * if (free) | |
1431 | * kfree(foo); | |
1432 | * raw_spin_unlock(foo->lock->wait_lock); | |
1433 | * | |
1434 | * So for the fastpath enabled kernel: | |
1435 | * | |
1436 | * Nothing can set the waiters bit as long as we hold | |
1437 | * lock->wait_lock. So we do the following sequence: | |
1438 | * | |
1439 | * owner = rt_mutex_owner(lock); | |
1440 | * clear_rt_mutex_waiters(lock); | |
1441 | * raw_spin_unlock(&lock->wait_lock); | |
1442 | * if (cmpxchg(&lock->owner, owner, 0) == owner) | |
1443 | * return; | |
1444 | * goto retry; | |
1445 | * | |
1446 | * The fastpath disabled variant is simple as all access to | |
1447 | * lock->owner is serialized by lock->wait_lock: | |
1448 | * | |
1449 | * lock->owner = NULL; | |
1450 | * raw_spin_unlock(&lock->wait_lock); | |
1451 | */ | |
1452 | while (!rt_mutex_has_waiters(lock)) { | |
1453 | /* Drops lock->wait_lock ! */ | |
1454 | if (unlock_rt_mutex_safe(lock, flags) == true) | |
1455 | return; | |
1456 | /* Relock the rtmutex and try again */ | |
1457 | raw_spin_lock_irqsave(&lock->wait_lock, flags); | |
1458 | } | |
1459 | ||
1460 | /* | |
1461 | * The wakeup next waiter path does not suffer from the above | |
1462 | * race. See the comments there. | |
1463 | * | |
1464 | * Queue the next waiter for wakeup once we release the wait_lock. | |
1465 | */ | |
1466 | mark_wakeup_next_waiter(&wqh, lock); | |
1467 | raw_spin_unlock_irqrestore(&lock->wait_lock, flags); | |
1468 | ||
1469 | rt_mutex_wake_up_q(&wqh); | |
1470 | } | |
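/*
 * Editorial sketch, not part of the original source: the drop-and-cmpxchg
 * sequence described in the comment of rt_mutex_slowunlock() above. The
 * real helper is unlock_rt_mutex_safe() earlier in this file; this
 * condensed version is illustrative only and may differ in detail.
 */
static bool demo_unlock_safe(struct rt_mutex_base *lock, unsigned long flags)
	__releases(&lock->wait_lock)
{
	struct task_struct *owner = rt_mutex_owner(lock);

	clear_rt_mutex_waiters(lock);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	/*
	 * If no waiter queued up in the meantime, the cmpxchg against the
	 * previously observed owner succeeds and the unlock is complete.
	 * Otherwise the caller must re-take wait_lock and retry.
	 */
	return rt_mutex_cmpxchg_release(lock, owner, NULL);
}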
1471 | ||
1472 | static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock) | |
1473 | { | |
1474 | if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) | |
1475 | return; | |
1476 | ||
1477 | rt_mutex_slowunlock(lock); | |
1478 | } | |
1479 | ||
992caf7f SR |
1480 | #ifdef CONFIG_SMP |
1481 | static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock, | |
1482 | struct rt_mutex_waiter *waiter, | |
1483 | struct task_struct *owner) | |
1484 | { | |
1485 | bool res = true; | |
1486 | ||
1487 | rcu_read_lock(); | |
1488 | for (;;) { | |
1489 | /* If owner changed, trylock again. */ | |
1490 | if (owner != rt_mutex_owner(lock)) | |
1491 | break; | |
1492 | /* | |
1493 | * Ensure that @owner is dereferenced after checking that | |
1494 | * the lock owner still matches @owner. If that fails, | |
1495 | * @owner might point to freed memory. If it still matches, | |
1496 | * the rcu_read_lock() ensures the memory stays valid. | |
1497 | */ | |
1498 | barrier(); | |
1499 | /* | |
1500 | * Stop spinning when: | |
1501 | * - the lock owner has been scheduled out | |
1502 | * - current is no longer the top waiter | |
1503 | * - current is requested to reschedule (redundant | |
1504 | * for CONFIG_PREEMPT_RCU=y) | |
1505 | * - the VCPU on which owner runs is preempted | |
1506 | */ | |
c0bed69d | 1507 | if (!owner_on_cpu(owner) || need_resched() || |
f16cc980 | 1508 | !rt_mutex_waiter_is_top_waiter(lock, waiter)) { |
992caf7f SR |
1509 | res = false; |
1510 | break; | |
1511 | } | |
1512 | cpu_relax(); | |
1513 | } | |
1514 | rcu_read_unlock(); | |
1515 | return res; | |
1516 | } | |
1517 | #else | |
1518 | static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock, | |
1519 | struct rt_mutex_waiter *waiter, | |
1520 | struct task_struct *owner) | |
1521 | { | |
1522 | return false; | |
1523 | } | |
1524 | #endif | |
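/*
 * Editorial sketch, not part of the original source: a simplified view
 * of the owner_on_cpu() test used by rtmutex_spin_on_owner() above,
 * assuming the real helper also folds in vcpu_is_preempted() so a
 * spinner gives up when the owner's vCPU has been preempted by the
 * hypervisor. The name demo_owner_running() is hypothetical.
 */
static inline bool demo_owner_running(struct task_struct *owner)
{
	/* READ_ONCE(): the owner can be scheduled out concurrently. */
	return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
}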
1525 | ||
e17ba59b TG |
1526 | #ifdef RT_MUTEX_BUILD_MUTEX |
1527 | /* | |
1528 | * Functions required for: | |
1529 | * - rtmutex, futex on all kernels | |
1530 | * - mutex and rwsem substitutions on RT kernels | |
1531 | */ | |
1532 | ||
23f78d4a | 1533 | /* |
8161239a | 1534 | * Remove a waiter from a lock and give up on acquiring it |
23f78d4a | 1535 | * |
e17ba59b | 1536 | * Must be called with lock->wait_lock held and interrupts disabled. The |
8161239a | 1537 | * caller must have just failed to acquire the lock via try_to_take_rt_mutex(). |
23f78d4a | 1538 | */ |
830e6acc | 1539 | static void __sched remove_waiter(struct rt_mutex_base *lock, |
d7a2edb8 | 1540 | struct rt_mutex_waiter *waiter) |
23f78d4a | 1541 | { |
1ca7b860 | 1542 | bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); |
36c8b586 | 1543 | struct task_struct *owner = rt_mutex_owner(lock); |
830e6acc | 1544 | struct rt_mutex_base *next_lock; |
23f78d4a | 1545 | |
e0aad5b4 PZ |
1546 | lockdep_assert_held(&lock->wait_lock); |
1547 | ||
b4abf910 | 1548 | raw_spin_lock(&current->pi_lock); |
fb00aca4 | 1549 | rt_mutex_dequeue(lock, waiter); |
23f78d4a | 1550 | current->pi_blocked_on = NULL; |
b4abf910 | 1551 | raw_spin_unlock(&current->pi_lock); |
23f78d4a | 1552 | |
1ca7b860 TG |
1553 | /* |
1554 | * Only update priority if the waiter was the highest priority | |
1555 | * waiter of the lock and there is an owner to update. | |
1556 | */ | |
1557 | if (!owner || !is_top_waiter) | |
8161239a LJ |
1558 | return; |
1559 | ||
b4abf910 | 1560 | raw_spin_lock(&owner->pi_lock); |
23f78d4a | 1561 | |
1ca7b860 | 1562 | rt_mutex_dequeue_pi(owner, waiter); |
23f78d4a | 1563 | |
1ca7b860 TG |
1564 | if (rt_mutex_has_waiters(lock)) |
1565 | rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock)); | |
23f78d4a | 1566 | |
f7853c34 | 1567 | rt_mutex_adjust_prio(lock, owner); |
23f78d4a | 1568 | |
1ca7b860 TG |
1569 | /* Store the lock on which owner is blocked or NULL */ |
1570 | next_lock = task_blocked_on_lock(owner); | |
db630637 | 1571 | |
b4abf910 | 1572 | raw_spin_unlock(&owner->pi_lock); |
23f78d4a | 1573 | |
1ca7b860 TG |
1574 | /* |
1575 | * Don't walk the chain if the owner task is not itself | |
1576 | * blocked. | |
1577 | */ | |
82084984 | 1578 | if (!next_lock) |
23f78d4a IM |
1579 | return; |
1580 | ||
db630637 SR |
1581 | /* gets dropped in rt_mutex_adjust_prio_chain()! */ |
1582 | get_task_struct(owner); | |
1583 | ||
b4abf910 | 1584 | raw_spin_unlock_irq(&lock->wait_lock); |
23f78d4a | 1585 | |
8930ed80 TG |
1586 | rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock, |
1587 | next_lock, NULL, current); | |
23f78d4a | 1588 | |
b4abf910 | 1589 | raw_spin_lock_irq(&lock->wait_lock); |
23f78d4a IM |
1590 | } |
1591 | ||
8dac456a | 1592 | /** |
ebbdc41e | 1593 | * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop |
8dac456a | 1594 | * @lock: the rt_mutex to take |
add46132 | 1595 | * @ww_ctx: WW mutex context pointer |
8dac456a | 1596 | * @state: the state the task should block in (TASK_INTERRUPTIBLE |
b4abf910 | 1597 | * or TASK_UNINTERRUPTIBLE) |
8dac456a DH |
1598 | * @timeout: the pre-initialized and started timer, or NULL for none |
1599 | * @waiter: the pre-initialized rt_mutex_waiter | |
4a077914 | 1600 | * @wake_q: wake_q of tasks to wake when we drop the lock->wait_lock |
8dac456a | 1601 | * |
b4abf910 | 1602 | * Must be called with lock->wait_lock held and interrupts disabled |
23f78d4a | 1603 | */ |
ebbdc41e | 1604 | static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock, |
add46132 | 1605 | struct ww_acquire_ctx *ww_ctx, |
ebbdc41e TG |
1606 | unsigned int state, |
1607 | struct hrtimer_sleeper *timeout, | |
4a077914 JS |
1608 | struct rt_mutex_waiter *waiter, |
1609 | struct wake_q_head *wake_q) | |
77abd3b7 | 1610 | __releases(&lock->wait_lock) __acquires(&lock->wait_lock) |
23f78d4a | 1611 | { |
add46132 | 1612 | struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex); |
992caf7f | 1613 | struct task_struct *owner; |
23f78d4a IM |
1614 | int ret = 0; |
1615 | ||
b76b44fb | 1616 | lockevent_inc(rtmutex_slow_block); |
23f78d4a IM |
1617 | for (;;) { |
1618 | /* Try to acquire the lock: */ | |
b76b44fb WL |
1619 | if (try_to_take_rt_mutex(lock, current, waiter)) { |
1620 | lockevent_inc(rtmutex_slow_acq3); | |
23f78d4a | 1621 | break; |
b76b44fb | 1622 | } |
23f78d4a | 1623 | |
a51a327f TG |
1624 | if (timeout && !timeout->task) { |
1625 | ret = -ETIMEDOUT; | |
1626 | break; | |
1627 | } | |
1628 | if (signal_pending_state(state, current)) { | |
1629 | ret = -EINTR; | |
1630 | break; | |
23f78d4a IM |
1631 | } |
1632 | ||
add46132 PZ |
1633 | if (build_ww_mutex() && ww_ctx) { |
1634 | ret = __ww_mutex_check_kill(rtm, waiter, ww_ctx); | |
1635 | if (ret) | |
1636 | break; | |
1637 | } | |
1638 | ||
992caf7f SR |
1639 | if (waiter == rt_mutex_top_waiter(lock)) |
1640 | owner = rt_mutex_owner(lock); | |
1641 | else | |
1642 | owner = NULL; | |
abfdccd6 | 1643 | raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q); |
23f78d4a | 1644 | |
b76b44fb WL |
1645 | if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner)) { |
1646 | lockevent_inc(rtmutex_slow_sleep); | |
d14f9e93 | 1647 | rt_mutex_schedule(); |
b76b44fb | 1648 | } |
23f78d4a | 1649 | |
b4abf910 | 1650 | raw_spin_lock_irq(&lock->wait_lock); |
23f78d4a IM |
1651 | set_current_state(state); |
1652 | } | |
1653 | ||
afffc6c1 | 1654 | __set_current_state(TASK_RUNNING); |
8dac456a DH |
1655 | return ret; |
1656 | } | |
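/*
 * Editorial sketch, not part of the original source: the canonical
 * sleep-wait loop shape that rt_mutex_slowlock_block() follows, reduced
 * to its skeleton. The condition is checked with the lock held and the
 * sleeping state set, the lock is dropped around schedule(), and the
 * state is re-armed before the next check so wakeups cannot be lost.
 * All names are hypothetical.
 */
static int demo_wait_loop(raw_spinlock_t *lock, bool (*acquired)(void),
			  unsigned int state)
{
	int ret = 0;

	lockdep_assert_held(lock);
	set_current_state(state);

	for (;;) {
		if (acquired())
			break;
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			break;
		}
		raw_spin_unlock_irq(lock);
		schedule();
		raw_spin_lock_irq(lock);
		set_current_state(state);
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}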
1657 | ||
d7a2edb8 | 1658 | static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock, |
d33d2603 | 1659 | struct rt_mutex_base *lock, |
d7a2edb8 | 1660 | struct rt_mutex_waiter *w) |
3d5c9340 TG |
1661 | { |
1662 | /* | |
1663 | * If the result is not -EDEADLOCK or the caller requested | |
1664 | * deadlock detection, nothing to do here. | |
1665 | */ | |
1666 | if (res != -EDEADLOCK || detect_deadlock) | |
1667 | return; | |
1668 | ||
add46132 PZ |
1669 | if (build_ww_mutex() && w->ww_ctx) |
1670 | return; | |
1671 | ||
d33d2603 RX |
1672 | raw_spin_unlock_irq(&lock->wait_lock); |
1673 | ||
6d41c675 | 1674 | WARN(1, "rtmutex deadlock detected\n"); |
d33d2603 | 1675 | |
3d5c9340 TG |
1676 | while (1) { |
1677 | set_current_state(TASK_INTERRUPTIBLE); | |
d14f9e93 | 1678 | rt_mutex_schedule(); |
3d5c9340 TG |
1679 | } |
1680 | } | |
1681 | ||
ebbdc41e TG |
1682 | /** |
1683 | * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held | |
1684 | * @lock: The rtmutex to block on and acquire | |
add46132 | 1685 | * @ww_ctx: WW mutex context pointer |
ebbdc41e TG |
1686 | * @state: The task state for sleeping |
1687 | * @chwalk: Indicator of whether a full or partial chainwalk is requested | |
1688 | * @waiter: Initialized waiter for blocking | |
894d1b3d | 1689 | * @wake_q: The wake_q to wake tasks after we release the wait_lock |
8dac456a | 1690 | */ |
ebbdc41e | 1691 | static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock, |
add46132 | 1692 | struct ww_acquire_ctx *ww_ctx, |
ebbdc41e TG |
1693 | unsigned int state, |
1694 | enum rtmutex_chainwalk chwalk, | |
894d1b3d PZ |
1695 | struct rt_mutex_waiter *waiter, |
1696 | struct wake_q_head *wake_q) | |
8dac456a | 1697 | { |
add46132 PZ |
1698 | struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex); |
1699 | struct ww_mutex *ww = ww_container_of(rtm); | |
ebbdc41e | 1700 | int ret; |
8dac456a | 1701 | |
ebbdc41e | 1702 | lockdep_assert_held(&lock->wait_lock); |
b76b44fb | 1703 | lockevent_inc(rtmutex_slowlock); |
8dac456a DH |
1704 | |
1705 | /* Try to acquire the lock again: */ | |
add46132 PZ |
1706 | if (try_to_take_rt_mutex(lock, current, NULL)) { |
1707 | if (build_ww_mutex() && ww_ctx) { | |
894d1b3d | 1708 | __ww_mutex_check_waiters(rtm, ww_ctx, wake_q); |
add46132 PZ |
1709 | ww_mutex_lock_acquired(ww, ww_ctx); |
1710 | } | |
b76b44fb | 1711 | lockevent_inc(rtmutex_slow_acq1); |
8dac456a | 1712 | return 0; |
add46132 | 1713 | } |
8dac456a DH |
1714 | |
1715 | set_current_state(state); | |
1716 | ||
ee042be1 NK |
1717 | trace_contention_begin(lock, LCB_F_RT); |
1718 | ||
894d1b3d | 1719 | ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk, wake_q); |
8161239a | 1720 | if (likely(!ret)) |
4a077914 | 1721 | ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter, wake_q); |
add46132 PZ |
1722 | |
1723 | if (likely(!ret)) { | |
1724 | /* acquired the lock */ | |
1725 | if (build_ww_mutex() && ww_ctx) { | |
1726 | if (!ww_ctx->is_wait_die) | |
894d1b3d | 1727 | __ww_mutex_check_waiters(rtm, ww_ctx, wake_q); |
add46132 PZ |
1728 | ww_mutex_lock_acquired(ww, ww_ctx); |
1729 | } | |
b76b44fb | 1730 | lockevent_inc(rtmutex_slow_acq2); |
add46132 | 1731 | } else { |
9d3e2d02 | 1732 | __set_current_state(TASK_RUNNING); |
ebbdc41e | 1733 | remove_waiter(lock, waiter); |
d33d2603 | 1734 | rt_mutex_handle_deadlock(ret, chwalk, lock, waiter); |
b76b44fb | 1735 | lockevent_inc(rtmutex_deadlock); |
3d5c9340 | 1736 | } |
23f78d4a IM |
1737 | |
1738 | /* | |
1739 | * try_to_take_rt_mutex() sets the waiter bit | |
1740 | * unconditionally. We might have to fix that up. | |
1741 | */ | |
1c0908d8 | 1742 | fixup_rt_mutex_waiters(lock, true); |
ee042be1 NK |
1743 | |
1744 | trace_contention_end(lock, ret); | |
1745 | ||
ebbdc41e TG |
1746 | return ret; |
1747 | } | |
23f78d4a | 1748 | |
ebbdc41e | 1749 | static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock, |
add46132 | 1750 | struct ww_acquire_ctx *ww_ctx, |
894d1b3d PZ |
1751 | unsigned int state, |
1752 | struct wake_q_head *wake_q) | |
ebbdc41e TG |
1753 | { |
1754 | struct rt_mutex_waiter waiter; | |
1755 | int ret; | |
1756 | ||
1757 | rt_mutex_init_waiter(&waiter); | |
add46132 | 1758 | waiter.ww_ctx = ww_ctx; |
23f78d4a | 1759 | |
add46132 | 1760 | ret = __rt_mutex_slowlock(lock, ww_ctx, state, RT_MUTEX_MIN_CHAINWALK, |
894d1b3d | 1761 | &waiter, wake_q); |
23f78d4a | 1762 | |
23f78d4a | 1763 | debug_rt_mutex_free_waiter(&waiter); |
b76b44fb | 1764 | lockevent_cond_inc(rtmutex_slow_wake, !wake_q_empty(wake_q)); |
ebbdc41e TG |
1765 | return ret; |
1766 | } | |
1767 | ||
1768 | /* | |
1769 | * rt_mutex_slowlock - Locking slowpath invoked when fast path fails | |
1770 | * @lock: The rtmutex to block on and acquire | |
add46132 | 1771 | * @ww_ctx: WW mutex context pointer |
ebbdc41e TG |
1772 | * @state: The task state for sleeping |
1773 | */ | |
1774 | static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock, | |
add46132 | 1775 | struct ww_acquire_ctx *ww_ctx, |
ebbdc41e TG |
1776 | unsigned int state) |
1777 | { | |
894d1b3d | 1778 | DEFINE_WAKE_Q(wake_q); |
ebbdc41e TG |
1779 | unsigned long flags; |
1780 | int ret; | |
1781 | ||
d14f9e93 SAS |
1782 | /* |
1783 | * Do all pre-schedule work here, before we queue a waiter and invoke | |
1784 | * PI -- any such work that trips on rtlock (PREEMPT_RT spinlock) would | |
1785 | * otherwise recurse back into task_blocks_on_rt_mutex() through | |
1786 | * rtlock_slowlock() and will then enqueue a second waiter for this | |
1787 | * same task and things get really confusing real fast. | |
1788 | */ | |
1789 | rt_mutex_pre_schedule(); | |
1790 | ||
ebbdc41e TG |
1791 | /* |
1792 | * Technically we could use raw_spin_[un]lock_irq() here, but this can | |
1793 | * be called in early boot if the cmpxchg() fast path is disabled | |
1794 | * (debug, no architecture support). In this case we will acquire the | |
1795 | * rtmutex with lock->wait_lock held. But we cannot unconditionally | |
1796 | * enable interrupts in that early boot case. So we need to use the | |
1797 | * irqsave/restore variants. | |
1798 | */ | |
1799 | raw_spin_lock_irqsave(&lock->wait_lock, flags); | |
894d1b3d | 1800 | ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state, &wake_q); |
abfdccd6 | 1801 | raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q); |
d14f9e93 | 1802 | rt_mutex_post_schedule(); |
23f78d4a IM |
1803 | |
1804 | return ret; | |
1805 | } | |
1806 | ||
830e6acc | 1807 | static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock, |
531ae4b0 TG |
1808 | unsigned int state) |
1809 | { | |
45f67f30 TG |
1810 | lockdep_assert(!current->pi_blocked_on); |
1811 | ||
af9f0063 | 1812 | if (likely(rt_mutex_try_acquire(lock))) |
531ae4b0 TG |
1813 | return 0; |
1814 | ||
add46132 | 1815 | return rt_mutex_slowlock(lock, NULL, state); |
531ae4b0 | 1816 | } |
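/*
 * Editorial sketch, not part of the original source: minimal use of the
 * public blocking API built on __rt_mutex_lock(). The interruptible
 * variant surfaces the -EINTR produced in rt_mutex_slowlock_block()
 * when a signal arrives while blocked. The demo_* names are
 * hypothetical.
 */
static DEFINE_RT_MUTEX(demo_mutex);

static int demo_locked_work(void)
{
	int ret;

	ret = rt_mutex_lock_interruptible(&demo_mutex);
	if (ret)
		return ret;	/* -EINTR: interrupted by a signal */

	/* ... critical section; waiters boost us via PI if needed ... */

	rt_mutex_unlock(&demo_mutex);
	return 0;
}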
e17ba59b | 1817 | #endif /* RT_MUTEX_BUILD_MUTEX */ |
1c143c4b TG |
1818 | |
1819 | #ifdef RT_MUTEX_BUILD_SPINLOCKS | |
1820 | /* | |
1821 | * Functions required for spin/rw_lock substitution on RT kernels | |
1822 | */ | |
1823 | ||
1824 | /** | |
1825 | * rtlock_slowlock_locked - Slow path lock acquisition for RT locks | |
1826 | * @lock: The underlying RT mutex | |
894d1b3d | 1827 | * @wake_q: The wake_q to wake tasks after we release the wait_lock |
1c143c4b | 1828 | */ |
894d1b3d PZ |
1829 | static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock, |
1830 | struct wake_q_head *wake_q) | |
77abd3b7 | 1831 | __releases(&lock->wait_lock) __acquires(&lock->wait_lock) |
1c143c4b TG |
1832 | { |
1833 | struct rt_mutex_waiter waiter; | |
992caf7f | 1834 | struct task_struct *owner; |
1c143c4b TG |
1835 | |
1836 | lockdep_assert_held(&lock->wait_lock); | |
b76b44fb | 1837 | lockevent_inc(rtlock_slowlock); |
1c143c4b | 1838 | |
b76b44fb WL |
1839 | if (try_to_take_rt_mutex(lock, current, NULL)) { |
1840 | lockevent_inc(rtlock_slow_acq1); | |
1c143c4b | 1841 | return; |
b76b44fb | 1842 | } |
1c143c4b TG |
1843 | |
1844 | rt_mutex_init_rtlock_waiter(&waiter); | |
1845 | ||
1846 | /* Save current state and set state to TASK_RTLOCK_WAIT */ | |
1847 | current_save_and_set_rtlock_wait_state(); | |
1848 | ||
ee042be1 NK |
1849 | trace_contention_begin(lock, LCB_F_RT); |
1850 | ||
894d1b3d | 1851 | task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK, wake_q); |
1c143c4b TG |
1852 | |
1853 | for (;;) { | |
1854 | /* Try to acquire the lock again */ | |
b76b44fb WL |
1855 | if (try_to_take_rt_mutex(lock, current, &waiter)) { |
1856 | lockevent_inc(rtlock_slow_acq2); | |
1c143c4b | 1857 | break; |
b76b44fb | 1858 | } |
1c143c4b | 1859 | |
992caf7f SR |
1860 | if (&waiter == rt_mutex_top_waiter(lock)) |
1861 | owner = rt_mutex_owner(lock); | |
1862 | else | |
1863 | owner = NULL; | |
abfdccd6 | 1864 | raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q); |
1c143c4b | 1865 | |
b76b44fb WL |
1866 | if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner)) { |
1867 | lockevent_inc(rtlock_slow_sleep); | |
992caf7f | 1868 | schedule_rtlock(); |
b76b44fb | 1869 | } |
1c143c4b TG |
1870 | |
1871 | raw_spin_lock_irq(&lock->wait_lock); | |
1872 | set_current_state(TASK_RTLOCK_WAIT); | |
1873 | } | |
1874 | ||
1875 | /* Restore the task state */ | |
1876 | current_restore_rtlock_saved_state(); | |
1877 | ||
1878 | /* | |
1879 | * try_to_take_rt_mutex() sets the waiter bit unconditionally. | |
1880 | * We might have to fix that up: | |
1881 | */ | |
1c0908d8 | 1882 | fixup_rt_mutex_waiters(lock, true); |
1c143c4b | 1883 | debug_rt_mutex_free_waiter(&waiter); |
ee042be1 NK |
1884 | |
1885 | trace_contention_end(lock, 0); | |
b76b44fb | 1886 | lockevent_cond_inc(rtlock_slow_wake, !wake_q_empty(wake_q)); |
1c143c4b TG |
1887 | } |
1888 | ||
1889 | static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock) | |
1890 | { | |
1891 | unsigned long flags; | |
894d1b3d | 1892 | DEFINE_WAKE_Q(wake_q); |
1c143c4b TG |
1893 | |
1894 | raw_spin_lock_irqsave(&lock->wait_lock, flags); | |
894d1b3d | 1895 | rtlock_slowlock_locked(lock, &wake_q); |
abfdccd6 | 1896 | raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q); |
1c143c4b TG |
1897 | } |
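/*
 * Editorial sketch, not part of the original source: ordinary
 * spinlock_t usage. On a PREEMPT_RT kernel spin_lock()/spin_unlock()
 * are substituted by the rtlock variants, so a contended acquisition
 * funnels into rtlock_slowlock() above and blocks in TASK_RTLOCK_WAIT
 * instead of busy-waiting. The demo_* names are hypothetical.
 */
static DEFINE_SPINLOCK(demo_rt_lock);
static int demo_rt_count;

static void demo_rt_update(void)
{
	spin_lock(&demo_rt_lock);
	demo_rt_count++;
	spin_unlock(&demo_rt_lock);
}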
1898 | ||
1899 | #endif /* RT_MUTEX_BUILD_SPINLOCKS */ |