/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 */
#include <linux/rwsem.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/osq_lock.h>

#include "rwsem.h"

/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 *
 * 0x0000000X	(1) X readers active or attempting lock, no writer waiting
 *		    X = #active_readers + #readers attempting to lock
 *		    (X*ACTIVE_BIAS)
 *
 * 0x00000000	rwsem is unlocked, and no one is waiting for the lock or
 *		attempting to read lock or write lock.
 *
 * 0xffff000X	(1) X readers active or attempting lock, with waiters for lock
 *		    X = #active readers + # readers attempting lock
 *		    (X*ACTIVE_BIAS + WAITING_BIAS)
 *		(2) 1 writer attempting lock, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *		(3) 1 writer active, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *
 * 0xffff0001	(1) 1 reader active or attempting lock, waiters for lock
 *		    (WAITING_BIAS + ACTIVE_BIAS)
 *		(2) 1 writer active or attempting lock, no waiters for lock
 *		    (ACTIVE_WRITE_BIAS)
 *
 * 0xffff0000	(1) There are writers or readers queued but none active
 *		    or in the process of attempting lock.
 *		    (WAITING_BIAS)
 *		Note: writer can attempt to steal lock for this count by adding
 *		ACTIVE_WRITE_BIAS in cmpxchg and checking the old count
 *
 * 0xfffe0001	(1) 1 writer active, or attempting lock. Waiters on queue.
 *		    (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and checking
 *	 the count becomes more than 0 for successful lock acquisition,
 *	 i.e. the case where there are only readers or nobody has lock.
 *	 (1st and 2nd case above).
 *
 * Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
 * checking the count becomes ACTIVE_WRITE_BIAS for successful lock
 * acquisition (i.e. nobody else has lock or attempts lock).  If
 * unsuccessful, in rwsem_down_write_failed, we'll check to see if there
 * are only waiters but none active (5th case above), and attempt to
 * steal the lock.
 *
 */

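/*
 * Worked example for the table above (illustration): on 32-bit,
 * RWSEM_ACTIVE_BIAS == 0x00000001 and RWSEM_WAITING_BIAS == 0xffff0000,
 * so two active readers plus queued waiters give
 * count == WAITING_BIAS + 2*ACTIVE_BIAS == 0xffff0002 (the 0xffff000X
 * case), while a lone write holder gives
 * ACTIVE_WRITE_BIAS == WAITING_BIAS + ACTIVE_BIAS == 0xffff0001.
 */
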
/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	sem->owner = NULL;
	osq_lock_init(&sem->osq);
#endif
}

EXPORT_SYMBOL(__init_rwsem);

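/*
 * Usage sketch (illustration, not part of the original file): rwsems are
 * normally set up through the DECLARE_RWSEM()/init_rwsem() wrappers rather
 * than by calling __init_rwsem() directly:
 *
 *	static DECLARE_RWSEM(my_sem);
 *
 *	down_read(&my_sem);	-- shared: multiple readers may hold it
 *	...
 *	up_read(&my_sem);
 *
 *	down_write(&my_sem);	-- exclusive: at most one writer
 *	...
 *	up_write(&my_sem);
 */
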
enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wakeup the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only marked woken if downgrading is false
 */
static void __rwsem_mark_wake(struct rw_semaphore *sem,
			      enum rwsem_wake_type wake_type,
			      struct wake_q_head *wake_q)
{
	struct rwsem_waiter *waiter, *tmp;
	long oldcount, woken = 0, adjustment = 0;

	/*
	 * Take a peek at the queue head waiter such that we can determine
	 * the wakeup(s) to perform.
	 */
	waiter = list_first_entry(&sem->wait_list, struct rwsem_waiter, list);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY) {
			/*
			 * Mark writer at the front of the queue for wakeup.
			 * Until the task is actually awoken later by the
			 * caller, other writers are able to steal it.
			 * Readers, on the other hand, will block as they
			 * will notice the queued writer.
			 */
			wake_q_add(wake_q, waiter->task);
		}

		return;
	}

	/*
	 * Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
		oldcount = atomic_long_fetch_add(adjustment, &sem->count);
		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
			/*
			 * If the count is still less than RWSEM_WAITING_BIAS
			 * after removing the adjustment, it is assumed that
			 * a writer has stolen the lock. We have to undo our
			 * reader grant.
			 */
			if (atomic_long_add_return(-adjustment, &sem->count) <
			    RWSEM_WAITING_BIAS)
				return;

			/* Last active locker left. Retry waking readers. */
			goto try_reader_grant;
		}
		/*
		 * It is not really necessary to set it to reader-owned here,
		 * but it gives the spinners an early indication that the
		 * readers now have the lock.
		 */
		rwsem_set_reader_owned(sem);
	}

	/*
	 * Grant an infinite number of read locks to the readers at the front
	 * of the queue. We know that woken will be at least 1 as we accounted
	 * for above. Note we increment the 'active part' of the count by the
	 * number of readers before waking any processes up.
	 */
	list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
		struct task_struct *tsk;

		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
			break;

		woken++;
		tsk = waiter->task;

		wake_q_add(wake_q, tsk);
		list_del(&waiter->list);
		/*
		 * Ensure that the last operation is setting the reader
		 * waiter to nil such that rwsem_down_read_failed() cannot
		 * race with do_exit() by always holding a reference count
		 * to the task to wakeup.
		 */
		smp_store_release(&waiter->task, NULL);
	}

	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
	if (list_empty(&sem->wait_list)) {
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;
	}

	if (adjustment)
		atomic_long_add(adjustment, &sem->count);
}

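/*
 * Worked example for the adjustment arithmetic above (illustration): when
 * called with wake_type != RWSEM_WAKE_READ_OWNED and three readers queued,
 * one RWSEM_ACTIVE_READ_BIAS was pre-added at the reader grant, so the
 * final adjustment is 3*RWSEM_ACTIVE_READ_BIAS - RWSEM_ACTIVE_READ_BIAS,
 * i.e. the bias for readers two and three; if the list drained, subtracting
 * RWSEM_WAITING_BIAS additionally clears the waiter bias from the count.
 */
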
/*
 * Wait for the read lock to be granted
 */
static inline struct rw_semaphore __sched *
__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
{
	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
	struct rwsem_waiter waiter;
	DEFINE_WAKE_Q(wake_q);

	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_READ;

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = atomic_long_add_return(adjustment, &sem->count);

	/*
	 * If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers!
	 */
	if (count == RWSEM_WAITING_BIAS ||
	    (count > RWSEM_WAITING_BIAS &&
	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	/* wait to be given the lock */
	while (true) {
		set_current_state(state);
		if (!waiter.task)
			break;
		if (signal_pending_state(state, current)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (waiter.task)
				goto out_nolock;
			raw_spin_unlock_irq(&sem->wait_lock);
			break;
		}
		schedule();
	}

	__set_current_state(TASK_RUNNING);
	return sem;
out_nolock:
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
		atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
	raw_spin_unlock_irq(&sem->wait_lock);
	__set_current_state(TASK_RUNNING);
	return ERR_PTR(-EINTR);
}

__visible struct rw_semaphore * __sched
rwsem_down_read_failed(struct rw_semaphore *sem)
{
	return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_read_failed);

__visible struct rw_semaphore * __sched
rwsem_down_read_failed_killable(struct rw_semaphore *sem)
{
	return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_read_failed_killable);

/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 */
static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
	/*
	 * Avoid trying to acquire write lock if count isn't RWSEM_WAITING_BIAS.
	 */
	if (count != RWSEM_WAITING_BIAS)
		return false;

	/*
	 * Acquire the lock by trying to set it to ACTIVE_WRITE_BIAS. If there
	 * are other tasks on the wait list, we need to add on WAITING_BIAS.
	 */
	count = list_is_singular(&sem->wait_list) ?
			RWSEM_ACTIVE_WRITE_BIAS :
			RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;

	if (atomic_long_cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count)
							== RWSEM_WAITING_BIAS) {
		rwsem_set_owner(sem);
		return true;
	}

	return false;
}

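/*
 * Concrete 32-bit values for the cmpxchg above (illustration): the old
 * count must be RWSEM_WAITING_BIAS (0xffff0000: waiters queued, none
 * active). If we are the only waiter, the new count is
 * RWSEM_ACTIVE_WRITE_BIAS (0xffff0001); with more waiters behind us the
 * WAITING_BIAS is retained, giving 0xfffe0001.
 */
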
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on the wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long old, count = atomic_long_read(&sem->count);

	while (true) {
		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
			return false;

		old = atomic_long_cmpxchg_acquire(&sem->count, count,
				      count + RWSEM_ACTIVE_WRITE_BIAS);
		if (old == count) {
			rwsem_set_owner(sem);
			return true;
		}

		count = old;
	}
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool ret = true;

	if (need_resched())
		return false;

	rcu_read_lock();
	owner = READ_ONCE(sem->owner);
	if (!rwsem_owner_is_writer(owner)) {
		/*
		 * Don't spin if the rwsem is reader-owned.
		 */
		ret = !rwsem_owner_is_reader(owner);
		goto done;
	}

	/*
	 * To cope with the lock holder preemption issue, we also skip
	 * spinning if the owner task is not on a CPU or its CPU is preempted.
	 */
	ret = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
done:
	rcu_read_unlock();
	return ret;
}

/*
 * Return true only if we can still spin on the owner field of the rwsem.
 */
static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner = READ_ONCE(sem->owner);

	if (!rwsem_owner_is_writer(owner))
		goto out;

	rcu_read_lock();
	while (sem->owner == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that sem->owner still matches owner. If that
		 * fails, owner might point to free()d memory; if it still
		 * matches, the rcu_read_lock() ensures the memory stays
		 * valid.
		 */
		barrier();

		/*
		 * Abort spinning when need_resched() is set, the owner is
		 * not running, or the owner's CPU is preempted.
		 */
		if (!owner->on_cpu || need_resched() ||
				vcpu_is_preempted(task_cpu(owner))) {
			rcu_read_unlock();
			return false;
		}

		cpu_relax();
	}
	rcu_read_unlock();
out:
	/*
	 * If there is a new owner or the owner is not set, we continue
	 * spinning.
	 */
	return !rwsem_owner_is_reader(READ_ONCE(sem->owner));
}

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	bool taken = false;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!rwsem_can_spin_on_owner(sem))
		goto done;

	if (!osq_lock(&sem->osq))
		goto done;

	/*
	 * Optimistically spin on the owner field and attempt to acquire the
	 * lock whenever the owner changes. Spinning will be stopped when:
	 *  1) the owning writer isn't running; or
	 *  2) readers own the lock as we can't determine if they are
	 *     actively running or not.
	 */
	while (rwsem_spin_on_owner(sem)) {
		/*
		 * Try to acquire the lock
		 */
		if (rwsem_try_write_lock_unqueued(sem)) {
			taken = true;
			break;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task, that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!sem->owner && (need_resched() || rt_task(current)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	return taken;
}

/*
 * Return true if the rwsem has an active spinner
 */
static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return osq_is_locked(&sem->osq);
}

#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	return false;
}

static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return false;
}
#endif

/*
 * Wait until we successfully acquire the write lock
 */
static inline struct rw_semaphore *
__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
{
	long count;
	bool waiting = true; /* any queued threads before us */
	struct rwsem_waiter waiter;
	struct rw_semaphore *ret = sem;
	DEFINE_WAKE_Q(wake_q);

	/* undo write bias from down_write operation, stop active locking */
	count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_optimistic_spin(sem))
		return sem;

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;

	raw_spin_lock_irq(&sem->wait_lock);

	/* account for this before adding a new element to the list */
	if (list_empty(&sem->wait_list))
		waiting = false;

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	if (waiting) {
		count = atomic_long_read(&sem->count);

		/*
		 * If there were already threads queued before us and there are
		 * no active writers, the lock must be read owned; so we try to
		 * wake any read locks that were queued ahead of us.
		 */
		if (count > RWSEM_WAITING_BIAS) {
			__rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
			/*
			 * The wakeup is normally called _after_ the wait_lock
			 * is released, but given that we are proactively waking
			 * readers we can deal with the wake_q overhead as it is
			 * similar to releasing and taking the wait_lock again
			 * for attempting rwsem_try_write_lock().
			 */
			wake_up_q(&wake_q);

			/*
			 * Reinitialize wake_q after use.
			 */
			wake_q_init(&wake_q);
		}
	} else
		count = atomic_long_add_return(RWSEM_WAITING_BIAS, &sem->count);

	/* wait until we successfully acquire the lock */
	set_current_state(state);
	while (true) {
		if (rwsem_try_write_lock(count, sem))
			break;
		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		do {
			if (signal_pending_state(state, current))
				goto out_nolock;

			schedule();
			set_current_state(state);
		} while ((count = atomic_long_read(&sem->count)) & RWSEM_ACTIVE_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);
	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);

	return ret;

out_nolock:
	__set_current_state(TASK_RUNNING);
	raw_spin_lock_irq(&sem->wait_lock);
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
		atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
	else
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	return ERR_PTR(-EINTR);
}

__visible struct rw_semaphore * __sched
rwsem_down_write_failed(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed);

__visible struct rw_semaphore * __sched
rwsem_down_write_failed_killable(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed_killable);

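/*
 * Usage sketch for the killable variant (illustration, not part of the
 * original file): callers use down_write_killable() and must handle the
 * -EINTR propagated from the slowpath above:
 *
 *	if (down_write_killable(&my_sem))
 *		return -EINTR;
 *	...
 *	up_write(&my_sem);
 */
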
/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	/*
	 * If a spinner is present, it is not necessary to do the wakeup.
	 * Try to do wakeup only if the trylock succeeds to minimize
	 * spinlock contention which may introduce too much delay in the
	 * unlock operation.
	 *
	 *    spinning writer		up_write/up_read caller
	 *    ---------------		-----------------------
	 * [S]   osq_unlock()		[L]   osq
	 *	 MB			      RMB
	 * [RmW] rwsem_try_write_lock() [RmW] spin_trylock(wait_lock)
	 *
	 * Here, it is important to make sure that there won't be a missed
	 * wakeup while the rwsem is free and the only spinning writer goes
	 * to sleep without taking the rwsem. Even when the spinning writer
	 * is just going to break out of the waiting loop, it will still do
	 * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
	 * rwsem_has_spinner() is true, it will guarantee at least one
	 * trylock attempt on the rwsem later on.
	 */
	if (rwsem_has_spinner(sem)) {
		/*
		 * The smp_rmb() here is to make sure that the spinner
		 * state is consulted before reading the wait_lock.
		 */
		smp_rmb();
		if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
			return sem;
		goto locked;
	}
	raw_spin_lock_irqsave(&sem->wait_lock, flags);
locked:

	if (!list_empty(&sem->wait_list))
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_wake);

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		__rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_downgrade_wake);