/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/rt.h>

#include "mcs_spinlock.h"

/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 *
 * 0x0000000X   (1) X readers active or attempting lock, no writer waiting
 *                  X = #active_readers + #readers attempting to lock
 *                  (X*ACTIVE_BIAS)
 *
 * 0x00000000   rwsem is unlocked, and no one is waiting for the lock or
 *              attempting to read lock or write lock.
 *
 * 0xffff000X   (1) X readers active or attempting lock, with waiters for lock
 *                  X = #active readers + #readers attempting lock
 *                  (X*ACTIVE_BIAS + WAITING_BIAS)
 *              (2) 1 writer attempting lock, no waiters for lock
 *                  X-1 = #active readers + #readers attempting lock
 *                  ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *              (3) 1 writer active, no waiters for lock
 *                  X-1 = #active readers + #readers attempting lock
 *                  ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *
 * 0xffff0001   (1) 1 reader active or attempting lock, waiters for lock
 *                  (WAITING_BIAS + ACTIVE_BIAS)
 *              (2) 1 writer active or attempting lock, no waiters for lock
 *                  (ACTIVE_WRITE_BIAS)
 *
 * 0xffff0000   (1) There are writers or readers queued but none active
 *                  or in the process of attempting lock.
 *                  (WAITING_BIAS)
 *              Note: a writer can attempt to steal the lock for this count
 *              by adding ACTIVE_WRITE_BIAS in cmpxchg and checking the old
 *              count.
 *
 * 0xfffe0001   (1) 1 writer active, or attempting lock. Waiters on queue.
 *                  (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and
 * checking that the count becomes more than 0 for successful lock
 * acquisition, i.e. the case where there are only readers or nobody has
 * the lock (1st and 2nd case above).
 *
 * Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
 * checking that the count becomes ACTIVE_WRITE_BIAS for successful lock
 * acquisition (i.e. nobody else has the lock or is attempting to lock).
 * If unsuccessful, in rwsem_down_write_failed we check whether there are
 * only waiters but none active (5th case above), and attempt to steal
 * the lock.
 */
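
/*
 * Worked example (illustrative; not part of the original header). Using
 * the 32-bit values ACTIVE_BIAS = 0x00000001, WAITING_BIAS = 0xffff0000
 * and ACTIVE_WRITE_BIAS = WAITING_BIAS + ACTIVE_BIAS = 0xffff0001, one
 * possible sequence of count transitions is:
 *
 *   down_read() on an unlocked sem:  0x00000000 + ACTIVE_BIAS       = 0x00000001
 *   a second reader arrives:         0x00000001 + ACTIVE_BIAS       = 0x00000002
 *   a writer queues (its slowpath
 *   nets out to +WAITING_BIAS):      0x00000002 + WAITING_BIAS      = 0xffff0002
 *   both readers call up_read():     0xffff0002 - 2*ACTIVE_BIAS     = 0xffff0000
 *   a second writer steals the lock
 *   via cmpxchg (see the 0xffff0000
 *   note above):                     0xffff0000 + ACTIVE_WRITE_BIAS = 0xfffe0001
 *
 * which is the last state in the table: one writer active with waiters
 * still on the queue.
 */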

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        sem->count = RWSEM_UNLOCKED_VALUE;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
        sem->owner = NULL;
        osq_lock_init(&sem->osq);
#endif
}

EXPORT_SYMBOL(__init_rwsem);
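
/*
 * Typical usage (an illustrative sketch, not part of this file): callers
 * do not invoke __init_rwsem() directly, they either define the semaphore
 * statically or go through the init_rwsem() macro, which supplies the
 * lockdep class key. my_sem and my_ctx below are made-up names:
 *
 *      static DECLARE_RWSEM(my_sem);           // static initialization
 *
 *      struct my_ctx {
 *              struct rw_semaphore sem;
 *      };
 *      ...
 *      init_rwsem(&ctx->sem);                  // runtime initialization
 */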

enum rwsem_waiter_type {
        RWSEM_WAITING_FOR_WRITE,
        RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
        RWSEM_WAKE_ANY,         /* Wake whatever's at head of wait list */
        RWSEM_WAKE_READERS,     /* Wake readers only */
        RWSEM_WAKE_READ_OWNED   /* Waker thread holds the read lock */
};

/*
 * handle the lock release when there are processes blocked on it that can
 * now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wake_type is RWSEM_WAKE_ANY
 */
static struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        struct list_head *next;
        long oldcount, woken, loop, adjustment;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
                if (wake_type == RWSEM_WAKE_ANY)
                        /* Wake writer at the front of the queue, but do not
                         * grant it the lock yet as we want other writers
                         * to be able to steal it. Readers, on the other hand,
                         * will block as they will notice the queued writer.
                         */
                        wake_up_process(waiter->task);
                goto out;
        }

        /* Writers might steal the lock before we grant it to the next reader.
         * We prefer to do the first reader grant before counting readers
         * so we can bail out early if a writer stole the lock.
         */
        adjustment = 0;
        if (wake_type != RWSEM_WAKE_READ_OWNED) {
                adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
                oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
                if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
                        /* A writer stole the lock. Undo our reader grant. */
                        if (rwsem_atomic_update(-adjustment, sem) &
                                                RWSEM_ACTIVE_MASK)
                                goto out;
                        /* Last active locker left. Retry waking readers. */
                        goto try_reader_grant;
                }
        }

        /* Grant an infinite number of read locks to the readers at the front
         * of the queue. Note we increment the 'active part' of the count by
         * the number of readers before waking any processes up.
         */
        woken = 0;
        do {
                woken++;

                if (waiter->list.next == &sem->wait_list)
                        break;

                waiter = list_entry(waiter->list.next,
                                    struct rwsem_waiter, list);

        } while (waiter->type != RWSEM_WAITING_FOR_WRITE);

        adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
        if (waiter->type != RWSEM_WAITING_FOR_WRITE)
                /* hit end of list above */
                adjustment -= RWSEM_WAITING_BIAS;

        if (adjustment)
                rwsem_atomic_add(adjustment, sem);

        next = sem->wait_list.next;
        loop = woken;
        do {
                waiter = list_entry(next, struct rwsem_waiter, list);
                next = waiter->list.next;
                tsk = waiter->task;
                smp_mb();
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
        } while (--loop);

        sem->wait_list.next = next;
        next->prev = &sem->wait_list;

 out:
        return sem;
}
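
/*
 * Illustrative trace of the reader-grant race handled above (not part of
 * the original file; the concrete scenario is made up): suppose one reader
 * waits and count == RWSEM_WAITING_BIAS. The waker adds
 * RWSEM_ACTIVE_READ_BIAS, but an unqueued writer's cmpxchg() lands first,
 * so the oldcount we observe is below RWSEM_WAITING_BIAS. We undo our
 * grant with -adjustment; if the result still shows active lockers
 * (RWSEM_ACTIVE_MASK set), we bail out and leave the wakeup to the
 * writer's eventual up_write(); otherwise the thief has already released
 * the lock and we retry the grant.
 */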

/*
 * Wait for the read lock to be granted
 */
__visible
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
        long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
        struct rwsem_waiter waiter;
        struct task_struct *tsk = current;

        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.type = RWSEM_WAITING_FOR_READ;
        get_task_struct(tsk);

        raw_spin_lock_irq(&sem->wait_lock);
        if (list_empty(&sem->wait_list))
                adjustment += RWSEM_WAITING_BIAS;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        count = rwsem_atomic_update(adjustment, sem);

        /* If there are no active locks, wake the front queued process(es).
         *
         * If there are no writers and we are first in the queue,
         * wake our own waiter to join the existing active readers!
         */
        if (count == RWSEM_WAITING_BIAS ||
            (count > RWSEM_WAITING_BIAS &&
             adjustment != -RWSEM_ACTIVE_READ_BIAS))
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

        raw_spin_unlock_irq(&sem->wait_lock);

        /* wait to be given the lock */
        while (true) {
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                if (!waiter.task)
                        break;
                schedule();
        }

        __set_task_state(tsk, TASK_RUNNING);
        return sem;
}
EXPORT_SYMBOL(rwsem_down_read_failed);
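
/*
 * Call-path note (a summary added for clarity, not original to this
 * file): the down_read() fastpath increments count by
 * RWSEM_ACTIVE_READ_BIAS and only lands here when the result went
 * negative, i.e. a writer is active or queued. The bias is therefore
 * already in the count on entry, and the initial adjustment of
 * -RWSEM_ACTIVE_READ_BIAS above converts this task's contribution from
 * "active reader" to "queued waiter" before it sleeps.
 */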

static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
        /*
         * Try acquiring the write lock. Check count first in order
         * to reduce unnecessary expensive cmpxchg() operations.
         */
        if (count == RWSEM_WAITING_BIAS &&
            cmpxchg(&sem->count, RWSEM_WAITING_BIAS,
                    RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
                if (!list_is_singular(&sem->wait_list))
                        rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
                return true;
        }

        return false;
}
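
/*
 * Note on the steal window (an explanatory summary, not original to this
 * file): count == RWSEM_WAITING_BIAS means waiters are queued but the
 * active part of the count is zero, so nobody currently holds the lock.
 * The cmpxchg() swaps in RWSEM_ACTIVE_WRITE_BIAS (= RWSEM_WAITING_BIAS +
 * RWSEM_ACTIVE_BIAS), claiming the active part while consuming the
 * waiting bias on behalf of this now-satisfied waiter; if other waiters
 * remain queued, WAITING_BIAS is put back so they stay visible in the
 * count.
 */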

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
        long old, count = ACCESS_ONCE(sem->count);

        while (true) {
                if (!(count == 0 || count == RWSEM_WAITING_BIAS))
                        return false;

                old = cmpxchg(&sem->count, count,
                              count + RWSEM_ACTIVE_WRITE_BIAS);
                if (old == count)
                        return true;

                count = old;
        }
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
        struct task_struct *owner;
        bool on_cpu = false;

        if (need_resched())
                return false;

        rcu_read_lock();
        owner = ACCESS_ONCE(sem->owner);
        if (owner)
                on_cpu = owner->on_cpu;
        rcu_read_unlock();

        /*
         * If sem->owner is not set, yet we have just recently entered the
         * slowpath, then there is a possibility reader(s) may have the lock.
         * To be safe, avoid spinning in these situations.
         */
        return on_cpu;
}

static inline bool owner_running(struct rw_semaphore *sem,
                                 struct task_struct *owner)
{
        if (sem->owner != owner)
                return false;

        /*
         * Ensure we emit the owner->on_cpu dereference _after_ checking
         * that sem->owner still matches owner. If that fails, owner might
         * point to free()d memory; if it still matches, the rcu_read_lock()
         * ensures the memory stays valid.
         */
        barrier();

        return owner->on_cpu;
}

static noinline
bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner)
{
        rcu_read_lock();
        while (owner_running(sem, owner)) {
                if (need_resched())
                        break;

                cpu_relax_lowlatency();
        }
        rcu_read_unlock();

        /*
         * We break out of the loop above on need_resched() or when the
         * owner changed, which is a sign of heavy contention. Return
         * success only when sem->owner is NULL.
         */
        return sem->owner == NULL;
}

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
        struct task_struct *owner;
        bool taken = false;

        preempt_disable();

        /* sem->wait_lock should not be held when doing optimistic spinning */
        if (!rwsem_can_spin_on_owner(sem))
                goto done;

        if (!osq_lock(&sem->osq))
                goto done;

        while (true) {
                owner = ACCESS_ONCE(sem->owner);
                if (owner && !rwsem_spin_on_owner(sem, owner))
                        break;

                /* wait_lock will be acquired if write_lock is obtained */
                if (rwsem_try_write_lock_unqueued(sem)) {
                        taken = true;
                        break;
                }

                /*
                 * When there's no owner, we might have preempted between the
                 * owner acquiring the lock and setting the owner field. If
                 * we're an RT task, that will live-lock because we won't let
                 * the owner complete.
                 */
                if (!owner && (need_resched() || rt_task(current)))
                        break;

                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
                cpu_relax_lowlatency();
        }
        osq_unlock(&sem->osq);
done:
        preempt_enable();
        return taken;
}

#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
        return false;
}
#endif
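
/*
 * Design note (a hedged summary, not original to this file): only one
 * spinner at a time is allowed to watch sem->owner; the rest queue on the
 * MCS-derived optimistic spin queue (osq, see mcs_spinlock.h). This keeps
 * at most one CPU bouncing the semaphore's cacheline while the others
 * spin locally on their own queue node, and lets a preempted spinner drop
 * out of the queue instead of stalling its successors.
 */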

/*
 * Wait until we successfully acquire the write lock
 */
__visible
struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
{
        long count;
        bool waiting = true; /* any queued threads before us */
        struct rwsem_waiter waiter;

        /* undo write bias from down_write operation, stop active locking */
        count = rwsem_atomic_update(-RWSEM_ACTIVE_WRITE_BIAS, sem);

        /* do optimistic spinning and steal lock if possible */
        if (rwsem_optimistic_spin(sem))
                return sem;

        /*
         * Optimistic spinning failed, proceed to the slowpath
         * and block until we can acquire the sem.
         */
        waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_WRITE;

        raw_spin_lock_irq(&sem->wait_lock);

        /* account for this before adding a new element to the list */
        if (list_empty(&sem->wait_list))
                waiting = false;

        list_add_tail(&waiter.list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        if (waiting) {
                count = ACCESS_ONCE(sem->count);

                /*
                 * If there were already threads queued before us and there are
                 * no active writers, the lock must be read owned; so we try to
                 * wake any read locks that were queued ahead of us.
                 */
                if (count > RWSEM_WAITING_BIAS)
                        sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS);

        } else
                count = rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);

        /* wait until we successfully acquire the lock */
        set_current_state(TASK_UNINTERRUPTIBLE);
        while (true) {
                if (rwsem_try_write_lock(count, sem))
                        break;
                raw_spin_unlock_irq(&sem->wait_lock);

                /* Block until there are no active lockers. */
                do {
                        schedule();
                        set_current_state(TASK_UNINTERRUPTIBLE);
                } while ((count = sem->count) & RWSEM_ACTIVE_MASK);

                raw_spin_lock_irq(&sem->wait_lock);
        }
        __set_current_state(TASK_RUNNING);

        list_del(&waiter.list);
        raw_spin_unlock_irq(&sem->wait_lock);

        return sem;
}
EXPORT_SYMBOL(rwsem_down_write_failed);
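
/*
 * Note (an explanatory summary, not original to this file): unlike the
 * reader slowpath, a failed writer first strips its whole
 * RWSEM_ACTIVE_WRITE_BIAS, so while spinning or sleeping it does not
 * appear as an active locker at all. The bias is re-applied only at the
 * instant the lock is actually won, by the cmpxchg() in
 * rwsem_try_write_lock() or rwsem_try_write_lock_unqueued().
 */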

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return sem;
}
EXPORT_SYMBOL(rwsem_wake);

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return sem;
}
EXPORT_SYMBOL(rwsem_downgrade_wake);
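
/*
 * Usage sketch (illustrative; my_sem is a made-up name): downgrading lets
 * a writer publish its update and keep reading without a window in which
 * another writer could slip in:
 *
 *      down_write(&my_sem);
 *      ... modify the protected data ...
 *      downgrade_write(&my_sem);       // now held for read
 *      ... continue reading the data ...
 *      up_read(&my_sem);
 */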