// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Queued read/write locks
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
9 | #include <linux/smp.h> | |
10 | #include <linux/bug.h> | |
11 | #include <linux/cpumask.h> | |
12 | #include <linux/percpu.h> | |
13 | #include <linux/hardirq.h> | |
9ab6055f | 14 | #include <linux/spinlock.h> |
ee042be1 | 15 | #include <trace/events/lock.h> |
70af2f8a | 16 | |
70af2f8a | 17 | /** |
434e09e7 WL |
18 | * queued_read_lock_slowpath - acquire read lock of a queued rwlock |
19 | * @lock: Pointer to queued rwlock structure | |
70af2f8a | 20 | */ |
501f7f69 | 21 | void __lockfunc queued_read_lock_slowpath(struct qrwlock *lock) |
70af2f8a | 22 | { |
70af2f8a WL |
23 | /* |
24 | * Readers come here when they cannot get the lock without waiting | |
25 | */ | |
26 | if (unlikely(in_interrupt())) { | |
27 | /* | |
0e06e5be | 28 | * Readers in interrupt context will get the lock immediately |
b519b56e WD |
29 | * if the writer is just waiting (not holding the lock yet), |
30 | * so spin with ACQUIRE semantics until the lock is available | |
31 | * without waiting in the queue. | |
70af2f8a | 32 | */ |
d1331661 | 33 | atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED)); |
70af2f8a WL |
34 | return; |
35 | } | |
36 | atomic_sub(_QR_BIAS, &lock->cnts); | |
37 | ||
ee042be1 NK |
38 | trace_contention_begin(lock, LCB_F_SPIN | LCB_F_READ); |
39 | ||
70af2f8a WL |
40 | /* |
41 | * Put the reader into the wait queue | |
42 | */ | |
6e1e5196 | 43 | arch_spin_lock(&lock->wait_lock); |
b519b56e | 44 | atomic_add(_QR_BIAS, &lock->cnts); |
70af2f8a WL |
45 | |
46 | /* | |
77e430e3 WD |
47 | * The ACQUIRE semantics of the following spinning code ensure |
48 | * that accesses can't leak upwards out of our subsequent critical | |
49 | * section in the case that the lock is currently held for write. | |
70af2f8a | 50 | */ |
d1331661 | 51 | atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED)); |
70af2f8a WL |
52 | |
53 | /* | |
54 | * Signal the next one in queue to become queue head | |
55 | */ | |
6e1e5196 | 56 | arch_spin_unlock(&lock->wait_lock); |
ee042be1 NK |
57 | |
58 | trace_contention_end(lock, 0); | |
70af2f8a | 59 | } |
f7d71f20 | 60 | EXPORT_SYMBOL(queued_read_lock_slowpath); |
70af2f8a WL |
61 | |
62 | /** | |
434e09e7 WL |
63 | * queued_write_lock_slowpath - acquire write lock of a queued rwlock |
64 | * @lock : Pointer to queued rwlock structure | |
70af2f8a | 65 | */ |
501f7f69 | 66 | void __lockfunc queued_write_lock_slowpath(struct qrwlock *lock) |
70af2f8a | 67 | { |
84a24bf8 AS |
68 | int cnts; |
69 | ||
ee042be1 NK |
70 | trace_contention_begin(lock, LCB_F_SPIN | LCB_F_WRITE); |
71 | ||
70af2f8a | 72 | /* Put the writer into the wait queue */ |
6e1e5196 | 73 | arch_spin_lock(&lock->wait_lock); |
70af2f8a WL |
74 | |
75 | /* Try to acquire the lock directly if no reader is present */ | |
28ce0e70 WL |
76 | if (!(cnts = atomic_read(&lock->cnts)) && |
77 | atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)) | |
70af2f8a WL |
78 | goto unlock; |
79 | ||
d1331661 | 80 | /* Set the waiting flag to notify readers that a writer is pending */ |
28ce0e70 | 81 | atomic_or(_QW_WAITING, &lock->cnts); |
70af2f8a | 82 | |
d1331661 | 83 | /* When no more readers or writers, set the locked flag */ |
b519b56e | 84 | do { |
84a24bf8 AS |
85 | cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING); |
86 | } while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)); | |
70af2f8a | 87 | unlock: |
6e1e5196 | 88 | arch_spin_unlock(&lock->wait_lock); |
ee042be1 NK |
89 | |
90 | trace_contention_end(lock, 0); | |
70af2f8a | 91 | } |
f7d71f20 | 92 | EXPORT_SYMBOL(queued_write_lock_slowpath); |