Commit | Line | Data |
---|---|---|
c942fddf | 1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
70af2f8a WL |
2 | /* |
3 | * Queue read/write lock | |
4 | * | |
493e2ba2 PD |
5 | * These use generic atomic and locking routines, but depend on a fair spinlock |
6 | * implementation in order to be fair themselves. The implementation in | |
7 | * asm-generic/spinlock.h meets these requirements. | |
8 | * | |
70af2f8a WL |
9 | * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P. |
10 | * | |
11 | * Authors: Waiman Long <waiman.long@hp.com> | |
12 | */ | |
13 | #ifndef __ASM_GENERIC_QRWLOCK_H | |
14 | #define __ASM_GENERIC_QRWLOCK_H | |
15 | ||
16 | #include <linux/atomic.h> | |
17 | #include <asm/barrier.h> | |
18 | #include <asm/processor.h> | |
19 | ||
20 | #include <asm-generic/qrwlock_types.h> | |
d8d0da4e WL |
21 | |
22 | /* Must be included from asm/spinlock.h after defining arch_spin_is_locked. */ | |
70af2f8a WL |
23 | |
/*
 * Writer states & reader shift and bias.
 *
 * The lock word (lock->cnts) is laid out as:
 *   bits 0-7 : writer-locked value (_QW_LOCKED)
 *   bit  8   : writer-waiting flag (_QW_WAITING)
 *   bits 9+  : reader count, incremented in units of _QR_BIAS
 */
#define	_QW_WAITING	0x100		/* A writer is waiting	   */
#define	_QW_LOCKED	0x0ff		/* A writer holds the lock */
#define	_QW_WMASK	0x1ff		/* Writer mask		   */
#define	_QR_SHIFT	9		/* Reader count shift	   */
#define _QR_BIAS	(1U << _QR_SHIFT)

/*
 * External function declarations (implemented in kernel/locking/qrwlock.c)
 */
extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);
70af2f8a | 38 | |
/**
 * queued_read_trylock - try to acquire read lock of a queued rwlock
 * @lock : Pointer to queued rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 *
 * Never blocks; fails immediately if a writer holds or waits for the lock.
 */
static inline int queued_read_trylock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_read(&lock->cnts);
	if (likely(!(cnts & _QW_WMASK))) {
		/*
		 * No writer active or waiting: optimistically add the
		 * reader bias with acquire ordering, then re-check that
		 * no writer sneaked in between the read and the add.
		 */
		cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
		if (likely(!(cnts & _QW_WMASK)))
			return 1;
		/* A writer raced in - back out our reader bias. */
		atomic_sub(_QR_BIAS, &lock->cnts);
	}
	return 0;
}
57 | ||
/**
 * queued_write_trylock - try to acquire write lock of a queued rwlock
 * @lock : Pointer to queued rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 *
 * Never blocks; succeeds only when the lock word is completely free
 * (no readers, no writer, no waiter).
 */
static inline int queued_write_trylock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_read(&lock->cnts);
	if (unlikely(cnts))
		return 0;	/* readers or a writer already present */

	/*
	 * Lock word was 0 when sampled: claim the writer byte with an
	 * acquire cmpxchg. Fails (returns 0) if anyone raced in after
	 * the atomic_read() above.
	 */
	return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
				_QW_LOCKED));
}
/**
 * queued_read_lock - acquire read lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 *
 * Fast path: unconditionally add the reader bias; if no writer is
 * active or waiting, the lock is held on return. Otherwise fall back
 * to the queued slowpath.
 */
static inline void queued_read_lock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
	if (likely(!(cnts & _QW_WMASK)))
		return;

	/* The slowpath will decrement the reader count, if necessary. */
	queued_read_lock_slowpath(lock);
}
89 | ||
/**
 * queued_write_lock - acquire write lock of a queued rwlock
 * @lock : Pointer to queued rwlock structure
 *
 * Fast path: a single acquire cmpxchg from 0 (fully free) straight to
 * _QW_LOCKED. Any contention at all diverts to the slowpath.
 */
static inline void queued_write_lock(struct qrwlock *lock)
{
	int cnts = 0;
	/* Optimize for the unfair lock case where the fair flag is 0. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
		return;

	queued_write_lock_slowpath(lock);
}
103 | ||
/**
 * queued_read_unlock - release read lock of a queued rwlock
 * @lock : Pointer to queued rwlock structure
 */
static inline void queued_read_unlock(struct qrwlock *lock)
{
	/*
	 * Atomically decrement the reader count. Release ordering keeps
	 * the reader critical section from leaking past the unlock.
	 */
	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}
115 | ||
/**
 * queued_write_unlock - release write lock of a queued rwlock
 * @lock : Pointer to queued rwlock structure
 *
 * Stores 0 to the wlocked field with release ordering. Presumably
 * wlocked aliases the low byte of the lock word (the _QW_LOCKED bits)
 * per the layout in asm-generic/qrwlock_types.h - confirm there - so
 * this clears the writer byte without disturbing the reader count or
 * the writer-waiting bit.
 */
static inline void queued_write_unlock(struct qrwlock *lock)
{
	smp_store_release(&lock->wlocked, 0);
}
70af2f8a | 124 | |
/**
 * queued_rwlock_is_contended - check if the lock is contended
 * @lock : Pointer to queued rwlock structure
 * Return: 1 if lock contended, 0 otherwise
 *
 * NOTE(review): a held wait_lock is taken here to indicate queued
 * waiters - presumably the slowpaths in qrwlock.c serialize on it;
 * verify against that file.
 */
static inline int queued_rwlock_is_contended(struct qrwlock *lock)
{
	return arch_spin_is_locked(&lock->wait_lock);
}
134 | ||
/*
 * Remapping rwlock architecture specific functions to the corresponding
 * queued rwlock functions. Architectures including this header get the
 * queued implementation as their arch_* rwlock primitives.
 */
#define arch_read_lock(l)		queued_read_lock(l)
#define arch_write_lock(l)		queued_write_lock(l)
#define arch_read_trylock(l)		queued_read_trylock(l)
#define arch_write_trylock(l)		queued_write_trylock(l)
#define arch_read_unlock(l)		queued_read_unlock(l)
#define arch_write_unlock(l)		queued_write_unlock(l)
#define arch_rwlock_is_contended(l)	queued_rwlock_is_contended(l)

#endif	/* __ASM_GENERIC_QRWLOCK_H */