Commit | Line | Data |
---|---|---|
70af2f8a WL |
1 | /* |
2 | * Queue read/write lock | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License as published by | |
6 | * the Free Software Foundation; either version 2 of the License, or | |
7 | * (at your option) any later version. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P. | |
15 | * | |
16 | * Authors: Waiman Long <waiman.long@hp.com> | |
17 | */ | |
18 | #ifndef __ASM_GENERIC_QRWLOCK_H | |
19 | #define __ASM_GENERIC_QRWLOCK_H | |
20 | ||
21 | #include <linux/atomic.h> | |
22 | #include <asm/barrier.h> | |
23 | #include <asm/processor.h> | |
24 | ||
25 | #include <asm-generic/qrwlock_types.h> | |
26 | ||
27 | /* | |
2db34e8b | 28 | * Writer states & reader shift and bias. |
29 | * | |
30 | * | +0 | +1 | +2 | +3 | | |
31 | * ----+----+----+----+----+ | |
32 | * LE | 78 | 56 | 34 | 12 | 0x12345678 | |
33 | * ----+----+----+----+----+ | |
34 | * | wr | rd | | |
35 | * +----+----+----+----+ | |
36 | * | |
37 | * ----+----+----+----+----+ | |
38 | * BE | 12 | 34 | 56 | 78 | 0x12345678 | |
39 | * ----+----+----+----+----+ | |
40 | * | rd | wr | | |
41 | * +----+----+----+----+ | |
70af2f8a WL |
42 | */ |
43 | #define _QW_WAITING 1 /* A writer is waiting */ | |
44 | #define _QW_LOCKED 0xff /* A writer holds the lock */ | |
45 | #define _QW_WMASK 0xff /* Writer mask */ | |
46 | #define _QR_SHIFT 8 /* Reader count shift */ | |
47 | #define _QR_BIAS (1U << _QR_SHIFT) | |
48 | ||
49 | /* | |
50 | * External function declarations | |
51 | */ | |
0e06e5be | 52 | extern void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts); |
f7d71f20 | 53 | extern void queued_write_lock_slowpath(struct qrwlock *lock); |
70af2f8a WL |
54 | |
55 | /** | |
f7d71f20 | 56 | * queued_read_can_lock- would read_trylock() succeed? |
70af2f8a WL |
57 | * @lock: Pointer to queue rwlock structure |
58 | */ | |
f7d71f20 | 59 | static inline int queued_read_can_lock(struct qrwlock *lock) |
70af2f8a WL |
60 | { |
61 | return !(atomic_read(&lock->cnts) & _QW_WMASK); | |
62 | } | |
63 | ||
64 | /** | |
f7d71f20 | 65 | * queued_write_can_lock- would write_trylock() succeed? |
70af2f8a WL |
66 | * @lock: Pointer to queue rwlock structure |
67 | */ | |
f7d71f20 | 68 | static inline int queued_write_can_lock(struct qrwlock *lock) |
70af2f8a WL |
69 | { |
70 | return !atomic_read(&lock->cnts); | |
71 | } | |
72 | ||
73 | /** | |
f7d71f20 | 74 | * queued_read_trylock - try to acquire read lock of a queue rwlock |
70af2f8a WL |
75 | * @lock : Pointer to queue rwlock structure |
76 | * Return: 1 if lock acquired, 0 if failed | |
77 | */ | |
f7d71f20 | 78 | static inline int queued_read_trylock(struct qrwlock *lock) |
70af2f8a WL |
79 | { |
80 | u32 cnts; | |
81 | ||
82 | cnts = atomic_read(&lock->cnts); | |
83 | if (likely(!(cnts & _QW_WMASK))) { | |
77e430e3 | 84 | cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts); |
70af2f8a WL |
85 | if (likely(!(cnts & _QW_WMASK))) |
86 | return 1; | |
87 | atomic_sub(_QR_BIAS, &lock->cnts); | |
88 | } | |
89 | return 0; | |
90 | } | |
91 | ||
92 | /** | |
f7d71f20 | 93 | * queued_write_trylock - try to acquire write lock of a queue rwlock |
70af2f8a WL |
94 | * @lock : Pointer to queue rwlock structure |
95 | * Return: 1 if lock acquired, 0 if failed | |
96 | */ | |
f7d71f20 | 97 | static inline int queued_write_trylock(struct qrwlock *lock) |
70af2f8a WL |
98 | { |
99 | u32 cnts; | |
100 | ||
101 | cnts = atomic_read(&lock->cnts); | |
102 | if (unlikely(cnts)) | |
103 | return 0; | |
104 | ||
77e430e3 WD |
105 | return likely(atomic_cmpxchg_acquire(&lock->cnts, |
106 | cnts, cnts | _QW_LOCKED) == cnts); | |
70af2f8a WL |
107 | } |
108 | /** | |
f7d71f20 | 109 | * queued_read_lock - acquire read lock of a queue rwlock |
70af2f8a WL |
110 | * @lock: Pointer to queue rwlock structure |
111 | */ | |
f7d71f20 | 112 | static inline void queued_read_lock(struct qrwlock *lock) |
70af2f8a WL |
113 | { |
114 | u32 cnts; | |
115 | ||
77e430e3 | 116 | cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts); |
70af2f8a WL |
117 | if (likely(!(cnts & _QW_WMASK))) |
118 | return; | |
119 | ||
120 | /* The slowpath will decrement the reader count, if necessary. */ | |
0e06e5be | 121 | queued_read_lock_slowpath(lock, cnts); |
70af2f8a WL |
122 | } |
123 | ||
124 | /** | |
f7d71f20 | 125 | * queued_write_lock - acquire write lock of a queue rwlock |
70af2f8a WL |
126 | * @lock : Pointer to queue rwlock structure |
127 | */ | |
f7d71f20 | 128 | static inline void queued_write_lock(struct qrwlock *lock) |
70af2f8a WL |
129 | { |
130 | /* Optimize for the unfair lock case where the fair flag is 0. */ | |
77e430e3 | 131 | if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0) |
70af2f8a WL |
132 | return; |
133 | ||
f7d71f20 | 134 | queued_write_lock_slowpath(lock); |
70af2f8a WL |
135 | } |
136 | ||
137 | /** | |
f7d71f20 | 138 | * queued_read_unlock - release read lock of a queue rwlock |
70af2f8a WL |
139 | * @lock : Pointer to queue rwlock structure |
140 | */ | |
f7d71f20 | 141 | static inline void queued_read_unlock(struct qrwlock *lock) |
70af2f8a WL |
142 | { |
143 | /* | |
144 | * Atomically decrement the reader count | |
145 | */ | |
77e430e3 | 146 | (void)atomic_sub_return_release(_QR_BIAS, &lock->cnts); |
70af2f8a WL |
147 | } |
148 | ||
2db34e8b | 149 | /** |
150 | * __qrwlock_write_byte - retrieve the write byte address of a queue rwlock | |
151 | * @lock : Pointer to queue rwlock structure | |
152 | * Return: the write byte address of a queue rwlock | |
153 | */ | |
154 | static inline u8 *__qrwlock_write_byte(struct qrwlock *lock) | |
155 | { | |
156 | return (u8 *)lock + 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN); | |
157 | } | |
158 | ||
/**
 * queued_write_unlock - release write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_write_unlock(struct qrwlock *lock)
{
	/* Clearing just the writer byte, with release ordering, drops the lock. */
	smp_store_release(__qrwlock_write_byte(lock), 0);
}
70af2f8a WL |
167 | |
168 | /* | |
169 | * Remapping rwlock architecture specific functions to the corresponding | |
170 | * queue rwlock functions. | |
171 | */ | |
f7d71f20 WL |
172 | #define arch_read_can_lock(l) queued_read_can_lock(l) |
173 | #define arch_write_can_lock(l) queued_write_can_lock(l) | |
174 | #define arch_read_lock(l) queued_read_lock(l) | |
175 | #define arch_write_lock(l) queued_write_lock(l) | |
176 | #define arch_read_trylock(l) queued_read_trylock(l) | |
177 | #define arch_write_trylock(l) queued_write_trylock(l) | |
178 | #define arch_read_unlock(l) queued_read_unlock(l) | |
179 | #define arch_write_unlock(l) queued_write_unlock(l) | |
70af2f8a WL |
180 | |
181 | #endif /* __ASM_GENERIC_QRWLOCK_H */ |