/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/lse.h>
#include <asm/spinlock_types.h>
#include <asm/processor.h>

/*
 * Spinlock implementation.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;

	asm volatile(
"	sevl\n"
"1:	wfe\n"
"2:	ldaxr	%w0, %2\n"
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbnz	%w1, 1b\n"
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 2b\n", /* Serialise against any concurrent lockers */
	/* LSE atomics */
"	nop\n"
"	nop\n")
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	:
	: "memory");
}
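
/*
 * Illustrative sketch (not part of the original file): roughly what the
 * asm above does, expressed with GCC __atomic builtins. A ticket lock is
 * free when the owner and next halfwords agree, which is what the single
 * "eor %w0, %w0, ror #16" test checks. The helper name is hypothetical,
 * and this model omits the stxr that serialises against in-flight
 * lockers as well as the wfe-based sleeping.
 */
static inline void spin_unlock_wait_sketch(arch_spinlock_t *lock)
{
	arch_spinlock_t v;

	do {
		/* Acquire load of the whole 32-bit lock word. */
		__atomic_load(lock, &v, __ATOMIC_ACQUIRE);
	} while (v.owner != v.next);	/* still (or again) held */
}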

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %3\n"
"1:	ldaxr	%w0, %3\n"
"	add	%w1, %w0, %w5\n"
"	stxr	%w2, %w1, %3\n"
"	cbnz	%w2, 1b\n",
	/* LSE atomics */
"	mov	%w2, %w5\n"
"	ldadda	%w2, %w0, %3\n"
"	nop\n"
"	nop\n"
"	nop\n"
	)

	/* Did we get the lock? */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
"	sevl\n"
"2:	wfe\n"
"	ldaxrh	%w2, %4\n"
"	eor	%w1, %w2, %w0, lsr #16\n"
"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
	: "memory");
}
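
/*
 * Illustrative sketch (not part of the original file, helper name
 * hypothetical): the acquire path above in plain C. Each locker
 * atomically takes a ticket from 'next' (the ldadda/ldaxr+stxr
 * sequence), then spins until 'owner' catches up with that ticket.
 * The real code sleeps in wfe instead of busy-polling and relies on
 * the unlocker's store-release to publish the critical section.
 */
static inline void spin_lock_sketch(arch_spinlock_t *lock)
{
	unsigned short ticket;

	/* Atomically increment the next ticket, keeping the old value. */
	ticket = __atomic_fetch_add(&lock->next, 1, __ATOMIC_ACQUIRE);

	/* Did we get the lock? If not, spin on the owner. */
	while (__atomic_load_n(&lock->owner, __ATOMIC_ACQUIRE) != ticket)
		cpu_relax();
}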

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %2\n"
"1:	ldaxr	%w0, %2\n"
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbnz	%w1, 2f\n"
"	add	%w0, %w0, %3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b\n"
"2:",
	/* LSE atomics */
"	ldr	%w0, %2\n"
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbnz	%w1, 1f\n"
"	add	%w1, %w0, %3\n"
"	casa	%w0, %w1, %2\n"
"	and	%w1, %w1, #0xffff\n"
"	eor	%w1, %w1, %w0, lsr #16\n"
"1:")
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "I" (1 << TICKET_SHIFT)
	: "memory");

	return !tmp;
}
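
/*
 * Illustrative sketch (not part of the original file, helper name
 * hypothetical): the trylock above as a single compare-and-swap. Fail
 * fast if owner != next; otherwise attempt once to bump 'next', which
 * is what the casa (LSE) and ldaxr/stxr (LL/SC) sequences implement.
 */
static inline int spin_trylock_sketch(arch_spinlock_t *lock)
{
	arch_spinlock_t old, newval;

	__atomic_load(lock, &old, __ATOMIC_RELAXED);
	if (old.owner != old.next)		/* already held */
		return 0;

	newval = old;
	newval.next++;				/* claim the next ticket */
	return __atomic_compare_exchange(lock, &old, &newval, 0,
					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}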

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	ldrh	%w1, %0\n"
	"	add	%w1, %w1, #1\n"
	"	stlrh	%w1, %0",
	/* LSE atomics */
	"	mov	%w1, #1\n"
	"	nop\n"
	"	staddlh	%w1, %0")
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}
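
/*
 * Illustrative sketch (not part of the original file, helper name
 * hypothetical): unlock is just a release store of owner + 1, handing
 * the lock to the next ticket holder. No atomic read-modify-write is
 * needed on the LL/SC path because only the current holder ever writes
 * 'owner'; the LSE staddlh simply folds the add and the release store
 * into one instruction.
 */
static inline void spin_unlock_sketch(arch_spinlock_t *lock)
{
	__atomic_store_n(&lock->owner, lock->owner + 1, __ATOMIC_RELEASE);
}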

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.owner == lock.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval = READ_ONCE(*lock);
	return (lockval.next - lockval.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended
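
/*
 * Worked example (not part of the original file) of the ticket
 * arithmetic above: with owner == 3 and next == 3 the lock is free;
 * after one arch_spin_lock() next == 4 and the lock is held but
 * uncontended (next - owner == 1); a second locker makes next == 5 and
 * the lock counts as contended (next - owner == 2 > 1). The halfword
 * subtraction stays correct across 16-bit wrap-around.
 */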

/*
 * Write lock implementation.
 *
 * Write locks set bit 31. Unlocking is done by writing 0 since the lock is
 * exclusively held.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 1b\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 2b\n"
	"	nop",
	/* LSE atomics */
	"1:	mov	%w0, wzr\n"
	"2:	casa	%w0, %w2, %1\n"
	"	cbz	%w0, 3f\n"
	"	ldxr	%w0, %1\n"
	"	cbz	%w0, 2b\n"
	"	wfe\n"
	"	b	1b\n"
	"3:")
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 2f\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 1b\n"
	"2:",
	/* LSE atomics */
	"	mov	%w0, wzr\n"
	"	casa	%w0, %w2, %1\n"
	"	nop\n"
	"	nop")
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");

	return !tmp;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(ARM64_LSE_ATOMIC_INSN(
	"	stlr	wzr, %0",
	"	swpl	wzr, wzr, %0")
	: "=Q" (rw->lock) :: "memory");
}
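
/*
 * Illustrative sketch (not part of the original file, helper names
 * hypothetical): the write-lock protocol above with __atomic builtins.
 * The word must go from 0 to 0x80000000 in one atomic step (the casa
 * and ldaxr+stxr sequences); unlock is a plain release store of 0.
 * The real arch_write_lock() sleeps in wfe between attempts rather
 * than busy-polling.
 */
static inline int write_trylock_sketch(arch_rwlock_t *rw)
{
	unsigned int expected = 0;

	return __atomic_compare_exchange_n(&rw->lock, &expected, 0x80000000,
					   0, __ATOMIC_ACQUIRE,
					   __ATOMIC_RELAXED);
}

static inline void write_lock_sketch(arch_rwlock_t *rw)
{
	while (!write_trylock_sketch(rw))
		cpu_relax();
}

static inline void write_unlock_sketch(arch_rwlock_t *rw)
{
	__atomic_store_n(&rw->lock, 0, __ATOMIC_RELEASE);
}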

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)	((x)->lock == 0)

/*
 * Read lock implementation.
 *
 * It exclusively loads the lock value, increments it and stores the new value
 * back if positive and the CPU still exclusively owns the location. If the
 * value is negative, the lock is already held.
 *
 * During unlocking there may be multiple active read locks but no write lock.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 *
 * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
 * and LSE implementations may exhibit different behaviour (although this
 * will have no effect on lockdep).
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
	"	sevl\n"
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	wfe\n"
	"2:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 1b\n"
	"	stxr	%w1, %w0, %2\n"
	"	nop\n"
	"	cbnz	%w1, 2b",
	/* LSE atomics */
	"1:	wfe\n"
	"2:	ldxr	%w0, %2\n"
	"	adds	%w1, %w0, #1\n"
	"	tbnz	%w1, #31, 1b\n"
	"	casa	%w0, %w1, %2\n"
	"	sbc	%w0, %w1, %w0\n"
	"	cbnz	%w0, 2b")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");
}
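
/*
 * Illustrative sketch (not part of the original file, helper name
 * hypothetical): the reader-acquire above. Readers atomically increment
 * the word but back off while the result would have bit 31 set, i.e.
 * while a writer holds the lock; the adds/tbnz pair in the asm tests
 * exactly that sign bit before committing.
 */
static inline void read_lock_sketch(arch_rwlock_t *rw)
{
	unsigned int old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);

	for (;;) {
		if (old & 0x80000000) {		/* writer active: wait */
			cpu_relax();
			old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);
			continue;
		}
		if (__atomic_compare_exchange_n(&rw->lock, &old, old + 1, 0,
						__ATOMIC_ACQUIRE,
						__ATOMIC_RELAXED))
			return;
		/* CAS failure reloaded 'old'; retry with the fresh value. */
	}
}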

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	ldxr	%w0, %2\n"
	"	sub	%w0, %w0, #1\n"
	"	stlxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b",
	/* LSE atomics */
	"	movn	%w0, #0\n"
	"	nop\n"
	"	nop\n"
	"	staddl	%w0, %2")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	mov	%w1, #1\n"
	"1:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 2f\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	"2:",
	/* LSE atomics */
	"	ldr	%w0, %2\n"
	"	adds	%w1, %w0, #1\n"
	"	tbnz	%w1, #31, 1f\n"
	"	casa	%w0, %w1, %2\n"
	"	sbc	%w1, %w1, %w0\n"
	"	nop\n"
	"1:")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");

	return !tmp2;
}
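
/*
 * Illustrative sketch (not part of the original file, helper names
 * hypothetical): reader unlock and trylock. Unlock is an atomic
 * decrement with release semantics (a staddl of -1 on the LSE path);
 * trylock makes a single attempt and gives up immediately if a writer
 * is active.
 */
static inline void read_unlock_sketch(arch_rwlock_t *rw)
{
	__atomic_fetch_sub(&rw->lock, 1, __ATOMIC_RELEASE);
}

static inline int read_trylock_sketch(arch_rwlock_t *rw)
{
	unsigned int old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);

	if (old & 0x80000000)			/* writer active */
		return 0;
	return __atomic_compare_exchange_n(&rw->lock, &old, old + 1, 0,
					   __ATOMIC_ACQUIRE,
					   __ATOMIC_RELAXED);
}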

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)	((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */