/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#include <linux/stringify.h>

#ifndef CONFIG_CC_HAS_K_CONSTRAINT
#define K
#endif

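/*
 * Note added for clarity (not part of the upstream header): when the
 * toolchain is not trusted with the 'K' immediate constraint
 * (CONFIG_CC_HAS_K_CONSTRAINT unset), K is defined to nothing above, so
 * __stringify(K) in the macros below yields an empty string and the
 * affected operands fall back to a plain "r" register constraint.
 */
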
/*
 * AArch64 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, asm_op, constraint)				\
static __always_inline void						\
__ll_sc_atomic_##op(int i, atomic_t *v)					\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "\n"				\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ldxr	%w0, %2\n"					\
	"	" #asm_op "	%w0, %w0, %w3\n"			\
	"	stxr	%w1, %w0, %2\n"					\
	"	cbnz	%w1, 1b\n"					\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: __stringify(constraint) "r" (i));				\
}
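
/*
 * Illustrative expansion (added in this edit, not in the original file):
 * ATOMIC_OP(add, add, I) generates roughly the following, with
 * __stringify(I) "r" collapsing to the combined "Ir" constraint string:
 *
 *	static __always_inline void
 *	__ll_sc_atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long tmp;
 *		int result;
 *
 *		asm volatile("// atomic_add\n"
 *		"	prfm	pstl1strm, %2\n"
 *		"1:	ldxr	%w0, %2\n"
 *		"	add	%w0, %w0, %w3\n"
 *		"	stxr	%w1, %w0, %2\n"
 *		"	cbnz	%w1, 1b\n"
 *		: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 *		: "Ir" (i));
 *	}
 */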

#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
static __always_inline int						\
__ll_sc_atomic_##op##_return##name(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "_return" #name "\n"		\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ld" #acq "xr	%w0, %2\n"				\
	"	" #asm_op "	%w0, %w0, %w3\n"			\
	"	st" #rel "xr	%w1, %w0, %2\n"				\
	"	cbnz	%w1, 1b\n"					\
	"	" #mb							\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: __stringify(constraint) "r" (i)				\
	: cl);								\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)	\
static __always_inline int						\
__ll_sc_atomic_fetch_##op##name(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int val, result;						\
									\
	asm volatile("// atomic_fetch_" #op #name "\n"			\
	"	prfm	pstl1strm, %3\n"				\
	"1:	ld" #acq "xr	%w0, %3\n"				\
	"	" #asm_op "	%w1, %w0, %w4\n"			\
	"	st" #rel "xr	%w2, %w1, %3\n"				\
	"	cbnz	%w2, 1b\n"					\
	"	" #mb							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)	\
	: __stringify(constraint) "r" (i)				\
	: cl);								\
									\
	return result;							\
}

#define ATOMIC_OPS(...)							\
	ATOMIC_OP(__VA_ARGS__)						\
	ATOMIC_OP_RETURN(        , dmb ish,  , l, "memory", __VA_ARGS__)\
	ATOMIC_OP_RETURN(_relaxed,        ,  ,  ,         , __VA_ARGS__)\
	ATOMIC_OP_RETURN(_acquire,        , a,  , "memory", __VA_ARGS__)\
	ATOMIC_OP_RETURN(_release,        ,  , l, "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__)\
	ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)

ATOMIC_OPS(add, add, I)
ATOMIC_OPS(sub, sub, J)
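
/*
 * For reference (comment added in this edit, not in the original file):
 * the two ATOMIC_OPS() lines above stamp out the full LL/SC family for
 * add and sub.  ATOMIC_OPS(add, add, I), for example, defines:
 *
 *	__ll_sc_atomic_add
 *	__ll_sc_atomic_add_return	(+ _relaxed/_acquire/_release)
 *	__ll_sc_atomic_fetch_add	(+ _relaxed/_acquire/_release)
 */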

#undef ATOMIC_OPS
#define ATOMIC_OPS(...)							\
	ATOMIC_OP(__VA_ARGS__)						\
	ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__)\
	ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)

ATOMIC_OPS(and, and, K)
ATOMIC_OPS(or, orr, K)
ATOMIC_OPS(xor, eor, K)
/*
 * GAS converts the mysterious and undocumented BIC (immediate) alias to
 * an AND (immediate) instruction with the immediate inverted. We don't
 * have a constraint for this, so fall back to register.
 */
ATOMIC_OPS(andnot, bic, )

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define ATOMIC64_OP(op, asm_op, constraint)				\
static __always_inline void						\
__ll_sc_atomic64_##op(s64 i, atomic64_t *v)				\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "\n"				\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ldxr	%0, %2\n"					\
	"	" #asm_op "	%0, %0, %3\n"				\
	"	stxr	%w1, %0, %2\n"					\
	"	cbnz	%w1, 1b"					\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: __stringify(constraint) "r" (i));				\
}

#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
static __always_inline long						\
__ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v)		\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "_return" #name "\n"		\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ld" #acq "xr	%0, %2\n"				\
	"	" #asm_op "	%0, %0, %3\n"				\
	"	st" #rel "xr	%w1, %0, %2\n"				\
	"	cbnz	%w1, 1b\n"					\
	"	" #mb							\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: __stringify(constraint) "r" (i)				\
	: cl);								\
									\
	return result;							\
}

#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
static __always_inline long						\
__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v)			\
{									\
	s64 result, val;						\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_fetch_" #op #name "\n"		\
	"	prfm	pstl1strm, %3\n"				\
	"1:	ld" #acq "xr	%0, %3\n"				\
	"	" #asm_op "	%1, %0, %4\n"				\
	"	st" #rel "xr	%w2, %1, %3\n"				\
	"	cbnz	%w2, 1b\n"					\
	"	" #mb							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)	\
	: __stringify(constraint) "r" (i)				\
	: cl);								\
									\
	return result;							\
}

#define ATOMIC64_OPS(...)						\
	ATOMIC64_OP(__VA_ARGS__)					\
	ATOMIC64_OP_RETURN(, dmb ish,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_OP_RETURN(_relaxed,,  ,  ,         , __VA_ARGS__)	\
	ATOMIC64_OP_RETURN(_acquire,, a,  , "memory", __VA_ARGS__)	\
	ATOMIC64_OP_RETURN(_release,,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (, dmb ish,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_relaxed,,  ,  ,         , __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(add, add, I)
ATOMIC64_OPS(sub, sub, J)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(...)						\
	ATOMIC64_OP(__VA_ARGS__)					\
	ATOMIC64_FETCH_OP (, dmb ish,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_relaxed,,  ,  ,         , __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(and, and, L)
ATOMIC64_OPS(or, orr, L)
ATOMIC64_OPS(xor, eor, L)
/*
 * GAS converts the mysterious and undocumented BIC (immediate) alias to
 * an AND (immediate) instruction with the immediate inverted. We don't
 * have a constraint for this, so fall back to register.
 */
ATOMIC64_OPS(andnot, bic, )

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __always_inline s64
__ll_sc_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	subs	%0, %0, #1\n"
	"	b.lt	2f\n"
	"	stlxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	"	dmb	ish\n"
	"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}
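
/*
 * Behaviour sketch (comment added in this edit, not in the original file):
 * with v->counter == 2 the loop stores 1 and returns 1; with
 * v->counter == 0 the "b.lt 2f" branch skips the store and the function
 * returns -1, so callers treat a negative return as "not decremented".
 */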

#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint)	\
static __always_inline u##sz						\
__ll_sc__cmpxchg_case_##name##sz(volatile void *ptr,			\
					 unsigned long old,		\
					 u##sz new)			\
{									\
	unsigned long tmp;						\
	u##sz oldval;							\
									\
	/*								\
	 * Sub-word sizes require explicit casting so that the compare	\
	 * part of the cmpxchg doesn't end up interpreting non-zero	\
	 * upper bits of the register containing "old".			\
	 */								\
	if (sz < 32)							\
		old = (u##sz)old;					\
									\
	asm volatile(							\
	"	prfm	pstl1strm, %[v]\n"				\
	"1:	ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n"		\
	"	eor	%" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"	\
	"	cbnz	%" #w "[tmp], 2f\n"				\
	"	st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n"	\
	"	cbnz	%w[tmp], 1b\n"					\
	"	" #mb "\n"						\
	"2:"								\
	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),			\
	  [v] "+Q" (*(u##sz *)ptr)					\
	: [old] __stringify(constraint) "r" (old), [new] "r" (new)	\
	: cl);								\
									\
	return oldval;							\
}
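
/*
 * Illustrative expansion (added in this edit, not in the original file):
 * assuming CONFIG_CC_HAS_K_CONSTRAINT is set, so K survives stringification,
 * the __CMPXCHG_CASE(w,  , mb_, 32, dmb ish,  , l, "memory", K) line below
 * generates roughly the following (the sub-word cast compiles away for
 * sz == 32):
 *
 *	static __always_inline u32
 *	__ll_sc__cmpxchg_case_mb_32(volatile void *ptr, unsigned long old,
 *				    u32 new)
 *	{
 *		unsigned long tmp;
 *		u32 oldval;
 *
 *		asm volatile(
 *		"	prfm	pstl1strm, %[v]\n"
 *		"1:	ldxr	%w[oldval], %[v]\n"
 *		"	eor	%w[tmp], %w[oldval], %w[old]\n"
 *		"	cbnz	%w[tmp], 2f\n"
 *		"	stlxr	%w[tmp], %w[new], %[v]\n"
 *		"	cbnz	%w[tmp], 1b\n"
 *		"	dmb ish\n"
 *		"2:"
 *		: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),
 *		  [v] "+Q" (*(u32 *)ptr)
 *		: [old] "Kr" (old), [new] "r" (new)
 *		: "memory");
 *
 *		return oldval;
 *	}
 */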

/*
 * Earlier versions of GCC (no later than 8.1.0) appear to incorrectly
 * handle the 'K' constraint for the value 4294967295 - thus we use no
 * constraint for 32 bit operations.
 */
__CMPXCHG_CASE(w, b,     ,  8,        ,  ,  ,         , K)
__CMPXCHG_CASE(w, h,     , 16,        ,  ,  ,         , K)
__CMPXCHG_CASE(w,  ,     , 32,        ,  ,  ,         , K)
__CMPXCHG_CASE( ,  ,     , 64,        ,  ,  ,         , L)
__CMPXCHG_CASE(w, b, acq_,  8,        , a,  , "memory", K)
__CMPXCHG_CASE(w, h, acq_, 16,        , a,  , "memory", K)
__CMPXCHG_CASE(w,  , acq_, 32,        , a,  , "memory", K)
__CMPXCHG_CASE( ,  , acq_, 64,        , a,  , "memory", L)
__CMPXCHG_CASE(w, b, rel_,  8,        ,  , l, "memory", K)
__CMPXCHG_CASE(w, h, rel_, 16,        ,  , l, "memory", K)
__CMPXCHG_CASE(w,  , rel_, 32,        ,  , l, "memory", K)
__CMPXCHG_CASE( ,  , rel_, 64,        ,  , l, "memory", L)
__CMPXCHG_CASE(w, b, mb_,   8, dmb ish,  , l, "memory", K)
__CMPXCHG_CASE(w, h, mb_,  16, dmb ish,  , l, "memory", K)
__CMPXCHG_CASE(w,  , mb_,  32, dmb ish,  , l, "memory", K)
__CMPXCHG_CASE( ,  , mb_,  64, dmb ish,  , l, "memory", L)

#undef __CMPXCHG_CASE

union __u128_halves {
	u128 full;
	struct {
		u64 low, high;
	};
};

#define __CMPXCHG128(name, mb, rel, cl...)				\
static __always_inline u128						\
__ll_sc__cmpxchg128##name(volatile u128 *ptr, u128 old, u128 new)	\
{									\
	union __u128_halves r, o = { .full = (old) },			\
			       n = { .full = (new) };			\
	unsigned int tmp;						\
									\
	asm volatile("// __cmpxchg128" #name "\n"			\
	"	prfm	pstl1strm, %[v]\n"				\
	"1:	ldxp	%[rl], %[rh], %[v]\n"				\
	"	cmp	%[rl], %[ol]\n"					\
	"	ccmp	%[rh], %[oh], 0, eq\n"				\
	"	b.ne	2f\n"						\
	"	st" #rel "xp	%w[tmp], %[nl], %[nh], %[v]\n"		\
	"	cbnz	%w[tmp], 1b\n"					\
	"	" #mb "\n"						\
	"2:"								\
	: [v] "+Q" (*(u128 *)ptr),					\
	  [rl] "=&r" (r.low), [rh] "=&r" (r.high),			\
	  [tmp] "=&r" (tmp)						\
	: [ol] "r" (o.low), [oh] "r" (o.high),				\
	  [nl] "r" (n.low), [nh] "r" (n.high)				\
	: "cc", ##cl);							\
									\
	return r.full;							\
}

__CMPXCHG128(   ,        ,  )
__CMPXCHG128(_mb, dmb ish, l, "memory")
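
/*
 * For reference (comment added in this edit, not in the original file):
 * the two instantiations above define __ll_sc__cmpxchg128() and
 * __ll_sc__cmpxchg128_mb().  The _mb variant uses a store-release (stlxp)
 * plus a trailing "dmb ish" and a "memory" clobber for the fully ordered
 * form; the plain variant emits no barriers and is therefore relaxed.
 */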

#undef __CMPXCHG128

#undef K

#endif /* __ASM_ATOMIC_LL_SC_H */