/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 *
 * NOTE: these functions do *not* follow the PCS and must explicitly
 * save any clobbered registers other than x0 (regardless of return
 * value).  This is achieved through -fcall-saved-* compiler flags for
 * this file, which unfortunately don't work on a per-function basis
 * (the optimize attribute silently ignores these options).
 */

#define ATOMIC_OP(op, asm_op)						\
__LL_SC_INLINE void							\
__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))				\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "\n"				\
"	prfm	pstl1strm, %2\n"					\
"1:	ldxr	%w0, %2\n"						\
"	" #asm_op "	%w0, %w0, %w3\n"				\
"	stxr	%w1, %w0, %2\n"						\
"	cbnz	%w1, 1b"						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i));							\
}									\
__LL_SC_EXPORT(atomic_##op);

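/*
 * For illustration only (not generated verbatim): ATOMIC_OP(add, add)
 * expands to roughly the following LL/SC retry loop:
 *
 *	__LL_SC_INLINE void
 *	__LL_SC_PREFIX(atomic_add(int i, atomic_t *v))
 *	{
 *		...
 *	"1:	ldxr	%w0, %2\n"		// load-exclusive v->counter
 *	"	add	%w0, %w0, %w3\n"	// apply the operation
 *	"	stxr	%w1, %w0, %2\n"		// store-exclusive; may fail
 *	"	cbnz	%w1, 1b"		// retry until the store succeeds
 *		...
 *	}
 */
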
#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)		\
__LL_SC_INLINE int							\
__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v))		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "_return" #name "\n"		\
"	prfm	pstl1strm, %2\n"					\
"1:	ld" #acq "xr	%w0, %2\n"					\
"	" #asm_op "	%w0, %w0, %w3\n"				\
"	st" #rel "xr	%w1, %w0, %2\n"					\
"	cbnz	%w1, 1b\n"						\
"	" #mb								\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i)							\
	: cl);								\
									\
	return result;							\
}									\
__LL_SC_EXPORT(atomic_##op##_return##name);

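/*
 * Template parameters, as used by the ATOMIC_OPS lists below: "name"
 * is the ordering suffix on the function name, "mb" an optional
 * trailing barrier, "acq"/"rel" select the acquire/release forms of
 * the exclusive load/store, and "cl" is the clobber list.
 */
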
#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)		\
__LL_SC_INLINE int							\
__LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v))		\
{									\
	unsigned long tmp;						\
	int val, result;						\
									\
	asm volatile("// atomic_fetch_" #op #name "\n"			\
"	prfm	pstl1strm, %3\n"					\
"1:	ld" #acq "xr	%w0, %3\n"					\
"	" #asm_op "	%w1, %w0, %w4\n"				\
"	st" #rel "xr	%w2, %w1, %3\n"					\
"	cbnz	%w2, 1b\n"						\
"	" #mb								\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)	\
	: "Ir" (i)							\
	: cl);								\
									\
	return result;							\
}									\
__LL_SC_EXPORT(atomic_fetch_##op##name);

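/*
 * Unlike the _return variants, the fetch variants return the *old*
 * value: "result" is what ldxr loaded and is never overwritten; the
 * updated value ("val") is only used for the store-exclusive.
 */
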
#define ATOMIC_OPS(...)							\
	ATOMIC_OP(__VA_ARGS__)						\
	ATOMIC_OP_RETURN(        , dmb ish,  , l, "memory", __VA_ARGS__)\
	ATOMIC_OP_RETURN(_relaxed,        ,  ,  ,         , __VA_ARGS__)\
	ATOMIC_OP_RETURN(_acquire,        , a,  , "memory", __VA_ARGS__)\
	ATOMIC_OP_RETURN(_release,        ,  , l, "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__)\
	ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)

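/*
 * The two invocations above thus generate, for example, atomic_add(),
 * atomic_add_return() and its _relaxed/_acquire/_release forms, and
 * atomic_fetch_add() with the same four orderings; likewise for sub.
 */
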
#undef ATOMIC_OPS
#define ATOMIC_OPS(...)							\
	ATOMIC_OP(__VA_ARGS__)						\
	ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__)\
	ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, orr)
ATOMIC_OPS(xor, eor)

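/*
 * ATOMIC_OPS is redefined without the ATOMIC_OP_RETURN cases here:
 * the bitwise operations get only the void and fetch_ variants, with
 * no *_return forms.
 */
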
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define ATOMIC64_OP(op, asm_op)						\
__LL_SC_INLINE void							\
__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))			\
{									\
	long result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "\n"				\
"	prfm	pstl1strm, %2\n"					\
"1:	ldxr	%0, %2\n"						\
"	" #asm_op "	%0, %0, %3\n"					\
"	stxr	%w1, %0, %2\n"						\
"	cbnz	%w1, 1b"						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i));							\
}									\
__LL_SC_EXPORT(atomic64_##op);

#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)		\
__LL_SC_INLINE long							\
__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v))	\
{									\
	long result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "_return" #name "\n"		\
"	prfm	pstl1strm, %2\n"					\
"1:	ld" #acq "xr	%0, %2\n"					\
"	" #asm_op "	%0, %0, %3\n"					\
"	st" #rel "xr	%w1, %0, %2\n"					\
"	cbnz	%w1, 1b\n"						\
"	" #mb								\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i)							\
	: cl);								\
									\
	return result;							\
}									\
__LL_SC_EXPORT(atomic64_##op##_return##name);

#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)		\
__LL_SC_INLINE long							\
__LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v))	\
{									\
	long result, val;						\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_fetch_" #op #name "\n"		\
"	prfm	pstl1strm, %3\n"					\
"1:	ld" #acq "xr	%0, %3\n"					\
"	" #asm_op "	%1, %0, %4\n"					\
"	st" #rel "xr	%w2, %1, %3\n"					\
"	cbnz	%w2, 1b\n"						\
"	" #mb								\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)	\
	: "Ir" (i)							\
	: cl);								\
									\
	return result;							\
}									\
__LL_SC_EXPORT(atomic64_fetch_##op##name);

#define ATOMIC64_OPS(...)						\
	ATOMIC64_OP(__VA_ARGS__)					\
	ATOMIC64_OP_RETURN(, dmb ish,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_OP_RETURN(_relaxed,,  ,  ,         , __VA_ARGS__)	\
	ATOMIC64_OP_RETURN(_acquire,, a,  , "memory", __VA_ARGS__)	\
	ATOMIC64_OP_RETURN(_release,,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (, dmb ish,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_relaxed,,  ,  ,         , __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, sub)

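/*
 * These mirror the 32-bit ATOMIC_OPS family above, operating on
 * atomic64_t with 64-bit (x) registers in place of w registers.
 */
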
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(...)						\
	ATOMIC64_OP(__VA_ARGS__)					\
	ATOMIC64_FETCH_OP (, dmb ish,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_relaxed,,  ,  ,         , __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(andnot, bic)
ATOMIC64_OPS(or, orr)
ATOMIC64_OPS(xor, eor)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

__LL_SC_INLINE long
__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
"	prfm	pstl1strm, %2\n"
"1:	ldxr	%0, %2\n"
"	subs	%0, %0, #1\n"
"	b.lt	2f\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b\n"
"	dmb	ish\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}
__LL_SC_EXPORT(atomic64_dec_if_positive);

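/*
 * Illustrative use (the refcount field below is hypothetical, not
 * from this file): the subs/b.lt pair bails out before the
 * store-exclusive, so a negative return means the counter was not
 * decremented.
 *
 *	if (atomic64_dec_if_positive(&foo->refcount) < 0)
 *		;	// counter was already <= 0: nothing written back
 */
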
#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl)			\
__LL_SC_INLINE unsigned long						\
__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,		\
				     unsigned long old,			\
				     unsigned long new))		\
{									\
	unsigned long tmp, oldval;					\
									\
	asm volatile(							\
	"	prfm	pstl1strm, %[v]\n"				\
	"1:	ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n"		\
	"	eor	%" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"	\
	"	cbnz	%" #w "[tmp], 2f\n"				\
	"	st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n"	\
	"	cbnz	%w[tmp], 1b\n"					\
	"	" #mb "\n"						\
	"	mov	%" #w "[oldval], %" #w "[old]\n"		\
	"2:"								\
	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),			\
	  [v] "+Q" (*(unsigned long *)ptr)				\
	: [old] "Lr" (old), [new] "r" (new)				\
	: cl);								\
									\
	return oldval;							\
}									\
__LL_SC_EXPORT(__cmpxchg_case_##name);

__CMPXCHG_CASE(w, b,     1,        ,  ,  ,         )
__CMPXCHG_CASE(w, h,     2,        ,  ,  ,         )
__CMPXCHG_CASE(w,  ,     4,        ,  ,  ,         )
__CMPXCHG_CASE( ,  ,     8,        ,  ,  ,         )
__CMPXCHG_CASE(w, b, acq_1,        , a,  , "memory")
__CMPXCHG_CASE(w, h, acq_2,        , a,  , "memory")
__CMPXCHG_CASE(w,  , acq_4,        , a,  , "memory")
__CMPXCHG_CASE( ,  , acq_8,        , a,  , "memory")
__CMPXCHG_CASE(w, b, rel_1,        ,  , l, "memory")
__CMPXCHG_CASE(w, h, rel_2,        ,  , l, "memory")
__CMPXCHG_CASE(w,  , rel_4,        ,  , l, "memory")
__CMPXCHG_CASE( ,  , rel_8,        ,  , l, "memory")
__CMPXCHG_CASE(w, b,  mb_1, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w, h,  mb_2, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w,  ,  mb_4, dmb ish,  , l, "memory")
__CMPXCHG_CASE( ,  ,  mb_8, dmb ish,  , l, "memory")
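
/*
 * Naming scheme: the numeric suffix is the operand size in bytes (the
 * b/h instruction suffixes and w/x register width are chosen to
 * match), while acq_/rel_/mb_ select acquire, release, or fully
 * ordered semantics; the unadorned names are the relaxed variants.
 */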

#undef __CMPXCHG_CASE

#define __CMPXCHG_DBL(name, mb, rel, cl)				\
__LL_SC_INLINE long							\
__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,		\
				      unsigned long old2,		\
				      unsigned long new1,		\
				      unsigned long new2,		\
				      volatile void *ptr))		\
{									\
	unsigned long tmp, ret;						\
									\
	asm volatile("// __cmpxchg_double" #name "\n"			\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ldxp	%0, %1, %2\n"					\
	"	eor	%0, %0, %3\n"					\
	"	eor	%1, %1, %4\n"					\
	"	orr	%1, %0, %1\n"					\
	"	cbnz	%1, 2f\n"					\
	"	st" #rel "xp	%w0, %5, %6, %2\n"			\
	"	cbnz	%w0, 1b\n"					\
	"	" #mb "\n"						\
	"2:"								\
	: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)	\
	: "r" (old1), "r" (old2), "r" (new1), "r" (new2)		\
	: cl);								\
									\
	return ret;							\
}									\
__LL_SC_EXPORT(__cmpxchg_double##name);

__CMPXCHG_DBL(   ,        ,  ,         )
__CMPXCHG_DBL(_mb, dmb ish, l, "memory")
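
/*
 * Return convention, following the eor/orr sequence above: ret is
 * zero when both words matched and the store-exclusive pair
 * succeeded, and non-zero when either comparison failed, in which
 * case nothing is written back.
 */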

#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LL_SC_H */