arm64: atomics: implement atomic{,64}_cmpxchg using cmpxchg
arch/arm64/include/asm/atomic_lse.h
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

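/*
 * Each operation below is written as two alternative code sequences: a
 * branch to the out-of-line LL/SC routines generated from atomic_ll_sc.h,
 * and the equivalent LSE instruction(s). ARM64_LSE_ATOMIC_INSN patches in
 * the LSE sequence at boot on CPUs that implement the ARMv8.1 atomics; the
 * explicit nops keep both alternatives the same length. Because the LL/SC
 * fallback is reached via a "bl", x30 (the link register) is clobbered,
 * and the LSE sequences reuse it as a scratch register.
 */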
#define __LL_SC_ATOMIC(op)	__LL_SC_CALL(atomic_##op)

static inline void atomic_andnot(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(andnot),
	"	stclr	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic_or(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(or),
	"	stset	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic_xor(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(xor),
	"	steor	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic_add(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(add),
	"	stadd	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

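/*
 * Value-returning operations cannot use the store-only forms: ldaddal
 * atomically adds %w[i] to the counter and returns the *old* value in
 * w30, so the following add reconstructs the new value for the caller.
 */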
static inline int atomic_add_return(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC(add_return),
	/* LSE atomics */
	"	ldaddal	%w[i], w30, %[v]\n"
	"	add	%w[i], %w[i], w30")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30", "memory");

	return w0;
}

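/*
 * There is no LSE "atomic AND" instruction, so AND is implemented by
 * inverting the operand and atomically clearing those bits with stclr.
 */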
static inline void atomic_and(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC(and),
	/* LSE atomics */
	"	mvn	%w[i], %w[i]\n"
	"	stclr	%w[i], %[v]")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

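/*
 * Likewise there is no atomic subtract: negate the operand and fall
 * back on stadd (or ldaddal for the value-returning variant below).
 */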
static inline void atomic_sub(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC(sub),
	/* LSE atomics */
	"	neg	%w[i], %w[i]\n"
	"	stadd	%w[i], %[v]")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC(sub_return)
	"	nop",
	/* LSE atomics */
	"	neg	%w[i], %w[i]\n"
	"	ldaddal	%w[i], w30, %[v]\n"
	"	add	%w[i], %w[i], w30")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30", "memory");

	return w0;
}

#undef __LL_SC_ATOMIC

#define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(atomic64_##op)

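/*
 * The atomic64_* operations below mirror the 32-bit versions above,
 * using the full x registers and the 64-bit forms of the same
 * instructions.
 */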
static inline void atomic64_andnot(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(andnot),
	"	stclr	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic64_or(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(or),
	"	stset	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic64_xor(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(xor),
	"	steor	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic64_add(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(add),
	"	stadd	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC64(add_return),
	/* LSE atomics */
	"	ldaddal	%[i], x30, %[v]\n"
	"	add	%[i], %[i], x30")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30", "memory");

	return x0;
}

static inline void atomic64_and(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC64(and),
	/* LSE atomics */
	"	mvn	%[i], %[i]\n"
	"	stclr	%[i], %[v]")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic64_sub(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC64(sub),
	/* LSE atomics */
	"	neg	%[i], %[i]\n"
	"	stadd	%[i], %[v]")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline long atomic64_sub_return(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC64(sub_return)
	"	nop",
	/* LSE atomics */
	"	neg	%[i], %[i]\n"
	"	ldaddal	%[i], x30, %[v]\n"
	"	add	%[i], %[i], x30")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30", "memory");

	return x0;
}

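/*
 * The LSE sequence below is a compare-and-swap loop: load the counter
 * into x30, compute the decremented value, bail out early if the result
 * would be negative, then attempt to casal it back. The two final
 * subtractions leave x30 zero only when the value observed by casal
 * matches the value originally loaded (i.e. the swap took effect);
 * otherwise we retry.
 */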
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	register long x0 asm ("x0") = (long)v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC64(dec_if_positive)
	"	nop\n"
	"	nop\n"
	"	nop\n"
	"	nop\n"
	"	nop",
	/* LSE atomics */
	"1:	ldr	x30, %[v]\n"
	"	subs	%[ret], x30, #1\n"
	"	b.mi	2f\n"
	"	casal	x30, %[ret], %[v]\n"
	"	sub	x30, x30, #1\n"
	"	sub	x30, x30, %[ret]\n"
	"	cbnz	x30, 1b\n"
	"2:")
	: [ret] "+&r" (x0), [v] "+Q" (v->counter)
	:
	: "x30", "cc", "memory");

	return x0;
}

#undef __LL_SC_ATOMIC64

#define __LL_SC_CMPXCHG(op)	__LL_SC_CALL(__cmpxchg_case_##op)

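/*
 * Each __cmpxchg_case_* helper returns the value found at *ptr; the
 * caller compares it against "old" to detect success. The LSE sequence
 * builds the appropriate cas/casb/cash (or casal/casalb/casalh)
 * instruction from the size and barrier arguments, with w30/x30 acting
 * as the scratch register that carries "old" in and the observed value
 * out (x30 is clobbered by the LL/SC fallback's "bl" anyway).
 */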
#define __CMPXCHG_CASE(w, sz, name, mb, cl...)				\
static inline unsigned long __cmpxchg_case_##name(volatile void *ptr,	\
						  unsigned long old,	\
						  unsigned long new)	\
{									\
	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
	register unsigned long x1 asm ("x1") = old;			\
	register unsigned long x2 asm ("x2") = new;			\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"nop\n"								\
	__LL_SC_CMPXCHG(name)						\
	"nop",								\
	/* LSE atomics */						\
	"	mov	" #w "30, %" #w "[old]\n"			\
	"	cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n"		\
	"	mov	%" #w "[ret], " #w "30")			\
	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr)		\
	: [old] "r" (x1), [new] "r" (x2)				\
	: "x30" , ##cl);						\
									\
	return x0;							\
}

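/* One unordered and one fully-ordered (mb_) variant per access size. */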
__CMPXCHG_CASE(w, b,     1,   )
__CMPXCHG_CASE(w, h,     2,   )
__CMPXCHG_CASE(w,  ,     4,   )
__CMPXCHG_CASE(x,  ,     8,   )
__CMPXCHG_CASE(w, b, mb_1, al, "memory")
__CMPXCHG_CASE(w, h, mb_2, al, "memory")
__CMPXCHG_CASE(w,  , mb_4, al, "memory")
__CMPXCHG_CASE(x,  , mb_8, al, "memory")

#undef __LL_SC_CMPXCHG
#undef __CMPXCHG_CASE

#define __LL_SC_CMPXCHG_DBL(op)	__LL_SC_CALL(__cmpxchg_double##op)

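/*
 * casp atomically compares and swaps a pair of adjacent 64-bit words,
 * returning the values it observed in the first register pair. The
 * eor/eor/orr sequence folds those into a single status in x0, which
 * is zero only if both words matched the expected old values (i.e.
 * the swap took place).
 */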
#define __CMPXCHG_DBL(name, mb, cl...)					\
static inline int __cmpxchg_double##name(unsigned long old1,		\
					 unsigned long old2,		\
					 unsigned long new1,		\
					 unsigned long new2,		\
					 volatile void *ptr)		\
{									\
	unsigned long oldval1 = old1;					\
	unsigned long oldval2 = old2;					\
	register unsigned long x0 asm ("x0") = old1;			\
	register unsigned long x1 asm ("x1") = old2;			\
	register unsigned long x2 asm ("x2") = new1;			\
	register unsigned long x3 asm ("x3") = new2;			\
	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	nop\n"							\
	"	nop\n"							\
	"	nop\n"							\
	__LL_SC_CMPXCHG_DBL(name),					\
	/* LSE atomics */						\
	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
	"	eor	%[old1], %[old1], %[oldval1]\n"			\
	"	eor	%[old2], %[old2], %[oldval2]\n"			\
	"	orr	%[old1], %[old1], %[old2]")			\
	: [old1] "+r" (x0), [old2] "+r" (x1),				\
	  [v] "+Q" (*(unsigned long *)ptr)				\
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
	: "x30" , ##cl);						\
									\
	return x0;							\
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __LL_SC_CMPXCHG_DBL
#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LSE_H */