arch/arm64/include/asm/atomic.h
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ATOMIC_H
#define __ASM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	ACCESS_ONCE((v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))

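/*
 * Usage sketch (illustrative only, not part of the original header):
 *
 *	static atomic_t seen = ATOMIC_INIT(0);
 *
 *	atomic_set(&seen, 1);		// plain store
 *	if (atomic_read(&seen))		// plain load
 *		...;
 *
 * Neither helper implies any memory ordering; use the *_return operations
 * or explicit barriers when ordering is required.
 */
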
/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, asm_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "\n"				\
"1:	ldxr	%w0, %2\n"						\
"	" #asm_op "	%w0, %w0, %w3\n"				\
"	stxr	%w1, %w0, %2\n"						\
"	cbnz	%w1, 1b"						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i));							\
}									\

#define ATOMIC_OP_RETURN(op, asm_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "_return\n"			\
"1:	ldxr	%w0, %2\n"						\
"	" #asm_op "	%w0, %w0, %w3\n"				\
"	stlxr	%w1, %w0, %2\n"						\
"	cbnz	%w1, 1b"						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i)							\
	: "memory");							\
									\
	smp_mb();							\
	return result;							\
}

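/*
 * Ordering note for the macros above: atomic_<op>() uses plain ldxr/stxr
 * and implies no barrier; atomic_<op>_return() uses stlxr (store-release)
 * followed by the trailing smp_mb(), giving full-barrier semantics.
 */
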
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)

#define atomic_andnot atomic_andnot

ATOMIC_OP(and, and)
ATOMIC_OP(andnot, bic)
ATOMIC_OP(or, orr)
ATOMIC_OP(xor, eor)

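/*
 * For illustration, ATOMIC_OPS(add, add) above generates atomic_add() and
 * atomic_add_return(). The former expands to (roughly):
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long tmp;
 *		int result;
 *
 *		asm volatile("// atomic_add\n"
 *	"1:	ldxr	%w0, %2\n"		// load-exclusive the counter
 *	"	add	%w0, %w0, %w3\n"	// result += i
 *	"	stxr	%w1, %w0, %2\n"		// store-exclusive; tmp == 0 on success
 *	"	cbnz	%w1, 1b"		// lost the exclusive monitor: retry
 *		: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 *		: "Ir" (i));
 *	}
 */
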
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long tmp;
	int oldval;

	smp_mb();

	asm volatile("// atomic_cmpxchg\n"
"1:	ldxr	%w1, %2\n"
"	cmp	%w1, %w3\n"
"	b.ne	2f\n"
"	stxr	%w0, %w4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc");

	smp_mb();
	return oldval;
}

#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))

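/*
 * __atomic_add_unless() below is the canonical atomic_cmpxchg() retry loop:
 * read the counter, then keep trying to swap in (c + a) until either the
 * swap succeeds or the value becomes 'u'. The generic <linux/atomic.h>
 * builds atomic_add_unless() and atomic_inc_not_zero() on top of it.
 */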
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}

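/*
 * Illustrative caller (a sketch; 'obj' and its 'refs' field are invented
 * for the example): the usual "take a reference unless the object is
 * already dead" idiom.
 *
 *	if (__atomic_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;	// refcount was 0: object being torn down
 */
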
#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v)	(atomic_add_return(i, v) < 0)

/*
 * 64-bit atomic operations.
 */
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic64_read(v)	ACCESS_ONCE((v)->counter)
#define atomic64_set(v,i)	(((v)->counter) = (i))

#define ATOMIC64_OP(op, asm_op)						\
static inline void atomic64_##op(long i, atomic64_t *v)		\
{									\
	long result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "\n"				\
"1:	ldxr	%0, %2\n"						\
"	" #asm_op "	%0, %0, %3\n"					\
"	stxr	%w1, %0, %2\n"						\
"	cbnz	%w1, 1b"						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i));							\
}									\

#define ATOMIC64_OP_RETURN(op, asm_op)					\
static inline long atomic64_##op##_return(long i, atomic64_t *v)	\
{									\
	long result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "_return\n"			\
"1:	ldxr	%0, %2\n"						\
"	" #asm_op "	%0, %0, %3\n"					\
"	stlxr	%w1, %0, %2\n"						\
"	cbnz	%w1, 1b"						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i)							\
	: "memory");							\
									\
	smp_mb();							\
	return result;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, sub)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OP(and, and)
ATOMIC64_OP(andnot, bic)
ATOMIC64_OP(or, orr)
ATOMIC64_OP(xor, eor)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{
	long oldval;
	unsigned long res;

	smp_mb();

	asm volatile("// atomic64_cmpxchg\n"
"1:	ldxr	%1, %2\n"
"	cmp	%1, %3\n"
"	b.ne	2f\n"
"	stxr	%w0, %4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc");

	smp_mb();
	return oldval;
}

#define atomic64_xchg(v, new)	(xchg(&((v)->counter), new))

static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
"1:	ldxr	%0, %2\n"
"	subs	%0, %0, #1\n"
"	b.mi	2f\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b\n"
"	dmb	ish\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}

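/*
 * Illustrative use (a sketch; 'pool' and 'available' are invented for the
 * example). atomic64_dec_if_positive() returns the decremented value, or a
 * negative value without storing if the counter was already zero or less:
 *
 *	if (atomic64_dec_if_positive(&pool->available) < 0)
 *		return -EBUSY;	// nothing left; counter was not changed
 */
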
static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;

	c = atomic64_read(v);
	while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
		c = old;

	return c != u;
}

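/*
 * Note that, unlike the 32-bit __atomic_add_unless() above, which returns
 * the old counter value, atomic64_add_unless() returns whether the add
 * happened; the atomic64_inc_not_zero() definition below relies on this.
 */
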
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif /* __KERNEL__ */
#endif /* __ASM_ATOMIC_H */