/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

/*
 * To ensure dependency ordering is preserved for the _relaxed and
 * _release atomics, an smp_read_barrier_depends() is unconditionally
 * inserted into the _relaxed variants, which are used to build the
 * barriered versions. To avoid redundant back-to-back fences, we can
 * define the _acquire and _fence versions explicitly.
 */
#define __atomic_op_acquire(op, args...)	op##_relaxed(args)
#define __atomic_op_fence			__atomic_op_release

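/*
 * Illustration (a sketch of how the generic <linux/atomic.h> layer uses the
 * hooks above; none of this is defined in this file): the generic code builds
 * the ordered variants roughly as
 *
 *	#define atomic_add_return_acquire(...) \
 *		__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
 *
 * so with the override above this collapses to a plain call to
 * atomic_add_return_relaxed().  The smp_read_barrier_depends() baked into the
 * _relaxed variant (a full barrier on Alpha) already supplies the acquire
 * ordering, so no extra fence is emitted.
 */
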
#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
#define atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int i, atomic_t * v)		\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}

#define ATOMIC_OP_RETURN(op, asm_op)					\
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_read_barrier_depends();					\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, asm_op)					\
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_read_barrier_depends();					\
	return result;							\
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long i, atomic64_t * v)		\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}

#define ATOMIC64_OP_RETURN(op, asm_op)					\
static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_read_barrier_depends();					\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, asm_op)					\
static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_read_barrier_depends();					\
	return result;							\
}

#define ATOMIC_OPS(op)							\
	ATOMIC_OP(op, op##l)						\
	ATOMIC_OP_RETURN(op, op##l)					\
	ATOMIC_FETCH_OP(op, op##l)					\
	ATOMIC64_OP(op, op##q)						\
	ATOMIC64_OP_RETURN(op, op##q)					\
	ATOMIC64_FETCH_OP(op, op##q)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

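/*
 * Note (explanatory, not in the original): each ATOMIC_OPS(op) invocation
 * above instantiates atomic_##op(), atomic_##op##_return_relaxed() and
 * atomic_fetch_##op##_relaxed() plus their atomic64_* counterparts, so
 * ATOMIC_OPS(add) uses the addl/addq instructions and ATOMIC_OPS(sub) uses
 * subl/subq.
 */
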
#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#define atomic_andnot atomic_andnot
#define atomic64_andnot atomic64_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm)						\
	ATOMIC_OP(op, asm)						\
	ATOMIC_FETCH_OP(op, asm)					\
	ATOMIC64_OP(op, asm)						\
	ATOMIC64_FETCH_OP(op, asm)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)

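/*
 * Note (explanatory, not in the original): "bis" and "bic" are the Alpha
 * mnemonics for bitwise OR ("bit set") and AND-NOT ("bit clear"), which is
 * why they back the "or" and "andnot" operations here.
 */
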
#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

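/*
 * Usage sketch (illustration only; "counter", "expected" and "next" are
 * hypothetical names, not part of this header):
 *
 *	old = atomic_cmpxchg(&counter, expected, next);
 *	if (old == expected)
 *		... the swap took place ...
 *
 * Both sets of wrappers simply forward to the generic cmpxchg()/xchg()
 * helpers pulled in from <asm/cmpxchg.h>.
 */
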
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"
	"	addl	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stl_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
	: "memory");
	smp_mb();
	return old;
}

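/*
 * Usage sketch (illustration only, not part of the original header): the
 * classic "take a reference only if the object is still live" pattern, where
 * "refs" is a hypothetical atomic_t:
 *
 *	if (__atomic_add_unless(&refs, 1, 0) == 0)
 *		... the count was already zero, no reference was taken ...
 */
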
/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns true iff @v was not @u.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[tmp],%[mem]\n"
	"	cmpeq	%[tmp],%[u],%[c]\n"
	"	addq	%[tmp],%[a],%[tmp]\n"
	"	bne	%[c],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [tmp] "=&r"(tmp), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
	: "memory");
	smp_mb();
	return !c;
}

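/*
 * Note (explanatory, not in the original): the cmpeq above sets c non-zero
 * exactly when the old counter equalled @u, in which case the bne skips the
 * store; returning !c therefore reports whether the add actually happened,
 * matching the "returns true iff @v was not @u" contract above.
 */
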
/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long old, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"
	"	subq	%[old],1,%[tmp]\n"
	"	ble	%[old],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [tmp] "=&r"(tmp)
	: [mem] "m"(*v)
	: "memory");
	smp_mb();
	return old - 1;
}

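/*
 * Usage sketch (illustration only; "count" is a hypothetical atomic64_t):
 *
 *	if (atomic64_dec_if_positive(&count) < 0)
 *		... count was already <= 0 and was left unchanged ...
 *
 * Note the return value is old - 1 even when no decrement took place.
 */
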
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

#endif /* _ALPHA_ATOMIC_H */