/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/asm-const.h>
#include <asm/asm-compat.h>
/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_acquire_fence()					\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
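
/*
 * Illustrative note (not part of the original header): the generic code in
 * <linux/atomic.h> builds the acquire/release variants out of the _relaxed
 * primitives defined below, roughly as
 *
 *	ret = arch_atomic_add_return_relaxed(i, v);
 *	__atomic_acquire_fence();
 *
 * which is why this file only needs to provide the two fences above and
 * the _relaxed forms.
 */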
static __inline__ int arch_atomic_read(const atomic_t *v)
{
	int t;

	/* -mprefixed can generate offsets beyond range, fall back hack */
	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
		__asm__ __volatile__("lwz %0,0(%1)" : "=r"(t) : "b"(&v->counter));
	else
		__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));

	return t;
}
static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
	/* -mprefixed can generate offsets beyond range, fall back hack */
	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
		__asm__ __volatile__("stw %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
	else
		__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
}
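
/*
 * Illustrative usage (not part of the original header): via the
 * <linux/atomic.h> wrappers these compile to the plain lwz/stw above;
 * they make the access itself atomic but impose no ordering:
 *
 *	atomic_t refs = ATOMIC_INIT(1);
 *	int snapshot = atomic_read(&refs);
 *	atomic_set(&refs, 0);
 */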
#define ATOMIC_OP(op, asm_op, suffix, sign, ...)			\
static __inline__ void arch_atomic_##op(int a, atomic_t *v)		\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op "%I2" suffix " %0,%0,%2\n"				\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r"#sign (a), "r" (&v->counter)				\
	: "cc", ##__VA_ARGS__);						\
}
#define ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ...)		\
static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v) \
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op "%I2" suffix " %0,%0,%2\n"				\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r"#sign (a), "r" (&v->counter)				\
	: "cc", ##__VA_ARGS__);						\
									\
	return t;							\
}
#define ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ...)		\
static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op "%I3" suffix " %1,%0,%3\n"				\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r"#sign (a), "r" (&v->counter)				\
	: "cc", ##__VA_ARGS__);						\
									\
	return res;							\
}
#define ATOMIC_OPS(op, asm_op, suffix, sign, ...)			\
	ATOMIC_OP(op, asm_op, suffix, sign, ##__VA_ARGS__)		\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)

ATOMIC_OPS(add, add, "c", I, "xer")
ATOMIC_OPS(sub, sub, "c", I, "xer")
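
/*
 * Illustrative note (not part of the original header):
 * ATOMIC_OPS(add, add, "c", I, "xer") expands to arch_atomic_add(),
 * arch_atomic_add_return_relaxed() and arch_atomic_fetch_add_relaxed().
 * The generated ll/sc loop is roughly:
 *
 *	1: lwarx  t,0,&v->counter	# load-reserve
 *	   addc   t,t,a			# "addic t,t,a" when a is a constant;
 *					#   the "%I2" modifier and the I/rI
 *					#   constraint select the immediate form
 *	   stwcx. t,0,&v->counter	# store-conditional
 *	   bne-   1b			# retry if the reservation was lost
 *
 * addc/addic update the carry bit, which is why "xer" is in the clobber list.
 */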
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed

#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op, suffix, sign)				\
	ATOMIC_OP(op, asm_op, suffix, sign)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign)

ATOMIC_OPS(and, and, ".", K)
ATOMIC_OPS(or, or, "", K)
ATOMIC_OPS(xor, xor, "", K)
#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP
/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add%I2c	%0,%0,%2 \n"
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	sub%I2c	%0,%0,%2 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "rI" (a), "r" (u)
	: "cc", "memory", "xer");

	return t;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
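
/*
 * Illustrative usage (not part of the original header; "obj" and "refs"
 * are made-up names): the common pattern built on this primitive is
 * "take a reference unless the count is already zero":
 *
 *	if (atomic_fetch_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;	// count was 0, nothing was added
 */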
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int arch_atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
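
/*
 * Illustrative usage (not part of the original header; "pool" and
 * "available" are made-up names): because the old value minus 1 is
 * returned even when no decrement happened, a negative result means the
 * counter was already 0 and is unchanged:
 *
 *	if (atomic_dec_if_positive(&pool->available) < 0)
 *		return -EBUSY;	// no slot left, counter untouched
 */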
#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 t;

	/* -mprefixed can generate offsets beyond range, fall back hack */
	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
		__asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter));
	else
		__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : DS_FORM_CONSTRAINT (v->counter));

	return t;
}
static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
{
	/* -mprefixed can generate offsets beyond range, fall back hack */
	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
		__asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
	else
		__asm__ __volatile__("std%U0%X0 %1,%0" : "=" DS_FORM_CONSTRAINT (v->counter) : "r"(i));
}
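
/*
 * Illustrative note (not part of the original header): ld/std are DS-form
 * instructions, whose displacement must be a multiple of 4, so the 64-bit
 * read/set above use the DS_FORM_CONSTRAINT operand constraint rather than
 * a plain "m" operand to keep the compiler from picking an offset the
 * instruction cannot encode.
 */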
#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void arch_atomic64_##op(s64 a, atomic64_t *v)	\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}
#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline s64							\
arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)		\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}
#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline s64							\
arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)		\
{									\
	s64 res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
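
/*
 * Illustrative note (not part of the original header): the 64-bit sub
 * variant uses "subf" (subtract from), whose operand order is reversed,
 * so "subf %0,%2,%0" computes counter - a.  Plain add/subf do not touch
 * the carry bit, hence no "xer" clobber here, unlike the 32-bit
 * addc/subc forms above.
 */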
#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed

#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)
#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP
static __inline__ void arch_atomic64_inc(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define arch_atomic64_inc arch_atomic64_inc
static __inline__ s64 arch_atomic64_inc_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
static __inline__ void arch_atomic64_dec(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define arch_atomic64_dec arch_atomic64_dec
static __inline__ s64 arch_atomic64_dec_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
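
/*
 * Illustrative note (not part of the original header): the inc/dec
 * helpers above use "addic" with an immediate of +1/-1; addic updates
 * the CA bit in the XER register, which is why "xer" appears in their
 * clobber lists.
 */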
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int arch_atomic64_inc_not_zero(atomic64_t *v)
{
	s64 t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}
#define arch_atomic64_inc_not_zero(v) arch_atomic64_inc_not_zero((v))
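
/*
 * Illustrative usage (not part of the original header; "obj" and "refcnt"
 * are made-up names): the classic use is taking a reference only while at
 * least one other reference is still held:
 *
 *	if (!atomic64_inc_not_zero(&obj->refcnt))
 *		return NULL;	// object is already being torn down
 */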
#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */