/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_64_H
#define _ASM_X86_ATOMIC64_64_H

#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>

/* The 64-bit atomic type */

#define ATOMIC64_INIT(i)	{ (i) }
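
/*
 * Illustrative use (hypothetical caller code, not part of this header;
 * kernel code normally reaches these primitives through the generated
 * atomic64_*() wrappers rather than calling arch_atomic64_*() directly):
 *
 *	static atomic64_t nr_events = ATOMIC64_INIT(0);
 */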

/**
 * arch_atomic64_read - read atomic64 variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.
 */
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}
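
/*
 * Note: READ_ONCE()/WRITE_ONCE() above and below keep the compiler from
 * tearing, fusing or caching the access; a naturally aligned 64-bit
 * load or store is atomic in hardware on x86-64.
 */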

/**
 * arch_atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	WRITE_ONCE(v->counter, i);
}

/**
 * arch_atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "addq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter) : "memory");
}

/**
 * arch_atomic64_sub - subtract integer from atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "subq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter) : "memory");
}

/**
 * arch_atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i);
}
#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
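
/*
 * GEN_{UNARY,BINARY}_RMWcc emit the locked instruction and hand the
 * named condition flag back as a bool: "e" tests ZF (result == 0), and
 * "s", used by arch_atomic64_add_negative() below, tests SF (result < 0).
 */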

/**
 * arch_atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "incq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter) : "memory");
}
#define arch_atomic64_inc arch_atomic64_inc

/**
 * arch_atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "decq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter) : "memory");
}
#define arch_atomic64_dec arch_atomic64_dec

/**
 * arch_atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e);
}
#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test

/**
 * arch_atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e);
}
#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test

/**
 * arch_atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i);
}
#define arch_atomic64_add_negative arch_atomic64_add_negative

/**
 * arch_atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
	return i + xadd(&v->counter, i);
}
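
/*
 * xadd() exchanges the addend with the counter and returns the
 * counter's previous value, hence the "i +" above to yield the new one.
 */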

static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
	return arch_atomic64_add_return(-i, v);
}

static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
	return xadd(&v->counter, i);
}

static inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
	return xadd(&v->counter, -i);
}

static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return arch_cmpxchg(&v->counter, old, new);
}

#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
	return try_cmpxchg(&v->counter, old, new);
}
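
/*
 * On failure, try_cmpxchg() writes the value the counter actually held
 * back into *@old and returns false; the fetch_{and,or,xor}() loops
 * below rely on this to retry with a fresh value.
 */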

static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{
	return arch_xchg(&v->counter, new);
}

static inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "andq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
	return val;
}

static inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "orq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
	return val;
}

static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "xorq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
	return val;
}

#endif /* _ASM_X86_ATOMIC64_64_H */