/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_32_H
#define _ASM_X86_ATOMIC64_32_H

#include <linux/compiler.h>
#include <linux/types.h>
//#include <asm/cmpxchg.h>

/* A 64-bit atomic type */

typedef struct {
	s64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(val)	{ (val) }

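/*
 * Minimal usage sketch (the 'stat_count' variable is a hypothetical
 * example, not part of this file):
 *
 *	static atomic64_t stat_count = ATOMIC64_INIT(0);
 */
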
/*
 * Read an atomic64_t non-atomically.
 *
 * This is intended to be used in cases where a subsequent atomic operation
 * will handle the torn value, and can be used to prime the first iteration
 * of unconditional try_cmpxchg() loops, e.g.:
 *
 *	s64 val = arch_atomic64_read_nonatomic(v);
 *	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val OP i));
 *
 * This is NOT safe to use where the value is not always checked by a
 * subsequent atomic operation, such as in conditional try_cmpxchg() loops
 * that can break before the atomic operation, e.g.:
 *
 *	s64 val = arch_atomic64_read_nonatomic(v);
 *	do {
 *		if (condition(val))
 *			break;
 *	} while (!arch_atomic64_try_cmpxchg(v, &val, val OP i));
 */
static __always_inline s64 arch_atomic64_read_nonatomic(const atomic64_t *v)
{
	/* See comment in arch_atomic_read(). */
	return __READ_ONCE(v->counter);
}

#define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
#ifndef ATOMIC64_EXPORT
#define ATOMIC64_DECL_ONE __ATOMIC64_DECL
#else
#define ATOMIC64_DECL_ONE(sym) __ATOMIC64_DECL(sym); \
	ATOMIC64_EXPORT(atomic64_##sym)
#endif

#ifdef CONFIG_X86_CMPXCHG64
#define __alternative_atomic64(f, g, out, in, clobbers...) \
	asm volatile("call %c[func]" \
		     : ALT_OUTPUT_SP(out) \
		     : [func] "i" (atomic64_##g##_cx8) \
		       COMMA(in) \
		     : clobbers)

#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
#else
#define __alternative_atomic64(f, g, out, in, clobbers...) \
	alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \
			 X86_FEATURE_CX8, ASM_OUTPUT(out), \
			 ASM_INPUT(in), clobbers)

#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8); \
	ATOMIC64_DECL_ONE(sym##_386)

ATOMIC64_DECL_ONE(add_386);
ATOMIC64_DECL_ONE(sub_386);
ATOMIC64_DECL_ONE(inc_386);
ATOMIC64_DECL_ONE(dec_386);
#endif

#define alternative_atomic64(f, out, in, clobbers...) \
	__alternative_atomic64(f, f, ASM_OUTPUT(out), ASM_INPUT(in), clobbers)

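/*
 * Illustrative expansion (a sketch, not verbatim preprocessor output):
 * without CONFIG_X86_CMPXCHG64,
 *
 *	alternative_atomic64(inc_return, "=&A" (a), "S" (v), "memory", "ecx");
 *
 * becomes an alternative_call() that is runtime-patched to call
 * atomic64_inc_return_cx8 on CPUs with X86_FEATURE_CX8 and falls back to
 * atomic64_inc_return_386 otherwise.
 */
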
ATOMIC64_DECL(read);
ATOMIC64_DECL(set);
ATOMIC64_DECL(xchg);
ATOMIC64_DECL(add_return);
ATOMIC64_DECL(sub_return);
ATOMIC64_DECL(inc_return);
ATOMIC64_DECL(dec_return);
ATOMIC64_DECL(dec_if_positive);
ATOMIC64_DECL(inc_not_zero);
ATOMIC64_DECL(add_unless);

#undef ATOMIC64_DECL
#undef ATOMIC64_DECL_ONE
#undef __ATOMIC64_DECL
#undef ATOMIC64_EXPORT

static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return arch_cmpxchg64(&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
	return arch_try_cmpxchg64(&v->counter, old, new);
}
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg

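/*
 * On failure, arch_atomic64_try_cmpxchg() updates *old with the value
 * currently in v->counter, which is what lets the try_cmpxchg() loops
 * further down retry without an explicit re-read.
 */
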
static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
{
	s64 o;
	unsigned high = (unsigned)(n >> 32);
	unsigned low = (unsigned)n;
	alternative_atomic64(xchg,
			     "=&A" (o),
			     ASM_INPUT("S" (v), "b" (low), "c" (high)),
			     "memory");
	return o;
}
#define arch_atomic64_xchg arch_atomic64_xchg

static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned high = (unsigned)(i >> 32);
	unsigned low = (unsigned)i;
	alternative_atomic64(set,
			     /* no output */,
			     ASM_INPUT("S" (v), "b" (low), "c" (high)),
			     "eax", "edx", "memory");
}

static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 r;
	alternative_atomic64(read, "=&A" (r), "c" (v), "memory");
	return r;
}

static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
	alternative_atomic64(add_return,
			     ASM_OUTPUT("+A" (i), "+c" (v)),
			     /* no input */,
			     "memory");
	return i;
}
#define arch_atomic64_add_return arch_atomic64_add_return

static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
	alternative_atomic64(sub_return,
			     ASM_OUTPUT("+A" (i), "+c" (v)),
			     /* no input */,
			     "memory");
	return i;
}
#define arch_atomic64_sub_return arch_atomic64_sub_return

static __always_inline s64 arch_atomic64_inc_return(atomic64_t *v)
{
	s64 a;
	alternative_atomic64(inc_return,
			     "=&A" (a),
			     "S" (v),
			     "memory", "ecx");
	return a;
}
#define arch_atomic64_inc_return arch_atomic64_inc_return

static __always_inline s64 arch_atomic64_dec_return(atomic64_t *v)
{
	s64 a;
	alternative_atomic64(dec_return,
			     "=&A" (a),
			     "S" (v),
			     "memory", "ecx");
	return a;
}
#define arch_atomic64_dec_return arch_atomic64_dec_return

static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
	__alternative_atomic64(add, add_return,
			       ASM_OUTPUT("+A" (i), "+c" (v)),
			       /* no input */,
			       "memory");
}

static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
	__alternative_atomic64(sub, sub_return,
			       ASM_OUTPUT("+A" (i), "+c" (v)),
			       /* no input */,
			       "memory");
}

static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
	__alternative_atomic64(inc, inc_return,
			       /* no output */,
			       "S" (v),
			       "memory", "eax", "ecx", "edx");
}
#define arch_atomic64_inc arch_atomic64_inc

static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
	__alternative_atomic64(dec, dec_return,
			       /* no output */,
			       "S" (v),
			       "memory", "eax", "ecx", "edx");
}
#define arch_atomic64_dec arch_atomic64_dec

static __always_inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
	unsigned low = (unsigned)u;
	unsigned high = (unsigned)(u >> 32);
	alternative_atomic64(add_unless,
			     ASM_OUTPUT("+A" (a), "+c" (low), "+D" (high)),
			     "S" (v),
			     "memory");
	return (int)a;
}
#define arch_atomic64_add_unless arch_atomic64_add_unless

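/*
 * Usage sketch (the 'ref' variable and the -1 sentinel are hypothetical,
 * not part of this file): add 1 unless the counter already holds -1; a
 * non-zero return says the add actually happened:
 *
 *	if (!arch_atomic64_add_unless(&ref, 1, -1))
 *		return -EBUSY;
 */
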
static __always_inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{
	int r;
	alternative_atomic64(inc_not_zero,
			     "=&a" (r),
			     "S" (v),
			     "ecx", "edx", "memory");
	return r;
}
#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero

static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 r;
	alternative_atomic64(dec_if_positive,
			     "=&A" (r),
			     "S" (v),
			     "ecx", "memory");
	return r;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

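/*
 * Usage sketch (the 'slots' variable is a hypothetical example, not part
 * of this file): the return value is the decremented counter, and the
 * decrement is refused rather than going below zero, so a negative return
 * means no slot was taken:
 *
 *	if (arch_atomic64_dec_if_positive(&slots) < 0)
 *		return -EAGAIN;
 */
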
#undef alternative_atomic64
#undef __alternative_atomic64

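/*
 * The remaining operations have no out-of-line helpers: each is an
 * arch_atomic64_try_cmpxchg() loop primed with a non-atomic read, exactly
 * the pattern described in the arch_atomic64_read_nonatomic() comment
 * above.
 */
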
static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
}

static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));

	return val;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and

static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
}

static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));

	return val;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or

static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
}

static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));

	return val;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor

static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val + i));

	return val;
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add

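/*
 * Subtraction needs no loop of its own: subtracting i is just adding -i,
 * so fetch_sub below is derived from arch_atomic64_fetch_add().
 */
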
#define arch_atomic64_fetch_sub(i, v) arch_atomic64_fetch_add(-(i), (v))

#endif /* _ASM_X86_ATOMIC64_32_H */