/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc..
 */
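
/**
 * arch_atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */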
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	/*
	 * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
	 * it is a non-inlined function that increases binary size and stack usage.
	 */
	return __READ_ONCE((v)->counter);
}
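
/**
 * arch_atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */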
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	__WRITE_ONCE(v->counter, i);
}
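
/**
 * arch_atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */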
static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}
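
/**
 * arch_atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */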
static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}
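
/**
 * arch_atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */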
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test
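
/**
 * arch_atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */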
static __always_inline void arch_atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_inc arch_atomic_inc
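
/**
 * arch_atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */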
static __always_inline void arch_atomic_dec(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "decl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_dec arch_atomic_dec
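
/**
 * arch_atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */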
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test
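
/**
 * arch_atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */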
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test
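
/**
 * arch_atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * the result is greater than or equal to zero.
 */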
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
}
#define arch_atomic_add_negative arch_atomic_add_negative
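
/**
 * arch_atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v.
 */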
static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
	return i + xadd(&v->counter, i);
}
#define arch_atomic_add_return arch_atomic_add_return

#define arch_atomic_sub_return(i, v)	arch_atomic_add_return(-(i), v)
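
/**
 * arch_atomic_fetch_add - add integer and return the old value
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the value @v held beforehand.
 */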
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	return xadd(&v->counter, i);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add

#define arch_atomic_fetch_sub(i, v)	arch_atomic_fetch_add(-(i), v)
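
/*
 * arch_atomic_cmpxchg(): if @v equals @old, atomically set @v to @new.
 * Either way, the value @v held at the start of the operation is returned.
 */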
static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg

static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic_xchg arch_atomic_xchg
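
/*
 * The bitwise operations below modify @v atomically via a LOCK-prefixed
 * instruction; the fetch_* variants use an arch_atomic_try_cmpxchg() loop
 * so that the value @v held before the update can also be returned.
 */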
static __always_inline void arch_atomic_and(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "andl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}

static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));

	return val;
}
#define arch_atomic_fetch_and arch_atomic_fetch_and

static __always_inline void arch_atomic_or(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "orl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}

static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));

	return val;
}
#define arch_atomic_fetch_or arch_atomic_fetch_or

static __always_inline void arch_atomic_xor(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "xorl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}

static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));

	return val;
}
#define arch_atomic_fetch_xor arch_atomic_fetch_xor

#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif

#endif /* _ASM_X86_ATOMIC_H */