/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_64_H
#define _ASM_X86_ATOMIC64_64_H

#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>

/* The 64-bit atomic type */

#define ATOMIC64_INIT(i)	{ (i) }

/**
 * arch_atomic64_read - read atomic64 variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.
 */
static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
	return __READ_ONCE((v)->counter);
}

/**
 * arch_atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	__WRITE_ONCE(v->counter, i);
}

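/*
 * Usage sketch (illustrative only, not part of this header; "nr_events"
 * is a made-up name, and real callers normally go through the generic
 * atomic64_*() wrappers rather than the arch_*() helpers directly):
 *
 *	static atomic64_t nr_events = ATOMIC64_INIT(0);
 *
 *	arch_atomic64_set(&nr_events, 0);
 *	if (arch_atomic64_read(&nr_events) == 0)
 *		;	// nothing recorded yet
 */
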
/**
 * arch_atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "addq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter) : "memory");
}

/**
 * arch_atomic64_sub - subtract integer from atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "subq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter) : "memory");
}

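/*
 * Note on the "er" constraint used above: x86-64 add/sub take at most a
 * sign-extended 32-bit immediate, so @i must either be such an immediate
 * ("e") or live in a register ("r"). A sketch of what the compiler may
 * emit for a small constant addend (illustrative, not the only output):
 *
 *	lock addq $1024, (%rdi)		# arch_atomic64_add(1024, v)
 */
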
/**
 * arch_atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i);
}
#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test

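/*
 * Rough sketch of what GEN_BINARY_RMWcc() boils down to here (the real
 * macro may use asm goto instead of materializing a flag; this is only
 * conceptually equivalent): the locked RMW sets the CPU flags, and the
 * "e" condition turns ZF into the boolean result.
 *
 *	lock subq %[i], (%[v])
 *	sete %al			# true iff the result hit zero
 */
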
/**
 * arch_atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "incq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter) : "memory");
}
#define arch_atomic64_inc arch_atomic64_inc

/**
 * arch_atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "decq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter) : "memory");
}
#define arch_atomic64_dec arch_atomic64_dec

/**
 * arch_atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __always_inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e);
}
#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test

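/*
 * The classic reference-count shape built from these helpers (a sketch;
 * "obj", its "refs" field and release_obj() are made-up names, and real
 * refcounts should use refcount_t or the generic atomic64_*() wrappers):
 *
 *	arch_atomic64_inc(&obj->refs);		// take a reference
 *	...
 *	if (arch_atomic64_dec_and_test(&obj->refs))
 *		release_obj(obj);		// last reference dropped
 */
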
/**
 * arch_atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e);
}
#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test

/**
 * arch_atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __always_inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i);
}
#define arch_atomic64_add_negative arch_atomic64_add_negative

/**
 * arch_atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
	return i + xadd(&v->counter, i);
}
#define arch_atomic64_add_return arch_atomic64_add_return

static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
	return arch_atomic64_add_return(-i, v);
}
#define arch_atomic64_sub_return arch_atomic64_sub_return

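/*
 * xadd() exchanges the addend with the old value of the target, i.e. it
 * returns what the counter held *before* the addition; adding @i back on
 * top turns that into the post-add value that *_return() must hand out.
 * Illustrative sketch, assuming the counter holds 5:
 *
 *	old = xadd(&v->counter, 3);	// old == 5, counter now 8
 *	return 3 + old;			// add_return() yields 8
 */
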
static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
	return xadd(&v->counter, i);
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add

static __always_inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
	return xadd(&v->counter, -i);
}
#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub

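/*
 * The fetch_*() variants return the *old* value, while the *_return()
 * variants above return the *new* one. Sketch, with the counter holding
 * 5 before each call:
 *
 *	arch_atomic64_fetch_add(3, v);	// returns 5 (old), counter now 8
 *	arch_atomic64_add_return(3, v);	// returns 8 (new), counter now 8
 */
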
static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
	return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg

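/*
 * arch_try_cmpxchg() refreshes *old with the value actually found when
 * the compare fails, so retry loops need no explicit re-read. A minimal
 * sketch of an arbitrary lock-free update built on it (transform() is a
 * hypothetical pure function):
 *
 *	s64 old = arch_atomic64_read(v);
 *
 *	do {
 *	} while (!arch_atomic64_try_cmpxchg(v, &old, transform(old)));
 */
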
static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic64_xchg arch_atomic64_xchg

static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "andq %1,%0"
		     : "+m" (v->counter)
		     : "er" (i)
		     : "memory");
}

static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
	return val;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and

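/*
 * The empty do/while body above is deliberate: on failure, try_cmpxchg
 * has already refreshed @val, so "val & i" is recomputed from fresh data
 * on the next pass. Desugared, the loop behaves like this sketch (the
 * same pattern is reused by fetch_or/fetch_xor below):
 *
 *	for (;;) {
 *		s64 new = val & i;
 *		if (arch_atomic64_try_cmpxchg(v, &val, new))
 *			return val;	// val still holds the old value
 *		// on failure, val was refreshed; retry
 *	}
 */
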
static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "orq %1,%0"
		     : "+m" (v->counter)
		     : "er" (i)
		     : "memory");
}

static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
	return val;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or

static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "xorq %1,%0"
		     : "+m" (v->counter)
		     : "er" (i)
		     : "memory");
}

static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
	return val;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor

#endif /* _ASM_X86_ATOMIC64_64_H */