// SPDX-License-Identifier: GPL-2.0

// Generated by scripts/atomic/gen-atomic-fallback.sh
// DO NOT MODIFY THIS FILE DIRECTLY

#ifndef _LINUX_ATOMIC_FALLBACK_H
#define _LINUX_ATOMIC_FALLBACK_H

#include <linux/compiler.h>

#if defined(arch_xchg)
#define raw_xchg arch_xchg
#elif defined(arch_xchg_relaxed)
#define raw_xchg(...) \
	__atomic_op_fence(arch_xchg, __VA_ARGS__)
#else
extern void raw_xchg_not_implemented(void);
#define raw_xchg(...) raw_xchg_not_implemented()
#endif

#if defined(arch_xchg_acquire)
#define raw_xchg_acquire arch_xchg_acquire
#elif defined(arch_xchg_relaxed)
#define raw_xchg_acquire(...) \
	__atomic_op_acquire(arch_xchg, __VA_ARGS__)
#elif defined(arch_xchg)
#define raw_xchg_acquire arch_xchg
#else
extern void raw_xchg_acquire_not_implemented(void);
#define raw_xchg_acquire(...) raw_xchg_acquire_not_implemented()
#endif

#if defined(arch_xchg_release)
#define raw_xchg_release arch_xchg_release
#elif defined(arch_xchg_relaxed)
#define raw_xchg_release(...) \
	__atomic_op_release(arch_xchg, __VA_ARGS__)
#elif defined(arch_xchg)
#define raw_xchg_release arch_xchg
#else
extern void raw_xchg_release_not_implemented(void);
#define raw_xchg_release(...) raw_xchg_release_not_implemented()
#endif

#if defined(arch_xchg_relaxed)
#define raw_xchg_relaxed arch_xchg_relaxed
#elif defined(arch_xchg)
#define raw_xchg_relaxed arch_xchg
#else
extern void raw_xchg_relaxed_not_implemented(void);
#define raw_xchg_relaxed(...) raw_xchg_relaxed_not_implemented()
#endif
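
/*
 * For illustration: if an architecture supplies only arch_xchg_relaxed(),
 * the selections above compose the stronger orderings from it, so that
 * raw_xchg() behaves roughly like (a sketch of what __atomic_op_fence()
 * expands to):
 *
 *	({
 *		typeof(arch_xchg_relaxed(ptr, new)) __ret;
 *		__atomic_pre_full_fence();
 *		__ret = arch_xchg_relaxed(ptr, new);
 *		__atomic_post_full_fence();
 *		__ret;
 *	})
 *
 * with raw_xchg_acquire()/raw_xchg_release() using __atomic_acquire_fence()
 * and __atomic_release_fence() via __atomic_op_acquire()/__atomic_op_release().
 */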

#if defined(arch_cmpxchg)
#define raw_cmpxchg arch_cmpxchg
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg(...) \
	__atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
#else
extern void raw_cmpxchg_not_implemented(void);
#define raw_cmpxchg(...) raw_cmpxchg_not_implemented()
#endif

#if defined(arch_cmpxchg_acquire)
#define raw_cmpxchg_acquire arch_cmpxchg_acquire
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_acquire(...) \
	__atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_acquire arch_cmpxchg
#else
extern void raw_cmpxchg_acquire_not_implemented(void);
#define raw_cmpxchg_acquire(...) raw_cmpxchg_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg_release)
#define raw_cmpxchg_release arch_cmpxchg_release
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_release(...) \
	__atomic_op_release(arch_cmpxchg, __VA_ARGS__)
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_release arch_cmpxchg
#else
extern void raw_cmpxchg_release_not_implemented(void);
#define raw_cmpxchg_release(...) raw_cmpxchg_release_not_implemented()
#endif

#if defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_relaxed arch_cmpxchg_relaxed
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_relaxed arch_cmpxchg
#else
extern void raw_cmpxchg_relaxed_not_implemented(void);
#define raw_cmpxchg_relaxed(...) raw_cmpxchg_relaxed_not_implemented()
#endif

#if defined(arch_cmpxchg64)
#define raw_cmpxchg64 arch_cmpxchg64
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64(...) \
	__atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
#else
extern void raw_cmpxchg64_not_implemented(void);
#define raw_cmpxchg64(...) raw_cmpxchg64_not_implemented()
#endif

#if defined(arch_cmpxchg64_acquire)
#define raw_cmpxchg64_acquire arch_cmpxchg64_acquire
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_acquire(...) \
	__atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_acquire arch_cmpxchg64
#else
extern void raw_cmpxchg64_acquire_not_implemented(void);
#define raw_cmpxchg64_acquire(...) raw_cmpxchg64_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg64_release)
#define raw_cmpxchg64_release arch_cmpxchg64_release
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_release(...) \
	__atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_release arch_cmpxchg64
#else
extern void raw_cmpxchg64_release_not_implemented(void);
#define raw_cmpxchg64_release(...) raw_cmpxchg64_release_not_implemented()
#endif

#if defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_relaxed arch_cmpxchg64_relaxed
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_relaxed arch_cmpxchg64
#else
extern void raw_cmpxchg64_relaxed_not_implemented(void);
#define raw_cmpxchg64_relaxed(...) raw_cmpxchg64_relaxed_not_implemented()
#endif

#if defined(arch_cmpxchg128)
#define raw_cmpxchg128 arch_cmpxchg128
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128(...) \
	__atomic_op_fence(arch_cmpxchg128, __VA_ARGS__)
#else
extern void raw_cmpxchg128_not_implemented(void);
#define raw_cmpxchg128(...) raw_cmpxchg128_not_implemented()
#endif

#if defined(arch_cmpxchg128_acquire)
#define raw_cmpxchg128_acquire arch_cmpxchg128_acquire
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_acquire(...) \
	__atomic_op_acquire(arch_cmpxchg128, __VA_ARGS__)
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_acquire arch_cmpxchg128
#else
extern void raw_cmpxchg128_acquire_not_implemented(void);
#define raw_cmpxchg128_acquire(...) raw_cmpxchg128_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg128_release)
#define raw_cmpxchg128_release arch_cmpxchg128_release
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_release(...) \
	__atomic_op_release(arch_cmpxchg128, __VA_ARGS__)
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_release arch_cmpxchg128
#else
extern void raw_cmpxchg128_release_not_implemented(void);
#define raw_cmpxchg128_release(...) raw_cmpxchg128_release_not_implemented()
#endif

#if defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_relaxed arch_cmpxchg128_relaxed
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_relaxed arch_cmpxchg128
#else
extern void raw_cmpxchg128_relaxed_not_implemented(void);
#define raw_cmpxchg128_relaxed(...) raw_cmpxchg128_relaxed_not_implemented()
#endif

#if defined(arch_try_cmpxchg)
#define raw_try_cmpxchg arch_try_cmpxchg
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg(...) \
	__atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
#else
#define raw_try_cmpxchg(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif
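
/*
 * Usage sketch: on failure raw_try_cmpxchg() writes the value it observed
 * back through @_oldp, so a compare-and-swap loop does not need to re-read
 * the variable. A hypothetical bounded-increment helper (not part of this
 * API) could look like:
 *
 *	static bool hypothetical_inc_below(unsigned long *p, unsigned long limit)
 *	{
 *		unsigned long old = READ_ONCE(*p);
 *
 *		do {
 *			if (old >= limit)
 *				return false;
 *		} while (!raw_try_cmpxchg(p, &old, old + 1));
 *
 *		return true;
 *	}
 */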

#if defined(arch_try_cmpxchg_acquire)
#define raw_try_cmpxchg_acquire arch_try_cmpxchg_acquire
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_acquire(...) \
	__atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_acquire arch_try_cmpxchg
#else
#define raw_try_cmpxchg_acquire(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_acquire((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg_release)
#define raw_try_cmpxchg_release arch_try_cmpxchg_release
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_release(...) \
	__atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_release arch_try_cmpxchg
#else
#define raw_try_cmpxchg_release(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_release((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_relaxed arch_try_cmpxchg_relaxed
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_relaxed arch_try_cmpxchg
#else
#define raw_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_relaxed((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64 arch_try_cmpxchg64
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64(...) \
	__atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__)
#else
#define raw_try_cmpxchg64(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_acquire)
#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64_acquire
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_acquire(...) \
	__atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__)
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_acquire(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_acquire((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_release)
#define raw_try_cmpxchg64_release arch_try_cmpxchg64_release
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_release(...) \
	__atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__)
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_release arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_release(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_release((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64_relaxed
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_relaxed((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128 arch_try_cmpxchg128
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128(...) \
	__atomic_op_fence(arch_try_cmpxchg128, __VA_ARGS__)
#else
#define raw_try_cmpxchg128(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_acquire)
#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128_acquire
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_acquire(...) \
	__atomic_op_acquire(arch_try_cmpxchg128, __VA_ARGS__)
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_acquire(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_acquire((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_release)
#define raw_try_cmpxchg128_release arch_try_cmpxchg128_release
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_release(...) \
	__atomic_op_release(arch_try_cmpxchg128, __VA_ARGS__)
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_release arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_release(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_release((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128_relaxed
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_relaxed(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_relaxed((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_cmpxchg_local arch_cmpxchg_local

#ifdef arch_try_cmpxchg_local
#define raw_try_cmpxchg_local arch_try_cmpxchg_local
#else
#define raw_try_cmpxchg_local(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_local((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_cmpxchg64_local arch_cmpxchg64_local

#ifdef arch_try_cmpxchg64_local
#define raw_try_cmpxchg64_local arch_try_cmpxchg64_local
#else
#define raw_try_cmpxchg64_local(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_local((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_cmpxchg128_local arch_cmpxchg128_local

#ifdef arch_try_cmpxchg128_local
#define raw_try_cmpxchg128_local arch_try_cmpxchg128_local
#else
#define raw_try_cmpxchg128_local(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_local((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_sync_cmpxchg arch_sync_cmpxchg

#ifdef arch_sync_try_cmpxchg
#define raw_sync_try_cmpxchg arch_sync_try_cmpxchg
#else
#define raw_sync_try_cmpxchg(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_sync_cmpxchg((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif
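
/*
 * The _local and sync_ forms above map directly to the architecture
 * definitions, with only the try_ variants given a generic fallback. For
 * example, a hypothetical this-CPU-only slot (the _local form implies no
 * atomicity against other CPUs) could be claimed with:
 *
 *	unsigned long old = 0;
 *	bool claimed = raw_try_cmpxchg_local(this_cpu_ptr(&slot), &old, id);
 */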

/**
 * raw_atomic_read() - atomic load with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically loads the value of @v with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_read() elsewhere.
 *
 * Return: The value loaded from @v.
 */
static __always_inline int
raw_atomic_read(const atomic_t *v)
{
	return arch_atomic_read(v);
}

/**
 * raw_atomic_read_acquire() - atomic load with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically loads the value of @v with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_read_acquire() elsewhere.
 *
 * Return: The value loaded from @v.
 */
static __always_inline int
raw_atomic_read_acquire(const atomic_t *v)
{
#if defined(arch_atomic_read_acquire)
	return arch_atomic_read_acquire(v);
#else
	int ret;

	if (__native_word(atomic_t)) {
		ret = smp_load_acquire(&(v)->counter);
	} else {
		ret = raw_atomic_read(v);
		__atomic_acquire_fence();
	}

	return ret;
#endif
}

/**
 * raw_atomic_set() - atomic set with relaxed ordering
 * @v: pointer to atomic_t
 * @i: int value to assign
 *
 * Atomically sets @v to @i with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_set() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_set(atomic_t *v, int i)
{
	arch_atomic_set(v, i);
}

/**
 * raw_atomic_set_release() - atomic set with release ordering
 * @v: pointer to atomic_t
 * @i: int value to assign
 *
 * Atomically sets @v to @i with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_set_release() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_set_release(atomic_t *v, int i)
{
#if defined(arch_atomic_set_release)
	arch_atomic_set_release(v, i);
#else
	if (__native_word(atomic_t)) {
		smp_store_release(&(v)->counter, i);
	} else {
		__atomic_release_fence();
		raw_atomic_set(v, i);
	}
#endif
}

/**
 * raw_atomic_add() - atomic add with relaxed ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_add(int i, atomic_t *v)
{
	arch_atomic_add(i, v);
}

/**
 * raw_atomic_add_return() - atomic add with full ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_add_return(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return)
	return arch_atomic_add_return(i, v);
#elif defined(arch_atomic_add_return_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_add_return_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_add_return"
#endif
}
37f8173d 575
ad811070
MR
576/**
577 * raw_atomic_add_return_acquire() - atomic add with acquire ordering
578 * @i: int value to add
579 * @v: pointer to atomic_t
580 *
581 * Atomically updates @v to (@v + @i) with acquire ordering.
582 *
583 * Safe to use in noinstr code; prefer atomic_add_return_acquire() elsewhere.
584 *
585 * Return: The updated value of @v.
586 */
37f8173d 587static __always_inline int
9257959a 588raw_atomic_add_return_acquire(int i, atomic_t *v)
37f8173d 589{
1d78814d
MR
590#if defined(arch_atomic_add_return_acquire)
591 return arch_atomic_add_return_acquire(i, v);
592#elif defined(arch_atomic_add_return_relaxed)
37f8173d
PZ
593 int ret = arch_atomic_add_return_relaxed(i, v);
594 __atomic_acquire_fence();
595 return ret;
9257959a 596#elif defined(arch_atomic_add_return)
1d78814d 597 return arch_atomic_add_return(i, v);
9257959a
MR
598#else
599#error "Unable to define raw_atomic_add_return_acquire"
37f8173d 600#endif
1d78814d 601}
37f8173d 602
ad811070
MR
603/**
604 * raw_atomic_add_return_release() - atomic add with release ordering
605 * @i: int value to add
606 * @v: pointer to atomic_t
607 *
608 * Atomically updates @v to (@v + @i) with release ordering.
609 *
610 * Safe to use in noinstr code; prefer atomic_add_return_release() elsewhere.
611 *
612 * Return: The updated value of @v.
613 */
37f8173d 614static __always_inline int
9257959a 615raw_atomic_add_return_release(int i, atomic_t *v)
37f8173d 616{
1d78814d
MR
617#if defined(arch_atomic_add_return_release)
618 return arch_atomic_add_return_release(i, v);
619#elif defined(arch_atomic_add_return_relaxed)
37f8173d
PZ
620 __atomic_release_fence();
621 return arch_atomic_add_return_relaxed(i, v);
9257959a 622#elif defined(arch_atomic_add_return)
1d78814d 623 return arch_atomic_add_return(i, v);
9257959a
MR
624#else
625#error "Unable to define raw_atomic_add_return_release"
37f8173d 626#endif
1d78814d 627}
37f8173d 628
ad811070
MR
629/**
630 * raw_atomic_add_return_relaxed() - atomic add with relaxed ordering
631 * @i: int value to add
632 * @v: pointer to atomic_t
633 *
634 * Atomically updates @v to (@v + @i) with relaxed ordering.
635 *
636 * Safe to use in noinstr code; prefer atomic_add_return_relaxed() elsewhere.
637 *
638 * Return: The updated value of @v.
639 */
1d78814d
MR
640static __always_inline int
641raw_atomic_add_return_relaxed(int i, atomic_t *v)
642{
9257959a 643#if defined(arch_atomic_add_return_relaxed)
1d78814d 644 return arch_atomic_add_return_relaxed(i, v);
9257959a 645#elif defined(arch_atomic_add_return)
1d78814d 646 return arch_atomic_add_return(i, v);
9257959a
MR
647#else
648#error "Unable to define raw_atomic_add_return_relaxed"
649#endif
1d78814d 650}
9257959a 651
ad811070
MR
652/**
653 * raw_atomic_fetch_add() - atomic add with full ordering
654 * @i: int value to add
655 * @v: pointer to atomic_t
656 *
657 * Atomically updates @v to (@v + @i) with full ordering.
658 *
659 * Safe to use in noinstr code; prefer atomic_fetch_add() elsewhere.
660 *
661 * Return: The original value of @v.
662 */
37f8173d 663static __always_inline int
9257959a 664raw_atomic_fetch_add(int i, atomic_t *v)
37f8173d 665{
1d78814d
MR
666#if defined(arch_atomic_fetch_add)
667 return arch_atomic_fetch_add(i, v);
668#elif defined(arch_atomic_fetch_add_relaxed)
37f8173d
PZ
669 int ret;
670 __atomic_pre_full_fence();
9257959a 671 ret = arch_atomic_fetch_add_relaxed(i, v);
37f8173d
PZ
672 __atomic_post_full_fence();
673 return ret;
9257959a
MR
674#else
675#error "Unable to define raw_atomic_fetch_add"
37f8173d 676#endif
1d78814d 677}
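
/*
 * Note the differing return conventions of the two families above: the
 * add_return() forms return the new value, the fetch_add() forms the old
 * one. Illustration:
 *
 *	atomic_t seq = ATOMIC_INIT(0);
 *
 *	int new = raw_atomic_add_return(1, &seq);	// new == 1
 *	int old = raw_atomic_fetch_add(1, &seq);	// old == 1, seq is now 2
 */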

/**
 * raw_atomic_fetch_add_acquire() - atomic add with acquire ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add_acquire)
	return arch_atomic_fetch_add_acquire(i, v);
#elif defined(arch_atomic_fetch_add_relaxed)
	int ret = arch_atomic_fetch_add_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_add)
	return arch_atomic_fetch_add(i, v);
#else
#error "Unable to define raw_atomic_fetch_add_acquire"
#endif
}

/**
 * raw_atomic_fetch_add_release() - atomic add with release ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add_release)
	return arch_atomic_fetch_add_release(i, v);
#elif defined(arch_atomic_fetch_add_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_add_relaxed(i, v);
#elif defined(arch_atomic_fetch_add)
	return arch_atomic_fetch_add(i, v);
#else
#error "Unable to define raw_atomic_fetch_add_release"
#endif
}

/**
 * raw_atomic_fetch_add_relaxed() - atomic add with relaxed ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add_relaxed)
	return arch_atomic_fetch_add_relaxed(i, v);
#elif defined(arch_atomic_fetch_add)
	return arch_atomic_fetch_add(i, v);
#else
#error "Unable to define raw_atomic_fetch_add_relaxed"
#endif
}

/**
 * raw_atomic_sub() - atomic subtract with relaxed ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_sub(int i, atomic_t *v)
{
	arch_atomic_sub(i, v);
}

/**
 * raw_atomic_sub_return() - atomic subtract with full ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return)
	return arch_atomic_sub_return(i, v);
#elif defined(arch_atomic_sub_return_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_sub_return_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_sub_return"
#endif
}

/**
 * raw_atomic_sub_return_acquire() - atomic subtract with acquire ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return_acquire)
	return arch_atomic_sub_return_acquire(i, v);
#elif defined(arch_atomic_sub_return_relaxed)
	int ret = arch_atomic_sub_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_sub_return)
	return arch_atomic_sub_return(i, v);
#else
#error "Unable to define raw_atomic_sub_return_acquire"
#endif
}

/**
 * raw_atomic_sub_return_release() - atomic subtract with release ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return_release(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return_release)
	return arch_atomic_sub_return_release(i, v);
#elif defined(arch_atomic_sub_return_relaxed)
	__atomic_release_fence();
	return arch_atomic_sub_return_relaxed(i, v);
#elif defined(arch_atomic_sub_return)
	return arch_atomic_sub_return(i, v);
#else
#error "Unable to define raw_atomic_sub_return_release"
#endif
}

/**
 * raw_atomic_sub_return_relaxed() - atomic subtract with relaxed ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return_relaxed)
	return arch_atomic_sub_return_relaxed(i, v);
#elif defined(arch_atomic_sub_return)
	return arch_atomic_sub_return(i, v);
#else
#error "Unable to define raw_atomic_sub_return_relaxed"
#endif
}

/**
 * raw_atomic_fetch_sub() - atomic subtract with full ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub)
	return arch_atomic_fetch_sub(i, v);
#elif defined(arch_atomic_fetch_sub_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_sub_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_fetch_sub"
#endif
}

/**
 * raw_atomic_fetch_sub_acquire() - atomic subtract with acquire ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub_acquire)
	return arch_atomic_fetch_sub_acquire(i, v);
#elif defined(arch_atomic_fetch_sub_relaxed)
	int ret = arch_atomic_fetch_sub_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_sub)
	return arch_atomic_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic_fetch_sub_acquire"
#endif
}

/**
 * raw_atomic_fetch_sub_release() - atomic subtract with release ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub_release)
	return arch_atomic_fetch_sub_release(i, v);
#elif defined(arch_atomic_fetch_sub_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_sub_relaxed(i, v);
#elif defined(arch_atomic_fetch_sub)
	return arch_atomic_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic_fetch_sub_release"
#endif
}

/**
 * raw_atomic_fetch_sub_relaxed() - atomic subtract with relaxed ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub_relaxed)
	return arch_atomic_fetch_sub_relaxed(i, v);
#elif defined(arch_atomic_fetch_sub)
	return arch_atomic_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic_fetch_sub_relaxed"
#endif
}

/**
 * raw_atomic_inc() - atomic increment with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_inc(atomic_t *v)
{
#if defined(arch_atomic_inc)
	arch_atomic_inc(v);
#else
	raw_atomic_add(1, v);
#endif
}

/**
 * raw_atomic_inc_return() - atomic increment with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_inc_return(atomic_t *v)
{
#if defined(arch_atomic_inc_return)
	return arch_atomic_inc_return(v);
#elif defined(arch_atomic_inc_return_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_inc_return_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic_add_return(1, v);
#endif
}
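
/*
 * Illustrative use: raw_atomic_inc_return() suits callers that need the
 * increment and the resulting value with full ordering, e.g. a hypothetical
 * ticket counter:
 *
 *	int ticket = raw_atomic_inc_return(&next_ticket) - 1;	// 0-based
 */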

/**
 * raw_atomic_inc_return_acquire() - atomic increment with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_inc_return_acquire(atomic_t *v)
{
#if defined(arch_atomic_inc_return_acquire)
	return arch_atomic_inc_return_acquire(v);
#elif defined(arch_atomic_inc_return_relaxed)
	int ret = arch_atomic_inc_return_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_inc_return)
	return arch_atomic_inc_return(v);
#else
	return raw_atomic_add_return_acquire(1, v);
#endif
}

/**
 * raw_atomic_inc_return_release() - atomic increment with release ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_inc_return_release(atomic_t *v)
{
#if defined(arch_atomic_inc_return_release)
	return arch_atomic_inc_return_release(v);
#elif defined(arch_atomic_inc_return_relaxed)
	__atomic_release_fence();
	return arch_atomic_inc_return_relaxed(v);
#elif defined(arch_atomic_inc_return)
	return arch_atomic_inc_return(v);
#else
	return raw_atomic_add_return_release(1, v);
#endif
}

/**
 * raw_atomic_inc_return_relaxed() - atomic increment with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_inc_return_relaxed(atomic_t *v)
{
#if defined(arch_atomic_inc_return_relaxed)
	return arch_atomic_inc_return_relaxed(v);
#elif defined(arch_atomic_inc_return)
	return arch_atomic_inc_return(v);
#else
	return raw_atomic_add_return_relaxed(1, v);
#endif
}

/**
 * raw_atomic_fetch_inc() - atomic increment with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_inc() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_inc(atomic_t *v)
{
#if defined(arch_atomic_fetch_inc)
	return arch_atomic_fetch_inc(v);
#elif defined(arch_atomic_fetch_inc_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_inc_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic_fetch_add(1, v);
#endif
}

/**
 * raw_atomic_fetch_inc_acquire() - atomic increment with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_inc_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_inc_acquire(atomic_t *v)
{
#if defined(arch_atomic_fetch_inc_acquire)
	return arch_atomic_fetch_inc_acquire(v);
#elif defined(arch_atomic_fetch_inc_relaxed)
	int ret = arch_atomic_fetch_inc_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_inc)
	return arch_atomic_fetch_inc(v);
#else
	return raw_atomic_fetch_add_acquire(1, v);
#endif
}

/**
 * raw_atomic_fetch_inc_release() - atomic increment with release ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_inc_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_inc_release(atomic_t *v)
{
#if defined(arch_atomic_fetch_inc_release)
	return arch_atomic_fetch_inc_release(v);
#elif defined(arch_atomic_fetch_inc_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_inc_relaxed(v);
#elif defined(arch_atomic_fetch_inc)
	return arch_atomic_fetch_inc(v);
#else
	return raw_atomic_fetch_add_release(1, v);
#endif
}

/**
 * raw_atomic_fetch_inc_relaxed() - atomic increment with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_inc_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_inc_relaxed(atomic_t *v)
{
#if defined(arch_atomic_fetch_inc_relaxed)
	return arch_atomic_fetch_inc_relaxed(v);
#elif defined(arch_atomic_fetch_inc)
	return arch_atomic_fetch_inc(v);
#else
	return raw_atomic_fetch_add_relaxed(1, v);
#endif
}

/**
 * raw_atomic_dec() - atomic decrement with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_dec(atomic_t *v)
{
#if defined(arch_atomic_dec)
	arch_atomic_dec(v);
#else
	raw_atomic_sub(1, v);
#endif
}

/**
 * raw_atomic_dec_return() - atomic decrement with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_dec_return(atomic_t *v)
{
#if defined(arch_atomic_dec_return)
	return arch_atomic_dec_return(v);
#elif defined(arch_atomic_dec_return_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_dec_return_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic_sub_return(1, v);
#endif
}
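
/*
 * Illustrative use: detecting the final drop of a hypothetical reference
 * count (a plain sketch; in-kernel refcounting should normally go through
 * refcount_t rather than raw atomics):
 *
 *	if (raw_atomic_dec_return(&obj->users) == 0)
 *		hypothetical_free(obj);
 */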

/**
 * raw_atomic_dec_return_acquire() - atomic decrement with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_dec_return_acquire(atomic_t *v)
{
#if defined(arch_atomic_dec_return_acquire)
	return arch_atomic_dec_return_acquire(v);
#elif defined(arch_atomic_dec_return_relaxed)
	int ret = arch_atomic_dec_return_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_dec_return)
	return arch_atomic_dec_return(v);
#else
	return raw_atomic_sub_return_acquire(1, v);
#endif
}

/**
 * raw_atomic_dec_return_release() - atomic decrement with release ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_dec_return_release(atomic_t *v)
{
#if defined(arch_atomic_dec_return_release)
	return arch_atomic_dec_return_release(v);
#elif defined(arch_atomic_dec_return_relaxed)
	__atomic_release_fence();
	return arch_atomic_dec_return_relaxed(v);
#elif defined(arch_atomic_dec_return)
	return arch_atomic_dec_return(v);
#else
	return raw_atomic_sub_return_release(1, v);
#endif
}

/**
 * raw_atomic_dec_return_relaxed() - atomic decrement with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_dec_return_relaxed(atomic_t *v)
{
#if defined(arch_atomic_dec_return_relaxed)
	return arch_atomic_dec_return_relaxed(v);
#elif defined(arch_atomic_dec_return)
	return arch_atomic_dec_return(v);
#else
	return raw_atomic_sub_return_relaxed(1, v);
#endif
}

/**
 * raw_atomic_fetch_dec() - atomic decrement with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_dec() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_dec(atomic_t *v)
{
#if defined(arch_atomic_fetch_dec)
	return arch_atomic_fetch_dec(v);
#elif defined(arch_atomic_fetch_dec_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_dec_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic_fetch_sub(1, v);
#endif
}

/**
 * raw_atomic_fetch_dec_acquire() - atomic decrement with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_dec_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_dec_acquire(atomic_t *v)
{
#if defined(arch_atomic_fetch_dec_acquire)
	return arch_atomic_fetch_dec_acquire(v);
#elif defined(arch_atomic_fetch_dec_relaxed)
	int ret = arch_atomic_fetch_dec_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_dec)
	return arch_atomic_fetch_dec(v);
#else
	return raw_atomic_fetch_sub_acquire(1, v);
#endif
}

/**
 * raw_atomic_fetch_dec_release() - atomic decrement with release ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_dec_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_dec_release(atomic_t *v)
{
#if defined(arch_atomic_fetch_dec_release)
	return arch_atomic_fetch_dec_release(v);
#elif defined(arch_atomic_fetch_dec_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_dec_relaxed(v);
#elif defined(arch_atomic_fetch_dec)
	return arch_atomic_fetch_dec(v);
#else
	return raw_atomic_fetch_sub_release(1, v);
#endif
}

/**
 * raw_atomic_fetch_dec_relaxed() - atomic decrement with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_dec_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_dec_relaxed(atomic_t *v)
{
#if defined(arch_atomic_fetch_dec_relaxed)
	return arch_atomic_fetch_dec_relaxed(v);
#elif defined(arch_atomic_fetch_dec)
	return arch_atomic_fetch_dec(v);
#else
	return raw_atomic_fetch_sub_relaxed(1, v);
#endif
}

/**
 * raw_atomic_and() - atomic bitwise AND with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_and() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_and(int i, atomic_t *v)
{
	arch_atomic_and(i, v);
}

/**
 * raw_atomic_fetch_and() - atomic bitwise AND with full ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_and() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_and(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_and)
	return arch_atomic_fetch_and(i, v);
#elif defined(arch_atomic_fetch_and_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_and_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_fetch_and"
#endif
}

/**
 * raw_atomic_fetch_and_acquire() - atomic bitwise AND with acquire ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_and_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_and_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_and_acquire)
	return arch_atomic_fetch_and_acquire(i, v);
#elif defined(arch_atomic_fetch_and_relaxed)
	int ret = arch_atomic_fetch_and_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_and)
	return arch_atomic_fetch_and(i, v);
#else
#error "Unable to define raw_atomic_fetch_and_acquire"
#endif
}

/**
 * raw_atomic_fetch_and_release() - atomic bitwise AND with release ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_and_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_and_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_and_release)
	return arch_atomic_fetch_and_release(i, v);
#elif defined(arch_atomic_fetch_and_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_and_relaxed(i, v);
#elif defined(arch_atomic_fetch_and)
	return arch_atomic_fetch_and(i, v);
#else
#error "Unable to define raw_atomic_fetch_and_release"
#endif
}

/**
 * raw_atomic_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_and_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_and_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_and_relaxed)
	return arch_atomic_fetch_and_relaxed(i, v);
#elif defined(arch_atomic_fetch_and)
	return arch_atomic_fetch_and(i, v);
#else
#error "Unable to define raw_atomic_fetch_and_relaxed"
#endif
}

/**
 * raw_atomic_andnot() - atomic bitwise AND NOT with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_andnot() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_andnot(int i, atomic_t *v)
{
#if defined(arch_atomic_andnot)
	arch_atomic_andnot(i, v);
#else
	raw_atomic_and(~i, v);
#endif
}

/**
 * raw_atomic_fetch_andnot() - atomic bitwise AND NOT with full ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & ~@i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_andnot() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_andnot(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_andnot)
	return arch_atomic_fetch_andnot(i, v);
#elif defined(arch_atomic_fetch_andnot_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_andnot_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic_fetch_and(~i, v);
#endif
}
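
/*
 * Illustrative use: fetch_andnot() clears bits and reports the previous
 * value, so the caller can tell whether it was the one to clear a flag
 * (FLAG_PENDING and state are hypothetical):
 *
 *	if (raw_atomic_fetch_andnot(FLAG_PENDING, &state) & FLAG_PENDING)
 *		;	// this caller observed and cleared the pending bit
 */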
37f8173d 1581
ad811070
MR
1582/**
1583 * raw_atomic_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
1584 * @i: int value
1585 * @v: pointer to atomic_t
1586 *
1587 * Atomically updates @v to (@v & ~@i) with acquire ordering.
1588 *
1589 * Safe to use in noinstr code; prefer atomic_fetch_andnot_acquire() elsewhere.
1590 *
1591 * Return: The original value of @v.
1592 */
9257959a
MR
1593static __always_inline int
1594raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
1595{
1d78814d
MR
1596#if defined(arch_atomic_fetch_andnot_acquire)
1597 return arch_atomic_fetch_andnot_acquire(i, v);
1598#elif defined(arch_atomic_fetch_andnot_relaxed)
9257959a
MR
1599 int ret = arch_atomic_fetch_andnot_relaxed(i, v);
1600 __atomic_acquire_fence();
1601 return ret;
9257959a 1602#elif defined(arch_atomic_fetch_andnot)
1d78814d 1603 return arch_atomic_fetch_andnot(i, v);
9257959a 1604#else
9257959a 1605 return raw_atomic_fetch_and_acquire(~i, v);
37f8173d 1606#endif
1d78814d 1607}
37f8173d 1608
ad811070
MR
1609/**
1610 * raw_atomic_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
1611 * @i: int value
1612 * @v: pointer to atomic_t
1613 *
1614 * Atomically updates @v to (@v & ~@i) with release ordering.
1615 *
1616 * Safe to use in noinstr code; prefer atomic_fetch_andnot_release() elsewhere.
1617 *
1618 * Return: The original value of @v.
1619 */
37f8173d 1620static __always_inline int
9257959a 1621raw_atomic_fetch_andnot_release(int i, atomic_t *v)
37f8173d 1622{
1d78814d
MR
1623#if defined(arch_atomic_fetch_andnot_release)
1624 return arch_atomic_fetch_andnot_release(i, v);
1625#elif defined(arch_atomic_fetch_andnot_relaxed)
9257959a
MR
1626 __atomic_release_fence();
1627 return arch_atomic_fetch_andnot_relaxed(i, v);
9257959a 1628#elif defined(arch_atomic_fetch_andnot)
1d78814d 1629 return arch_atomic_fetch_andnot(i, v);
9257959a 1630#else
9257959a 1631 return raw_atomic_fetch_and_release(~i, v);
37f8173d 1632#endif
1d78814d 1633}
37f8173d 1634
ad811070
MR
1635/**
1636 * raw_atomic_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
1637 * @i: int value
1638 * @v: pointer to atomic_t
1639 *
1640 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
1641 *
1642 * Safe to use in noinstr code; prefer atomic_fetch_andnot_relaxed() elsewhere.
1643 *
1644 * Return: The original value of @v.
1645 */
37f8173d 1646static __always_inline int
9257959a 1647raw_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
37f8173d 1648{
1d78814d
MR
1649#if defined(arch_atomic_fetch_andnot_relaxed)
1650 return arch_atomic_fetch_andnot_relaxed(i, v);
1651#elif defined(arch_atomic_fetch_andnot)
1652 return arch_atomic_fetch_andnot(i, v);
1653#else
9257959a 1654 return raw_atomic_fetch_and_relaxed(~i, v);
37f8173d 1655#endif
1d78814d 1656}
37f8173d 1657
ad811070
MR
1658/**
1659 * raw_atomic_or() - atomic bitwise OR with relaxed ordering
1660 * @i: int value
1661 * @v: pointer to atomic_t
1662 *
1663 * Atomically updates @v to (@v | @i) with relaxed ordering.
1664 *
1665 * Safe to use in noinstr code; prefer atomic_or() elsewhere.
1666 *
1667 * Return: Nothing.
1668 */
1d78814d
MR
1669static __always_inline void
1670raw_atomic_or(int i, atomic_t *v)
1671{
1672 arch_atomic_or(i, v);
1673}
37f8173d 1674
ad811070
MR
1675/**
1676 * raw_atomic_fetch_or() - atomic bitwise OR with full ordering
1677 * @i: int value
1678 * @v: pointer to atomic_t
1679 *
1680 * Atomically updates @v to (@v | @i) with full ordering.
1681 *
1682 * Safe to use in noinstr code; prefer atomic_fetch_or() elsewhere.
1683 *
1684 * Return: The original value of @v.
1685 */
37f8173d 1686static __always_inline int
9257959a 1687raw_atomic_fetch_or(int i, atomic_t *v)
37f8173d 1688{
1d78814d
MR
1689#if defined(arch_atomic_fetch_or)
1690 return arch_atomic_fetch_or(i, v);
1691#elif defined(arch_atomic_fetch_or_relaxed)
9257959a
MR
1692 int ret;
1693 __atomic_pre_full_fence();
1694 ret = arch_atomic_fetch_or_relaxed(i, v);
1695 __atomic_post_full_fence();
37f8173d 1696 return ret;
9257959a
MR
1697#else
1698#error "Unable to define raw_atomic_fetch_or"
37f8173d 1699#endif
1d78814d 1700}
37f8173d 1701
ad811070
MR
1702/**
1703 * raw_atomic_fetch_or_acquire() - atomic bitwise OR with acquire ordering
1704 * @i: int value
1705 * @v: pointer to atomic_t
1706 *
1707 * Atomically updates @v to (@v | @i) with acquire ordering.
1708 *
1709 * Safe to use in noinstr code; prefer atomic_fetch_or_acquire() elsewhere.
1710 *
1711 * Return: The original value of @v.
1712 */
37f8173d 1713static __always_inline int
9257959a 1714raw_atomic_fetch_or_acquire(int i, atomic_t *v)
37f8173d 1715{
1d78814d
MR
1716#if defined(arch_atomic_fetch_or_acquire)
1717 return arch_atomic_fetch_or_acquire(i, v);
1718#elif defined(arch_atomic_fetch_or_relaxed)
37f8173d
PZ
1719 int ret = arch_atomic_fetch_or_relaxed(i, v);
1720 __atomic_acquire_fence();
1721 return ret;
9257959a 1722#elif defined(arch_atomic_fetch_or)
1d78814d 1723 return arch_atomic_fetch_or(i, v);
9257959a
MR
1724#else
1725#error "Unable to define raw_atomic_fetch_or_acquire"
37f8173d 1726#endif
1d78814d 1727}
37f8173d 1728
ad811070
MR
1729/**
1730 * raw_atomic_fetch_or_release() - atomic bitwise OR with release ordering
1731 * @i: int value
1732 * @v: pointer to atomic_t
1733 *
1734 * Atomically updates @v to (@v | @i) with release ordering.
1735 *
1736 * Safe to use in noinstr code; prefer atomic_fetch_or_release() elsewhere.
1737 *
1738 * Return: The original value of @v.
1739 */
37f8173d 1740static __always_inline int
9257959a 1741raw_atomic_fetch_or_release(int i, atomic_t *v)
37f8173d 1742{
1d78814d
MR
1743#if defined(arch_atomic_fetch_or_release)
1744 return arch_atomic_fetch_or_release(i, v);
1745#elif defined(arch_atomic_fetch_or_relaxed)
37f8173d
PZ
1746 __atomic_release_fence();
1747 return arch_atomic_fetch_or_relaxed(i, v);
9257959a 1748#elif defined(arch_atomic_fetch_or)
1d78814d 1749 return arch_atomic_fetch_or(i, v);
9257959a
MR
1750#else
1751#error "Unable to define raw_atomic_fetch_or_release"
37f8173d 1752#endif
1d78814d 1753}
37f8173d 1754
ad811070
MR
1755/**
1756 * raw_atomic_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
1757 * @i: int value
1758 * @v: pointer to atomic_t
1759 *
1760 * Atomically updates @v to (@v | @i) with relaxed ordering.
1761 *
1762 * Safe to use in noinstr code; prefer atomic_fetch_or_relaxed() elsewhere.
1763 *
1764 * Return: The original value of @v.
1765 */
1d78814d
MR
1766static __always_inline int
1767raw_atomic_fetch_or_relaxed(int i, atomic_t *v)
1768{
9257959a 1769#if defined(arch_atomic_fetch_or_relaxed)
1d78814d 1770 return arch_atomic_fetch_or_relaxed(i, v);
9257959a 1771#elif defined(arch_atomic_fetch_or)
1d78814d 1772 return arch_atomic_fetch_or(i, v);
9257959a
MR
1773#else
1774#error "Unable to define raw_atomic_fetch_or_relaxed"
1775#endif
1d78814d 1776}
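
/*
 * Illustrative sketch only, not emitted by the generator: a test-and-set
 * style helper built on raw_atomic_fetch_or().  The helper name and the
 * EXAMPLE_FLAG_PENDING bit are hypothetical.
 */
#define EXAMPLE_FLAG_PENDING 0x2

static __always_inline bool
example_try_mark_pending(atomic_t *v)
{
        /* True only for the caller that transitioned the bit from 0 to 1. */
        return !(raw_atomic_fetch_or(EXAMPLE_FLAG_PENDING, v) & EXAMPLE_FLAG_PENDING);
}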
9257959a 1777
ad811070
MR
1778/**
1779 * raw_atomic_xor() - atomic bitwise XOR with relaxed ordering
1780 * @i: int value
1781 * @v: pointer to atomic_t
1782 *
1783 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
1784 *
1785 * Safe to use in noinstr code; prefer atomic_xor() elsewhere.
1786 *
1787 * Return: Nothing.
1788 */
1d78814d
MR
1789static __always_inline void
1790raw_atomic_xor(int i, atomic_t *v)
1791{
1792 arch_atomic_xor(i, v);
1793}
9257959a 1794
ad811070
MR
1795/**
1796 * raw_atomic_fetch_xor() - atomic bitwise XOR with full ordering
1797 * @i: int value
1798 * @v: pointer to atomic_t
1799 *
1800 * Atomically updates @v to (@v ^ @i) with full ordering.
1801 *
1802 * Safe to use in noinstr code; prefer atomic_fetch_xor() elsewhere.
1803 *
1804 * Return: The original value of @v.
1805 */
37f8173d 1806static __always_inline int
9257959a 1807raw_atomic_fetch_xor(int i, atomic_t *v)
37f8173d 1808{
1d78814d
MR
1809#if defined(arch_atomic_fetch_xor)
1810 return arch_atomic_fetch_xor(i, v);
1811#elif defined(arch_atomic_fetch_xor_relaxed)
37f8173d
PZ
1812 int ret;
1813 __atomic_pre_full_fence();
9257959a 1814 ret = arch_atomic_fetch_xor_relaxed(i, v);
37f8173d
PZ
1815 __atomic_post_full_fence();
1816 return ret;
9257959a
MR
1817#else
1818#error "Unable to define raw_atomic_fetch_xor"
37f8173d 1819#endif
1d78814d 1820}
37f8173d 1821
ad811070
MR
1822/**
1823 * raw_atomic_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
1824 * @i: int value
1825 * @v: pointer to atomic_t
1826 *
1827 * Atomically updates @v to (@v ^ @i) with acquire ordering.
1828 *
1829 * Safe to use in noinstr code; prefer atomic_fetch_xor_acquire() elsewhere.
1830 *
1831 * Return: The original value of @v.
1832 */
37f8173d 1833static __always_inline int
9257959a 1834raw_atomic_fetch_xor_acquire(int i, atomic_t *v)
37f8173d 1835{
1d78814d
MR
1836#if defined(arch_atomic_fetch_xor_acquire)
1837 return arch_atomic_fetch_xor_acquire(i, v);
1838#elif defined(arch_atomic_fetch_xor_relaxed)
37f8173d
PZ
1839 int ret = arch_atomic_fetch_xor_relaxed(i, v);
1840 __atomic_acquire_fence();
1841 return ret;
9257959a 1842#elif defined(arch_atomic_fetch_xor)
1d78814d 1843 return arch_atomic_fetch_xor(i, v);
9257959a
MR
1844#else
1845#error "Unable to define raw_atomic_fetch_xor_acquire"
37f8173d 1846#endif
1d78814d 1847}
37f8173d 1848
ad811070
MR
1849/**
1850 * raw_atomic_fetch_xor_release() - atomic bitwise XOR with release ordering
1851 * @i: int value
1852 * @v: pointer to atomic_t
1853 *
1854 * Atomically updates @v to (@v ^ @i) with release ordering.
1855 *
1856 * Safe to use in noinstr code; prefer atomic_fetch_xor_release() elsewhere.
1857 *
1858 * Return: The original value of @v.
1859 */
37f8173d 1860static __always_inline int
9257959a 1861raw_atomic_fetch_xor_release(int i, atomic_t *v)
37f8173d 1862{
1d78814d
MR
1863#if defined(arch_atomic_fetch_xor_release)
1864 return arch_atomic_fetch_xor_release(i, v);
1865#elif defined(arch_atomic_fetch_xor_relaxed)
37f8173d
PZ
1866 __atomic_release_fence();
1867 return arch_atomic_fetch_xor_relaxed(i, v);
9257959a 1868#elif defined(arch_atomic_fetch_xor)
1d78814d 1869 return arch_atomic_fetch_xor(i, v);
9257959a
MR
1870#else
1871#error "Unable to define raw_atomic_fetch_xor_release"
37f8173d 1872#endif
1d78814d 1873}
37f8173d 1874
ad811070
MR
1875/**
1876 * raw_atomic_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
1877 * @i: int value
1878 * @v: pointer to atomic_t
1879 *
1880 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
1881 *
1882 * Safe to use in noinstr code; prefer atomic_fetch_xor_relaxed() elsewhere.
1883 *
1884 * Return: The original value of @v.
1885 */
1d78814d
MR
1886static __always_inline int
1887raw_atomic_fetch_xor_relaxed(int i, atomic_t *v)
1888{
9257959a 1889#if defined(arch_atomic_fetch_xor_relaxed)
1d78814d 1890 return arch_atomic_fetch_xor_relaxed(i, v);
9257959a 1891#elif defined(arch_atomic_fetch_xor)
1d78814d 1892 return arch_atomic_fetch_xor(i, v);
9257959a
MR
1893#else
1894#error "Unable to define raw_atomic_fetch_xor_relaxed"
1895#endif
1d78814d 1896}
9257959a 1897
ad811070
MR
1898/**
1899 * raw_atomic_xchg() - atomic exchange with full ordering
1900 * @v: pointer to atomic_t
1901 * @new: int value to assign
1902 *
1903 * Atomically updates @v to @new with full ordering.
1904 *
1905 * Safe to use in noinstr code; prefer atomic_xchg() elsewhere.
1906 *
1907 * Return: The original value of @v.
1908 */
37f8173d 1909static __always_inline int
1d78814d 1910raw_atomic_xchg(atomic_t *v, int new)
37f8173d 1911{
1d78814d
MR
1912#if defined(arch_atomic_xchg)
1913 return arch_atomic_xchg(v, new);
1914#elif defined(arch_atomic_xchg_relaxed)
37f8173d
PZ
1915 int ret;
1916 __atomic_pre_full_fence();
1d78814d 1917 ret = arch_atomic_xchg_relaxed(v, new);
37f8173d
PZ
1918 __atomic_post_full_fence();
1919 return ret;
9257959a 1920#else
9257959a 1921 return raw_xchg(&v->counter, new);
d12157ef 1922#endif
1d78814d 1923}
d12157ef 1924
ad811070
MR
1925/**
1926 * raw_atomic_xchg_acquire() - atomic exchange with acquire ordering
1927 * @v: pointer to atomic_t
1928 * @new: int value to assign
1929 *
1930 * Atomically updates @v to @new with acquire ordering.
1931 *
1932 * Safe to use in noinstr code; prefer atomic_xchg_acquire() elsewhere.
1933 *
1934 * Return: The original value of @v.
1935 */
d12157ef 1936static __always_inline int
1d78814d 1937raw_atomic_xchg_acquire(atomic_t *v, int new)
d12157ef 1938{
1d78814d
MR
1939#if defined(arch_atomic_xchg_acquire)
1940 return arch_atomic_xchg_acquire(v, new);
1941#elif defined(arch_atomic_xchg_relaxed)
1942 int ret = arch_atomic_xchg_relaxed(v, new);
9257959a
MR
1943 __atomic_acquire_fence();
1944 return ret;
9257959a 1945#elif defined(arch_atomic_xchg)
1d78814d 1946 return arch_atomic_xchg(v, new);
9257959a 1947#else
9257959a 1948 return raw_xchg_acquire(&v->counter, new);
d12157ef 1949#endif
1d78814d 1950}
d12157ef 1951
ad811070
MR
1952/**
1953 * raw_atomic_xchg_release() - atomic exchange with release ordering
1954 * @v: pointer to atomic_t
1955 * @new: int value to assign
1956 *
1957 * Atomically updates @v to @new with release ordering.
1958 *
1959 * Safe to use in noinstr code; prefer atomic_xchg_release() elsewhere.
1960 *
1961 * Return: The original value of @v.
1962 */
d12157ef 1963static __always_inline int
1d78814d 1964raw_atomic_xchg_release(atomic_t *v, int new)
d12157ef 1965{
1d78814d
MR
1966#if defined(arch_atomic_xchg_release)
1967 return arch_atomic_xchg_release(v, new);
1968#elif defined(arch_atomic_xchg_relaxed)
9257959a 1969 __atomic_release_fence();
1d78814d 1970 return arch_atomic_xchg_relaxed(v, new);
9257959a 1971#elif defined(arch_atomic_xchg)
1d78814d 1972 return arch_atomic_xchg(v, new);
9257959a 1973#else
9257959a 1974 return raw_xchg_release(&v->counter, new);
37f8173d 1975#endif
1d78814d 1976}
37f8173d 1977
ad811070
MR
1978/**
1979 * raw_atomic_xchg_relaxed() - atomic exchange with relaxed ordering
1980 * @v: pointer to atomic_t
1981 * @new: int value to assign
1982 *
1983 * Atomically updates @v to @new with relaxed ordering.
1984 *
1985 * Safe to use in noinstr code; prefer atomic_xchg_relaxed() elsewhere.
1986 *
1987 * Return: The original value of @v.
1988 */
37f8173d 1989static __always_inline int
9257959a 1990raw_atomic_xchg_relaxed(atomic_t *v, int new)
37f8173d 1991{
1d78814d
MR
1992#if defined(arch_atomic_xchg_relaxed)
1993 return arch_atomic_xchg_relaxed(v, new);
1994#elif defined(arch_atomic_xchg)
1995 return arch_atomic_xchg(v, new);
1996#else
9257959a 1997 return raw_xchg_relaxed(&v->counter, new);
37f8173d 1998#endif
1d78814d 1999}
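
/*
 * Illustrative sketch only, not emitted by the generator: raw_atomic_xchg()
 * used to atomically consume a pending value, leaving zero behind.  The
 * helper name is hypothetical.
 */
static __always_inline int
example_consume_pending(atomic_t *pending)
{
        /* Full ordering: the producer's earlier stores are visible to the consumer. */
        return raw_atomic_xchg(pending, 0);
}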
37f8173d 2000
ad811070
MR
2001/**
2002 * raw_atomic_cmpxchg() - atomic compare and exchange with full ordering
2003 * @v: pointer to atomic_t
2004 * @old: int value to compare with
2005 * @new: int value to assign
2006 *
2007 * If (@v == @old), atomically updates @v to @new with full ordering.
6dfee110 2008 * Otherwise, @v is not modified and relaxed ordering is provided.
ad811070
MR
2009 *
2010 * Safe to use in noinstr code; prefer atomic_cmpxchg() elsewhere.
2011 *
2012 * Return: The original value of @v.
2013 */
37f8173d 2014static __always_inline int
9257959a 2015raw_atomic_cmpxchg(atomic_t *v, int old, int new)
37f8173d 2016{
1d78814d
MR
2017#if defined(arch_atomic_cmpxchg)
2018 return arch_atomic_cmpxchg(v, old, new);
2019#elif defined(arch_atomic_cmpxchg_relaxed)
37f8173d
PZ
2020 int ret;
2021 __atomic_pre_full_fence();
9257959a 2022 ret = arch_atomic_cmpxchg_relaxed(v, old, new);
37f8173d
PZ
2023 __atomic_post_full_fence();
2024 return ret;
9257959a 2025#else
9257959a 2026 return raw_cmpxchg(&v->counter, old, new);
d12157ef 2027#endif
1d78814d 2028}
d12157ef 2029
ad811070
MR
2030/**
2031 * raw_atomic_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
2032 * @v: pointer to atomic_t
2033 * @old: int value to compare with
2034 * @new: int value to assign
2035 *
2036 * If (@v == @old), atomically updates @v to @new with acquire ordering.
6dfee110 2037 * Otherwise, @v is not modified and relaxed ordering is provided.
ad811070
MR
2038 *
2039 * Safe to use in noinstr code; prefer atomic_cmpxchg_acquire() elsewhere.
2040 *
2041 * Return: The original value of @v.
2042 */
d12157ef 2043static __always_inline int
9257959a 2044raw_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
d12157ef 2045{
1d78814d
MR
2046#if defined(arch_atomic_cmpxchg_acquire)
2047 return arch_atomic_cmpxchg_acquire(v, old, new);
2048#elif defined(arch_atomic_cmpxchg_relaxed)
9257959a
MR
2049 int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
2050 __atomic_acquire_fence();
2051 return ret;
9257959a 2052#elif defined(arch_atomic_cmpxchg)
1d78814d 2053 return arch_atomic_cmpxchg(v, old, new);
9257959a 2054#else
9257959a 2055 return raw_cmpxchg_acquire(&v->counter, old, new);
d12157ef 2056#endif
1d78814d 2057}
d12157ef 2058
ad811070
MR
2059/**
2060 * raw_atomic_cmpxchg_release() - atomic compare and exchange with release ordering
2061 * @v: pointer to atomic_t
2062 * @old: int value to compare with
2063 * @new: int value to assign
2064 *
2065 * If (@v == @old), atomically updates @v to @new with release ordering.
6dfee110 2066 * Otherwise, @v is not modified and relaxed ordering is provided.
ad811070
MR
2067 *
2068 * Safe to use in noinstr code; prefer atomic_cmpxchg_release() elsewhere.
2069 *
2070 * Return: The original value of @v.
2071 */
d12157ef 2072static __always_inline int
9257959a 2073raw_atomic_cmpxchg_release(atomic_t *v, int old, int new)
d12157ef 2074{
1d78814d
MR
2075#if defined(arch_atomic_cmpxchg_release)
2076 return arch_atomic_cmpxchg_release(v, old, new);
2077#elif defined(arch_atomic_cmpxchg_relaxed)
9257959a
MR
2078 __atomic_release_fence();
2079 return arch_atomic_cmpxchg_relaxed(v, old, new);
9257959a 2080#elif defined(arch_atomic_cmpxchg)
1d78814d 2081 return arch_atomic_cmpxchg(v, old, new);
9257959a 2082#else
9257959a 2083 return raw_cmpxchg_release(&v->counter, old, new);
37f8173d 2084#endif
1d78814d 2085}
37f8173d 2086
ad811070
MR
2087/**
2088 * raw_atomic_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
2089 * @v: pointer to atomic_t
2090 * @old: int value to compare with
2091 * @new: int value to assign
2092 *
2093 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
6dfee110 2094 * Otherwise, @v is not modified and relaxed ordering is provided.
ad811070
MR
2095 *
2096 * Safe to use in noinstr code; prefer atomic_cmpxchg_relaxed() elsewhere.
2097 *
2098 * Return: The original value of @v.
2099 */
37f8173d 2100static __always_inline int
9257959a 2101raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
37f8173d 2102{
1d78814d
MR
2103#if defined(arch_atomic_cmpxchg_relaxed)
2104 return arch_atomic_cmpxchg_relaxed(v, old, new);
2105#elif defined(arch_atomic_cmpxchg)
2106 return arch_atomic_cmpxchg(v, old, new);
2107#else
9257959a 2108 return raw_cmpxchg_relaxed(&v->counter, old, new);
37f8173d 2109#endif
1d78814d 2110}
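
/*
 * Illustrative sketch only, not emitted by the generator: a one-shot claim
 * built on raw_atomic_cmpxchg(), succeeding for exactly one caller when @v
 * starts out at zero.  The helper name is hypothetical.
 */
static __always_inline bool
example_claim_once(atomic_t *v)
{
        /* The old value is returned, so only the 0 -> 1 winner sees 0. */
        return raw_atomic_cmpxchg(v, 0, 1) == 0;
}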
37f8173d 2111
ad811070
MR
2112/**
2113 * raw_atomic_try_cmpxchg() - atomic compare and exchange with full ordering
2114 * @v: pointer to atomic_t
2115 * @old: pointer to int value to compare with
2116 * @new: int value to assign
2117 *
2118 * If (@v == @old), atomically updates @v to @new with full ordering.
6dfee110
MR
2119 * Otherwise, @v is not modified, @old is updated to the current value of @v,
2120 * and relaxed ordering is provided.
ad811070
MR
2121 *
2122 * Safe to use in noinstr code; prefer atomic_try_cmpxchg() elsewhere.
2123 *
2124 * Return: @true if the exchange occurred, @false otherwise.
2125 */
9257959a
MR
2126static __always_inline bool
2127raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
37f8173d 2128{
1d78814d
MR
2129#if defined(arch_atomic_try_cmpxchg)
2130 return arch_atomic_try_cmpxchg(v, old, new);
2131#elif defined(arch_atomic_try_cmpxchg_relaxed)
9257959a 2132 bool ret;
37f8173d 2133 __atomic_pre_full_fence();
9257959a 2134 ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
37f8173d
PZ
2135 __atomic_post_full_fence();
2136 return ret;
9257959a 2137#else
37f8173d 2138 int r, o = *old;
9257959a 2139 r = raw_atomic_cmpxchg(v, o, new);
37f8173d
PZ
2140 if (unlikely(r != o))
2141 *old = r;
2142 return likely(r == o);
37f8173d 2143#endif
1d78814d 2144}
37f8173d 2145
ad811070
MR
2146/**
2147 * raw_atomic_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
2148 * @v: pointer to atomic_t
2149 * @old: pointer to int value to compare with
2150 * @new: int value to assign
2151 *
2152 * If (@v == @old), atomically updates @v to @new with acquire ordering.
6dfee110
MR
2153 * Otherwise, @v is not modified, @old is updated to the current value of @v,
2154 * and relaxed ordering is provided.
ad811070
MR
2155 *
2156 * Safe to use in noinstr code; prefer atomic_try_cmpxchg_acquire() elsewhere.
2157 *
2158 * Return: @true if the exchange occurred, @false otherwise.
2159 */
9257959a
MR
2160static __always_inline bool
2161raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
2162{
1d78814d
MR
2163#if defined(arch_atomic_try_cmpxchg_acquire)
2164 return arch_atomic_try_cmpxchg_acquire(v, old, new);
2165#elif defined(arch_atomic_try_cmpxchg_relaxed)
9257959a
MR
2166 bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
2167 __atomic_acquire_fence();
2168 return ret;
9257959a 2169#elif defined(arch_atomic_try_cmpxchg)
1d78814d 2170 return arch_atomic_try_cmpxchg(v, old, new);
9257959a 2171#else
37f8173d 2172 int r, o = *old;
9257959a 2173 r = raw_atomic_cmpxchg_acquire(v, o, new);
37f8173d
PZ
2174 if (unlikely(r != o))
2175 *old = r;
2176 return likely(r == o);
37f8173d 2177#endif
1d78814d 2178}
37f8173d 2179
ad811070
MR
2180/**
2181 * raw_atomic_try_cmpxchg_release() - atomic compare and exchange with release ordering
2182 * @v: pointer to atomic_t
2183 * @old: pointer to int value to compare with
2184 * @new: int value to assign
2185 *
2186 * If (@v == @old), atomically updates @v to @new with release ordering.
6dfee110
MR
2187 * Otherwise, @v is not modified, @old is updated to the current value of @v,
2188 * and relaxed ordering is provided.
ad811070
MR
2189 *
2190 * Safe to use in noinstr code; prefer atomic_try_cmpxchg_release() elsewhere.
2191 *
2192 * Return: @true if the exchange occurred, @false otherwise.
2193 */
37f8173d 2194static __always_inline bool
9257959a
MR
2195raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
2196{
1d78814d
MR
2197#if defined(arch_atomic_try_cmpxchg_release)
2198 return arch_atomic_try_cmpxchg_release(v, old, new);
2199#elif defined(arch_atomic_try_cmpxchg_relaxed)
9257959a
MR
2200 __atomic_release_fence();
2201 return arch_atomic_try_cmpxchg_relaxed(v, old, new);
9257959a 2202#elif defined(arch_atomic_try_cmpxchg)
1d78814d 2203 return arch_atomic_try_cmpxchg(v, old, new);
9257959a 2204#else
37f8173d 2205 int r, o = *old;
9257959a 2206 r = raw_atomic_cmpxchg_release(v, o, new);
37f8173d
PZ
2207 if (unlikely(r != o))
2208 *old = r;
2209 return likely(r == o);
37f8173d 2210#endif
1d78814d 2211}
37f8173d 2212
ad811070
MR
2213/**
2214 * raw_atomic_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
2215 * @v: pointer to atomic_t
2216 * @old: pointer to int value to compare with
2217 * @new: int value to assign
2218 *
2219 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
6dfee110
MR
2220 * Otherwise, @v is not modified, @old is updated to the current value of @v,
2221 * and relaxed ordering is provided.
ad811070
MR
2222 *
2223 * Safe to use in noinstr code; prefer atomic_try_cmpxchg_relaxed() elsewhere.
2224 *
2225 * Return: @true if the exchange occurred, @false otherwise.
2226 */
37f8173d 2227static __always_inline bool
9257959a 2228raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
37f8173d 2229{
1d78814d
MR
2230#if defined(arch_atomic_try_cmpxchg_relaxed)
2231 return arch_atomic_try_cmpxchg_relaxed(v, old, new);
2232#elif defined(arch_atomic_try_cmpxchg)
2233 return arch_atomic_try_cmpxchg(v, old, new);
2234#else
37f8173d 2235 int r, o = *old;
9257959a 2236 r = raw_atomic_cmpxchg_relaxed(v, o, new);
37f8173d
PZ
2237 if (unlikely(r != o))
2238 *old = r;
2239 return likely(r == o);
37f8173d 2240#endif
1d78814d 2241}
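
/*
 * Illustrative sketch only, not emitted by the generator: the usual
 * try_cmpxchg() retry loop, here incrementing @v only while it stays below
 * a caller-supplied limit.  The helper name is hypothetical; the shape is
 * the same one used by the cmpxchg-based fallbacks in this file.
 */
static __always_inline bool
example_inc_below(atomic_t *v, int limit)
{
        int c = raw_atomic_read(v);

        do {
                if (c >= limit)
                        return false;
                /* On failure, try_cmpxchg() refreshes @c with the current value. */
        } while (!raw_atomic_try_cmpxchg(v, &c, c + 1));

        return true;
}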
37f8173d 2242
ad811070
MR
2243/**
2244 * raw_atomic_sub_and_test() - atomic subtract and test if zero with full ordering
2245 * @i: int value to subtract
2246 * @v: pointer to atomic_t
2247 *
2248 * Atomically updates @v to (@v - @i) with full ordering.
2249 *
2250 * Safe to use in noinstr code; prefer atomic_sub_and_test() elsewhere.
2251 *
2252 * Return: @true if the resulting value of @v is zero, @false otherwise.
2253 */
37f8173d 2254static __always_inline bool
9257959a 2255raw_atomic_sub_and_test(int i, atomic_t *v)
37f8173d 2256{
1d78814d
MR
2257#if defined(arch_atomic_sub_and_test)
2258 return arch_atomic_sub_and_test(i, v);
2259#else
9257959a 2260 return raw_atomic_sub_return(i, v) == 0;
37f8173d 2261#endif
1d78814d 2262}
37f8173d 2263
ad811070
MR
2264/**
2265 * raw_atomic_dec_and_test() - atomic decrement and test if zero with full ordering
2266 * @v: pointer to atomic_t
2267 *
2268 * Atomically updates @v to (@v - 1) with full ordering.
2269 *
2270 * Safe to use in noinstr code; prefer atomic_dec_and_test() elsewhere.
2271 *
2272 * Return: @true if the resulting value of @v is zero, @false otherwise.
2273 */
37f8173d 2274static __always_inline bool
9257959a 2275raw_atomic_dec_and_test(atomic_t *v)
37f8173d 2276{
1d78814d
MR
2277#if defined(arch_atomic_dec_and_test)
2278 return arch_atomic_dec_and_test(v);
2279#else
9257959a 2280 return raw_atomic_dec_return(v) == 0;
37f8173d 2281#endif
1d78814d 2282}
37f8173d 2283
ad811070
MR
2284/**
2285 * raw_atomic_inc_and_test() - atomic increment and test if zero with full ordering
2286 * @v: pointer to atomic_t
2287 *
2288 * Atomically updates @v to (@v + 1) with full ordering.
2289 *
2290 * Safe to use in noinstr code; prefer atomic_inc_and_test() elsewhere.
2291 *
2292 * Return: @true if the resulting value of @v is zero, @false otherwise.
2293 */
37f8173d 2294static __always_inline bool
9257959a 2295raw_atomic_inc_and_test(atomic_t *v)
37f8173d 2296{
1d78814d
MR
2297#if defined(arch_atomic_inc_and_test)
2298 return arch_atomic_inc_and_test(v);
2299#else
9257959a 2300 return raw_atomic_inc_return(v) == 0;
37f8173d 2301#endif
1d78814d 2302}
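
/*
 * Illustrative sketch only, not emitted by the generator: a minimal
 * reference-count "put" built on raw_atomic_dec_and_test().  The helper
 * and its release callback are hypothetical; real code would normally use
 * refcount_t rather than a bare atomic_t.
 */
static __always_inline void
example_put(atomic_t *refs, void (*release)(atomic_t *refs))
{
        /* Full ordering ensures prior accesses happen before release() runs. */
        if (raw_atomic_dec_and_test(refs))
                release(refs);
}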
37f8173d 2303
ad811070
MR
2304/**
2305 * raw_atomic_add_negative() - atomic add and test if negative with full ordering
2306 * @i: int value to add
2307 * @v: pointer to atomic_t
2308 *
2309 * Atomically updates @v to (@v + @i) with full ordering.
2310 *
2311 * Safe to use in noinstr code; prefer atomic_add_negative() elsewhere.
2312 *
2313 * Return: @true if the resulting value of @v is negative, @false otherwise.
2314 */
37f8173d 2315static __always_inline bool
9257959a 2316raw_atomic_add_negative(int i, atomic_t *v)
37f8173d 2317{
1d78814d
MR
2318#if defined(arch_atomic_add_negative)
2319 return arch_atomic_add_negative(i, v);
2320#elif defined(arch_atomic_add_negative_relaxed)
9257959a
MR
2321 bool ret;
2322 __atomic_pre_full_fence();
2323 ret = arch_atomic_add_negative_relaxed(i, v);
2324 __atomic_post_full_fence();
2325 return ret;
9257959a 2326#else
9257959a 2327 return raw_atomic_add_return(i, v) < 0;
e5ab9eff 2328#endif
1d78814d 2329}
e5ab9eff 2330
ad811070
MR
2331/**
2332 * raw_atomic_add_negative_acquire() - atomic add and test if negative with acquire ordering
2333 * @i: int value to add
2334 * @v: pointer to atomic_t
2335 *
2336 * Atomically updates @v to (@v + @i) with acquire ordering.
2337 *
2338 * Safe to use in noinstr code; prefer atomic_add_negative_acquire() elsewhere.
2339 *
2340 * Return: @true if the resulting value of @v is negative, @false otherwise.
2341 */
e5ab9eff 2342static __always_inline bool
9257959a 2343raw_atomic_add_negative_acquire(int i, atomic_t *v)
e5ab9eff 2344{
1d78814d
MR
2345#if defined(arch_atomic_add_negative_acquire)
2346 return arch_atomic_add_negative_acquire(i, v);
2347#elif defined(arch_atomic_add_negative_relaxed)
9257959a
MR
2348 bool ret = arch_atomic_add_negative_relaxed(i, v);
2349 __atomic_acquire_fence();
2350 return ret;
9257959a 2351#elif defined(arch_atomic_add_negative)
1d78814d 2352 return arch_atomic_add_negative(i, v);
9257959a 2353#else
9257959a 2354 return raw_atomic_add_return_acquire(i, v) < 0;
e5ab9eff 2355#endif
1d78814d 2356}
e5ab9eff 2357
ad811070
MR
2358/**
2359 * raw_atomic_add_negative_release() - atomic add and test if negative with release ordering
2360 * @i: int value to add
2361 * @v: pointer to atomic_t
2362 *
2363 * Atomically updates @v to (@v + @i) with release ordering.
2364 *
2365 * Safe to use in noinstr code; prefer atomic_add_negative_release() elsewhere.
2366 *
2367 * Return: @true if the resulting value of @v is negative, @false otherwise.
2368 */
e5ab9eff 2369static __always_inline bool
9257959a 2370raw_atomic_add_negative_release(int i, atomic_t *v)
e5ab9eff 2371{
1d78814d
MR
2372#if defined(arch_atomic_add_negative_release)
2373 return arch_atomic_add_negative_release(i, v);
2374#elif defined(arch_atomic_add_negative_relaxed)
9257959a
MR
2375 __atomic_release_fence();
2376 return arch_atomic_add_negative_relaxed(i, v);
9257959a 2377#elif defined(arch_atomic_add_negative)
1d78814d 2378 return arch_atomic_add_negative(i, v);
9257959a 2379#else
9257959a 2380 return raw_atomic_add_return_release(i, v) < 0;
e5ab9eff 2381#endif
1d78814d 2382}
e5ab9eff 2383
ad811070
MR
2384/**
2385 * raw_atomic_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
2386 * @i: int value to add
2387 * @v: pointer to atomic_t
2388 *
2389 * Atomically updates @v to (@v + @i) with relaxed ordering.
2390 *
2391 * Safe to use in noinstr code; prefer atomic_add_negative_relaxed() elsewhere.
2392 *
2393 * Return: @true if the resulting value of @v is negative, @false otherwise.
2394 */
e5ab9eff 2395static __always_inline bool
9257959a 2396raw_atomic_add_negative_relaxed(int i, atomic_t *v)
e5ab9eff 2397{
1d78814d
MR
2398#if defined(arch_atomic_add_negative_relaxed)
2399 return arch_atomic_add_negative_relaxed(i, v);
2400#elif defined(arch_atomic_add_negative)
2401 return arch_atomic_add_negative(i, v);
2402#else
9257959a 2403 return raw_atomic_add_return_relaxed(i, v) < 0;
e5ab9eff 2404#endif
1d78814d 2405}
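
/*
 * Illustrative sketch only, not emitted by the generator: using
 * raw_atomic_add_negative() to notice when a signed budget counter drops
 * below zero after a charge.  The helper name and the convention of
 * charging by adding a negative amount are hypothetical.
 */
static __always_inline bool
example_charge_overcommits(int amount, atomic_t *budget)
{
        /* True once the running balance has gone negative. */
        return raw_atomic_add_negative(-amount, budget);
}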
e5ab9eff 2406
ad811070
MR
2407/**
2408 * raw_atomic_fetch_add_unless() - atomic add unless value with full ordering
2409 * @v: pointer to atomic_t
2410 * @a: int value to add
2411 * @u: int value to compare with
2412 *
2413 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
6dfee110 2414 * Otherwise, @v is not modified and relaxed ordering is provided.
ad811070
MR
2415 *
2416 * Safe to use in noinstr code; prefer atomic_fetch_add_unless() elsewhere.
2417 *
2418 * Return: The original value of @v.
2419 */
37f8173d 2420static __always_inline int
9257959a 2421raw_atomic_fetch_add_unless(atomic_t *v, int a, int u)
37f8173d 2422{
1d78814d
MR
2423#if defined(arch_atomic_fetch_add_unless)
2424 return arch_atomic_fetch_add_unless(v, a, u);
2425#else
9257959a 2426 int c = raw_atomic_read(v);
37f8173d
PZ
2427
2428 do {
2429 if (unlikely(c == u))
2430 break;
9257959a 2431 } while (!raw_atomic_try_cmpxchg(v, &c, c + a));
37f8173d
PZ
2432
2433 return c;
37f8173d 2434#endif
1d78814d 2435}
37f8173d 2436
ad811070
MR
2437/**
2438 * raw_atomic_add_unless() - atomic add unless value with full ordering
2439 * @v: pointer to atomic_t
2440 * @a: int value to add
2441 * @u: int value to compare with
2442 *
2443 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
6dfee110 2444 * Otherwise, @v is not modified and relaxed ordering is provided.
ad811070
MR
2445 *
2446 * Safe to use in noinstr code; prefer atomic_add_unless() elsewhere.
2447 *
2448 * Return: @true if @v was updated, @false otherwise.
2449 */
37f8173d 2450static __always_inline bool
9257959a 2451raw_atomic_add_unless(atomic_t *v, int a, int u)
37f8173d 2452{
1d78814d
MR
2453#if defined(arch_atomic_add_unless)
2454 return arch_atomic_add_unless(v, a, u);
2455#else
9257959a 2456 return raw_atomic_fetch_add_unless(v, a, u) != u;
37f8173d 2457#endif
1d78814d 2458}
37f8173d 2459
ad811070
MR
2460/**
2461 * raw_atomic_inc_not_zero() - atomic increment unless zero with full ordering
2462 * @v: pointer to atomic_t
2463 *
2464 * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
6dfee110 2465 * Otherwise, @v is not modified and relaxed ordering is provided.
ad811070
MR
2466 *
2467 * Safe to use in noinstr code; prefer atomic_inc_not_zero() elsewhere.
2468 *
2469 * Return: @true if @v was updated, @false otherwise.
2470 */
37f8173d 2471static __always_inline bool
9257959a 2472raw_atomic_inc_not_zero(atomic_t *v)
37f8173d 2473{
1d78814d
MR
2474#if defined(arch_atomic_inc_not_zero)
2475 return arch_atomic_inc_not_zero(v);
2476#else
9257959a 2477 return raw_atomic_add_unless(v, 1, 0);
37f8173d 2478#endif
1d78814d 2479}
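
/*
 * Illustrative sketch only, not emitted by the generator: the
 * lookup-then-get pattern that inc_not_zero style operations exist for.
 * The object type and helper name are hypothetical.
 */
struct example_obj {
        atomic_t refs;
};

static __always_inline bool
example_get_unless_dead(struct example_obj *obj)
{
        /* Fails once the final reference has already been dropped. */
        return raw_atomic_inc_not_zero(&obj->refs);
}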
37f8173d 2480
ad811070
MR
2481/**
2482 * raw_atomic_inc_unless_negative() - atomic increment unless negative with full ordering
2483 * @v: pointer to atomic_t
2484 *
2485 * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
6dfee110 2486 * Otherwise, @v is not modified and relaxed ordering is provided.
ad811070
MR
2487 *
2488 * Safe to use in noinstr code; prefer atomic_inc_unless_negative() elsewhere.
2489 *
2490 * Return: @true if @v was updated, @false otherwise.
2491 */
37f8173d 2492static __always_inline bool
9257959a 2493raw_atomic_inc_unless_negative(atomic_t *v)
37f8173d 2494{
1d78814d
MR
2495#if defined(arch_atomic_inc_unless_negative)
2496 return arch_atomic_inc_unless_negative(v);
2497#else
9257959a 2498 int c = raw_atomic_read(v);
37f8173d
PZ
2499
2500 do {
2501 if (unlikely(c < 0))
2502 return false;
9257959a 2503 } while (!raw_atomic_try_cmpxchg(v, &c, c + 1));
37f8173d
PZ
2504
2505 return true;
37f8173d 2506#endif
1d78814d 2507}
37f8173d 2508
ad811070
MR
2509/**
2510 * raw_atomic_dec_unless_positive() - atomic decrement unless positive with full ordering
2511 * @v: pointer to atomic_t
2512 *
2513 * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
6dfee110 2514 * Otherwise, @v is not modified and relaxed ordering is provided.
ad811070
MR
2515 *
2516 * Safe to use in noinstr code; prefer atomic_dec_unless_positive() elsewhere.
2517 *
2518 * Return: @true if @v was updated, @false otherwise.
2519 */
37f8173d 2520static __always_inline bool
9257959a 2521raw_atomic_dec_unless_positive(atomic_t *v)
37f8173d 2522{
1d78814d
MR
2523#if defined(arch_atomic_dec_unless_positive)
2524 return arch_atomic_dec_unless_positive(v);
2525#else
9257959a 2526 int c = raw_atomic_read(v);
37f8173d
PZ
2527
2528 do {
2529 if (unlikely(c > 0))
2530 return false;
9257959a 2531 } while (!raw_atomic_try_cmpxchg(v, &c, c - 1));
37f8173d
PZ
2532
2533 return true;
37f8173d 2534#endif
1d78814d 2535}
37f8173d 2536
ad811070
MR
2537/**
2538 * raw_atomic_dec_if_positive() - atomic decrement if positive with full ordering
2539 * @v: pointer to atomic_t
2540 *
2541 * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
6dfee110 2542 * Otherwise, @v is not modified and relaxed ordering is provided.
ad811070
MR
2543 *
2544 * Safe to use in noinstr code; prefer atomic_dec_if_positive() elsewhere.
2545 *
b33eb50a 2546 * Return: The old value of (@v - 1), regardless of whether @v was updated.
ad811070 2547 */
37f8173d 2548static __always_inline int
9257959a 2549raw_atomic_dec_if_positive(atomic_t *v)
37f8173d 2550{
1d78814d
MR
2551#if defined(arch_atomic_dec_if_positive)
2552 return arch_atomic_dec_if_positive(v);
2553#else
9257959a 2554 int dec, c = raw_atomic_read(v);
37f8173d
PZ
2555
2556 do {
2557 dec = c - 1;
2558 if (unlikely(dec < 0))
2559 break;
9257959a 2560 } while (!raw_atomic_try_cmpxchg(v, &c, dec));
37f8173d
PZ
2561
2562 return dec;
37f8173d 2563#endif
1d78814d 2564}
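
/*
 * Illustrative sketch only, not emitted by the generator: a semaphore-like
 * "try down" built on raw_atomic_dec_if_positive().  The helper name is
 * hypothetical.
 */
static __always_inline bool
example_try_down(atomic_t *count)
{
        /* A negative return means @count was already zero and was left untouched. */
        return raw_atomic_dec_if_positive(count) >= 0;
}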
37f8173d
PZ
2565
2566#ifdef CONFIG_GENERIC_ATOMIC64
2567#include <asm-generic/atomic64.h>
2568#endif
2569
ad811070
MR
2570/**
2571 * raw_atomic64_read() - atomic load with relaxed ordering
2572 * @v: pointer to atomic64_t
2573 *
2574 * Atomically loads the value of @v with relaxed ordering.
2575 *
2576 * Safe to use in noinstr code; prefer atomic64_read() elsewhere.
2577 *
2578 * Return: The value loaded from @v.
2579 */
1d78814d
MR
2580static __always_inline s64
2581raw_atomic64_read(const atomic64_t *v)
2582{
2583 return arch_atomic64_read(v);
2584}
9257959a 2585
ad811070
MR
2586/**
2587 * raw_atomic64_read_acquire() - atomic load with acquire ordering
2588 * @v: pointer to atomic64_t
2589 *
2590 * Atomically loads the value of @v with acquire ordering.
2591 *
2592 * Safe to use in noinstr code; prefer atomic64_read_acquire() elsewhere.
2593 *
2594 * Return: The value loaded from @v.
2595 */
37f8173d 2596static __always_inline s64
9257959a 2597raw_atomic64_read_acquire(const atomic64_t *v)
37f8173d 2598{
1d78814d
MR
2599#if defined(arch_atomic64_read_acquire)
2600 return arch_atomic64_read_acquire(v);
1d78814d 2601#else
dc1b4df0
MR
2602 s64 ret;
2603
2604 if (__native_word(atomic64_t)) {
2605 ret = smp_load_acquire(&(v)->counter);
2606 } else {
9257959a 2607 ret = raw_atomic64_read(v);
dc1b4df0
MR
2608 __atomic_acquire_fence();
2609 }
2610
2611 return ret;
37f8173d 2612#endif
1d78814d 2613}
37f8173d 2614
ad811070
MR
2615/**
2616 * raw_atomic64_set() - atomic set with relaxed ordering
2617 * @v: pointer to atomic64_t
2618 * @i: s64 value to assign
2619 *
2620 * Atomically sets @v to @i with relaxed ordering.
2621 *
2622 * Safe to use in noinstr code; prefer atomic64_set() elsewhere.
2623 *
2624 * Return: Nothing.
2625 */
1d78814d
MR
2626static __always_inline void
2627raw_atomic64_set(atomic64_t *v, s64 i)
2628{
2629 arch_atomic64_set(v, i);
2630}
9257959a 2631
ad811070
MR
2632/**
2633 * raw_atomic64_set_release() - atomic set with release ordering
2634 * @v: pointer to atomic64_t
2635 * @i: s64 value to assign
2636 *
2637 * Atomically sets @v to @i with release ordering.
2638 *
2639 * Safe to use in noinstr code; prefer atomic64_set_release() elsewhere.
2640 *
2641 * Return: Nothing.
2642 */
37f8173d 2643static __always_inline void
9257959a 2644raw_atomic64_set_release(atomic64_t *v, s64 i)
37f8173d 2645{
1d78814d
MR
2646#if defined(arch_atomic64_set_release)
2647 arch_atomic64_set_release(v, i);
1d78814d 2648#else
dc1b4df0
MR
2649 if (__native_word(atomic64_t)) {
2650 smp_store_release(&(v)->counter, i);
2651 } else {
2652 __atomic_release_fence();
9257959a 2653 raw_atomic64_set(v, i);
dc1b4df0 2654 }
37f8173d 2655#endif
1d78814d 2656}
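
/*
 * Illustrative sketch only, not emitted by the generator: pairing
 * raw_atomic64_set_release() with raw_atomic64_read_acquire() to publish
 * and consume a 64-bit generation number.  The helper names are
 * hypothetical.
 */
static __always_inline void
example_publish_gen(atomic64_t *gen, s64 val)
{
        /* Stores made before this call are visible to an acquire reader of @gen. */
        raw_atomic64_set_release(gen, val);
}

static __always_inline s64
example_read_gen(atomic64_t *gen)
{
        /* Loads issued after this call observe the publisher's earlier stores. */
        return raw_atomic64_read_acquire(gen);
}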
37f8173d 2657
ad811070
MR
2658/**
2659 * raw_atomic64_add() - atomic add with relaxed ordering
2660 * @i: s64 value to add
2661 * @v: pointer to atomic64_t
2662 *
2663 * Atomically updates @v to (@v + @i) with relaxed ordering.
2664 *
2665 * Safe to use in noinstr code; prefer atomic64_add() elsewhere.
2666 *
2667 * Return: Nothing.
2668 */
1d78814d
MR
2669static __always_inline void
2670raw_atomic64_add(s64 i, atomic64_t *v)
2671{
2672 arch_atomic64_add(i, v);
2673}
9257959a 2674
ad811070
MR
2675/**
2676 * raw_atomic64_add_return() - atomic add with full ordering
2677 * @i: s64 value to add
2678 * @v: pointer to atomic64_t
2679 *
2680 * Atomically updates @v to (@v + @i) with full ordering.
2681 *
2682 * Safe to use in noinstr code; prefer atomic64_add_return() elsewhere.
2683 *
2684 * Return: The updated value of @v.
2685 */
9257959a
MR
2686static __always_inline s64
2687raw_atomic64_add_return(s64 i, atomic64_t *v)
2688{
1d78814d
MR
2689#if defined(arch_atomic64_add_return)
2690 return arch_atomic64_add_return(i, v);
2691#elif defined(arch_atomic64_add_return_relaxed)
9257959a
MR
2692 s64 ret;
2693 __atomic_pre_full_fence();
2694 ret = arch_atomic64_add_return_relaxed(i, v);
2695 __atomic_post_full_fence();
2696 return ret;
9257959a
MR
2697#else
2698#error "Unable to define raw_atomic64_add_return"
2699#endif
1d78814d 2700}
37f8173d 2701
ad811070
MR
2702/**
2703 * raw_atomic64_add_return_acquire() - atomic add with acquire ordering
2704 * @i: s64 value to add
2705 * @v: pointer to atomic64_t
2706 *
2707 * Atomically updates @v to (@v + @i) with acquire ordering.
2708 *
2709 * Safe to use in noinstr code; prefer atomic64_add_return_acquire() elsewhere.
2710 *
2711 * Return: The updated value of @v.
2712 */
37f8173d 2713static __always_inline s64
9257959a 2714raw_atomic64_add_return_acquire(s64 i, atomic64_t *v)
37f8173d 2715{
1d78814d
MR
2716#if defined(arch_atomic64_add_return_acquire)
2717 return arch_atomic64_add_return_acquire(i, v);
2718#elif defined(arch_atomic64_add_return_relaxed)
37f8173d
PZ
2719 s64 ret = arch_atomic64_add_return_relaxed(i, v);
2720 __atomic_acquire_fence();
2721 return ret;
9257959a 2722#elif defined(arch_atomic64_add_return)
1d78814d 2723 return arch_atomic64_add_return(i, v);
9257959a
MR
2724#else
2725#error "Unable to define raw_atomic64_add_return_acquire"
37f8173d 2726#endif
1d78814d 2727}
37f8173d 2728
ad811070
MR
2729/**
2730 * raw_atomic64_add_return_release() - atomic add with release ordering
2731 * @i: s64 value to add
2732 * @v: pointer to atomic64_t
2733 *
2734 * Atomically updates @v to (@v + @i) with release ordering.
2735 *
2736 * Safe to use in noinstr code; prefer atomic64_add_return_release() elsewhere.
2737 *
2738 * Return: The updated value of @v.
2739 */
37f8173d 2740static __always_inline s64
9257959a 2741raw_atomic64_add_return_release(s64 i, atomic64_t *v)
37f8173d 2742{
1d78814d
MR
2743#if defined(arch_atomic64_add_return_release)
2744 return arch_atomic64_add_return_release(i, v);
2745#elif defined(arch_atomic64_add_return_relaxed)
37f8173d
PZ
2746 __atomic_release_fence();
2747 return arch_atomic64_add_return_relaxed(i, v);
9257959a 2748#elif defined(arch_atomic64_add_return)
1d78814d 2749 return arch_atomic64_add_return(i, v);
9257959a
MR
2750#else
2751#error "Unable to define raw_atomic64_add_return_release"
2752#endif
1d78814d 2753}
9257959a 2754
ad811070
MR
2755/**
2756 * raw_atomic64_add_return_relaxed() - atomic add with relaxed ordering
2757 * @i: s64 value to add
2758 * @v: pointer to atomic64_t
2759 *
2760 * Atomically updates @v to (@v + @i) with relaxed ordering.
2761 *
2762 * Safe to use in noinstr code; prefer atomic64_add_return_relaxed() elsewhere.
2763 *
2764 * Return: The updated value of @v.
2765 */
1d78814d
MR
2766static __always_inline s64
2767raw_atomic64_add_return_relaxed(s64 i, atomic64_t *v)
2768{
9257959a 2769#if defined(arch_atomic64_add_return_relaxed)
1d78814d 2770 return arch_atomic64_add_return_relaxed(i, v);
9257959a 2771#elif defined(arch_atomic64_add_return)
1d78814d 2772 return arch_atomic64_add_return(i, v);
9257959a
MR
2773#else
2774#error "Unable to define raw_atomic64_add_return_relaxed"
37f8173d 2775#endif
1d78814d 2776}
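
/*
 * Illustrative sketch only, not emitted by the generator:
 * raw_atomic64_add_return() used as a 64-bit usage counter with a quota
 * check.  The helper name and quota semantics are hypothetical, and no
 * attempt is made to undo the charge on failure.
 */
static __always_inline bool
example_charge_bytes(atomic64_t *used, s64 nbytes, s64 quota)
{
        /* The returned sum already includes this caller's contribution. */
        return raw_atomic64_add_return(nbytes, used) <= quota;
}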
37f8173d 2777
ad811070
MR
2778/**
2779 * raw_atomic64_fetch_add() - atomic add with full ordering
2780 * @i: s64 value to add
2781 * @v: pointer to atomic64_t
2782 *
2783 * Atomically updates @v to (@v + @i) with full ordering.
2784 *
2785 * Safe to use in noinstr code; prefer atomic64_fetch_add() elsewhere.
2786 *
2787 * Return: The original value of @v.
2788 */
37f8173d 2789static __always_inline s64
9257959a 2790raw_atomic64_fetch_add(s64 i, atomic64_t *v)
37f8173d 2791{
1d78814d
MR
2792#if defined(arch_atomic64_fetch_add)
2793 return arch_atomic64_fetch_add(i, v);
2794#elif defined(arch_atomic64_fetch_add_relaxed)
37f8173d
PZ
2795 s64 ret;
2796 __atomic_pre_full_fence();
9257959a 2797 ret = arch_atomic64_fetch_add_relaxed(i, v);
37f8173d
PZ
2798 __atomic_post_full_fence();
2799 return ret;
9257959a
MR
2800#else
2801#error "Unable to define raw_atomic64_fetch_add"
37f8173d 2802#endif
1d78814d 2803}
37f8173d 2804
ad811070
MR
2805/**
2806 * raw_atomic64_fetch_add_acquire() - atomic add with acquire ordering
2807 * @i: s64 value to add
2808 * @v: pointer to atomic64_t
2809 *
2810 * Atomically updates @v to (@v + @i) with acquire ordering.
2811 *
2812 * Safe to use in noinstr code; prefer atomic64_fetch_add_acquire() elsewhere.
2813 *
2814 * Return: The original value of @v.
2815 */
37f8173d 2816static __always_inline s64
9257959a 2817raw_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
37f8173d 2818{
1d78814d
MR
2819#if defined(arch_atomic64_fetch_add_acquire)
2820 return arch_atomic64_fetch_add_acquire(i, v);
2821#elif defined(arch_atomic64_fetch_add_relaxed)
37f8173d
PZ
2822 s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
2823 __atomic_acquire_fence();
2824 return ret;
9257959a 2825#elif defined(arch_atomic64_fetch_add)
1d78814d 2826 return arch_atomic64_fetch_add(i, v);
9257959a
MR
2827#else
2828#error "Unable to define raw_atomic64_fetch_add_acquire"
37f8173d 2829#endif
1d78814d 2830}
37f8173d 2831
ad811070
MR
2832/**
2833 * raw_atomic64_fetch_add_release() - atomic add with release ordering
2834 * @i: s64 value to add
2835 * @v: pointer to atomic64_t
2836 *
2837 * Atomically updates @v to (@v + @i) with release ordering.
2838 *
2839 * Safe to use in noinstr code; prefer atomic64_fetch_add_release() elsewhere.
2840 *
2841 * Return: The original value of @v.
2842 */
37f8173d 2843static __always_inline s64
9257959a 2844raw_atomic64_fetch_add_release(s64 i, atomic64_t *v)
37f8173d 2845{
1d78814d
MR
2846#if defined(arch_atomic64_fetch_add_release)
2847 return arch_atomic64_fetch_add_release(i, v);
2848#elif defined(arch_atomic64_fetch_add_relaxed)
37f8173d
PZ
2849 __atomic_release_fence();
2850 return arch_atomic64_fetch_add_relaxed(i, v);
9257959a 2851#elif defined(arch_atomic64_fetch_add)
1d78814d 2852 return arch_atomic64_fetch_add(i, v);
9257959a
MR
2853#else
2854#error "Unable to define raw_atomic64_fetch_add_release"
2855#endif
1d78814d 2856}
9257959a 2857
ad811070
MR
2858/**
2859 * raw_atomic64_fetch_add_relaxed() - atomic add with relaxed ordering
2860 * @i: s64 value to add
2861 * @v: pointer to atomic64_t
2862 *
2863 * Atomically updates @v to (@v + @i) with relaxed ordering.
2864 *
2865 * Safe to use in noinstr code; prefer atomic64_fetch_add_relaxed() elsewhere.
2866 *
2867 * Return: The original value of @v.
2868 */
1d78814d
MR
2869static __always_inline s64
2870raw_atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
2871{
9257959a 2872#if defined(arch_atomic64_fetch_add_relaxed)
1d78814d 2873 return arch_atomic64_fetch_add_relaxed(i, v);
9257959a 2874#elif defined(arch_atomic64_fetch_add)
1d78814d 2875 return arch_atomic64_fetch_add(i, v);
9257959a
MR
2876#else
2877#error "Unable to define raw_atomic64_fetch_add_relaxed"
37f8173d 2878#endif
1d78814d 2879}
37f8173d 2880
ad811070
MR
2881/**
2882 * raw_atomic64_sub() - atomic subtract with relaxed ordering
2883 * @i: s64 value to subtract
2884 * @v: pointer to atomic64_t
2885 *
2886 * Atomically updates @v to (@v - @i) with relaxed ordering.
2887 *
2888 * Safe to use in noinstr code; prefer atomic64_sub() elsewhere.
2889 *
2890 * Return: Nothing.
2891 */
1d78814d
MR
2892static __always_inline void
2893raw_atomic64_sub(s64 i, atomic64_t *v)
2894{
2895 arch_atomic64_sub(i, v);
2896}
9257959a 2897
ad811070
MR
2898/**
2899 * raw_atomic64_sub_return() - atomic subtract with full ordering
2900 * @i: s64 value to subtract
2901 * @v: pointer to atomic64_t
2902 *
2903 * Atomically updates @v to (@v - @i) with full ordering.
2904 *
2905 * Safe to use in noinstr code; prefer atomic64_sub_return() elsewhere.
2906 *
2907 * Return: The updated value of @v.
2908 */
37f8173d 2909static __always_inline s64
9257959a 2910raw_atomic64_sub_return(s64 i, atomic64_t *v)
37f8173d 2911{
1d78814d
MR
2912#if defined(arch_atomic64_sub_return)
2913 return arch_atomic64_sub_return(i, v);
2914#elif defined(arch_atomic64_sub_return_relaxed)
37f8173d
PZ
2915 s64 ret;
2916 __atomic_pre_full_fence();
9257959a 2917 ret = arch_atomic64_sub_return_relaxed(i, v);
37f8173d
PZ
2918 __atomic_post_full_fence();
2919 return ret;
9257959a
MR
2920#else
2921#error "Unable to define raw_atomic64_sub_return"
37f8173d 2922#endif
1d78814d 2923}
37f8173d 2924
ad811070
MR
2925/**
2926 * raw_atomic64_sub_return_acquire() - atomic subtract with acquire ordering
2927 * @i: s64 value to subtract
2928 * @v: pointer to atomic64_t
2929 *
2930 * Atomically updates @v to (@v - @i) with acquire ordering.
2931 *
2932 * Safe to use in noinstr code; prefer atomic64_sub_return_acquire() elsewhere.
2933 *
2934 * Return: The updated value of @v.
2935 */
37f8173d 2936static __always_inline s64
9257959a 2937raw_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
37f8173d 2938{
1d78814d
MR
2939#if defined(arch_atomic64_sub_return_acquire)
2940 return arch_atomic64_sub_return_acquire(i, v);
2941#elif defined(arch_atomic64_sub_return_relaxed)
37f8173d
PZ
2942 s64 ret = arch_atomic64_sub_return_relaxed(i, v);
2943 __atomic_acquire_fence();
2944 return ret;
9257959a 2945#elif defined(arch_atomic64_sub_return)
1d78814d 2946 return arch_atomic64_sub_return(i, v);
9257959a
MR
2947#else
2948#error "Unable to define raw_atomic64_sub_return_acquire"
37f8173d 2949#endif
1d78814d 2950}
37f8173d 2951
ad811070
MR
2952/**
2953 * raw_atomic64_sub_return_release() - atomic subtract with release ordering
2954 * @i: s64 value to subtract
2955 * @v: pointer to atomic64_t
2956 *
2957 * Atomically updates @v to (@v - @i) with release ordering.
2958 *
2959 * Safe to use in noinstr code; prefer atomic64_sub_return_release() elsewhere.
2960 *
2961 * Return: The updated value of @v.
2962 */
37f8173d 2963static __always_inline s64
9257959a 2964raw_atomic64_sub_return_release(s64 i, atomic64_t *v)
37f8173d 2965{
1d78814d
MR
2966#if defined(arch_atomic64_sub_return_release)
2967 return arch_atomic64_sub_return_release(i, v);
2968#elif defined(arch_atomic64_sub_return_relaxed)
37f8173d
PZ
2969 __atomic_release_fence();
2970 return arch_atomic64_sub_return_relaxed(i, v);
9257959a 2971#elif defined(arch_atomic64_sub_return)
1d78814d 2972 return arch_atomic64_sub_return(i, v);
9257959a
MR
2973#else
2974#error "Unable to define raw_atomic64_sub_return_release"
2975#endif
1d78814d 2976}
9257959a 2977
ad811070
MR
2978/**
2979 * raw_atomic64_sub_return_relaxed() - atomic subtract with relaxed ordering
2980 * @i: s64 value to subtract
2981 * @v: pointer to atomic64_t
2982 *
2983 * Atomically updates @v to (@v - @i) with relaxed ordering.
2984 *
2985 * Safe to use in noinstr code; prefer atomic64_sub_return_relaxed() elsewhere.
2986 *
2987 * Return: The updated value of @v.
2988 */
1d78814d
MR
2989static __always_inline s64
2990raw_atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
2991{
9257959a 2992#if defined(arch_atomic64_sub_return_relaxed)
1d78814d 2993 return arch_atomic64_sub_return_relaxed(i, v);
9257959a 2994#elif defined(arch_atomic64_sub_return)
1d78814d 2995 return arch_atomic64_sub_return(i, v);
9257959a
MR
2996#else
2997#error "Unable to define raw_atomic64_sub_return_relaxed"
37f8173d 2998#endif
1d78814d 2999}
37f8173d 3000
ad811070
MR
3001/**
3002 * raw_atomic64_fetch_sub() - atomic subtract with full ordering
3003 * @i: s64 value to subtract
3004 * @v: pointer to atomic64_t
3005 *
3006 * Atomically updates @v to (@v - @i) with full ordering.
3007 *
3008 * Safe to use in noinstr code; prefer atomic64_fetch_sub() elsewhere.
3009 *
3010 * Return: The original value of @v.
3011 */
37f8173d 3012static __always_inline s64
9257959a 3013raw_atomic64_fetch_sub(s64 i, atomic64_t *v)
37f8173d 3014{
1d78814d
MR
3015#if defined(arch_atomic64_fetch_sub)
3016 return arch_atomic64_fetch_sub(i, v);
3017#elif defined(arch_atomic64_fetch_sub_relaxed)
37f8173d
PZ
3018 s64 ret;
3019 __atomic_pre_full_fence();
9257959a 3020 ret = arch_atomic64_fetch_sub_relaxed(i, v);
37f8173d
PZ
3021 __atomic_post_full_fence();
3022 return ret;
9257959a
MR
3023#else
3024#error "Unable to define raw_atomic64_fetch_sub"
37f8173d 3025#endif
1d78814d 3026}
37f8173d 3027
ad811070
MR
3028/**
3029 * raw_atomic64_fetch_sub_acquire() - atomic subtract with acquire ordering
3030 * @i: s64 value to subtract
3031 * @v: pointer to atomic64_t
3032 *
3033 * Atomically updates @v to (@v - @i) with acquire ordering.
3034 *
3035 * Safe to use in noinstr code; prefer atomic64_fetch_sub_acquire() elsewhere.
3036 *
3037 * Return: The original value of @v.
3038 */
37f8173d 3039static __always_inline s64
9257959a 3040raw_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
37f8173d 3041{
1d78814d
MR
3042#if defined(arch_atomic64_fetch_sub_acquire)
3043 return arch_atomic64_fetch_sub_acquire(i, v);
3044#elif defined(arch_atomic64_fetch_sub_relaxed)
37f8173d
PZ
3045 s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
3046 __atomic_acquire_fence();
3047 return ret;
9257959a 3048#elif defined(arch_atomic64_fetch_sub)
1d78814d 3049 return arch_atomic64_fetch_sub(i, v);
9257959a
MR
3050#else
3051#error "Unable to define raw_atomic64_fetch_sub_acquire"
37f8173d 3052#endif
1d78814d 3053}
37f8173d 3054
ad811070
MR
3055/**
3056 * raw_atomic64_fetch_sub_release() - atomic subtract with release ordering
3057 * @i: s64 value to subtract
3058 * @v: pointer to atomic64_t
3059 *
3060 * Atomically updates @v to (@v - @i) with release ordering.
3061 *
3062 * Safe to use in noinstr code; prefer atomic64_fetch_sub_release() elsewhere.
3063 *
3064 * Return: The original value of @v.
3065 */
37f8173d 3066static __always_inline s64
9257959a 3067raw_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
37f8173d 3068{
1d78814d
MR
3069#if defined(arch_atomic64_fetch_sub_release)
3070 return arch_atomic64_fetch_sub_release(i, v);
3071#elif defined(arch_atomic64_fetch_sub_relaxed)
37f8173d
PZ
3072 __atomic_release_fence();
3073 return arch_atomic64_fetch_sub_relaxed(i, v);
9257959a 3074#elif defined(arch_atomic64_fetch_sub)
1d78814d 3075 return arch_atomic64_fetch_sub(i, v);
9257959a
MR
3076#else
3077#error "Unable to define raw_atomic64_fetch_sub_release"
37f8173d 3078#endif
1d78814d 3079}
37f8173d 3080
ad811070
MR
3081/**
3082 * raw_atomic64_fetch_sub_relaxed() - atomic subtract with relaxed ordering
3083 * @i: s64 value to subtract
3084 * @v: pointer to atomic64_t
3085 *
3086 * Atomically updates @v to (@v - @i) with relaxed ordering.
3087 *
3088 * Safe to use in noinstr code; prefer atomic64_fetch_sub_relaxed() elsewhere.
3089 *
3090 * Return: The original value of @v.
3091 */
1d78814d
MR
3092static __always_inline s64
3093raw_atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
3094{
9257959a 3095#if defined(arch_atomic64_fetch_sub_relaxed)
1d78814d 3096 return arch_atomic64_fetch_sub_relaxed(i, v);
9257959a 3097#elif defined(arch_atomic64_fetch_sub)
1d78814d 3098 return arch_atomic64_fetch_sub(i, v);
9257959a
MR
3099#else
3100#error "Unable to define raw_atomic64_fetch_sub_relaxed"
37f8173d 3101#endif
1d78814d 3102}
37f8173d 3103
ad811070
MR
3104/**
3105 * raw_atomic64_inc() - atomic increment with relaxed ordering
3106 * @v: pointer to atomic64_t
3107 *
3108 * Atomically updates @v to (@v + 1) with relaxed ordering.
3109 *
3110 * Safe to use in noinstr code; prefer atomic64_inc() elsewhere.
3111 *
3112 * Return: Nothing.
3113 */
37f8173d 3114static __always_inline void
9257959a 3115raw_atomic64_inc(atomic64_t *v)
37f8173d 3116{
1d78814d
MR
3117#if defined(arch_atomic64_inc)
3118 arch_atomic64_inc(v);
3119#else
9257959a 3120 raw_atomic64_add(1, v);
37f8173d 3121#endif
1d78814d 3122}
37f8173d 3123
ad811070
MR
3124/**
3125 * raw_atomic64_inc_return() - atomic increment with full ordering
3126 * @v: pointer to atomic64_t
3127 *
3128 * Atomically updates @v to (@v + 1) with full ordering.
3129 *
3130 * Safe to use in noinstr code; prefer atomic64_inc_return() elsewhere.
3131 *
3132 * Return: The updated value of @v.
3133 */
37f8173d 3134static __always_inline s64
9257959a 3135raw_atomic64_inc_return(atomic64_t *v)
37f8173d 3136{
1d78814d
MR
3137#if defined(arch_atomic64_inc_return)
3138 return arch_atomic64_inc_return(v);
3139#elif defined(arch_atomic64_inc_return_relaxed)
9257959a
MR
3140 s64 ret;
3141 __atomic_pre_full_fence();
3142 ret = arch_atomic64_inc_return_relaxed(v);
3143 __atomic_post_full_fence();
3144 return ret;
9257959a 3145#else
9257959a 3146 return raw_atomic64_add_return(1, v);
37f8173d 3147#endif
1d78814d 3148}
37f8173d 3149
ad811070
MR
3150/**
3151 * raw_atomic64_inc_return_acquire() - atomic increment with acquire ordering
3152 * @v: pointer to atomic64_t
3153 *
3154 * Atomically updates @v to (@v + 1) with acquire ordering.
3155 *
3156 * Safe to use in noinstr code; prefer atomic64_inc_return_acquire() elsewhere.
3157 *
3158 * Return: The updated value of @v.
3159 */
37f8173d 3160static __always_inline s64
9257959a 3161raw_atomic64_inc_return_acquire(atomic64_t *v)
37f8173d 3162{
1d78814d
MR
3163#if defined(arch_atomic64_inc_return_acquire)
3164 return arch_atomic64_inc_return_acquire(v);
3165#elif defined(arch_atomic64_inc_return_relaxed)
9257959a
MR
3166 s64 ret = arch_atomic64_inc_return_relaxed(v);
3167 __atomic_acquire_fence();
3168 return ret;
9257959a 3169#elif defined(arch_atomic64_inc_return)
1d78814d 3170 return arch_atomic64_inc_return(v);
9257959a 3171#else
9257959a 3172 return raw_atomic64_add_return_acquire(1, v);
37f8173d 3173#endif
1d78814d 3174}
37f8173d 3175
ad811070
MR
3176/**
3177 * raw_atomic64_inc_return_release() - atomic increment with release ordering
3178 * @v: pointer to atomic64_t
3179 *
3180 * Atomically updates @v to (@v + 1) with release ordering.
3181 *
3182 * Safe to use in noinstr code; prefer atomic64_inc_return_release() elsewhere.
3183 *
3184 * Return: The updated value of @v.
3185 */
37f8173d 3186static __always_inline s64
9257959a 3187raw_atomic64_inc_return_release(atomic64_t *v)
37f8173d 3188{
1d78814d
MR
3189#if defined(arch_atomic64_inc_return_release)
3190 return arch_atomic64_inc_return_release(v);
3191#elif defined(arch_atomic64_inc_return_relaxed)
9257959a
MR
3192 __atomic_release_fence();
3193 return arch_atomic64_inc_return_relaxed(v);
9257959a 3194#elif defined(arch_atomic64_inc_return)
1d78814d 3195 return arch_atomic64_inc_return(v);
9257959a 3196#else
9257959a 3197 return raw_atomic64_add_return_release(1, v);
37f8173d 3198#endif
1d78814d 3199}
37f8173d 3200
ad811070
MR
3201/**
3202 * raw_atomic64_inc_return_relaxed() - atomic increment with relaxed ordering
3203 * @v: pointer to atomic64_t
3204 *
3205 * Atomically updates @v to (@v + 1) with relaxed ordering.
3206 *
3207 * Safe to use in noinstr code; prefer atomic64_inc_return_relaxed() elsewhere.
3208 *
3209 * Return: The updated value of @v.
3210 */
37f8173d 3211static __always_inline s64
9257959a 3212raw_atomic64_inc_return_relaxed(atomic64_t *v)
37f8173d 3213{
1d78814d
MR
3214#if defined(arch_atomic64_inc_return_relaxed)
3215 return arch_atomic64_inc_return_relaxed(v);
3216#elif defined(arch_atomic64_inc_return)
3217 return arch_atomic64_inc_return(v);
3218#else
9257959a 3219 return raw_atomic64_add_return_relaxed(1, v);
37f8173d 3220#endif
1d78814d 3221}
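
/*
 * Illustrative sketch only, not emitted by the generator:
 * raw_atomic64_inc_return() handing out monotonically increasing 64-bit
 * identifiers.  The helper name is hypothetical.
 */
static __always_inline s64
example_next_id(atomic64_t *last_id)
{
        /* With 64 bits, wrap-around is not a practical concern. */
        return raw_atomic64_inc_return(last_id);
}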
37f8173d 3222
ad811070
MR
3223/**
3224 * raw_atomic64_fetch_inc() - atomic increment with full ordering
3225 * @v: pointer to atomic64_t
3226 *
3227 * Atomically updates @v to (@v + 1) with full ordering.
3228 *
3229 * Safe to use in noinstr code; prefer atomic64_fetch_inc() elsewhere.
3230 *
3231 * Return: The original value of @v.
3232 */
37f8173d 3233static __always_inline s64
9257959a 3234raw_atomic64_fetch_inc(atomic64_t *v)
37f8173d 3235{
1d78814d
MR
3236#if defined(arch_atomic64_fetch_inc)
3237 return arch_atomic64_fetch_inc(v);
3238#elif defined(arch_atomic64_fetch_inc_relaxed)
37f8173d
PZ
3239 s64 ret;
3240 __atomic_pre_full_fence();
9257959a 3241 ret = arch_atomic64_fetch_inc_relaxed(v);
37f8173d
PZ
3242 __atomic_post_full_fence();
3243 return ret;
9257959a 3244#else
9257959a 3245 return raw_atomic64_fetch_add(1, v);
37f8173d 3246#endif
1d78814d 3247}
37f8173d 3248
ad811070
MR
3249/**
3250 * raw_atomic64_fetch_inc_acquire() - atomic increment with acquire ordering
3251 * @v: pointer to atomic64_t
3252 *
3253 * Atomically updates @v to (@v + 1) with acquire ordering.
3254 *
3255 * Safe to use in noinstr code; prefer atomic64_fetch_inc_acquire() elsewhere.
3256 *
3257 * Return: The original value of @v.
3258 */
9257959a
MR
3259static __always_inline s64
3260raw_atomic64_fetch_inc_acquire(atomic64_t *v)
3261{
1d78814d
MR
3262#if defined(arch_atomic64_fetch_inc_acquire)
3263 return arch_atomic64_fetch_inc_acquire(v);
3264#elif defined(arch_atomic64_fetch_inc_relaxed)
9257959a
MR
3265 s64 ret = arch_atomic64_fetch_inc_relaxed(v);
3266 __atomic_acquire_fence();
3267 return ret;
9257959a 3268#elif defined(arch_atomic64_fetch_inc)
1d78814d 3269 return arch_atomic64_fetch_inc(v);
9257959a 3270#else
9257959a 3271 return raw_atomic64_fetch_add_acquire(1, v);
37f8173d 3272#endif
1d78814d 3273}
37f8173d 3274
ad811070
MR
3275/**
3276 * raw_atomic64_fetch_inc_release() - atomic increment with release ordering
3277 * @v: pointer to atomic64_t
3278 *
3279 * Atomically updates @v to (@v + 1) with release ordering.
3280 *
3281 * Safe to use in noinstr code; prefer atomic64_fetch_inc_release() elsewhere.
3282 *
3283 * Return: The original value of @v.
3284 */
37f8173d 3285static __always_inline s64
9257959a 3286raw_atomic64_fetch_inc_release(atomic64_t *v)
37f8173d 3287{
1d78814d
MR
3288#if defined(arch_atomic64_fetch_inc_release)
3289 return arch_atomic64_fetch_inc_release(v);
3290#elif defined(arch_atomic64_fetch_inc_relaxed)
9257959a
MR
3291 __atomic_release_fence();
3292 return arch_atomic64_fetch_inc_relaxed(v);
9257959a 3293#elif defined(arch_atomic64_fetch_inc)
1d78814d 3294 return arch_atomic64_fetch_inc(v);
9257959a 3295#else
9257959a 3296 return raw_atomic64_fetch_add_release(1, v);
37f8173d 3297#endif
1d78814d 3298}
37f8173d 3299
ad811070
MR
3300/**
3301 * raw_atomic64_fetch_inc_relaxed() - atomic increment with relaxed ordering
3302 * @v: pointer to atomic64_t
3303 *
3304 * Atomically updates @v to (@v + 1) with relaxed ordering.
3305 *
3306 * Safe to use in noinstr code; prefer atomic64_fetch_inc_relaxed() elsewhere.
3307 *
3308 * Return: The original value of @v.
3309 */
37f8173d 3310static __always_inline s64
9257959a 3311raw_atomic64_fetch_inc_relaxed(atomic64_t *v)
37f8173d 3312{
1d78814d
MR
3313#if defined(arch_atomic64_fetch_inc_relaxed)
3314 return arch_atomic64_fetch_inc_relaxed(v);
3315#elif defined(arch_atomic64_fetch_inc)
3316 return arch_atomic64_fetch_inc(v);
3317#else
9257959a 3318 return raw_atomic64_fetch_add_relaxed(1, v);
37f8173d 3319#endif
1d78814d 3320}
37f8173d 3321
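/*
 * Editor's illustrative sketch, not generated output: the fetch_inc()
 * family returns the value *before* the increment, while inc_return()
 * returns the value *after* it. The hypothetical helper below hands out
 * zero-based slots using the fully ordered fetch form.
 */
static __always_inline s64
example_alloc_slot(atomic64_t *next_free)
{
	/* First caller gets 0, the next gets 1, and so on. */
	return raw_atomic64_fetch_inc(next_free);
}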
ad811070
MR
3322/**
3323 * raw_atomic64_dec() - atomic decrement with relaxed ordering
3324 * @v: pointer to atomic64_t
3325 *
3326 * Atomically updates @v to (@v - 1) with relaxed ordering.
3327 *
3328 * Safe to use in noinstr code; prefer atomic64_dec() elsewhere.
3329 *
3330 * Return: Nothing.
3331 */
9257959a
MR
3332static __always_inline void
3333raw_atomic64_dec(atomic64_t *v)
37f8173d 3334{
1d78814d
MR
3335#if defined(arch_atomic64_dec)
3336 arch_atomic64_dec(v);
3337#else
9257959a 3338 raw_atomic64_sub(1, v);
37f8173d 3339#endif
1d78814d 3340}
37f8173d 3341
ad811070
MR
3342/**
3343 * raw_atomic64_dec_return() - atomic decrement with full ordering
3344 * @v: pointer to atomic64_t
3345 *
3346 * Atomically updates @v to (@v - 1) with full ordering.
3347 *
3348 * Safe to use in noinstr code; prefer atomic64_dec_return() elsewhere.
3349 *
3350 * Return: The updated value of @v.
3351 */
37f8173d 3352static __always_inline s64
9257959a 3353raw_atomic64_dec_return(atomic64_t *v)
37f8173d 3354{
1d78814d
MR
3355#if defined(arch_atomic64_dec_return)
3356 return arch_atomic64_dec_return(v);
3357#elif defined(arch_atomic64_dec_return_relaxed)
37f8173d
PZ
3358 s64 ret;
3359 __atomic_pre_full_fence();
9257959a 3360 ret = arch_atomic64_dec_return_relaxed(v);
37f8173d
PZ
3361 __atomic_post_full_fence();
3362 return ret;
9257959a 3363#else
9257959a 3364 return raw_atomic64_sub_return(1, v);
37f8173d 3365#endif
1d78814d 3366}
37f8173d 3367
ad811070
MR
3368/**
3369 * raw_atomic64_dec_return_acquire() - atomic decrement with acquire ordering
3370 * @v: pointer to atomic64_t
3371 *
3372 * Atomically updates @v to (@v - 1) with acquire ordering.
3373 *
3374 * Safe to use in noinstr code; prefer atomic64_dec_return_acquire() elsewhere.
3375 *
3376 * Return: The updated value of @v.
3377 */
37f8173d 3378static __always_inline s64
9257959a 3379raw_atomic64_dec_return_acquire(atomic64_t *v)
37f8173d 3380{
1d78814d
MR
3381#if defined(arch_atomic64_dec_return_acquire)
3382 return arch_atomic64_dec_return_acquire(v);
3383#elif defined(arch_atomic64_dec_return_relaxed)
9257959a
MR
3384 s64 ret = arch_atomic64_dec_return_relaxed(v);
3385 __atomic_acquire_fence();
3386 return ret;
9257959a 3387#elif defined(arch_atomic64_dec_return)
1d78814d 3388 return arch_atomic64_dec_return(v);
9257959a 3389#else
9257959a 3390 return raw_atomic64_sub_return_acquire(1, v);
37f8173d 3391#endif
1d78814d 3392}
37f8173d 3393
ad811070
MR
3394/**
3395 * raw_atomic64_dec_return_release() - atomic decrement with release ordering
3396 * @v: pointer to atomic64_t
3397 *
3398 * Atomically updates @v to (@v - 1) with release ordering.
3399 *
3400 * Safe to use in noinstr code; prefer atomic64_dec_return_release() elsewhere.
3401 *
3402 * Return: The updated value of @v.
3403 */
37f8173d 3404static __always_inline s64
9257959a 3405raw_atomic64_dec_return_release(atomic64_t *v)
37f8173d 3406{
1d78814d
MR
3407#if defined(arch_atomic64_dec_return_release)
3408 return arch_atomic64_dec_return_release(v);
3409#elif defined(arch_atomic64_dec_return_relaxed)
9257959a
MR
3410 __atomic_release_fence();
3411 return arch_atomic64_dec_return_relaxed(v);
9257959a 3412#elif defined(arch_atomic64_dec_return)
1d78814d 3413 return arch_atomic64_dec_return(v);
9257959a 3414#else
9257959a 3415 return raw_atomic64_sub_return_release(1, v);
37f8173d 3416#endif
1d78814d 3417}
37f8173d 3418
ad811070
MR
3419/**
3420 * raw_atomic64_dec_return_relaxed() - atomic decrement with relaxed ordering
3421 * @v: pointer to atomic64_t
3422 *
3423 * Atomically updates @v to (@v - 1) with relaxed ordering.
3424 *
3425 * Safe to use in noinstr code; prefer atomic64_dec_return_relaxed() elsewhere.
3426 *
3427 * Return: The updated value of @v.
3428 */
37f8173d 3429static __always_inline s64
9257959a 3430raw_atomic64_dec_return_relaxed(atomic64_t *v)
37f8173d 3431{
1d78814d
MR
3432#if defined(arch_atomic64_dec_return_relaxed)
3433 return arch_atomic64_dec_return_relaxed(v);
3434#elif defined(arch_atomic64_dec_return)
3435 return arch_atomic64_dec_return(v);
3436#else
9257959a 3437 return raw_atomic64_sub_return_relaxed(1, v);
37f8173d 3438#endif
1d78814d 3439}
37f8173d 3440
ad811070
MR
3441/**
3442 * raw_atomic64_fetch_dec() - atomic decrement with full ordering
3443 * @v: pointer to atomic64_t
3444 *
3445 * Atomically updates @v to (@v - 1) with full ordering.
3446 *
3447 * Safe to use in noinstr code; prefer atomic64_fetch_dec() elsewhere.
3448 *
3449 * Return: The original value of @v.
3450 */
37f8173d 3451static __always_inline s64
9257959a 3452raw_atomic64_fetch_dec(atomic64_t *v)
37f8173d 3453{
1d78814d
MR
3454#if defined(arch_atomic64_fetch_dec)
3455 return arch_atomic64_fetch_dec(v);
3456#elif defined(arch_atomic64_fetch_dec_relaxed)
37f8173d
PZ
3457 s64 ret;
3458 __atomic_pre_full_fence();
9257959a 3459 ret = arch_atomic64_fetch_dec_relaxed(v);
37f8173d
PZ
3460 __atomic_post_full_fence();
3461 return ret;
9257959a 3462#else
9257959a 3463 return raw_atomic64_fetch_sub(1, v);
37f8173d 3464#endif
1d78814d 3465}
37f8173d 3466
ad811070
MR
3467/**
3468 * raw_atomic64_fetch_dec_acquire() - atomic decrement with acquire ordering
3469 * @v: pointer to atomic64_t
3470 *
3471 * Atomically updates @v to (@v - 1) with acquire ordering.
3472 *
3473 * Safe to use in noinstr code; prefer atomic64_fetch_dec_acquire() elsewhere.
3474 *
3475 * Return: The original value of @v.
3476 */
37f8173d 3477static __always_inline s64
9257959a 3478raw_atomic64_fetch_dec_acquire(atomic64_t *v)
37f8173d 3479{
1d78814d
MR
3480#if defined(arch_atomic64_fetch_dec_acquire)
3481 return arch_atomic64_fetch_dec_acquire(v);
3482#elif defined(arch_atomic64_fetch_dec_relaxed)
9257959a
MR
3483 s64 ret = arch_atomic64_fetch_dec_relaxed(v);
3484 __atomic_acquire_fence();
3485 return ret;
9257959a 3486#elif defined(arch_atomic64_fetch_dec)
1d78814d 3487 return arch_atomic64_fetch_dec(v);
9257959a 3488#else
9257959a 3489 return raw_atomic64_fetch_sub_acquire(1, v);
37f8173d 3490#endif
1d78814d 3491}
37f8173d 3492
ad811070
MR
3493/**
3494 * raw_atomic64_fetch_dec_release() - atomic decrement with release ordering
3495 * @v: pointer to atomic64_t
3496 *
3497 * Atomically updates @v to (@v - 1) with release ordering.
3498 *
3499 * Safe to use in noinstr code; prefer atomic64_fetch_dec_release() elsewhere.
3500 *
3501 * Return: The original value of @v.
3502 */
37f8173d 3503static __always_inline s64
9257959a 3504raw_atomic64_fetch_dec_release(atomic64_t *v)
37f8173d 3505{
1d78814d
MR
3506#if defined(arch_atomic64_fetch_dec_release)
3507 return arch_atomic64_fetch_dec_release(v);
3508#elif defined(arch_atomic64_fetch_dec_relaxed)
9257959a
MR
3509 __atomic_release_fence();
3510 return arch_atomic64_fetch_dec_relaxed(v);
9257959a 3511#elif defined(arch_atomic64_fetch_dec)
1d78814d 3512 return arch_atomic64_fetch_dec(v);
9257959a 3513#else
9257959a 3514 return raw_atomic64_fetch_sub_release(1, v);
37f8173d 3515#endif
1d78814d 3516}
37f8173d 3517
ad811070
MR
3518/**
3519 * raw_atomic64_fetch_dec_relaxed() - atomic decrement with relaxed ordering
3520 * @v: pointer to atomic64_t
3521 *
3522 * Atomically updates @v to (@v - 1) with relaxed ordering.
3523 *
3524 * Safe to use in noinstr code; prefer atomic64_fetch_dec_relaxed() elsewhere.
3525 *
3526 * Return: The original value of @v.
3527 */
37f8173d 3528static __always_inline s64
9257959a 3529raw_atomic64_fetch_dec_relaxed(atomic64_t *v)
37f8173d 3530{
1d78814d
MR
3531#if defined(arch_atomic64_fetch_dec_relaxed)
3532 return arch_atomic64_fetch_dec_relaxed(v);
3533#elif defined(arch_atomic64_fetch_dec)
3534 return arch_atomic64_fetch_dec(v);
3535#else
9257959a 3536 return raw_atomic64_fetch_sub_relaxed(1, v);
37f8173d 3537#endif
1d78814d 3538}
37f8173d 3539
ad811070
MR
3540/**
3541 * raw_atomic64_and() - atomic bitwise AND with relaxed ordering
3542 * @i: s64 value
3543 * @v: pointer to atomic64_t
3544 *
3545 * Atomically updates @v to (@v & @i) with relaxed ordering.
3546 *
3547 * Safe to use in noinstr code; prefer atomic64_and() elsewhere.
3548 *
3549 * Return: Nothing.
3550 */
1d78814d
MR
3551static __always_inline void
3552raw_atomic64_and(s64 i, atomic64_t *v)
3553{
3554 arch_atomic64_and(i, v);
3555}
9257959a 3556
ad811070
MR
3557/**
3558 * raw_atomic64_fetch_and() - atomic bitwise AND with full ordering
3559 * @i: s64 value
3560 * @v: pointer to atomic64_t
3561 *
3562 * Atomically updates @v to (@v & @i) with full ordering.
3563 *
3564 * Safe to use in noinstr code; prefer atomic64_fetch_and() elsewhere.
3565 *
3566 * Return: The original value of @v.
3567 */
37f8173d 3568static __always_inline s64
9257959a 3569raw_atomic64_fetch_and(s64 i, atomic64_t *v)
37f8173d 3570{
1d78814d
MR
3571#if defined(arch_atomic64_fetch_and)
3572 return arch_atomic64_fetch_and(i, v);
3573#elif defined(arch_atomic64_fetch_and_relaxed)
37f8173d
PZ
3574 s64 ret;
3575 __atomic_pre_full_fence();
9257959a 3576 ret = arch_atomic64_fetch_and_relaxed(i, v);
37f8173d
PZ
3577 __atomic_post_full_fence();
3578 return ret;
9257959a
MR
3579#else
3580#error "Unable to define raw_atomic64_fetch_and"
37f8173d 3581#endif
1d78814d 3582}
37f8173d 3583
ad811070
MR
3584/**
3585 * raw_atomic64_fetch_and_acquire() - atomic bitwise AND with acquire ordering
3586 * @i: s64 value
3587 * @v: pointer to atomic64_t
3588 *
3589 * Atomically updates @v to (@v & @i) with acquire ordering.
3590 *
3591 * Safe to use in noinstr code; prefer atomic64_fetch_and_acquire() elsewhere.
3592 *
3593 * Return: The original value of @v.
3594 */
37f8173d 3595static __always_inline s64
9257959a 3596raw_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
37f8173d 3597{
1d78814d
MR
3598#if defined(arch_atomic64_fetch_and_acquire)
3599 return arch_atomic64_fetch_and_acquire(i, v);
3600#elif defined(arch_atomic64_fetch_and_relaxed)
37f8173d
PZ
3601 s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
3602 __atomic_acquire_fence();
3603 return ret;
9257959a 3604#elif defined(arch_atomic64_fetch_and)
1d78814d 3605 return arch_atomic64_fetch_and(i, v);
9257959a
MR
3606#else
3607#error "Unable to define raw_atomic64_fetch_and_acquire"
37f8173d 3608#endif
1d78814d 3609}
37f8173d 3610
ad811070
MR
3611/**
3612 * raw_atomic64_fetch_and_release() - atomic bitwise AND with release ordering
3613 * @i: s64 value
3614 * @v: pointer to atomic64_t
3615 *
3616 * Atomically updates @v to (@v & @i) with release ordering.
3617 *
3618 * Safe to use in noinstr code; prefer atomic64_fetch_and_release() elsewhere.
3619 *
3620 * Return: The original value of @v.
3621 */
37f8173d 3622static __always_inline s64
9257959a 3623raw_atomic64_fetch_and_release(s64 i, atomic64_t *v)
37f8173d 3624{
1d78814d
MR
3625#if defined(arch_atomic64_fetch_and_release)
3626 return arch_atomic64_fetch_and_release(i, v);
3627#elif defined(arch_atomic64_fetch_and_relaxed)
37f8173d
PZ
3628 __atomic_release_fence();
3629 return arch_atomic64_fetch_and_relaxed(i, v);
9257959a 3630#elif defined(arch_atomic64_fetch_and)
1d78814d 3631 return arch_atomic64_fetch_and(i, v);
9257959a
MR
3632#else
3633#error "Unable to define raw_atomic64_fetch_and_release"
37f8173d 3634#endif
1d78814d 3635}
37f8173d 3636
ad811070
MR
3637/**
3638 * raw_atomic64_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
3639 * @i: s64 value
3640 * @v: pointer to atomic64_t
3641 *
3642 * Atomically updates @v to (@v & @i) with relaxed ordering.
3643 *
3644 * Safe to use in noinstr code; prefer atomic64_fetch_and_relaxed() elsewhere.
3645 *
3646 * Return: The original value of @v.
3647 */
1d78814d
MR
3648static __always_inline s64
3649raw_atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
3650{
9257959a 3651#if defined(arch_atomic64_fetch_and_relaxed)
1d78814d 3652 return arch_atomic64_fetch_and_relaxed(i, v);
9257959a 3653#elif defined(arch_atomic64_fetch_and)
1d78814d 3654 return arch_atomic64_fetch_and(i, v);
9257959a
MR
3655#else
3656#error "Unable to define raw_atomic64_fetch_and_relaxed"
37f8173d 3657#endif
1d78814d 3658}
37f8173d 3659
ad811070
MR
3660/**
3661 * raw_atomic64_andnot() - atomic bitwise AND NOT with relaxed ordering
3662 * @i: s64 value
3663 * @v: pointer to atomic64_t
3664 *
3665 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
3666 *
3667 * Safe to use in noinstr code; prefer atomic64_andnot() elsewhere.
3668 *
3669 * Return: Nothing.
3670 */
37f8173d 3671static __always_inline void
9257959a 3672raw_atomic64_andnot(s64 i, atomic64_t *v)
37f8173d 3673{
1d78814d
MR
3674#if defined(arch_atomic64_andnot)
3675 arch_atomic64_andnot(i, v);
3676#else
9257959a 3677 raw_atomic64_and(~i, v);
37f8173d 3678#endif
1d78814d 3679}
37f8173d 3680
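/*
 * Editor's illustrative sketch, not generated output: with no andnot
 * primitive, the fallback above is simply an AND with the complement. The
 * helper and its @mask parameter are hypothetical; it clears a set of flag
 * bits in one atomic step.
 */
static __always_inline void
example_clear_flags(atomic64_t *flags, s64 mask)
{
	/* Equivalent to raw_atomic64_and(~mask, flags) on the fallback path. */
	raw_atomic64_andnot(mask, flags);
}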
ad811070
MR
3681/**
3682 * raw_atomic64_fetch_andnot() - atomic bitwise AND NOT with full ordering
3683 * @i: s64 value
3684 * @v: pointer to atomic64_t
3685 *
3686 * Atomically updates @v to (@v & ~@i) with full ordering.
3687 *
3688 * Safe to use in noinstr code; prefer atomic64_fetch_andnot() elsewhere.
3689 *
3690 * Return: The original value of @v.
3691 */
37f8173d 3692static __always_inline s64
9257959a 3693raw_atomic64_fetch_andnot(s64 i, atomic64_t *v)
37f8173d 3694{
1d78814d
MR
3695#if defined(arch_atomic64_fetch_andnot)
3696 return arch_atomic64_fetch_andnot(i, v);
3697#elif defined(arch_atomic64_fetch_andnot_relaxed)
9257959a
MR
3698 s64 ret;
3699 __atomic_pre_full_fence();
3700 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
3701 __atomic_post_full_fence();
3702 return ret;
9257959a 3703#else
9257959a 3704 return raw_atomic64_fetch_and(~i, v);
37f8173d 3705#endif
1d78814d 3706}
37f8173d 3707
ad811070
MR
3708/**
3709 * raw_atomic64_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
3710 * @i: s64 value
3711 * @v: pointer to atomic64_t
3712 *
3713 * Atomically updates @v to (@v & ~@i) with acquire ordering.
3714 *
3715 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_acquire() elsewhere.
3716 *
3717 * Return: The original value of @v.
3718 */
37f8173d 3719static __always_inline s64
9257959a 3720raw_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
37f8173d 3721{
1d78814d
MR
3722#if defined(arch_atomic64_fetch_andnot_acquire)
3723 return arch_atomic64_fetch_andnot_acquire(i, v);
3724#elif defined(arch_atomic64_fetch_andnot_relaxed)
9257959a
MR
3725 s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
3726 __atomic_acquire_fence();
3727 return ret;
9257959a 3728#elif defined(arch_atomic64_fetch_andnot)
1d78814d 3729 return arch_atomic64_fetch_andnot(i, v);
9257959a 3730#else
9257959a 3731 return raw_atomic64_fetch_and_acquire(~i, v);
37f8173d 3732#endif
1d78814d 3733}
37f8173d 3734
ad811070
MR
3735/**
3736 * raw_atomic64_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
3737 * @i: s64 value
3738 * @v: pointer to atomic64_t
3739 *
3740 * Atomically updates @v to (@v & ~@i) with release ordering.
3741 *
3742 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_release() elsewhere.
3743 *
3744 * Return: The original value of @v.
3745 */
37f8173d 3746static __always_inline s64
9257959a 3747raw_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
37f8173d 3748{
1d78814d
MR
3749#if defined(arch_atomic64_fetch_andnot_release)
3750 return arch_atomic64_fetch_andnot_release(i, v);
3751#elif defined(arch_atomic64_fetch_andnot_relaxed)
9257959a
MR
3752 __atomic_release_fence();
3753 return arch_atomic64_fetch_andnot_relaxed(i, v);
9257959a 3754#elif defined(arch_atomic64_fetch_andnot)
1d78814d 3755 return arch_atomic64_fetch_andnot(i, v);
9257959a 3756#else
9257959a 3757 return raw_atomic64_fetch_and_release(~i, v);
37f8173d 3758#endif
1d78814d 3759}
37f8173d 3760
ad811070
MR
3761/**
3762 * raw_atomic64_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
3763 * @i: s64 value
3764 * @v: pointer to atomic64_t
3765 *
3766 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
3767 *
3768 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_relaxed() elsewhere.
3769 *
3770 * Return: The original value of @v.
3771 */
37f8173d 3772static __always_inline s64
9257959a 3773raw_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
37f8173d 3774{
1d78814d
MR
3775#if defined(arch_atomic64_fetch_andnot_relaxed)
3776 return arch_atomic64_fetch_andnot_relaxed(i, v);
3777#elif defined(arch_atomic64_fetch_andnot)
3778 return arch_atomic64_fetch_andnot(i, v);
3779#else
9257959a 3780 return raw_atomic64_fetch_and_relaxed(~i, v);
37f8173d 3781#endif
1d78814d 3782}
37f8173d 3783
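/*
 * Editor's illustrative sketch, not generated output: because fetch_andnot()
 * returns the old value, it can implement an atomic "test and clear" over
 * several bits at once. The helper below is hypothetical.
 */
static __always_inline bool
example_test_and_clear_flags(atomic64_t *flags, s64 mask)
{
	s64 old = raw_atomic64_fetch_andnot(mask, flags);

	/* True if at least one of the requested bits was previously set. */
	return (old & mask) != 0;
}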
ad811070
MR
3784/**
3785 * raw_atomic64_or() - atomic bitwise OR with relaxed ordering
3786 * @i: s64 value
3787 * @v: pointer to atomic64_t
3788 *
3789 * Atomically updates @v to (@v | @i) with relaxed ordering.
3790 *
3791 * Safe to use in noinstr code; prefer atomic64_or() elsewhere.
3792 *
3793 * Return: Nothing.
3794 */
1d78814d
MR
3795static __always_inline void
3796raw_atomic64_or(s64 i, atomic64_t *v)
3797{
3798 arch_atomic64_or(i, v);
3799}
9257959a 3800
ad811070
MR
3801/**
3802 * raw_atomic64_fetch_or() - atomic bitwise OR with full ordering
3803 * @i: s64 value
3804 * @v: pointer to atomic64_t
3805 *
3806 * Atomically updates @v to (@v | @i) with full ordering.
3807 *
3808 * Safe to use in noinstr code; prefer atomic64_fetch_or() elsewhere.
3809 *
3810 * Return: The original value of @v.
3811 */
37f8173d 3812static __always_inline s64
9257959a 3813raw_atomic64_fetch_or(s64 i, atomic64_t *v)
37f8173d 3814{
1d78814d
MR
3815#if defined(arch_atomic64_fetch_or)
3816 return arch_atomic64_fetch_or(i, v);
3817#elif defined(arch_atomic64_fetch_or_relaxed)
37f8173d
PZ
3818 s64 ret;
3819 __atomic_pre_full_fence();
9257959a 3820 ret = arch_atomic64_fetch_or_relaxed(i, v);
37f8173d
PZ
3821 __atomic_post_full_fence();
3822 return ret;
9257959a
MR
3823#else
3824#error "Unable to define raw_atomic64_fetch_or"
37f8173d 3825#endif
1d78814d 3826}
37f8173d 3827
ad811070
MR
3828/**
3829 * raw_atomic64_fetch_or_acquire() - atomic bitwise OR with acquire ordering
3830 * @i: s64 value
3831 * @v: pointer to atomic64_t
3832 *
3833 * Atomically updates @v to (@v | @i) with acquire ordering.
3834 *
3835 * Safe to use in noinstr code; prefer atomic64_fetch_or_acquire() elsewhere.
3836 *
3837 * Return: The original value of @v.
3838 */
37f8173d 3839static __always_inline s64
9257959a 3840raw_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
37f8173d 3841{
1d78814d
MR
3842#if defined(arch_atomic64_fetch_or_acquire)
3843 return arch_atomic64_fetch_or_acquire(i, v);
3844#elif defined(arch_atomic64_fetch_or_relaxed)
37f8173d
PZ
3845 s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
3846 __atomic_acquire_fence();
3847 return ret;
9257959a 3848#elif defined(arch_atomic64_fetch_or)
1d78814d 3849 return arch_atomic64_fetch_or(i, v);
9257959a
MR
3850#else
3851#error "Unable to define raw_atomic64_fetch_or_acquire"
37f8173d 3852#endif
1d78814d 3853}
37f8173d 3854
ad811070
MR
3855/**
3856 * raw_atomic64_fetch_or_release() - atomic bitwise OR with release ordering
3857 * @i: s64 value
3858 * @v: pointer to atomic64_t
3859 *
3860 * Atomically updates @v to (@v | @i) with release ordering.
3861 *
3862 * Safe to use in noinstr code; prefer atomic64_fetch_or_release() elsewhere.
3863 *
3864 * Return: The original value of @v.
3865 */
37f8173d 3866static __always_inline s64
9257959a 3867raw_atomic64_fetch_or_release(s64 i, atomic64_t *v)
37f8173d 3868{
1d78814d
MR
3869#if defined(arch_atomic64_fetch_or_release)
3870 return arch_atomic64_fetch_or_release(i, v);
3871#elif defined(arch_atomic64_fetch_or_relaxed)
37f8173d
PZ
3872 __atomic_release_fence();
3873 return arch_atomic64_fetch_or_relaxed(i, v);
9257959a 3874#elif defined(arch_atomic64_fetch_or)
1d78814d 3875 return arch_atomic64_fetch_or(i, v);
9257959a
MR
3876#else
3877#error "Unable to define raw_atomic64_fetch_or_release"
3878#endif
1d78814d 3879}
9257959a 3880
ad811070
MR
3881/**
3882 * raw_atomic64_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
3883 * @i: s64 value
3884 * @v: pointer to atomic64_t
3885 *
3886 * Atomically updates @v to (@v | @i) with relaxed ordering.
3887 *
3888 * Safe to use in noinstr code; prefer atomic64_fetch_or_relaxed() elsewhere.
3889 *
3890 * Return: The original value of @v.
3891 */
1d78814d
MR
3892static __always_inline s64
3893raw_atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
3894{
9257959a 3895#if defined(arch_atomic64_fetch_or_relaxed)
1d78814d 3896 return arch_atomic64_fetch_or_relaxed(i, v);
9257959a 3897#elif defined(arch_atomic64_fetch_or)
1d78814d 3898 return arch_atomic64_fetch_or(i, v);
9257959a
MR
3899#else
3900#error "Unable to define raw_atomic64_fetch_or_relaxed"
37f8173d 3901#endif
1d78814d 3902}
37f8173d 3903
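/*
 * Editor's illustrative sketch, not generated output: fetch_or() returns
 * the bits that were already set, so exactly one racing caller can win the
 * transition of a flag from 0 to 1. The helper below is hypothetical.
 */
static __always_inline bool
example_set_flag_once(atomic64_t *flags, s64 flag)
{
	/* Only the winner sees the bit clear in the returned old value. */
	return !(raw_atomic64_fetch_or(flag, flags) & flag);
}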
ad811070
MR
3904/**
3905 * raw_atomic64_xor() - atomic bitwise XOR with relaxed ordering
3906 * @i: s64 value
3907 * @v: pointer to atomic64_t
3908 *
3909 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
3910 *
3911 * Safe to use in noinstr code; prefer atomic64_xor() elsewhere.
3912 *
3913 * Return: Nothing.
3914 */
1d78814d
MR
3915static __always_inline void
3916raw_atomic64_xor(s64 i, atomic64_t *v)
3917{
3918 arch_atomic64_xor(i, v);
3919}
9257959a 3920
ad811070
MR
3921/**
3922 * raw_atomic64_fetch_xor() - atomic bitwise XOR with full ordering
3923 * @i: s64 value
3924 * @v: pointer to atomic64_t
3925 *
3926 * Atomically updates @v to (@v ^ @i) with full ordering.
3927 *
3928 * Safe to use in noinstr code; prefer atomic64_fetch_xor() elsewhere.
3929 *
3930 * Return: The original value of @v.
3931 */
37f8173d 3932static __always_inline s64
9257959a 3933raw_atomic64_fetch_xor(s64 i, atomic64_t *v)
37f8173d 3934{
1d78814d
MR
3935#if defined(arch_atomic64_fetch_xor)
3936 return arch_atomic64_fetch_xor(i, v);
3937#elif defined(arch_atomic64_fetch_xor_relaxed)
37f8173d
PZ
3938 s64 ret;
3939 __atomic_pre_full_fence();
9257959a 3940 ret = arch_atomic64_fetch_xor_relaxed(i, v);
37f8173d
PZ
3941 __atomic_post_full_fence();
3942 return ret;
9257959a
MR
3943#else
3944#error "Unable to define raw_atomic64_fetch_xor"
37f8173d 3945#endif
1d78814d 3946}
37f8173d 3947
ad811070
MR
3948/**
3949 * raw_atomic64_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
3950 * @i: s64 value
3951 * @v: pointer to atomic64_t
3952 *
3953 * Atomically updates @v to (@v ^ @i) with acquire ordering.
3954 *
3955 * Safe to use in noinstr code; prefer atomic64_fetch_xor_acquire() elsewhere.
3956 *
3957 * Return: The original value of @v.
3958 */
37f8173d 3959static __always_inline s64
9257959a 3960raw_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
37f8173d 3961{
1d78814d
MR
3962#if defined(arch_atomic64_fetch_xor_acquire)
3963 return arch_atomic64_fetch_xor_acquire(i, v);
3964#elif defined(arch_atomic64_fetch_xor_relaxed)
37f8173d
PZ
3965 s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
3966 __atomic_acquire_fence();
3967 return ret;
9257959a 3968#elif defined(arch_atomic64_fetch_xor)
1d78814d 3969 return arch_atomic64_fetch_xor(i, v);
9257959a
MR
3970#else
3971#error "Unable to define raw_atomic64_fetch_xor_acquire"
37f8173d 3972#endif
1d78814d 3973}
37f8173d 3974
ad811070
MR
3975/**
3976 * raw_atomic64_fetch_xor_release() - atomic bitwise XOR with release ordering
3977 * @i: s64 value
3978 * @v: pointer to atomic64_t
3979 *
3980 * Atomically updates @v to (@v ^ @i) with release ordering.
3981 *
3982 * Safe to use in noinstr code; prefer atomic64_fetch_xor_release() elsewhere.
3983 *
3984 * Return: The original value of @v.
3985 */
37f8173d 3986static __always_inline s64
9257959a 3987raw_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
37f8173d 3988{
1d78814d
MR
3989#if defined(arch_atomic64_fetch_xor_release)
3990 return arch_atomic64_fetch_xor_release(i, v);
3991#elif defined(arch_atomic64_fetch_xor_relaxed)
37f8173d
PZ
3992 __atomic_release_fence();
3993 return arch_atomic64_fetch_xor_relaxed(i, v);
9257959a 3994#elif defined(arch_atomic64_fetch_xor)
1d78814d 3995 return arch_atomic64_fetch_xor(i, v);
9257959a
MR
3996#else
3997#error "Unable to define raw_atomic64_fetch_xor_release"
3998#endif
1d78814d 3999}
9257959a 4000
ad811070
MR
4001/**
4002 * raw_atomic64_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
4003 * @i: s64 value
4004 * @v: pointer to atomic64_t
4005 *
4006 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
4007 *
4008 * Safe to use in noinstr code; prefer atomic64_fetch_xor_relaxed() elsewhere.
4009 *
4010 * Return: The original value of @v.
4011 */
1d78814d
MR
4012static __always_inline s64
4013raw_atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
4014{
9257959a 4015#if defined(arch_atomic64_fetch_xor_relaxed)
1d78814d 4016 return arch_atomic64_fetch_xor_relaxed(i, v);
9257959a 4017#elif defined(arch_atomic64_fetch_xor)
1d78814d 4018 return arch_atomic64_fetch_xor(i, v);
9257959a
MR
4019#else
4020#error "Unable to define raw_atomic64_fetch_xor_relaxed"
37f8173d 4021#endif
1d78814d 4022}
37f8173d 4023
ad811070
MR
4024/**
4025 * raw_atomic64_xchg() - atomic exchange with full ordering
4026 * @v: pointer to atomic64_t
4027 * @new: s64 value to assign
4028 *
4029 * Atomically updates @v to @new with full ordering.
4030 *
4031 * Safe to use in noinstr code; prefer atomic64_xchg() elsewhere.
4032 *
4033 * Return: The original value of @v.
4034 */
37f8173d 4035static __always_inline s64
1d78814d 4036raw_atomic64_xchg(atomic64_t *v, s64 new)
37f8173d 4037{
1d78814d
MR
4038#if defined(arch_atomic64_xchg)
4039 return arch_atomic64_xchg(v, new);
4040#elif defined(arch_atomic64_xchg_relaxed)
37f8173d
PZ
4041 s64 ret;
4042 __atomic_pre_full_fence();
1d78814d 4043 ret = arch_atomic64_xchg_relaxed(v, new);
37f8173d
PZ
4044 __atomic_post_full_fence();
4045 return ret;
9257959a 4046#else
9257959a 4047 return raw_xchg(&v->counter, new);
d12157ef 4048#endif
1d78814d 4049}
d12157ef 4050
ad811070
MR
4051/**
4052 * raw_atomic64_xchg_acquire() - atomic exchange with acquire ordering
4053 * @v: pointer to atomic64_t
4054 * @new: s64 value to assign
4055 *
4056 * Atomically updates @v to @new with acquire ordering.
4057 *
4058 * Safe to use in noinstr code; prefer atomic64_xchg_acquire() elsewhere.
4059 *
4060 * Return: The original value of @v.
4061 */
d12157ef 4062static __always_inline s64
1d78814d 4063raw_atomic64_xchg_acquire(atomic64_t *v, s64 new)
d12157ef 4064{
1d78814d
MR
4065#if defined(arch_atomic64_xchg_acquire)
4066 return arch_atomic64_xchg_acquire(v, new);
4067#elif defined(arch_atomic64_xchg_relaxed)
4068 s64 ret = arch_atomic64_xchg_relaxed(v, new);
9257959a
MR
4069 __atomic_acquire_fence();
4070 return ret;
9257959a 4071#elif defined(arch_atomic64_xchg)
1d78814d 4072 return arch_atomic64_xchg(v, new);
9257959a 4073#else
9257959a 4074 return raw_xchg_acquire(&v->counter, new);
d12157ef 4075#endif
1d78814d 4076}
d12157ef 4077
ad811070
MR
4078/**
4079 * raw_atomic64_xchg_release() - atomic exchange with release ordering
4080 * @v: pointer to atomic64_t
4081 * @new: s64 value to assign
4082 *
4083 * Atomically updates @v to @new with release ordering.
4084 *
4085 * Safe to use in noinstr code; prefer atomic64_xchg_release() elsewhere.
4086 *
4087 * Return: The original value of @v.
4088 */
d12157ef 4089static __always_inline s64
1d78814d 4090raw_atomic64_xchg_release(atomic64_t *v, s64 new)
d12157ef 4091{
1d78814d
MR
4092#if defined(arch_atomic64_xchg_release)
4093 return arch_atomic64_xchg_release(v, new);
4094#elif defined(arch_atomic64_xchg_relaxed)
9257959a 4095 __atomic_release_fence();
1d78814d 4096 return arch_atomic64_xchg_relaxed(v, new);
9257959a 4097#elif defined(arch_atomic64_xchg)
1d78814d 4098 return arch_atomic64_xchg(v, new);
9257959a 4099#else
9257959a 4100 return raw_xchg_release(&v->counter, new);
37f8173d 4101#endif
1d78814d 4102}
37f8173d 4103
ad811070
MR
4104/**
4105 * raw_atomic64_xchg_relaxed() - atomic exchange with relaxed ordering
4106 * @v: pointer to atomic64_t
4107 * @new: s64 value to assign
4108 *
4109 * Atomically updates @v to @new with relaxed ordering.
4110 *
4111 * Safe to use in noinstr code; prefer atomic64_xchg_relaxed() elsewhere.
4112 *
4113 * Return: The original value of @v.
4114 */
37f8173d 4115static __always_inline s64
9257959a 4116raw_atomic64_xchg_relaxed(atomic64_t *v, s64 new)
37f8173d 4117{
1d78814d
MR
4118#if defined(arch_atomic64_xchg_relaxed)
4119 return arch_atomic64_xchg_relaxed(v, new);
4120#elif defined(arch_atomic64_xchg)
4121 return arch_atomic64_xchg(v, new);
4122#else
9257959a 4123 return raw_xchg_relaxed(&v->counter, new);
37f8173d 4124#endif
1d78814d 4125}
37f8173d 4126
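/*
 * Editor's illustrative sketch, not generated output: xchg() installs @new
 * unconditionally and returns whatever was there before, which suits
 * "drain and reset" patterns. The helper below is hypothetical.
 */
static __always_inline s64
example_drain_counter(atomic64_t *counter)
{
	/* Reset the running total to zero and return what had accumulated. */
	return raw_atomic64_xchg(counter, 0);
}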
ad811070
MR
4127/**
4128 * raw_atomic64_cmpxchg() - atomic compare and exchange with full ordering
4129 * @v: pointer to atomic64_t
4130 * @old: s64 value to compare with
4131 * @new: s64 value to assign
4132 *
4133 * If (@v == @old), atomically updates @v to @new with full ordering.
6dfee110 4134 * Otherwise, @v is not modified and relaxed ordering is provided.
ad811070
MR
4135 *
4136 * Safe to use in noinstr code; prefer atomic64_cmpxchg() elsewhere.
4137 *
4138 * Return: The original value of @v.
4139 */
37f8173d 4140static __always_inline s64
9257959a 4141raw_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
37f8173d 4142{
1d78814d
MR
4143#if defined(arch_atomic64_cmpxchg)
4144 return arch_atomic64_cmpxchg(v, old, new);
4145#elif defined(arch_atomic64_cmpxchg_relaxed)
37f8173d
PZ
4146 s64 ret;
4147 __atomic_pre_full_fence();
9257959a 4148 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
37f8173d
PZ
4149 __atomic_post_full_fence();
4150 return ret;
9257959a 4151#else
9257959a 4152 return raw_cmpxchg(&v->counter, old, new);
d12157ef 4153#endif
1d78814d 4154}
d12157ef 4155
ad811070
MR
4156/**
4157 * raw_atomic64_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
4158 * @v: pointer to atomic64_t
4159 * @old: s64 value to compare with
4160 * @new: s64 value to assign
4161 *
4162 * If (@v == @old), atomically updates @v to @new with acquire ordering.
6dfee110 4163 * Otherwise, @v is not modified and relaxed ordering is provided.
ad811070
MR
4164 *
4165 * Safe to use in noinstr code; prefer atomic64_cmpxchg_acquire() elsewhere.
4166 *
4167 * Return: The original value of @v.
4168 */
d12157ef 4169static __always_inline s64
9257959a 4170raw_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
d12157ef 4171{
1d78814d
MR
4172#if defined(arch_atomic64_cmpxchg_acquire)
4173 return arch_atomic64_cmpxchg_acquire(v, old, new);
4174#elif defined(arch_atomic64_cmpxchg_relaxed)
9257959a
MR
4175 s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
4176 __atomic_acquire_fence();
4177 return ret;
9257959a 4178#elif defined(arch_atomic64_cmpxchg)
1d78814d 4179 return arch_atomic64_cmpxchg(v, old, new);
9257959a 4180#else
9257959a 4181 return raw_cmpxchg_acquire(&v->counter, old, new);
d12157ef 4182#endif
1d78814d 4183}
d12157ef 4184
ad811070
MR
4185/**
4186 * raw_atomic64_cmpxchg_release() - atomic compare and exchange with release ordering
4187 * @v: pointer to atomic64_t
4188 * @old: s64 value to compare with
4189 * @new: s64 value to assign
4190 *
4191 * If (@v == @old), atomically updates @v to @new with release ordering.
6dfee110 4192 * Otherwise, @v is not modified and relaxed ordering is provided.
ad811070
MR
4193 *
4194 * Safe to use in noinstr code; prefer atomic64_cmpxchg_release() elsewhere.
4195 *
4196 * Return: The original value of @v.
4197 */
d12157ef 4198static __always_inline s64
9257959a 4199raw_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
d12157ef 4200{
1d78814d
MR
4201#if defined(arch_atomic64_cmpxchg_release)
4202 return arch_atomic64_cmpxchg_release(v, old, new);
4203#elif defined(arch_atomic64_cmpxchg_relaxed)
9257959a
MR
4204 __atomic_release_fence();
4205 return arch_atomic64_cmpxchg_relaxed(v, old, new);
9257959a 4206#elif defined(arch_atomic64_cmpxchg)
1d78814d 4207 return arch_atomic64_cmpxchg(v, old, new);
9257959a 4208#else
9257959a 4209 return raw_cmpxchg_release(&v->counter, old, new);
37f8173d 4210#endif
1d78814d 4211}
37f8173d 4212
ad811070
MR
4213/**
4214 * raw_atomic64_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
4215 * @v: pointer to atomic64_t
4216 * @old: s64 value to compare with
4217 * @new: s64 value to assign
4218 *
4219 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
6dfee110 4220 * Otherwise, @v is not modified and relaxed ordering is provided.
ad811070
MR
4221 *
4222 * Safe to use in noinstr code; prefer atomic64_cmpxchg_relaxed() elsewhere.
4223 *
4224 * Return: The original value of @v.
4225 */
37f8173d 4226static __always_inline s64
9257959a 4227raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
37f8173d 4228{
1d78814d
MR
4229#if defined(arch_atomic64_cmpxchg_relaxed)
4230 return arch_atomic64_cmpxchg_relaxed(v, old, new);
4231#elif defined(arch_atomic64_cmpxchg)
4232 return arch_atomic64_cmpxchg(v, old, new);
4233#else
9257959a 4234 return raw_cmpxchg_relaxed(&v->counter, old, new);
37f8173d 4235#endif
1d78814d 4236}
37f8173d 4237
ad811070
MR
4238/**
4239 * raw_atomic64_try_cmpxchg() - atomic compare and exchange with full ordering
4240 * @v: pointer to atomic64_t
4241 * @old: pointer to s64 value to compare with
4242 * @new: s64 value to assign
4243 *
4244 * If (@v == @old), atomically updates @v to @new with full ordering.
6dfee110
MR
4245 * Otherwise, @v is not modified, @old is updated to the current value of @v,
4246 * and relaxed ordering is provided.
ad811070
MR
4247 *
4248 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg() elsewhere.
4249 *
 4250 * Return: @true if the exchange occurred, @false otherwise.
4251 */
9257959a
MR
4252static __always_inline bool
4253raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
37f8173d 4254{
1d78814d
MR
4255#if defined(arch_atomic64_try_cmpxchg)
4256 return arch_atomic64_try_cmpxchg(v, old, new);
4257#elif defined(arch_atomic64_try_cmpxchg_relaxed)
9257959a 4258 bool ret;
37f8173d 4259 __atomic_pre_full_fence();
9257959a 4260 ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
37f8173d
PZ
4261 __atomic_post_full_fence();
4262 return ret;
9257959a 4263#else
37f8173d 4264 s64 r, o = *old;
9257959a 4265 r = raw_atomic64_cmpxchg(v, o, new);
37f8173d
PZ
4266 if (unlikely(r != o))
4267 *old = r;
4268 return likely(r == o);
37f8173d 4269#endif
1d78814d 4270}
37f8173d 4271
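/*
 * Editor's illustrative sketch, not generated output: try_cmpxchg() updates
 * *@old with the current value on failure, so a compare-and-swap loop does
 * not need to re-read @v by hand. The helper below is hypothetical; it
 * raises the stored maximum to at least @sample.
 */
static __always_inline void
example_atomic64_track_max(atomic64_t *max_seen, s64 sample)
{
	s64 cur = raw_atomic64_read(max_seen);

	do {
		if (cur >= sample)
			return;	/* current maximum already covers @sample */
	} while (!raw_atomic64_try_cmpxchg(max_seen, &cur, sample));
}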
ad811070
MR
4272/**
4273 * raw_atomic64_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
4274 * @v: pointer to atomic64_t
4275 * @old: pointer to s64 value to compare with
4276 * @new: s64 value to assign
4277 *
4278 * If (@v == @old), atomically updates @v to @new with acquire ordering.
6dfee110
MR
4279 * Otherwise, @v is not modified, @old is updated to the current value of @v,
4280 * and relaxed ordering is provided.
ad811070
MR
4281 *
4282 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_acquire() elsewhere.
4283 *
 4284 * Return: @true if the exchange occurred, @false otherwise.
4285 */
9257959a
MR
4286static __always_inline bool
4287raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
4288{
1d78814d
MR
4289#if defined(arch_atomic64_try_cmpxchg_acquire)
4290 return arch_atomic64_try_cmpxchg_acquire(v, old, new);
4291#elif defined(arch_atomic64_try_cmpxchg_relaxed)
9257959a
MR
4292 bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
4293 __atomic_acquire_fence();
4294 return ret;
9257959a 4295#elif defined(arch_atomic64_try_cmpxchg)
1d78814d 4296 return arch_atomic64_try_cmpxchg(v, old, new);
9257959a 4297#else
37f8173d 4298 s64 r, o = *old;
9257959a 4299 r = raw_atomic64_cmpxchg_acquire(v, o, new);
37f8173d
PZ
4300 if (unlikely(r != o))
4301 *old = r;
4302 return likely(r == o);
37f8173d 4303#endif
1d78814d 4304}
37f8173d 4305
ad811070
MR
4306/**
4307 * raw_atomic64_try_cmpxchg_release() - atomic compare and exchange with release ordering
4308 * @v: pointer to atomic64_t
4309 * @old: pointer to s64 value to compare with
4310 * @new: s64 value to assign
4311 *
4312 * If (@v == @old), atomically updates @v to @new with release ordering.
6dfee110
MR
4313 * Otherwise, @v is not modified, @old is updated to the current value of @v,
4314 * and relaxed ordering is provided.
ad811070
MR
4315 *
4316 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_release() elsewhere.
4317 *
 4318 * Return: @true if the exchange occurred, @false otherwise.
4319 */
9257959a
MR
4320static __always_inline bool
4321raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
4322{
1d78814d
MR
4323#if defined(arch_atomic64_try_cmpxchg_release)
4324 return arch_atomic64_try_cmpxchg_release(v, old, new);
4325#elif defined(arch_atomic64_try_cmpxchg_relaxed)
9257959a
MR
4326 __atomic_release_fence();
4327 return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
9257959a 4328#elif defined(arch_atomic64_try_cmpxchg)
1d78814d 4329 return arch_atomic64_try_cmpxchg(v, old, new);
9257959a 4330#else
37f8173d 4331 s64 r, o = *old;
9257959a 4332 r = raw_atomic64_cmpxchg_release(v, o, new);
37f8173d
PZ
4333 if (unlikely(r != o))
4334 *old = r;
4335 return likely(r == o);
37f8173d 4336#endif
1d78814d 4337}
37f8173d 4338
ad811070
MR
4339/**
4340 * raw_atomic64_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
4341 * @v: pointer to atomic64_t
4342 * @old: pointer to s64 value to compare with
4343 * @new: s64 value to assign
4344 *
4345 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
6dfee110
MR
4346 * Otherwise, @v is not modified, @old is updated to the current value of @v,
4347 * and relaxed ordering is provided.
ad811070
MR
4348 *
4349 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_relaxed() elsewhere.
4350 *
 4351 * Return: @true if the exchange occurred, @false otherwise.
4352 */
37f8173d 4353static __always_inline bool
9257959a 4354raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
37f8173d 4355{
1d78814d
MR
4356#if defined(arch_atomic64_try_cmpxchg_relaxed)
4357 return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
4358#elif defined(arch_atomic64_try_cmpxchg)
4359 return arch_atomic64_try_cmpxchg(v, old, new);
4360#else
37f8173d 4361 s64 r, o = *old;
9257959a 4362 r = raw_atomic64_cmpxchg_relaxed(v, o, new);
37f8173d
PZ
4363 if (unlikely(r != o))
4364 *old = r;
4365 return likely(r == o);
37f8173d 4366#endif
1d78814d 4367}
37f8173d 4368
ad811070
MR
4369/**
4370 * raw_atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
 4371 * @i: s64 value to subtract
4372 * @v: pointer to atomic64_t
4373 *
4374 * Atomically updates @v to (@v - @i) with full ordering.
4375 *
4376 * Safe to use in noinstr code; prefer atomic64_sub_and_test() elsewhere.
4377 *
4378 * Return: @true if the resulting value of @v is zero, @false otherwise.
4379 */
37f8173d 4380static __always_inline bool
9257959a 4381raw_atomic64_sub_and_test(s64 i, atomic64_t *v)
37f8173d 4382{
1d78814d
MR
4383#if defined(arch_atomic64_sub_and_test)
4384 return arch_atomic64_sub_and_test(i, v);
4385#else
9257959a 4386 return raw_atomic64_sub_return(i, v) == 0;
37f8173d 4387#endif
1d78814d 4388}
37f8173d 4389
ad811070
MR
4390/**
4391 * raw_atomic64_dec_and_test() - atomic decrement and test if zero with full ordering
4392 * @v: pointer to atomic64_t
4393 *
4394 * Atomically updates @v to (@v - 1) with full ordering.
4395 *
4396 * Safe to use in noinstr code; prefer atomic64_dec_and_test() elsewhere.
4397 *
4398 * Return: @true if the resulting value of @v is zero, @false otherwise.
4399 */
37f8173d 4400static __always_inline bool
9257959a 4401raw_atomic64_dec_and_test(atomic64_t *v)
37f8173d 4402{
1d78814d
MR
4403#if defined(arch_atomic64_dec_and_test)
4404 return arch_atomic64_dec_and_test(v);
4405#else
9257959a 4406 return raw_atomic64_dec_return(v) == 0;
37f8173d 4407#endif
1d78814d 4408}
37f8173d 4409
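/*
 * Editor's illustrative sketch, not generated output: dec_and_test() is the
 * classic building block for dropping the last reference. The helper and
 * its @release callback are hypothetical; real code should normally prefer
 * refcount_t over open-coding this.
 */
static __always_inline void
example_put_ref(atomic64_t *refs, void (*release)(atomic64_t *refs))
{
	/* Only the caller that brings the count to zero runs @release. */
	if (raw_atomic64_dec_and_test(refs))
		release(refs);
}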
ad811070
MR
4410/**
4411 * raw_atomic64_inc_and_test() - atomic increment and test if zero with full ordering
4412 * @v: pointer to atomic64_t
4413 *
4414 * Atomically updates @v to (@v + 1) with full ordering.
4415 *
4416 * Safe to use in noinstr code; prefer atomic64_inc_and_test() elsewhere.
4417 *
4418 * Return: @true if the resulting value of @v is zero, @false otherwise.
4419 */
37f8173d 4420static __always_inline bool
9257959a 4421raw_atomic64_inc_and_test(atomic64_t *v)
37f8173d 4422{
1d78814d
MR
4423#if defined(arch_atomic64_inc_and_test)
4424 return arch_atomic64_inc_and_test(v);
4425#else
9257959a 4426 return raw_atomic64_inc_return(v) == 0;
37f8173d 4427#endif
1d78814d 4428}
37f8173d 4429
ad811070
MR
4430/**
4431 * raw_atomic64_add_negative() - atomic add and test if negative with full ordering
4432 * @i: s64 value to add
4433 * @v: pointer to atomic64_t
4434 *
4435 * Atomically updates @v to (@v + @i) with full ordering.
4436 *
4437 * Safe to use in noinstr code; prefer atomic64_add_negative() elsewhere.
4438 *
4439 * Return: @true if the resulting value of @v is negative, @false otherwise.
4440 */
37f8173d 4441static __always_inline bool
9257959a 4442raw_atomic64_add_negative(s64 i, atomic64_t *v)
37f8173d 4443{
1d78814d
MR
4444#if defined(arch_atomic64_add_negative)
4445 return arch_atomic64_add_negative(i, v);
4446#elif defined(arch_atomic64_add_negative_relaxed)
9257959a
MR
4447 bool ret;
4448 __atomic_pre_full_fence();
4449 ret = arch_atomic64_add_negative_relaxed(i, v);
4450 __atomic_post_full_fence();
4451 return ret;
9257959a 4452#else
9257959a 4453 return raw_atomic64_add_return(i, v) < 0;
e5ab9eff 4454#endif
1d78814d 4455}
e5ab9eff 4456
ad811070
MR
4457/**
4458 * raw_atomic64_add_negative_acquire() - atomic add and test if negative with acquire ordering
4459 * @i: s64 value to add
4460 * @v: pointer to atomic64_t
4461 *
4462 * Atomically updates @v to (@v + @i) with acquire ordering.
4463 *
4464 * Safe to use in noinstr code; prefer atomic64_add_negative_acquire() elsewhere.
4465 *
4466 * Return: @true if the resulting value of @v is negative, @false otherwise.
4467 */
e5ab9eff 4468static __always_inline bool
9257959a 4469raw_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
e5ab9eff 4470{
1d78814d
MR
4471#if defined(arch_atomic64_add_negative_acquire)
4472 return arch_atomic64_add_negative_acquire(i, v);
4473#elif defined(arch_atomic64_add_negative_relaxed)
9257959a
MR
4474 bool ret = arch_atomic64_add_negative_relaxed(i, v);
4475 __atomic_acquire_fence();
4476 return ret;
9257959a 4477#elif defined(arch_atomic64_add_negative)
1d78814d 4478 return arch_atomic64_add_negative(i, v);
9257959a 4479#else
9257959a 4480 return raw_atomic64_add_return_acquire(i, v) < 0;
e5ab9eff 4481#endif
1d78814d 4482}
e5ab9eff 4483
ad811070
MR
4484/**
4485 * raw_atomic64_add_negative_release() - atomic add and test if negative with release ordering
4486 * @i: s64 value to add
4487 * @v: pointer to atomic64_t
4488 *
4489 * Atomically updates @v to (@v + @i) with release ordering.
4490 *
4491 * Safe to use in noinstr code; prefer atomic64_add_negative_release() elsewhere.
4492 *
4493 * Return: @true if the resulting value of @v is negative, @false otherwise.
4494 */
e5ab9eff 4495static __always_inline bool
9257959a 4496raw_atomic64_add_negative_release(s64 i, atomic64_t *v)
e5ab9eff 4497{
1d78814d
MR
4498#if defined(arch_atomic64_add_negative_release)
4499 return arch_atomic64_add_negative_release(i, v);
4500#elif defined(arch_atomic64_add_negative_relaxed)
9257959a
MR
4501 __atomic_release_fence();
4502 return arch_atomic64_add_negative_relaxed(i, v);
9257959a 4503#elif defined(arch_atomic64_add_negative)
1d78814d 4504 return arch_atomic64_add_negative(i, v);
9257959a 4505#else
9257959a 4506 return raw_atomic64_add_return_release(i, v) < 0;
e5ab9eff 4507#endif
1d78814d 4508}
e5ab9eff 4509
ad811070
MR
4510/**
4511 * raw_atomic64_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
4512 * @i: s64 value to add
4513 * @v: pointer to atomic64_t
4514 *
4515 * Atomically updates @v to (@v + @i) with relaxed ordering.
4516 *
4517 * Safe to use in noinstr code; prefer atomic64_add_negative_relaxed() elsewhere.
4518 *
4519 * Return: @true if the resulting value of @v is negative, @false otherwise.
4520 */
e5ab9eff 4521static __always_inline bool
9257959a 4522raw_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
e5ab9eff 4523{
1d78814d
MR
4524#if defined(arch_atomic64_add_negative_relaxed)
4525 return arch_atomic64_add_negative_relaxed(i, v);
4526#elif defined(arch_atomic64_add_negative)
4527 return arch_atomic64_add_negative(i, v);
4528#else
9257959a 4529 return raw_atomic64_add_return_relaxed(i, v) < 0;
e5ab9eff 4530#endif
1d78814d 4531}
e5ab9eff 4532
ad811070
MR
4533/**
4534 * raw_atomic64_fetch_add_unless() - atomic add unless value with full ordering
4535 * @v: pointer to atomic64_t
4536 * @a: s64 value to add
4537 * @u: s64 value to compare with
4538 *
4539 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
6dfee110 4540 * Otherwise, @v is not modified and relaxed ordering is provided.
ad811070
MR
4541 *
4542 * Safe to use in noinstr code; prefer atomic64_fetch_add_unless() elsewhere.
4543 *
4544 * Return: The original value of @v.
4545 */
37f8173d 4546static __always_inline s64
9257959a 4547raw_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
37f8173d 4548{
1d78814d
MR
4549#if defined(arch_atomic64_fetch_add_unless)
4550 return arch_atomic64_fetch_add_unless(v, a, u);
4551#else
9257959a 4552 s64 c = raw_atomic64_read(v);
37f8173d
PZ
4553
4554 do {
4555 if (unlikely(c == u))
4556 break;
9257959a 4557 } while (!raw_atomic64_try_cmpxchg(v, &c, c + a));
37f8173d
PZ
4558
4559 return c;
37f8173d 4560#endif
1d78814d 4561}
37f8173d 4562
ad811070
MR
4563/**
4564 * raw_atomic64_add_unless() - atomic add unless value with full ordering
4565 * @v: pointer to atomic64_t
4566 * @a: s64 value to add
4567 * @u: s64 value to compare with
4568 *
4569 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
6dfee110 4570 * Otherwise, @v is not modified and relaxed ordering is provided.
ad811070
MR
4571 *
4572 * Safe to use in noinstr code; prefer atomic64_add_unless() elsewhere.
4573 *
4574 * Return: @true if @v was updated, @false otherwise.
4575 */
37f8173d 4576static __always_inline bool
9257959a 4577raw_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
37f8173d 4578{
1d78814d
MR
4579#if defined(arch_atomic64_add_unless)
4580 return arch_atomic64_add_unless(v, a, u);
4581#else
9257959a 4582 return raw_atomic64_fetch_add_unless(v, a, u) != u;
37f8173d 4583#endif
1d78814d 4584}
37f8173d 4585
ad811070
MR
4586/**
4587 * raw_atomic64_inc_not_zero() - atomic increment unless zero with full ordering
4588 * @v: pointer to atomic64_t
4589 *
4590 * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
6dfee110 4591 * Otherwise, @v is not modified and relaxed ordering is provided.
ad811070
MR
4592 *
4593 * Safe to use in noinstr code; prefer atomic64_inc_not_zero() elsewhere.
4594 *
4595 * Return: @true if @v was updated, @false otherwise.
4596 */
37f8173d 4597static __always_inline bool
9257959a 4598raw_atomic64_inc_not_zero(atomic64_t *v)
37f8173d 4599{
1d78814d
MR
4600#if defined(arch_atomic64_inc_not_zero)
4601 return arch_atomic64_inc_not_zero(v);
4602#else
9257959a 4603 return raw_atomic64_add_unless(v, 1, 0);
37f8173d 4604#endif
1d78814d 4605}
37f8173d 4606
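/*
 * Editor's illustrative sketch, not generated output: inc_not_zero() is the
 * lookup-side counterpart of dec_and_test(); it only takes a new reference
 * while at least one is still held. The helper below is hypothetical.
 */
static __always_inline bool
example_tryget_ref(atomic64_t *refs)
{
	/* Fails once the count has already dropped to zero. */
	return raw_atomic64_inc_not_zero(refs);
}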
ad811070
MR
4607/**
4608 * raw_atomic64_inc_unless_negative() - atomic increment unless negative with full ordering
4609 * @v: pointer to atomic64_t
4610 *
4611 * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
6dfee110 4612 * Otherwise, @v is not modified and relaxed ordering is provided.
ad811070
MR
4613 *
4614 * Safe to use in noinstr code; prefer atomic64_inc_unless_negative() elsewhere.
4615 *
4616 * Return: @true if @v was updated, @false otherwise.
4617 */
37f8173d 4618static __always_inline bool
9257959a 4619raw_atomic64_inc_unless_negative(atomic64_t *v)
37f8173d 4620{
1d78814d
MR
4621#if defined(arch_atomic64_inc_unless_negative)
4622 return arch_atomic64_inc_unless_negative(v);
4623#else
9257959a 4624 s64 c = raw_atomic64_read(v);
37f8173d
PZ
4625
4626 do {
4627 if (unlikely(c < 0))
4628 return false;
9257959a 4629 } while (!raw_atomic64_try_cmpxchg(v, &c, c + 1));
37f8173d
PZ
4630
4631 return true;
37f8173d 4632#endif
1d78814d 4633}
37f8173d 4634
ad811070
MR
4635/**
4636 * raw_atomic64_dec_unless_positive() - atomic decrement unless positive with full ordering
4637 * @v: pointer to atomic64_t
4638 *
4639 * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
6dfee110 4640 * Otherwise, @v is not modified and relaxed ordering is provided.
ad811070
MR
4641 *
4642 * Safe to use in noinstr code; prefer atomic64_dec_unless_positive() elsewhere.
4643 *
4644 * Return: @true if @v was updated, @false otherwise.
4645 */
37f8173d 4646static __always_inline bool
9257959a 4647raw_atomic64_dec_unless_positive(atomic64_t *v)
37f8173d 4648{
1d78814d
MR
4649#if defined(arch_atomic64_dec_unless_positive)
4650 return arch_atomic64_dec_unless_positive(v);
4651#else
9257959a 4652 s64 c = raw_atomic64_read(v);
37f8173d
PZ
4653
4654 do {
4655 if (unlikely(c > 0))
4656 return false;
9257959a 4657 } while (!raw_atomic64_try_cmpxchg(v, &c, c - 1));
37f8173d
PZ
4658
4659 return true;
37f8173d 4660#endif
1d78814d 4661}
37f8173d 4662
ad811070
MR
4663/**
4664 * raw_atomic64_dec_if_positive() - atomic decrement if positive with full ordering
4665 * @v: pointer to atomic64_t
4666 *
4667 * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
6dfee110 4668 * Otherwise, @v is not modified and relaxed ordering is provided.
ad811070
MR
4669 *
4670 * Safe to use in noinstr code; prefer atomic64_dec_if_positive() elsewhere.
4671 *
b33eb50a 4672 * Return: The old value of @v minus 1, regardless of whether @v was updated.
ad811070 4673 */
37f8173d 4674static __always_inline s64
9257959a 4675raw_atomic64_dec_if_positive(atomic64_t *v)
37f8173d 4676{
1d78814d
MR
4677#if defined(arch_atomic64_dec_if_positive)
4678 return arch_atomic64_dec_if_positive(v);
4679#else
9257959a 4680 s64 dec, c = raw_atomic64_read(v);
37f8173d
PZ
4681
4682 do {
4683 dec = c - 1;
4684 if (unlikely(dec < 0))
4685 break;
9257959a 4686 } while (!raw_atomic64_try_cmpxchg(v, &c, dec));
37f8173d
PZ
4687
4688 return dec;
37f8173d 4689#endif
1d78814d 4690}
37f8173d
PZ
4691
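/*
 * Editor's illustrative sketch, not generated output: dec_if_positive()
 * returns the decremented value even when it refuses to store it, so a
 * negative result means nothing was taken. The helper below is hypothetical
 * and models a try-acquire on a counting resource.
 */
static __always_inline bool
example_try_take_token(atomic64_t *tokens)
{
	/* Consumes one token only if at least one was available. */
	return raw_atomic64_dec_if_positive(tokens) >= 0;
}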
4692#endif /* _LINUX_ATOMIC_FALLBACK_H */
6dfee110 4693// 14850c0b0db20c62fdc78ccd1d42b98b88d76331