// SPDX-License-Identifier: GPL-2.0

// Generated by scripts/atomic/gen-atomic-fallback.sh
// DO NOT MODIFY THIS FILE DIRECTLY

#ifndef _LINUX_ATOMIC_FALLBACK_H
#define _LINUX_ATOMIC_FALLBACK_H

#include <linux/compiler.h>

#if defined(arch_xchg)
#define raw_xchg arch_xchg
#elif defined(arch_xchg_relaxed)
#define raw_xchg(...) \
	__atomic_op_fence(arch_xchg, __VA_ARGS__)
#else
extern void raw_xchg_not_implemented(void);
#define raw_xchg(...) raw_xchg_not_implemented()
#endif

#if defined(arch_xchg_acquire)
#define raw_xchg_acquire arch_xchg_acquire
#elif defined(arch_xchg_relaxed)
#define raw_xchg_acquire(...) \
	__atomic_op_acquire(arch_xchg, __VA_ARGS__)
#elif defined(arch_xchg)
#define raw_xchg_acquire arch_xchg
#else
extern void raw_xchg_acquire_not_implemented(void);
#define raw_xchg_acquire(...) raw_xchg_acquire_not_implemented()
#endif

#if defined(arch_xchg_release)
#define raw_xchg_release arch_xchg_release
#elif defined(arch_xchg_relaxed)
#define raw_xchg_release(...) \
	__atomic_op_release(arch_xchg, __VA_ARGS__)
#elif defined(arch_xchg)
#define raw_xchg_release arch_xchg
#else
extern void raw_xchg_release_not_implemented(void);
#define raw_xchg_release(...) raw_xchg_release_not_implemented()
#endif

#if defined(arch_xchg_relaxed)
#define raw_xchg_relaxed arch_xchg_relaxed
#elif defined(arch_xchg)
#define raw_xchg_relaxed arch_xchg
#else
extern void raw_xchg_relaxed_not_implemented(void);
#define raw_xchg_relaxed(...) raw_xchg_relaxed_not_implemented()
#endif
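/*
 * Illustrative sketch (hypothetical helper, not part of the generated
 * API): raw_xchg() returns the previous value, so a one-shot "claim" of
 * a flag word can be expressed as a single swap.
 */
static __always_inline bool example_claim_once(int *flag)
{
	/* Full ordering: the claimer observes all writes made before the claim. */
	return raw_xchg(flag, 1) == 0;
}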
#if defined(arch_cmpxchg)
#define raw_cmpxchg arch_cmpxchg
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg(...) \
	__atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
#else
extern void raw_cmpxchg_not_implemented(void);
#define raw_cmpxchg(...) raw_cmpxchg_not_implemented()
#endif

#if defined(arch_cmpxchg_acquire)
#define raw_cmpxchg_acquire arch_cmpxchg_acquire
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_acquire(...) \
	__atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_acquire arch_cmpxchg
#else
extern void raw_cmpxchg_acquire_not_implemented(void);
#define raw_cmpxchg_acquire(...) raw_cmpxchg_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg_release)
#define raw_cmpxchg_release arch_cmpxchg_release
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_release(...) \
	__atomic_op_release(arch_cmpxchg, __VA_ARGS__)
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_release arch_cmpxchg
#else
extern void raw_cmpxchg_release_not_implemented(void);
#define raw_cmpxchg_release(...) raw_cmpxchg_release_not_implemented()
#endif

#if defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_relaxed arch_cmpxchg_relaxed
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_relaxed arch_cmpxchg
#else
extern void raw_cmpxchg_relaxed_not_implemented(void);
#define raw_cmpxchg_relaxed(...) raw_cmpxchg_relaxed_not_implemented()
#endif
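/*
 * Illustrative sketch (hypothetical helper, not part of the generated
 * API): cmpxchg() returns the value previously held in *ptr, so success
 * is detected by comparing the return value with the expected old value.
 */
static __always_inline bool example_cmpxchg_once(unsigned long *ptr,
						 unsigned long old,
						 unsigned long new)
{
	return raw_cmpxchg(ptr, old, new) == old;
}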
#if defined(arch_cmpxchg64)
#define raw_cmpxchg64 arch_cmpxchg64
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64(...) \
	__atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
#else
extern void raw_cmpxchg64_not_implemented(void);
#define raw_cmpxchg64(...) raw_cmpxchg64_not_implemented()
#endif

#if defined(arch_cmpxchg64_acquire)
#define raw_cmpxchg64_acquire arch_cmpxchg64_acquire
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_acquire(...) \
	__atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_acquire arch_cmpxchg64
#else
extern void raw_cmpxchg64_acquire_not_implemented(void);
#define raw_cmpxchg64_acquire(...) raw_cmpxchg64_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg64_release)
#define raw_cmpxchg64_release arch_cmpxchg64_release
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_release(...) \
	__atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_release arch_cmpxchg64
#else
extern void raw_cmpxchg64_release_not_implemented(void);
#define raw_cmpxchg64_release(...) raw_cmpxchg64_release_not_implemented()
#endif

#if defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_relaxed arch_cmpxchg64_relaxed
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_relaxed arch_cmpxchg64
#else
extern void raw_cmpxchg64_relaxed_not_implemented(void);
#define raw_cmpxchg64_relaxed(...) raw_cmpxchg64_relaxed_not_implemented()
#endif

#if defined(arch_cmpxchg128)
#define raw_cmpxchg128 arch_cmpxchg128
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128(...) \
	__atomic_op_fence(arch_cmpxchg128, __VA_ARGS__)
#else
extern void raw_cmpxchg128_not_implemented(void);
#define raw_cmpxchg128(...) raw_cmpxchg128_not_implemented()
#endif

#if defined(arch_cmpxchg128_acquire)
#define raw_cmpxchg128_acquire arch_cmpxchg128_acquire
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_acquire(...) \
	__atomic_op_acquire(arch_cmpxchg128, __VA_ARGS__)
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_acquire arch_cmpxchg128
#else
extern void raw_cmpxchg128_acquire_not_implemented(void);
#define raw_cmpxchg128_acquire(...) raw_cmpxchg128_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg128_release)
#define raw_cmpxchg128_release arch_cmpxchg128_release
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_release(...) \
	__atomic_op_release(arch_cmpxchg128, __VA_ARGS__)
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_release arch_cmpxchg128
#else
extern void raw_cmpxchg128_release_not_implemented(void);
#define raw_cmpxchg128_release(...) raw_cmpxchg128_release_not_implemented()
#endif

#if defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_relaxed arch_cmpxchg128_relaxed
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_relaxed arch_cmpxchg128
#else
extern void raw_cmpxchg128_relaxed_not_implemented(void);
#define raw_cmpxchg128_relaxed(...) raw_cmpxchg128_relaxed_not_implemented()
#endif
#if defined(arch_try_cmpxchg)
#define raw_try_cmpxchg arch_try_cmpxchg
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg(...) \
	__atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
#else
#define raw_try_cmpxchg(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif
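/*
 * Illustrative sketch (hypothetical helper, not part of the generated
 * API): a typical try_cmpxchg() retry loop. On failure, raw_try_cmpxchg()
 * refreshes "old" with the current value, so the loop does not need to
 * reload it by hand on each iteration.
 */
static __always_inline bool example_inc_unless_negative(int *ptr)
{
	int old = *ptr;

	do {
		if (old < 0)
			return false;
	} while (!raw_try_cmpxchg(ptr, &old, old + 1));

	return true;
}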
#if defined(arch_try_cmpxchg_acquire)
#define raw_try_cmpxchg_acquire arch_try_cmpxchg_acquire
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_acquire(...) \
	__atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_acquire arch_try_cmpxchg
#else
#define raw_try_cmpxchg_acquire(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_acquire((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg_release)
#define raw_try_cmpxchg_release arch_try_cmpxchg_release
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_release(...) \
	__atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_release arch_try_cmpxchg
#else
#define raw_try_cmpxchg_release(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_release((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_relaxed arch_try_cmpxchg_relaxed
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_relaxed arch_try_cmpxchg
#else
#define raw_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_relaxed((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64 arch_try_cmpxchg64
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64(...) \
	__atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__)
#else
#define raw_try_cmpxchg64(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_acquire)
#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64_acquire
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_acquire(...) \
	__atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__)
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_acquire(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_acquire((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_release)
#define raw_try_cmpxchg64_release arch_try_cmpxchg64_release
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_release(...) \
	__atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__)
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_release arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_release(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_release((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64_relaxed
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_relaxed((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128 arch_try_cmpxchg128
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128(...) \
	__atomic_op_fence(arch_try_cmpxchg128, __VA_ARGS__)
#else
#define raw_try_cmpxchg128(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_acquire)
#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128_acquire
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_acquire(...) \
	__atomic_op_acquire(arch_try_cmpxchg128, __VA_ARGS__)
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_acquire(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_acquire((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_release)
#define raw_try_cmpxchg128_release arch_try_cmpxchg128_release
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_release(...) \
	__atomic_op_release(arch_try_cmpxchg128, __VA_ARGS__)
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_release arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_release(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_release((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128_relaxed
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_relaxed(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_relaxed((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif
#define raw_cmpxchg_local arch_cmpxchg_local

#ifdef arch_try_cmpxchg_local
#define raw_try_cmpxchg_local arch_try_cmpxchg_local
#else
#define raw_try_cmpxchg_local(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_local((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_cmpxchg64_local arch_cmpxchg64_local

#ifdef arch_try_cmpxchg64_local
#define raw_try_cmpxchg64_local arch_try_cmpxchg64_local
#else
#define raw_try_cmpxchg64_local(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_local((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_cmpxchg128_local arch_cmpxchg128_local

#ifdef arch_try_cmpxchg128_local
#define raw_try_cmpxchg128_local arch_try_cmpxchg128_local
#else
#define raw_try_cmpxchg128_local(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_local((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_sync_cmpxchg arch_sync_cmpxchg

#ifdef arch_sync_try_cmpxchg
#define raw_sync_try_cmpxchg arch_sync_try_cmpxchg
#else
#define raw_sync_try_cmpxchg(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_sync_cmpxchg((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif
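/*
 * Illustrative sketch (hypothetical helper, not part of the generated
 * API): the _local forms are only atomic with respect to the current CPU
 * (e.g. against interrupts), which makes them cheaper for per-CPU state.
 */
static __always_inline bool example_local_advance(unsigned long *seq,
						  unsigned long expect)
{
	unsigned long old = expect;

	/* Succeeds only if nothing on this CPU moved the sequence meanwhile. */
	return raw_try_cmpxchg_local(seq, &old, expect + 1);
}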
/**
 * raw_atomic_read() - atomic load with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically loads the value of @v with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_read() elsewhere.
 *
 * Return: The value loaded from @v.
 */
static __always_inline int
raw_atomic_read(const atomic_t *v)
{
	return arch_atomic_read(v);
}

/**
 * raw_atomic_read_acquire() - atomic load with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically loads the value of @v with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_read_acquire() elsewhere.
 *
 * Return: The value loaded from @v.
 */
static __always_inline int
raw_atomic_read_acquire(const atomic_t *v)
{
#if defined(arch_atomic_read_acquire)
	return arch_atomic_read_acquire(v);
#else
	int ret;

	if (__native_word(atomic_t)) {
		ret = smp_load_acquire(&(v)->counter);
	} else {
		ret = raw_atomic_read(v);
		__atomic_acquire_fence();
	}

	return ret;
#endif
}

/**
 * raw_atomic_set() - atomic set with relaxed ordering
 * @v: pointer to atomic_t
 * @i: int value to assign
 *
 * Atomically sets @v to @i with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_set() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_set(atomic_t *v, int i)
{
	arch_atomic_set(v, i);
}

/**
 * raw_atomic_set_release() - atomic set with release ordering
 * @v: pointer to atomic_t
 * @i: int value to assign
 *
 * Atomically sets @v to @i with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_set_release() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_set_release(atomic_t *v, int i)
{
#if defined(arch_atomic_set_release)
	arch_atomic_set_release(v, i);
#else
	if (__native_word(atomic_t)) {
		smp_store_release(&(v)->counter, i);
	} else {
		__atomic_release_fence();
		raw_atomic_set(v, i);
	}
#endif
}
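/*
 * Illustrative sketch (hypothetical helper, not part of the generated
 * API): set_release() pairs with read_acquire() to publish data. Writes
 * made before the release-store are visible to any reader that observes
 * the flag via an acquire-load.
 */
static __always_inline void example_publish(int *data, atomic_t *ready, int val)
{
	*data = val;				/* A: plain write */
	raw_atomic_set_release(ready, 1);	/* B: orders A before B */
}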
/**
 * raw_atomic_add() - atomic add with relaxed ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_add(int i, atomic_t *v)
{
	arch_atomic_add(i, v);
}

/**
 * raw_atomic_add_return() - atomic add with full ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_add_return(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return)
	return arch_atomic_add_return(i, v);
#elif defined(arch_atomic_add_return_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_add_return_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_add_return"
#endif
}

/**
 * raw_atomic_add_return_acquire() - atomic add with acquire ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_add_return_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return_acquire)
	return arch_atomic_add_return_acquire(i, v);
#elif defined(arch_atomic_add_return_relaxed)
	int ret = arch_atomic_add_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_add_return)
	return arch_atomic_add_return(i, v);
#else
#error "Unable to define raw_atomic_add_return_acquire"
#endif
}

/**
 * raw_atomic_add_return_release() - atomic add with release ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_add_return_release(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return_release)
	return arch_atomic_add_return_release(i, v);
#elif defined(arch_atomic_add_return_relaxed)
	__atomic_release_fence();
	return arch_atomic_add_return_relaxed(i, v);
#elif defined(arch_atomic_add_return)
	return arch_atomic_add_return(i, v);
#else
#error "Unable to define raw_atomic_add_return_release"
#endif
}

/**
 * raw_atomic_add_return_relaxed() - atomic add with relaxed ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_add_return_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return_relaxed)
	return arch_atomic_add_return_relaxed(i, v);
#elif defined(arch_atomic_add_return)
	return arch_atomic_add_return(i, v);
#else
#error "Unable to define raw_atomic_add_return_relaxed"
#endif
}
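/*
 * Illustrative sketch (hypothetical helper, not part of the generated
 * API): add_return() yields the updated value, which makes it a natural
 * fit for handing out unique, monotonically increasing ids.
 */
static __always_inline int example_next_id(atomic_t *counter)
{
	return raw_atomic_add_return(1, counter);
}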
/**
 * raw_atomic_fetch_add() - atomic add with full ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add)
	return arch_atomic_fetch_add(i, v);
#elif defined(arch_atomic_fetch_add_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_add_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_fetch_add"
#endif
}

/**
 * raw_atomic_fetch_add_acquire() - atomic add with acquire ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add_acquire)
	return arch_atomic_fetch_add_acquire(i, v);
#elif defined(arch_atomic_fetch_add_relaxed)
	int ret = arch_atomic_fetch_add_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_add)
	return arch_atomic_fetch_add(i, v);
#else
#error "Unable to define raw_atomic_fetch_add_acquire"
#endif
}

/**
 * raw_atomic_fetch_add_release() - atomic add with release ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add_release)
	return arch_atomic_fetch_add_release(i, v);
#elif defined(arch_atomic_fetch_add_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_add_relaxed(i, v);
#elif defined(arch_atomic_fetch_add)
	return arch_atomic_fetch_add(i, v);
#else
#error "Unable to define raw_atomic_fetch_add_release"
#endif
}

/**
 * raw_atomic_fetch_add_relaxed() - atomic add with relaxed ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add_relaxed)
	return arch_atomic_fetch_add_relaxed(i, v);
#elif defined(arch_atomic_fetch_add)
	return arch_atomic_fetch_add(i, v);
#else
#error "Unable to define raw_atomic_fetch_add_relaxed"
#endif
}
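/*
 * Illustrative sketch (hypothetical helper, not part of the generated
 * API): unlike add_return(), fetch_add() returns the value *before* the
 * addition, e.g. to hand out ticket numbers starting at the current count.
 */
static __always_inline int example_take_ticket(atomic_t *next)
{
	return raw_atomic_fetch_add(1, next);
}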
/**
 * raw_atomic_sub() - atomic subtract with relaxed ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_sub(int i, atomic_t *v)
{
	arch_atomic_sub(i, v);
}

/**
 * raw_atomic_sub_return() - atomic subtract with full ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return)
	return arch_atomic_sub_return(i, v);
#elif defined(arch_atomic_sub_return_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_sub_return_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_sub_return"
#endif
}

/**
 * raw_atomic_sub_return_acquire() - atomic subtract with acquire ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return_acquire)
	return arch_atomic_sub_return_acquire(i, v);
#elif defined(arch_atomic_sub_return_relaxed)
	int ret = arch_atomic_sub_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_sub_return)
	return arch_atomic_sub_return(i, v);
#else
#error "Unable to define raw_atomic_sub_return_acquire"
#endif
}

/**
 * raw_atomic_sub_return_release() - atomic subtract with release ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return_release(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return_release)
	return arch_atomic_sub_return_release(i, v);
#elif defined(arch_atomic_sub_return_relaxed)
	__atomic_release_fence();
	return arch_atomic_sub_return_relaxed(i, v);
#elif defined(arch_atomic_sub_return)
	return arch_atomic_sub_return(i, v);
#else
#error "Unable to define raw_atomic_sub_return_release"
#endif
}

/**
 * raw_atomic_sub_return_relaxed() - atomic subtract with relaxed ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return_relaxed)
	return arch_atomic_sub_return_relaxed(i, v);
#elif defined(arch_atomic_sub_return)
	return arch_atomic_sub_return(i, v);
#else
#error "Unable to define raw_atomic_sub_return_relaxed"
#endif
}

/**
 * raw_atomic_fetch_sub() - atomic subtract with full ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub)
	return arch_atomic_fetch_sub(i, v);
#elif defined(arch_atomic_fetch_sub_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_sub_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_fetch_sub"
#endif
}

/**
 * raw_atomic_fetch_sub_acquire() - atomic subtract with acquire ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub_acquire)
	return arch_atomic_fetch_sub_acquire(i, v);
#elif defined(arch_atomic_fetch_sub_relaxed)
	int ret = arch_atomic_fetch_sub_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_sub)
	return arch_atomic_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic_fetch_sub_acquire"
#endif
}

/**
 * raw_atomic_fetch_sub_release() - atomic subtract with release ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub_release)
	return arch_atomic_fetch_sub_release(i, v);
#elif defined(arch_atomic_fetch_sub_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_sub_relaxed(i, v);
#elif defined(arch_atomic_fetch_sub)
	return arch_atomic_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic_fetch_sub_release"
#endif
}

/**
 * raw_atomic_fetch_sub_relaxed() - atomic subtract with relaxed ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub_relaxed)
	return arch_atomic_fetch_sub_relaxed(i, v);
#elif defined(arch_atomic_fetch_sub)
	return arch_atomic_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic_fetch_sub_relaxed"
#endif
}
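/*
 * Illustrative sketch (hypothetical helper, not part of the generated
 * API): a refcount-style put. Release ordering on the drop makes prior
 * accesses visible before the count can reach zero; real refcount code
 * additionally issues an acquire fence before freeing.
 */
static __always_inline bool example_put(atomic_t *refs)
{
	return raw_atomic_sub_return_release(1, refs) == 0;
}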
/**
 * raw_atomic_inc() - atomic increment with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_inc(atomic_t *v)
{
#if defined(arch_atomic_inc)
	arch_atomic_inc(v);
#else
	raw_atomic_add(1, v);
#endif
}

/**
 * raw_atomic_inc_return() - atomic increment with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_inc_return(atomic_t *v)
{
#if defined(arch_atomic_inc_return)
	return arch_atomic_inc_return(v);
#elif defined(arch_atomic_inc_return_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_inc_return_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic_add_return(1, v);
#endif
}

/**
 * raw_atomic_inc_return_acquire() - atomic increment with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_inc_return_acquire(atomic_t *v)
{
#if defined(arch_atomic_inc_return_acquire)
	return arch_atomic_inc_return_acquire(v);
#elif defined(arch_atomic_inc_return_relaxed)
	int ret = arch_atomic_inc_return_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_inc_return)
	return arch_atomic_inc_return(v);
#else
	return raw_atomic_add_return_acquire(1, v);
#endif
}

/**
 * raw_atomic_inc_return_release() - atomic increment with release ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_inc_return_release(atomic_t *v)
{
#if defined(arch_atomic_inc_return_release)
	return arch_atomic_inc_return_release(v);
#elif defined(arch_atomic_inc_return_relaxed)
	__atomic_release_fence();
	return arch_atomic_inc_return_relaxed(v);
#elif defined(arch_atomic_inc_return)
	return arch_atomic_inc_return(v);
#else
	return raw_atomic_add_return_release(1, v);
#endif
}

/**
 * raw_atomic_inc_return_relaxed() - atomic increment with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_inc_return_relaxed(atomic_t *v)
{
#if defined(arch_atomic_inc_return_relaxed)
	return arch_atomic_inc_return_relaxed(v);
#elif defined(arch_atomic_inc_return)
	return arch_atomic_inc_return(v);
#else
	return raw_atomic_add_return_relaxed(1, v);
#endif
}

/**
 * raw_atomic_fetch_inc() - atomic increment with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_inc() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_inc(atomic_t *v)
{
#if defined(arch_atomic_fetch_inc)
	return arch_atomic_fetch_inc(v);
#elif defined(arch_atomic_fetch_inc_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_inc_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic_fetch_add(1, v);
#endif
}

/**
 * raw_atomic_fetch_inc_acquire() - atomic increment with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_inc_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_inc_acquire(atomic_t *v)
{
#if defined(arch_atomic_fetch_inc_acquire)
	return arch_atomic_fetch_inc_acquire(v);
#elif defined(arch_atomic_fetch_inc_relaxed)
	int ret = arch_atomic_fetch_inc_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_inc)
	return arch_atomic_fetch_inc(v);
#else
	return raw_atomic_fetch_add_acquire(1, v);
#endif
}

/**
 * raw_atomic_fetch_inc_release() - atomic increment with release ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_inc_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_inc_release(atomic_t *v)
{
#if defined(arch_atomic_fetch_inc_release)
	return arch_atomic_fetch_inc_release(v);
#elif defined(arch_atomic_fetch_inc_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_inc_relaxed(v);
#elif defined(arch_atomic_fetch_inc)
	return arch_atomic_fetch_inc(v);
#else
	return raw_atomic_fetch_add_release(1, v);
#endif
}

/**
 * raw_atomic_fetch_inc_relaxed() - atomic increment with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_inc_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_inc_relaxed(atomic_t *v)
{
#if defined(arch_atomic_fetch_inc_relaxed)
	return arch_atomic_fetch_inc_relaxed(v);
#elif defined(arch_atomic_fetch_inc)
	return arch_atomic_fetch_inc(v);
#else
	return raw_atomic_fetch_add_relaxed(1, v);
#endif
}
/**
 * raw_atomic_dec() - atomic decrement with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_dec(atomic_t *v)
{
#if defined(arch_atomic_dec)
	arch_atomic_dec(v);
#else
	raw_atomic_sub(1, v);
#endif
}

/**
 * raw_atomic_dec_return() - atomic decrement with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_dec_return(atomic_t *v)
{
#if defined(arch_atomic_dec_return)
	return arch_atomic_dec_return(v);
#elif defined(arch_atomic_dec_return_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_dec_return_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic_sub_return(1, v);
#endif
}

/**
 * raw_atomic_dec_return_acquire() - atomic decrement with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_dec_return_acquire(atomic_t *v)
{
#if defined(arch_atomic_dec_return_acquire)
	return arch_atomic_dec_return_acquire(v);
#elif defined(arch_atomic_dec_return_relaxed)
	int ret = arch_atomic_dec_return_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_dec_return)
	return arch_atomic_dec_return(v);
#else
	return raw_atomic_sub_return_acquire(1, v);
#endif
}

/**
 * raw_atomic_dec_return_release() - atomic decrement with release ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_dec_return_release(atomic_t *v)
{
#if defined(arch_atomic_dec_return_release)
	return arch_atomic_dec_return_release(v);
#elif defined(arch_atomic_dec_return_relaxed)
	__atomic_release_fence();
	return arch_atomic_dec_return_relaxed(v);
#elif defined(arch_atomic_dec_return)
	return arch_atomic_dec_return(v);
#else
	return raw_atomic_sub_return_release(1, v);
#endif
}

/**
 * raw_atomic_dec_return_relaxed() - atomic decrement with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_dec_return_relaxed(atomic_t *v)
{
#if defined(arch_atomic_dec_return_relaxed)
	return arch_atomic_dec_return_relaxed(v);
#elif defined(arch_atomic_dec_return)
	return arch_atomic_dec_return(v);
#else
	return raw_atomic_sub_return_relaxed(1, v);
#endif
}

/**
 * raw_atomic_fetch_dec() - atomic decrement with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_dec() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_dec(atomic_t *v)
{
#if defined(arch_atomic_fetch_dec)
	return arch_atomic_fetch_dec(v);
#elif defined(arch_atomic_fetch_dec_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_dec_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic_fetch_sub(1, v);
#endif
}

/**
 * raw_atomic_fetch_dec_acquire() - atomic decrement with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_dec_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_dec_acquire(atomic_t *v)
{
#if defined(arch_atomic_fetch_dec_acquire)
	return arch_atomic_fetch_dec_acquire(v);
#elif defined(arch_atomic_fetch_dec_relaxed)
	int ret = arch_atomic_fetch_dec_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_dec)
	return arch_atomic_fetch_dec(v);
#else
	return raw_atomic_fetch_sub_acquire(1, v);
#endif
}

/**
 * raw_atomic_fetch_dec_release() - atomic decrement with release ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_dec_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_dec_release(atomic_t *v)
{
#if defined(arch_atomic_fetch_dec_release)
	return arch_atomic_fetch_dec_release(v);
#elif defined(arch_atomic_fetch_dec_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_dec_relaxed(v);
#elif defined(arch_atomic_fetch_dec)
	return arch_atomic_fetch_dec(v);
#else
	return raw_atomic_fetch_sub_release(1, v);
#endif
}

/**
 * raw_atomic_fetch_dec_relaxed() - atomic decrement with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_dec_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_dec_relaxed(atomic_t *v)
{
#if defined(arch_atomic_fetch_dec_relaxed)
	return arch_atomic_fetch_dec_relaxed(v);
#elif defined(arch_atomic_fetch_dec)
	return arch_atomic_fetch_dec(v);
#else
	return raw_atomic_fetch_sub_relaxed(1, v);
#endif
}
/**
 * raw_atomic_and() - atomic bitwise AND with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_and() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_and(int i, atomic_t *v)
{
	arch_atomic_and(i, v);
}

/**
 * raw_atomic_fetch_and() - atomic bitwise AND with full ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_and() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_and(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_and)
	return arch_atomic_fetch_and(i, v);
#elif defined(arch_atomic_fetch_and_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_and_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_fetch_and"
#endif
}

/**
 * raw_atomic_fetch_and_acquire() - atomic bitwise AND with acquire ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_and_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_and_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_and_acquire)
	return arch_atomic_fetch_and_acquire(i, v);
#elif defined(arch_atomic_fetch_and_relaxed)
	int ret = arch_atomic_fetch_and_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_and)
	return arch_atomic_fetch_and(i, v);
#else
#error "Unable to define raw_atomic_fetch_and_acquire"
#endif
}

/**
 * raw_atomic_fetch_and_release() - atomic bitwise AND with release ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_and_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_and_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_and_release)
	return arch_atomic_fetch_and_release(i, v);
#elif defined(arch_atomic_fetch_and_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_and_relaxed(i, v);
#elif defined(arch_atomic_fetch_and)
	return arch_atomic_fetch_and(i, v);
#else
#error "Unable to define raw_atomic_fetch_and_release"
#endif
}

/**
 * raw_atomic_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_and_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_and_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_and_relaxed)
	return arch_atomic_fetch_and_relaxed(i, v);
#elif defined(arch_atomic_fetch_and)
	return arch_atomic_fetch_and(i, v);
#else
#error "Unable to define raw_atomic_fetch_and_relaxed"
#endif
}

/**
 * raw_atomic_andnot() - atomic bitwise AND NOT with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_andnot() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_andnot(int i, atomic_t *v)
{
#if defined(arch_atomic_andnot)
	arch_atomic_andnot(i, v);
#else
	raw_atomic_and(~i, v);
#endif
}

/**
 * raw_atomic_fetch_andnot() - atomic bitwise AND NOT with full ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & ~@i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_andnot() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_andnot(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_andnot)
	return arch_atomic_fetch_andnot(i, v);
#elif defined(arch_atomic_fetch_andnot_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_andnot_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic_fetch_and(~i, v);
#endif
}

/**
 * raw_atomic_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & ~@i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_andnot_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_andnot_acquire)
	return arch_atomic_fetch_andnot_acquire(i, v);
#elif defined(arch_atomic_fetch_andnot_relaxed)
	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_andnot)
	return arch_atomic_fetch_andnot(i, v);
#else
	return raw_atomic_fetch_and_acquire(~i, v);
#endif
}

/**
 * raw_atomic_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & ~@i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_andnot_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_andnot_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_andnot_release)
	return arch_atomic_fetch_andnot_release(i, v);
#elif defined(arch_atomic_fetch_andnot_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_andnot_relaxed(i, v);
#elif defined(arch_atomic_fetch_andnot)
	return arch_atomic_fetch_andnot(i, v);
#else
	return raw_atomic_fetch_and_release(~i, v);
#endif
}

/**
 * raw_atomic_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_andnot_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_andnot_relaxed)
	return arch_atomic_fetch_andnot_relaxed(i, v);
#elif defined(arch_atomic_fetch_andnot)
	return arch_atomic_fetch_andnot(i, v);
#else
	return raw_atomic_fetch_and_relaxed(~i, v);
#endif
}
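/*
 * Illustrative sketch (hypothetical helper, not part of the generated
 * API): fetch_andnot() clears the mask bits and returns the prior value,
 * so the caller can tell whether it was the one to clear a given flag.
 */
static __always_inline bool example_test_and_clear_flag(atomic_t *flags, int flag)
{
	return raw_atomic_fetch_andnot(flag, flags) & flag;
}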
/**
 * raw_atomic_or() - atomic bitwise OR with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v | @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_or() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_or(int i, atomic_t *v)
{
	arch_atomic_or(i, v);
}

/**
 * raw_atomic_fetch_or() - atomic bitwise OR with full ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v | @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_or() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_or(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_or)
	return arch_atomic_fetch_or(i, v);
#elif defined(arch_atomic_fetch_or_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_or_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_fetch_or"
#endif
}

/**
 * raw_atomic_fetch_or_acquire() - atomic bitwise OR with acquire ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v | @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_or_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_or_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_or_acquire)
	return arch_atomic_fetch_or_acquire(i, v);
#elif defined(arch_atomic_fetch_or_relaxed)
	int ret = arch_atomic_fetch_or_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_or)
	return arch_atomic_fetch_or(i, v);
#else
#error "Unable to define raw_atomic_fetch_or_acquire"
#endif
}

/**
 * raw_atomic_fetch_or_release() - atomic bitwise OR with release ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v | @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_or_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_or_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_or_release)
	return arch_atomic_fetch_or_release(i, v);
#elif defined(arch_atomic_fetch_or_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_or_relaxed(i, v);
#elif defined(arch_atomic_fetch_or)
	return arch_atomic_fetch_or(i, v);
#else
#error "Unable to define raw_atomic_fetch_or_release"
#endif
}

/**
 * raw_atomic_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v | @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_or_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_or_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_or_relaxed)
	return arch_atomic_fetch_or_relaxed(i, v);
#elif defined(arch_atomic_fetch_or)
	return arch_atomic_fetch_or(i, v);
#else
#error "Unable to define raw_atomic_fetch_or_relaxed"
#endif
}
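/*
 * Illustrative sketch (hypothetical helper, not part of the generated
 * API): fetch_or() sets the mask bits and returns the prior value,
 * giving a test-and-set on a flag word.
 */
static __always_inline bool example_test_and_set_flag(atomic_t *flags, int flag)
{
	return raw_atomic_fetch_or(flag, flags) & flag;
}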
/**
 * raw_atomic_xor() - atomic bitwise XOR with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_xor() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_xor(int i, atomic_t *v)
{
	arch_atomic_xor(i, v);
}

/**
 * raw_atomic_fetch_xor() - atomic bitwise XOR with full ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v ^ @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_xor() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_xor(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_xor)
	return arch_atomic_fetch_xor(i, v);
#elif defined(arch_atomic_fetch_xor_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_xor_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_fetch_xor"
#endif
}

/**
 * raw_atomic_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v ^ @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_xor_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_xor_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_xor_acquire)
	return arch_atomic_fetch_xor_acquire(i, v);
#elif defined(arch_atomic_fetch_xor_relaxed)
	int ret = arch_atomic_fetch_xor_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_xor)
	return arch_atomic_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic_fetch_xor_acquire"
#endif
}

/**
 * raw_atomic_fetch_xor_release() - atomic bitwise XOR with release ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v ^ @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_xor_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_xor_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_xor_release)
	return arch_atomic_fetch_xor_release(i, v);
#elif defined(arch_atomic_fetch_xor_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_xor_relaxed(i, v);
#elif defined(arch_atomic_fetch_xor)
	return arch_atomic_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic_fetch_xor_release"
#endif
}

/**
 * raw_atomic_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_xor_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_xor_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_xor_relaxed)
	return arch_atomic_fetch_xor_relaxed(i, v);
#elif defined(arch_atomic_fetch_xor)
	return arch_atomic_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic_fetch_xor_relaxed"
#endif
}
/**
 * raw_atomic_xchg() - atomic exchange with full ordering
 * @v: pointer to atomic_t
 * @new: int value to assign
 *
 * Atomically updates @v to @new with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_xchg() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_xchg(atomic_t *v, int new)
{
#if defined(arch_atomic_xchg)
	return arch_atomic_xchg(v, new);
#elif defined(arch_atomic_xchg_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_xchg_relaxed(v, new);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_xchg(&v->counter, new);
#endif
}

/**
 * raw_atomic_xchg_acquire() - atomic exchange with acquire ordering
 * @v: pointer to atomic_t
 * @new: int value to assign
 *
 * Atomically updates @v to @new with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_xchg_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_xchg_acquire(atomic_t *v, int new)
{
#if defined(arch_atomic_xchg_acquire)
	return arch_atomic_xchg_acquire(v, new);
#elif defined(arch_atomic_xchg_relaxed)
	int ret = arch_atomic_xchg_relaxed(v, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_xchg)
	return arch_atomic_xchg(v, new);
#else
	return raw_xchg_acquire(&v->counter, new);
#endif
}

/**
 * raw_atomic_xchg_release() - atomic exchange with release ordering
 * @v: pointer to atomic_t
 * @new: int value to assign
 *
 * Atomically updates @v to @new with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_xchg_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_xchg_release(atomic_t *v, int new)
{
#if defined(arch_atomic_xchg_release)
	return arch_atomic_xchg_release(v, new);
#elif defined(arch_atomic_xchg_relaxed)
	__atomic_release_fence();
	return arch_atomic_xchg_relaxed(v, new);
#elif defined(arch_atomic_xchg)
	return arch_atomic_xchg(v, new);
#else
	return raw_xchg_release(&v->counter, new);
#endif
}

/**
 * raw_atomic_xchg_relaxed() - atomic exchange with relaxed ordering
 * @v: pointer to atomic_t
 * @new: int value to assign
 *
 * Atomically updates @v to @new with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_xchg_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_xchg_relaxed(atomic_t *v, int new)
{
#if defined(arch_atomic_xchg_relaxed)
	return arch_atomic_xchg_relaxed(v, new);
#elif defined(arch_atomic_xchg)
	return arch_atomic_xchg(v, new);
#else
	return raw_xchg_relaxed(&v->counter, new);
#endif
}
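/*
 * Illustrative sketch (hypothetical helper, not part of the generated
 * API): xchg() atomically swaps in a new value and returns the old one,
 * e.g. draining a pending bitmask so each pending bit is seen exactly once.
 */
static __always_inline int example_drain_pending(atomic_t *pending)
{
	return raw_atomic_xchg(pending, 0);
}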
2002 * raw_atomic_cmpxchg() - atomic compare and exchange with full ordering
2003 * @v: pointer to atomic_t
2004 * @old: int value to compare with
2005 * @new: int value to assign
2007 * If (@v == @old), atomically updates @v to @new with full ordering.
2008 * Otherwise, @v is not modified and relaxed ordering is provided.
2010 * Safe to use in noinstr code; prefer atomic_cmpxchg() elsewhere.
2012 * Return: The original value of @v.
2014 static __always_inline int
2015 raw_atomic_cmpxchg(atomic_t *v, int old, int new)
2017 #if defined(arch_atomic_cmpxchg)
2018 return arch_atomic_cmpxchg(v, old, new);
2019 #elif defined(arch_atomic_cmpxchg_relaxed)
2021 __atomic_pre_full_fence();
2022 ret = arch_atomic_cmpxchg_relaxed(v, old, new);
2023 __atomic_post_full_fence();
2026 return raw_cmpxchg(&v->counter, old, new);
2031 * raw_atomic_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
2032 * @v: pointer to atomic_t
2033 * @old: int value to compare with
2034 * @new: int value to assign
2036 * If (@v == @old), atomically updates @v to @new with acquire ordering.
2037 * Otherwise, @v is not modified and relaxed ordering is provided.
2039 * Safe to use in noinstr code; prefer atomic_cmpxchg_acquire() elsewhere.
2041 * Return: The original value of @v.
2043 static __always_inline int
2044 raw_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
2046 #if defined(arch_atomic_cmpxchg_acquire)
2047 return arch_atomic_cmpxchg_acquire(v, old, new);
2048 #elif defined(arch_atomic_cmpxchg_relaxed)
2049 int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
2050 __atomic_acquire_fence();
2052 #elif defined(arch_atomic_cmpxchg)
2053 return arch_atomic_cmpxchg(v, old, new);
2055 return raw_cmpxchg_acquire(&v->counter, old, new);
2060 * raw_atomic_cmpxchg_release() - atomic compare and exchange with release ordering
2061 * @v: pointer to atomic_t
2062 * @old: int value to compare with
2063 * @new: int value to assign
2065 * If (@v == @old), atomically updates @v to @new with release ordering.
2066 * Otherwise, @v is not modified and relaxed ordering is provided.
2068 * Safe to use in noinstr code; prefer atomic_cmpxchg_release() elsewhere.
2070 * Return: The original value of @v.
2072 static __always_inline int
2073 raw_atomic_cmpxchg_release(atomic_t *v, int old, int new)
2075 #if defined(arch_atomic_cmpxchg_release)
2076 return arch_atomic_cmpxchg_release(v, old, new);
2077 #elif defined(arch_atomic_cmpxchg_relaxed)
2078 __atomic_release_fence();
2079 return arch_atomic_cmpxchg_relaxed(v, old, new);
2080 #elif defined(arch_atomic_cmpxchg)
2081 return arch_atomic_cmpxchg(v, old, new);
2083 return raw_cmpxchg_release(&v->counter, old, new);
2088 * raw_atomic_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
2089 * @v: pointer to atomic_t
2090 * @old: int value to compare with
2091 * @new: int value to assign
2093 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
2094 * Otherwise, @v is not modified and relaxed ordering is provided.
2096 * Safe to use in noinstr code; prefer atomic_cmpxchg_relaxed() elsewhere.
2098 * Return: The original value of @v.
2100 static __always_inline int
2101 raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
2103 #if defined(arch_atomic_cmpxchg_relaxed)
2104 return arch_atomic_cmpxchg_relaxed(v, old, new);
2105 #elif defined(arch_atomic_cmpxchg)
2106 return arch_atomic_cmpxchg(v, old, new);
2108 return raw_cmpxchg_relaxed(&v->counter, old, new);
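
/*
 * Usage sketch (illustrative only, not generated): the value-returning
 * cmpxchg above is normally driven from a read-modify-write retry loop,
 * comparing the returned value against the expected one. A minimal example
 * with a hypothetical helper, assuming @v is a caller-owned counter:
 *
 *	static inline void example_inc_saturating(atomic_t *v, int max)
 *	{
 *		int old = raw_atomic_read(v);
 *
 *		while (old < max) {
 *			int seen = raw_atomic_cmpxchg(v, old, old + 1);
 *
 *			if (seen == old)
 *				break;		// exchange happened
 *			old = seen;		// raced; retry with fresh value
 *		}
 *	}
 */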
/**
 * raw_atomic_try_cmpxchg() - atomic compare and exchange with full ordering
 * @v: pointer to atomic_t
 * @old: pointer to int value to compare with
 * @new: int value to assign
 *
 * If (@v == @old), atomically updates @v to @new with full ordering.
 * Otherwise, @v is not modified, @old is updated to the current value of @v,
 * and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_try_cmpxchg() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
#if defined(arch_atomic_try_cmpxchg)
	return arch_atomic_try_cmpxchg(v, old, new);
#elif defined(arch_atomic_try_cmpxchg_relaxed)
	bool ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
	__atomic_post_full_fence();
	return ret;
#else
	int r, o = *old;
	r = raw_atomic_cmpxchg(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
 * @v: pointer to atomic_t
 * @old: pointer to int value to compare with
 * @new: int value to assign
 *
 * If (@v == @old), atomically updates @v to @new with acquire ordering.
 * Otherwise, @v is not modified, @old is updated to the current value of @v,
 * and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_try_cmpxchg_acquire() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
#if defined(arch_atomic_try_cmpxchg_acquire)
	return arch_atomic_try_cmpxchg_acquire(v, old, new);
#elif defined(arch_atomic_try_cmpxchg_relaxed)
	bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_try_cmpxchg)
	return arch_atomic_try_cmpxchg(v, old, new);
#else
	int r, o = *old;
	r = raw_atomic_cmpxchg_acquire(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic_try_cmpxchg_release() - atomic compare and exchange with release ordering
 * @v: pointer to atomic_t
 * @old: pointer to int value to compare with
 * @new: int value to assign
 *
 * If (@v == @old), atomically updates @v to @new with release ordering.
 * Otherwise, @v is not modified, @old is updated to the current value of @v,
 * and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_try_cmpxchg_release() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
#if defined(arch_atomic_try_cmpxchg_release)
	return arch_atomic_try_cmpxchg_release(v, old, new);
#elif defined(arch_atomic_try_cmpxchg_relaxed)
	__atomic_release_fence();
	return arch_atomic_try_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic_try_cmpxchg)
	return arch_atomic_try_cmpxchg(v, old, new);
#else
	int r, o = *old;
	r = raw_atomic_cmpxchg_release(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
 * @v: pointer to atomic_t
 * @old: pointer to int value to compare with
 * @new: int value to assign
 *
 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
 * Otherwise, @v is not modified, @old is updated to the current value of @v,
 * and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_try_cmpxchg_relaxed() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
#if defined(arch_atomic_try_cmpxchg_relaxed)
	return arch_atomic_try_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic_try_cmpxchg)
	return arch_atomic_try_cmpxchg(v, old, new);
#else
	int r, o = *old;
	r = raw_atomic_cmpxchg_relaxed(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}
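
/*
 * Usage sketch (illustrative only, not generated): try_cmpxchg() writes the
 * observed value back through @old on failure, so the canonical loop needs
 * no explicit re-read. A hypothetical clamped add:
 *
 *	static inline bool example_add_below(atomic_t *v, int a, int max)
 *	{
 *		int old = raw_atomic_read(v);
 *
 *		do {
 *			if (old + a > max)
 *				return false;	// would exceed the clamp
 *		} while (!raw_atomic_try_cmpxchg(v, &old, old + a));
 *
 *		return true;
 *	}
 */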
/**
 * raw_atomic_sub_and_test() - atomic subtract and test if zero with full ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic_sub_and_test(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_and_test)
	return arch_atomic_sub_and_test(i, v);
#else
	return raw_atomic_sub_return(i, v) == 0;
#endif
}

/**
 * raw_atomic_dec_and_test() - atomic decrement and test if zero with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic_dec_and_test(atomic_t *v)
{
#if defined(arch_atomic_dec_and_test)
	return arch_atomic_dec_and_test(v);
#else
	return raw_atomic_dec_return(v) == 0;
#endif
}

/**
 * raw_atomic_inc_and_test() - atomic increment and test if zero with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic_inc_and_test(atomic_t *v)
{
#if defined(arch_atomic_inc_and_test)
	return arch_atomic_inc_and_test(v);
#else
	return raw_atomic_inc_return(v) == 0;
#endif
}

/**
 * raw_atomic_add_negative() - atomic add and test if negative with full ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_negative() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic_add_negative(int i, atomic_t *v)
{
#if defined(arch_atomic_add_negative)
	return arch_atomic_add_negative(i, v);
#elif defined(arch_atomic_add_negative_relaxed)
	bool ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_add_negative_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic_add_return(i, v) < 0;
#endif
}

/**
 * raw_atomic_add_negative_acquire() - atomic add and test if negative with acquire ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_negative_acquire() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic_add_negative_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_add_negative_acquire)
	return arch_atomic_add_negative_acquire(i, v);
#elif defined(arch_atomic_add_negative_relaxed)
	bool ret = arch_atomic_add_negative_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_add_negative)
	return arch_atomic_add_negative(i, v);
#else
	return raw_atomic_add_return_acquire(i, v) < 0;
#endif
}

/**
 * raw_atomic_add_negative_release() - atomic add and test if negative with release ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_negative_release() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic_add_negative_release(int i, atomic_t *v)
{
#if defined(arch_atomic_add_negative_release)
	return arch_atomic_add_negative_release(i, v);
#elif defined(arch_atomic_add_negative_relaxed)
	__atomic_release_fence();
	return arch_atomic_add_negative_relaxed(i, v);
#elif defined(arch_atomic_add_negative)
	return arch_atomic_add_negative(i, v);
#else
	return raw_atomic_add_return_release(i, v) < 0;
#endif
}

/**
 * raw_atomic_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_negative_relaxed() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic_add_negative_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_add_negative_relaxed)
	return arch_atomic_add_negative_relaxed(i, v);
#elif defined(arch_atomic_add_negative)
	return arch_atomic_add_negative(i, v);
#else
	return raw_atomic_add_return_relaxed(i, v) < 0;
#endif
}
/**
 * raw_atomic_fetch_add_unless() - atomic add unless value with full ordering
 * @v: pointer to atomic_t
 * @a: int value to add
 * @u: int value to compare with
 *
 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add_unless() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
#if defined(arch_atomic_fetch_add_unless)
	return arch_atomic_fetch_add_unless(v, a, u);
#else
	int c = raw_atomic_read(v);

	do {
		if (unlikely(c == u))
			break;
	} while (!raw_atomic_try_cmpxchg(v, &c, c + a));

	return c;
#endif
}

/**
 * raw_atomic_add_unless() - atomic add unless value with full ordering
 * @v: pointer to atomic_t
 * @a: int value to add
 * @u: int value to compare with
 *
 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_add_unless() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic_add_unless(atomic_t *v, int a, int u)
{
#if defined(arch_atomic_add_unless)
	return arch_atomic_add_unless(v, a, u);
#else
	return raw_atomic_fetch_add_unless(v, a, u) != u;
#endif
}

/**
 * raw_atomic_inc_not_zero() - atomic increment unless zero with full ordering
 * @v: pointer to atomic_t
 *
 * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_inc_not_zero() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic_inc_not_zero(atomic_t *v)
{
#if defined(arch_atomic_inc_not_zero)
	return arch_atomic_inc_not_zero(v);
#else
	return raw_atomic_add_unless(v, 1, 0);
#endif
}
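
/*
 * Usage sketch (illustrative only, not generated): inc_not_zero() is the
 * usual way to take a reference on an object whose refcount may already
 * have dropped to zero. Hypothetical type and helper for illustration:
 *
 *	struct example_obj {
 *		atomic_t ref;
 *	};
 *
 *	static inline bool example_tryget(struct example_obj *obj)
 *	{
 *		return raw_atomic_inc_not_zero(&obj->ref);
 *	}
 *
 * A @false return means the object is already on its way to being freed
 * and must not be touched.
 */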
/**
 * raw_atomic_inc_unless_negative() - atomic increment unless negative with full ordering
 * @v: pointer to atomic_t
 *
 * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_inc_unless_negative() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic_inc_unless_negative(atomic_t *v)
{
#if defined(arch_atomic_inc_unless_negative)
	return arch_atomic_inc_unless_negative(v);
#else
	int c = raw_atomic_read(v);

	do {
		if (unlikely(c < 0))
			return false;
	} while (!raw_atomic_try_cmpxchg(v, &c, c + 1));

	return true;
#endif
}

/**
 * raw_atomic_dec_unless_positive() - atomic decrement unless positive with full ordering
 * @v: pointer to atomic_t
 *
 * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_dec_unless_positive() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic_dec_unless_positive(atomic_t *v)
{
#if defined(arch_atomic_dec_unless_positive)
	return arch_atomic_dec_unless_positive(v);
#else
	int c = raw_atomic_read(v);

	do {
		if (unlikely(c > 0))
			return false;
	} while (!raw_atomic_try_cmpxchg(v, &c, c - 1));

	return true;
#endif
}

/**
 * raw_atomic_dec_if_positive() - atomic decrement if positive with full ordering
 * @v: pointer to atomic_t
 *
 * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic_dec_if_positive() elsewhere.
 *
 * Return: The old value of (@v - 1), regardless of whether @v was updated.
 */
static __always_inline int
raw_atomic_dec_if_positive(atomic_t *v)
{
#if defined(arch_atomic_dec_if_positive)
	return arch_atomic_dec_if_positive(v);
#else
	int dec, c = raw_atomic_read(v);

	do {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
	} while (!raw_atomic_try_cmpxchg(v, &c, dec));

	return dec;
#endif
}
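
/*
 * Usage sketch (illustrative only, not generated): dec_if_positive() returns
 * the decremented value (@v - 1) whether or not the store happened, so a
 * negative result means nothing was taken. A hypothetical semaphore-style
 * trydown on a caller-owned counter:
 *
 *	static inline bool example_trydown(atomic_t *sem)
 *	{
 *		return raw_atomic_dec_if_positive(sem) >= 0;
 *	}
 */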
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif

/**
 * raw_atomic64_read() - atomic load with relaxed ordering
 * @v: pointer to atomic64_t
 *
 * Atomically loads the value of @v with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_read() elsewhere.
 *
 * Return: The value loaded from @v.
 */
static __always_inline s64
raw_atomic64_read(const atomic64_t *v)
{
	return arch_atomic64_read(v);
}

/**
 * raw_atomic64_read_acquire() - atomic load with acquire ordering
 * @v: pointer to atomic64_t
 *
 * Atomically loads the value of @v with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_read_acquire() elsewhere.
 *
 * Return: The value loaded from @v.
 */
static __always_inline s64
raw_atomic64_read_acquire(const atomic64_t *v)
{
#if defined(arch_atomic64_read_acquire)
	return arch_atomic64_read_acquire(v);
#else
	s64 ret;

	if (__native_word(atomic64_t)) {
		ret = smp_load_acquire(&(v)->counter);
	} else {
		ret = raw_atomic64_read(v);
		__atomic_acquire_fence();
	}

	return ret;
#endif
}

/**
 * raw_atomic64_set() - atomic set with relaxed ordering
 * @v: pointer to atomic64_t
 * @i: s64 value to assign
 *
 * Atomically sets @v to @i with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_set() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_set(atomic64_t *v, s64 i)
{
	arch_atomic64_set(v, i);
}

/**
 * raw_atomic64_set_release() - atomic set with release ordering
 * @v: pointer to atomic64_t
 * @i: s64 value to assign
 *
 * Atomically sets @v to @i with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_set_release() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_set_release(atomic64_t *v, s64 i)
{
#if defined(arch_atomic64_set_release)
	arch_atomic64_set_release(v, i);
#else
	if (__native_word(atomic64_t)) {
		smp_store_release(&(v)->counter, i);
	} else {
		__atomic_release_fence();
		raw_atomic64_set(v, i);
	}
#endif
}
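
/*
 * Usage sketch (illustrative only, not generated): set_release() pairs with
 * read_acquire() to publish plain data behind a flag. Hypothetical globals
 * example_data/example_ready are assumed for illustration:
 *
 *	static s64 example_data;
 *	static atomic64_t example_ready;
 *
 *	static void example_publish(s64 val)
 *	{
 *		example_data = val;				// plain store
 *		raw_atomic64_set_release(&example_ready, 1);	// publish
 *	}
 *
 *	static bool example_consume(s64 *out)
 *	{
 *		if (!raw_atomic64_read_acquire(&example_ready))
 *			return false;
 *		*out = example_data;	// ordered after the acquire load
 *		return true;
 *	}
 */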
/**
 * raw_atomic64_add() - atomic add with relaxed ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_add(s64 i, atomic64_t *v)
{
	arch_atomic64_add(i, v);
}

/**
 * raw_atomic64_add_return() - atomic add with full ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_add_return(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_return)
	return arch_atomic64_add_return(i, v);
#elif defined(arch_atomic64_add_return_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_add_return_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_add_return"
#endif
}

/**
 * raw_atomic64_add_return_acquire() - atomic add with acquire ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_return_acquire)
	return arch_atomic64_add_return_acquire(i, v);
#elif defined(arch_atomic64_add_return_relaxed)
	s64 ret = arch_atomic64_add_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_add_return)
	return arch_atomic64_add_return(i, v);
#else
#error "Unable to define raw_atomic64_add_return_acquire"
#endif
}

/**
 * raw_atomic64_add_return_release() - atomic add with release ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_add_return_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_return_release)
	return arch_atomic64_add_return_release(i, v);
#elif defined(arch_atomic64_add_return_relaxed)
	__atomic_release_fence();
	return arch_atomic64_add_return_relaxed(i, v);
#elif defined(arch_atomic64_add_return)
	return arch_atomic64_add_return(i, v);
#else
#error "Unable to define raw_atomic64_add_return_release"
#endif
}

/**
 * raw_atomic64_add_return_relaxed() - atomic add with relaxed ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_add_return_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_return_relaxed)
	return arch_atomic64_add_return_relaxed(i, v);
#elif defined(arch_atomic64_add_return)
	return arch_atomic64_add_return(i, v);
#else
#error "Unable to define raw_atomic64_add_return_relaxed"
#endif
}

/**
 * raw_atomic64_fetch_add() - atomic add with full ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_add() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_add(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_add)
	return arch_atomic64_fetch_add(i, v);
#elif defined(arch_atomic64_fetch_add_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_add_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_fetch_add"
#endif
}

/**
 * raw_atomic64_fetch_add_acquire() - atomic add with acquire ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_add_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_add_acquire)
	return arch_atomic64_fetch_add_acquire(i, v);
#elif defined(arch_atomic64_fetch_add_relaxed)
	s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_add)
	return arch_atomic64_fetch_add(i, v);
#else
#error "Unable to define raw_atomic64_fetch_add_acquire"
#endif
}

/**
 * raw_atomic64_fetch_add_release() - atomic add with release ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_add_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_add_release)
	return arch_atomic64_fetch_add_release(i, v);
#elif defined(arch_atomic64_fetch_add_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_add_relaxed(i, v);
#elif defined(arch_atomic64_fetch_add)
	return arch_atomic64_fetch_add(i, v);
#else
#error "Unable to define raw_atomic64_fetch_add_release"
#endif
}

/**
 * raw_atomic64_fetch_add_relaxed() - atomic add with relaxed ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_add_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_add_relaxed)
	return arch_atomic64_fetch_add_relaxed(i, v);
#elif defined(arch_atomic64_fetch_add)
	return arch_atomic64_fetch_add(i, v);
#else
#error "Unable to define raw_atomic64_fetch_add_relaxed"
#endif
}
/**
 * raw_atomic64_sub() - atomic subtract with relaxed ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_sub() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_sub(s64 i, atomic64_t *v)
{
	arch_atomic64_sub(i, v);
}

/**
 * raw_atomic64_sub_return() - atomic subtract with full ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_sub_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_sub_return(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_sub_return)
	return arch_atomic64_sub_return(i, v);
#elif defined(arch_atomic64_sub_return_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_sub_return_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_sub_return"
#endif
}

/**
 * raw_atomic64_sub_return_acquire() - atomic subtract with acquire ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_sub_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_sub_return_acquire)
	return arch_atomic64_sub_return_acquire(i, v);
#elif defined(arch_atomic64_sub_return_relaxed)
	s64 ret = arch_atomic64_sub_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_sub_return)
	return arch_atomic64_sub_return(i, v);
#else
#error "Unable to define raw_atomic64_sub_return_acquire"
#endif
}

/**
 * raw_atomic64_sub_return_release() - atomic subtract with release ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_sub_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_sub_return_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_sub_return_release)
	return arch_atomic64_sub_return_release(i, v);
#elif defined(arch_atomic64_sub_return_relaxed)
	__atomic_release_fence();
	return arch_atomic64_sub_return_relaxed(i, v);
#elif defined(arch_atomic64_sub_return)
	return arch_atomic64_sub_return(i, v);
#else
#error "Unable to define raw_atomic64_sub_return_release"
#endif
}

/**
 * raw_atomic64_sub_return_relaxed() - atomic subtract with relaxed ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_sub_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_sub_return_relaxed)
	return arch_atomic64_sub_return_relaxed(i, v);
#elif defined(arch_atomic64_sub_return)
	return arch_atomic64_sub_return(i, v);
#else
#error "Unable to define raw_atomic64_sub_return_relaxed"
#endif
}

/**
 * raw_atomic64_fetch_sub() - atomic subtract with full ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_sub() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_sub)
	return arch_atomic64_fetch_sub(i, v);
#elif defined(arch_atomic64_fetch_sub_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_sub_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_fetch_sub"
#endif
}

/**
 * raw_atomic64_fetch_sub_acquire() - atomic subtract with acquire ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_sub_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_sub_acquire)
	return arch_atomic64_fetch_sub_acquire(i, v);
#elif defined(arch_atomic64_fetch_sub_relaxed)
	s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_sub)
	return arch_atomic64_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic64_fetch_sub_acquire"
#endif
}

/**
 * raw_atomic64_fetch_sub_release() - atomic subtract with release ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_sub_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_sub_release)
	return arch_atomic64_fetch_sub_release(i, v);
#elif defined(arch_atomic64_fetch_sub_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_sub_relaxed(i, v);
#elif defined(arch_atomic64_fetch_sub)
	return arch_atomic64_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic64_fetch_sub_release"
#endif
}

/**
 * raw_atomic64_fetch_sub_relaxed() - atomic subtract with relaxed ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_sub_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_sub_relaxed)
	return arch_atomic64_fetch_sub_relaxed(i, v);
#elif defined(arch_atomic64_fetch_sub)
	return arch_atomic64_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic64_fetch_sub_relaxed"
#endif
}
/**
 * raw_atomic64_inc() - atomic increment with relaxed ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_inc(atomic64_t *v)
{
#if defined(arch_atomic64_inc)
	arch_atomic64_inc(v);
#else
	raw_atomic64_add(1, v);
#endif
}

/**
 * raw_atomic64_inc_return() - atomic increment with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_inc_return(atomic64_t *v)
{
#if defined(arch_atomic64_inc_return)
	return arch_atomic64_inc_return(v);
#elif defined(arch_atomic64_inc_return_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_inc_return_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic64_add_return(1, v);
#endif
}

/**
 * raw_atomic64_inc_return_acquire() - atomic increment with acquire ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_inc_return_acquire(atomic64_t *v)
{
#if defined(arch_atomic64_inc_return_acquire)
	return arch_atomic64_inc_return_acquire(v);
#elif defined(arch_atomic64_inc_return_relaxed)
	s64 ret = arch_atomic64_inc_return_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_inc_return)
	return arch_atomic64_inc_return(v);
#else
	return raw_atomic64_add_return_acquire(1, v);
#endif
}

/**
 * raw_atomic64_inc_return_release() - atomic increment with release ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_inc_return_release(atomic64_t *v)
{
#if defined(arch_atomic64_inc_return_release)
	return arch_atomic64_inc_return_release(v);
#elif defined(arch_atomic64_inc_return_relaxed)
	__atomic_release_fence();
	return arch_atomic64_inc_return_relaxed(v);
#elif defined(arch_atomic64_inc_return)
	return arch_atomic64_inc_return(v);
#else
	return raw_atomic64_add_return_release(1, v);
#endif
}

/**
 * raw_atomic64_inc_return_relaxed() - atomic increment with relaxed ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_inc_return_relaxed(atomic64_t *v)
{
#if defined(arch_atomic64_inc_return_relaxed)
	return arch_atomic64_inc_return_relaxed(v);
#elif defined(arch_atomic64_inc_return)
	return arch_atomic64_inc_return(v);
#else
	return raw_atomic64_add_return_relaxed(1, v);
#endif
}

/**
 * raw_atomic64_fetch_inc() - atomic increment with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_inc() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_inc(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_inc)
	return arch_atomic64_fetch_inc(v);
#elif defined(arch_atomic64_fetch_inc_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_inc_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic64_fetch_add(1, v);
#endif
}

/**
 * raw_atomic64_fetch_inc_acquire() - atomic increment with acquire ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_inc_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_inc_acquire(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_inc_acquire)
	return arch_atomic64_fetch_inc_acquire(v);
#elif defined(arch_atomic64_fetch_inc_relaxed)
	s64 ret = arch_atomic64_fetch_inc_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_inc)
	return arch_atomic64_fetch_inc(v);
#else
	return raw_atomic64_fetch_add_acquire(1, v);
#endif
}

/**
 * raw_atomic64_fetch_inc_release() - atomic increment with release ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_inc_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_inc_release(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_inc_release)
	return arch_atomic64_fetch_inc_release(v);
#elif defined(arch_atomic64_fetch_inc_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_inc_relaxed(v);
#elif defined(arch_atomic64_fetch_inc)
	return arch_atomic64_fetch_inc(v);
#else
	return raw_atomic64_fetch_add_release(1, v);
#endif
}

/**
 * raw_atomic64_fetch_inc_relaxed() - atomic increment with relaxed ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_inc_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_inc_relaxed(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_inc_relaxed)
	return arch_atomic64_fetch_inc_relaxed(v);
#elif defined(arch_atomic64_fetch_inc)
	return arch_atomic64_fetch_inc(v);
#else
	return raw_atomic64_fetch_add_relaxed(1, v);
#endif
}
/**
 * raw_atomic64_dec() - atomic decrement with relaxed ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_dec(atomic64_t *v)
{
#if defined(arch_atomic64_dec)
	arch_atomic64_dec(v);
#else
	raw_atomic64_sub(1, v);
#endif
}

/**
 * raw_atomic64_dec_return() - atomic decrement with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_dec_return(atomic64_t *v)
{
#if defined(arch_atomic64_dec_return)
	return arch_atomic64_dec_return(v);
#elif defined(arch_atomic64_dec_return_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_dec_return_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic64_sub_return(1, v);
#endif
}

/**
 * raw_atomic64_dec_return_acquire() - atomic decrement with acquire ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_dec_return_acquire(atomic64_t *v)
{
#if defined(arch_atomic64_dec_return_acquire)
	return arch_atomic64_dec_return_acquire(v);
#elif defined(arch_atomic64_dec_return_relaxed)
	s64 ret = arch_atomic64_dec_return_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_dec_return)
	return arch_atomic64_dec_return(v);
#else
	return raw_atomic64_sub_return_acquire(1, v);
#endif
}

/**
 * raw_atomic64_dec_return_release() - atomic decrement with release ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_dec_return_release(atomic64_t *v)
{
#if defined(arch_atomic64_dec_return_release)
	return arch_atomic64_dec_return_release(v);
#elif defined(arch_atomic64_dec_return_relaxed)
	__atomic_release_fence();
	return arch_atomic64_dec_return_relaxed(v);
#elif defined(arch_atomic64_dec_return)
	return arch_atomic64_dec_return(v);
#else
	return raw_atomic64_sub_return_release(1, v);
#endif
}

/**
 * raw_atomic64_dec_return_relaxed() - atomic decrement with relaxed ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_dec_return_relaxed(atomic64_t *v)
{
#if defined(arch_atomic64_dec_return_relaxed)
	return arch_atomic64_dec_return_relaxed(v);
#elif defined(arch_atomic64_dec_return)
	return arch_atomic64_dec_return(v);
#else
	return raw_atomic64_sub_return_relaxed(1, v);
#endif
}

/**
 * raw_atomic64_fetch_dec() - atomic decrement with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_dec() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_dec(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_dec)
	return arch_atomic64_fetch_dec(v);
#elif defined(arch_atomic64_fetch_dec_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_dec_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic64_fetch_sub(1, v);
#endif
}

/**
 * raw_atomic64_fetch_dec_acquire() - atomic decrement with acquire ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_dec_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_dec_acquire(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_dec_acquire)
	return arch_atomic64_fetch_dec_acquire(v);
#elif defined(arch_atomic64_fetch_dec_relaxed)
	s64 ret = arch_atomic64_fetch_dec_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_dec)
	return arch_atomic64_fetch_dec(v);
#else
	return raw_atomic64_fetch_sub_acquire(1, v);
#endif
}

/**
 * raw_atomic64_fetch_dec_release() - atomic decrement with release ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_dec_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_dec_release(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_dec_release)
	return arch_atomic64_fetch_dec_release(v);
#elif defined(arch_atomic64_fetch_dec_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_dec_relaxed(v);
#elif defined(arch_atomic64_fetch_dec)
	return arch_atomic64_fetch_dec(v);
#else
	return raw_atomic64_fetch_sub_release(1, v);
#endif
}

/**
 * raw_atomic64_fetch_dec_relaxed() - atomic decrement with relaxed ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_dec_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_dec_relaxed(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_dec_relaxed)
	return arch_atomic64_fetch_dec_relaxed(v);
#elif defined(arch_atomic64_fetch_dec)
	return arch_atomic64_fetch_dec(v);
#else
	return raw_atomic64_fetch_sub_relaxed(1, v);
#endif
}
/**
 * raw_atomic64_and() - atomic bitwise AND with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_and() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_and(s64 i, atomic64_t *v)
{
	arch_atomic64_and(i, v);
}

/**
 * raw_atomic64_fetch_and() - atomic bitwise AND with full ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_and() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_and(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_and)
	return arch_atomic64_fetch_and(i, v);
#elif defined(arch_atomic64_fetch_and_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_and_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_fetch_and"
#endif
}

/**
 * raw_atomic64_fetch_and_acquire() - atomic bitwise AND with acquire ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_and_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_and_acquire)
	return arch_atomic64_fetch_and_acquire(i, v);
#elif defined(arch_atomic64_fetch_and_relaxed)
	s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_and)
	return arch_atomic64_fetch_and(i, v);
#else
#error "Unable to define raw_atomic64_fetch_and_acquire"
#endif
}

/**
 * raw_atomic64_fetch_and_release() - atomic bitwise AND with release ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_and_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_and_release)
	return arch_atomic64_fetch_and_release(i, v);
#elif defined(arch_atomic64_fetch_and_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_and_relaxed(i, v);
#elif defined(arch_atomic64_fetch_and)
	return arch_atomic64_fetch_and(i, v);
#else
#error "Unable to define raw_atomic64_fetch_and_release"
#endif
}

/**
 * raw_atomic64_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_and_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_and_relaxed)
	return arch_atomic64_fetch_and_relaxed(i, v);
#elif defined(arch_atomic64_fetch_and)
	return arch_atomic64_fetch_and(i, v);
#else
#error "Unable to define raw_atomic64_fetch_and_relaxed"
#endif
}

/**
 * raw_atomic64_andnot() - atomic bitwise AND NOT with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_andnot() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_andnot(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_andnot)
	arch_atomic64_andnot(i, v);
#else
	raw_atomic64_and(~i, v);
#endif
}

/**
 * raw_atomic64_fetch_andnot() - atomic bitwise AND NOT with full ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_andnot() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot)
	return arch_atomic64_fetch_andnot(i, v);
#elif defined(arch_atomic64_fetch_andnot_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_andnot_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic64_fetch_and(~i, v);
#endif
}

/**
 * raw_atomic64_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot_acquire)
	return arch_atomic64_fetch_andnot_acquire(i, v);
#elif defined(arch_atomic64_fetch_andnot_relaxed)
	s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_andnot)
	return arch_atomic64_fetch_andnot(i, v);
#else
	return raw_atomic64_fetch_and_acquire(~i, v);
#endif
}

/**
 * raw_atomic64_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot_release)
	return arch_atomic64_fetch_andnot_release(i, v);
#elif defined(arch_atomic64_fetch_andnot_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_andnot_relaxed(i, v);
#elif defined(arch_atomic64_fetch_andnot)
	return arch_atomic64_fetch_andnot(i, v);
#else
	return raw_atomic64_fetch_and_release(~i, v);
#endif
}

/**
 * raw_atomic64_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot_relaxed)
	return arch_atomic64_fetch_andnot_relaxed(i, v);
#elif defined(arch_atomic64_fetch_andnot)
	return arch_atomic64_fetch_andnot(i, v);
#else
	return raw_atomic64_fetch_and_relaxed(~i, v);
#endif
}
/**
 * raw_atomic64_or() - atomic bitwise OR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_or() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_or(s64 i, atomic64_t *v)
{
	arch_atomic64_or(i, v);
}

/**
 * raw_atomic64_fetch_or() - atomic bitwise OR with full ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_or() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_or(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#elif defined(arch_atomic64_fetch_or_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_or_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_fetch_or"
#endif
}

/**
 * raw_atomic64_fetch_or_acquire() - atomic bitwise OR with acquire ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_or_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or_acquire)
	return arch_atomic64_fetch_or_acquire(i, v);
#elif defined(arch_atomic64_fetch_or_relaxed)
	s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#else
#error "Unable to define raw_atomic64_fetch_or_acquire"
#endif
}

/**
 * raw_atomic64_fetch_or_release() - atomic bitwise OR with release ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_or_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or_release)
	return arch_atomic64_fetch_or_release(i, v);
#elif defined(arch_atomic64_fetch_or_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_or_relaxed(i, v);
#elif defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#else
#error "Unable to define raw_atomic64_fetch_or_release"
#endif
}

/**
 * raw_atomic64_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_or_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or_relaxed)
	return arch_atomic64_fetch_or_relaxed(i, v);
#elif defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#else
#error "Unable to define raw_atomic64_fetch_or_relaxed"
#endif
}

/**
 * raw_atomic64_xor() - atomic bitwise XOR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xor() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_xor(s64 i, atomic64_t *v)
{
	arch_atomic64_xor(i, v);
}

/**
 * raw_atomic64_fetch_xor() - atomic bitwise XOR with full ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#elif defined(arch_atomic64_fetch_xor_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_xor_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_fetch_xor"
#endif
}

/**
 * raw_atomic64_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor_acquire)
	return arch_atomic64_fetch_xor_acquire(i, v);
#elif defined(arch_atomic64_fetch_xor_relaxed)
	s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic64_fetch_xor_acquire"
#endif
}

/**
 * raw_atomic64_fetch_xor_release() - atomic bitwise XOR with release ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor_release)
	return arch_atomic64_fetch_xor_release(i, v);
#elif defined(arch_atomic64_fetch_xor_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_xor_relaxed(i, v);
#elif defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic64_fetch_xor_release"
#endif
}

/**
 * raw_atomic64_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor_relaxed)
	return arch_atomic64_fetch_xor_relaxed(i, v);
#elif defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic64_fetch_xor_relaxed"
#endif
}
4025 * raw_atomic64_xchg() - atomic exchange with full ordering
4026 * @v: pointer to atomic64_t
4027 * @new: s64 value to assign
4029 * Atomically updates @v to @new with full ordering.
4031 * Safe to use in noinstr code; prefer atomic64_xchg() elsewhere.
4033 * Return: The original value of @v.
4035 static __always_inline s64
4036 raw_atomic64_xchg(atomic64_t *v, s64 new)
4038 #if defined(arch_atomic64_xchg)
4039 return arch_atomic64_xchg(v, new);
4040 #elif defined(arch_atomic64_xchg_relaxed)
4042 __atomic_pre_full_fence();
4043 ret = arch_atomic64_xchg_relaxed(v, new);
4044 __atomic_post_full_fence();
4047 return raw_xchg(&v->counter, new);
4052 * raw_atomic64_xchg_acquire() - atomic exchange with acquire ordering
4053 * @v: pointer to atomic64_t
4054 * @new: s64 value to assign
4056 * Atomically updates @v to @new with acquire ordering.
4058 * Safe to use in noinstr code; prefer atomic64_xchg_acquire() elsewhere.
4060 * Return: The original value of @v.
4062 static __always_inline s64
4063 raw_atomic64_xchg_acquire(atomic64_t *v, s64 new)
4065 #if defined(arch_atomic64_xchg_acquire)
4066 return arch_atomic64_xchg_acquire(v, new);
4067 #elif defined(arch_atomic64_xchg_relaxed)
4068 s64 ret = arch_atomic64_xchg_relaxed(v, new);
4069 __atomic_acquire_fence();
4071 #elif defined(arch_atomic64_xchg)
4072 return arch_atomic64_xchg(v, new);
4074 return raw_xchg_acquire(&v->counter, new);
4079 * raw_atomic64_xchg_release() - atomic exchange with release ordering
4080 * @v: pointer to atomic64_t
4081 * @new: s64 value to assign
4083 * Atomically updates @v to @new with release ordering.
4085 * Safe to use in noinstr code; prefer atomic64_xchg_release() elsewhere.
4087 * Return: The original value of @v.
4089 static __always_inline s64
4090 raw_atomic64_xchg_release(atomic64_t *v, s64 new)
4092 #if defined(arch_atomic64_xchg_release)
4093 return arch_atomic64_xchg_release(v, new);
4094 #elif defined(arch_atomic64_xchg_relaxed)
4095 __atomic_release_fence();
4096 return arch_atomic64_xchg_relaxed(v, new);
4097 #elif defined(arch_atomic64_xchg)
4098 return arch_atomic64_xchg(v, new);
4100 return raw_xchg_release(&v->counter, new);
4105 * raw_atomic64_xchg_relaxed() - atomic exchange with relaxed ordering
4106 * @v: pointer to atomic64_t
4107 * @new: s64 value to assign
4109 * Atomically updates @v to @new with relaxed ordering.
4111 * Safe to use in noinstr code; prefer atomic64_xchg_relaxed() elsewhere.
4113 * Return: The original value of @v.
4115 static __always_inline s64
4116 raw_atomic64_xchg_relaxed(atomic64_t *v, s64 new)
4118 #if defined(arch_atomic64_xchg_relaxed)
4119 return arch_atomic64_xchg_relaxed(v, new);
4120 #elif defined(arch_atomic64_xchg)
4121 return arch_atomic64_xchg(v, new);
4123 return raw_xchg_relaxed(&v->counter, new);
/**
 * raw_atomic64_cmpxchg() - atomic compare and exchange with full ordering
 * @v: pointer to atomic64_t
 * @old: s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_cmpxchg() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg)
	return arch_atomic64_cmpxchg(v, old, new);
#elif defined(arch_atomic64_cmpxchg_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_cmpxchg(&v->counter, old, new);
#endif
}

/**
 * raw_atomic64_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
 * @v: pointer to atomic64_t
 * @old: s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with acquire ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_cmpxchg_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg_acquire)
	return arch_atomic64_cmpxchg_acquire(v, old, new);
#elif defined(arch_atomic64_cmpxchg_relaxed)
	s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_cmpxchg)
	return arch_atomic64_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_acquire(&v->counter, old, new);
#endif
}

/**
 * raw_atomic64_cmpxchg_release() - atomic compare and exchange with release ordering
 * @v: pointer to atomic64_t
 * @old: s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with release ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_cmpxchg_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg_release)
	return arch_atomic64_cmpxchg_release(v, old, new);
#elif defined(arch_atomic64_cmpxchg_relaxed)
	__atomic_release_fence();
	return arch_atomic64_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_cmpxchg)
	return arch_atomic64_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_release(&v->counter, old, new);
#endif
}

/**
 * raw_atomic64_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
 * @v: pointer to atomic64_t
 * @old: s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_cmpxchg_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg_relaxed)
	return arch_atomic64_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_cmpxchg)
	return arch_atomic64_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_relaxed(&v->counter, old, new);
#endif
}

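/*
 * Illustrative sketch, not emitted by the generator: raw_atomic64_cmpxchg()
 * supports claim-once idioms, where only the caller that observes the
 * expected old value succeeds. The helper name example_claim_owner64() and
 * the zero-means-unowned convention are hypothetical.
 */
static __always_inline bool
example_claim_owner64(atomic64_t *owner, s64 id)
{
	/* Only the first caller moves @owner from 0 to @id and wins. */
	return raw_atomic64_cmpxchg(owner, 0, id) == 0;
}
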
/**
 * raw_atomic64_try_cmpxchg() - atomic compare and exchange with full ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with full ordering.
 * Otherwise, @v is not modified, @old is updated to the current value of @v,
 * and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg_relaxed)
	bool ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
	__atomic_post_full_fence();
	return ret;
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic64_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with acquire ordering.
 * Otherwise, @v is not modified, @old is updated to the current value of @v,
 * and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_acquire() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg_acquire)
	return arch_atomic64_try_cmpxchg_acquire(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg_relaxed)
	bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg_acquire(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic64_try_cmpxchg_release() - atomic compare and exchange with release ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with release ordering.
 * Otherwise, @v is not modified, @old is updated to the current value of @v,
 * and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_release() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg_release)
	return arch_atomic64_try_cmpxchg_release(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg_relaxed)
	__atomic_release_fence();
	return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg_release(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic64_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
 * Otherwise, @v is not modified, @old is updated to the current value of @v,
 * and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_relaxed() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg_relaxed)
	return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg_relaxed(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

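/*
 * Illustrative sketch, not emitted by the generator: try_cmpxchg() is made
 * for retry loops, since a failed attempt writes the current value back
 * into @old, so no explicit re-read is needed on each iteration. The helper
 * name example_add_clamped64() and its clamping policy are hypothetical;
 * the sketch assumes 0 <= i <= max.
 */
static __always_inline bool
example_add_clamped64(atomic64_t *v, s64 i, s64 max)
{
	s64 old = raw_atomic64_read(v);

	do {
		if (old > max - i)	/* adding @i would exceed @max */
			return false;
	} while (!raw_atomic64_try_cmpxchg(v, &old, old + i));

	return true;
}
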
/**
 * raw_atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_sub_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_sub_and_test)
	return arch_atomic64_sub_and_test(i, v);
#else
	return raw_atomic64_sub_return(i, v) == 0;
#endif
}

/**
 * raw_atomic64_dec_and_test() - atomic decrement and test if zero with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic64_dec_and_test(atomic64_t *v)
{
#if defined(arch_atomic64_dec_and_test)
	return arch_atomic64_dec_and_test(v);
#else
	return raw_atomic64_dec_return(v) == 0;
#endif
}

/**
 * raw_atomic64_inc_and_test() - atomic increment and test if zero with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic64_inc_and_test(atomic64_t *v)
{
#if defined(arch_atomic64_inc_and_test)
	return arch_atomic64_inc_and_test(v);
#else
	return raw_atomic64_inc_return(v) == 0;
#endif
}

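/*
 * Illustrative sketch, not emitted by the generator: dec_and_test() is the
 * classic release side of a reference count, where exactly one caller
 * observes the transition to zero. The helper name example_put_ref64() is
 * hypothetical.
 */
static __always_inline bool
example_put_ref64(atomic64_t *refs)
{
	/*
	 * Full ordering makes all prior accesses to the refcounted object
	 * visible before the final decrement; a true return means this
	 * caller alone is responsible for freeing the object.
	 */
	return raw_atomic64_dec_and_test(refs);
}
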
/**
 * raw_atomic64_add_negative() - atomic add and test if negative with full ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#elif defined(arch_atomic64_add_negative_relaxed)
	bool ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_add_negative_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic64_add_return(i, v) < 0;
#endif
}

/**
 * raw_atomic64_add_negative_acquire() - atomic add and test if negative with acquire ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative_acquire() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative_acquire)
	return arch_atomic64_add_negative_acquire(i, v);
#elif defined(arch_atomic64_add_negative_relaxed)
	bool ret = arch_atomic64_add_negative_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#else
	return raw_atomic64_add_return_acquire(i, v) < 0;
#endif
}

/**
 * raw_atomic64_add_negative_release() - atomic add and test if negative with release ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative_release() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative_release)
	return arch_atomic64_add_negative_release(i, v);
#elif defined(arch_atomic64_add_negative_relaxed)
	__atomic_release_fence();
	return arch_atomic64_add_negative_relaxed(i, v);
#elif defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#else
	return raw_atomic64_add_return_release(i, v) < 0;
#endif
}

/**
 * raw_atomic64_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative_relaxed() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative_relaxed)
	return arch_atomic64_add_negative_relaxed(i, v);
#elif defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#else
	return raw_atomic64_add_return_relaxed(i, v) < 0;
#endif
}

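/*
 * Illustrative sketch, not emitted by the generator: add_negative() can
 * drive a signed budget that callers draw down, detecting overdraw in the
 * same atomic step as the deduction. The helper name example_overdrawn64()
 * is hypothetical; the sketch assumes @cost is positive and well away from
 * the s64 limits, so the negation cannot overflow.
 */
static __always_inline bool
example_overdrawn64(atomic64_t *budget, s64 cost)
{
	/* Deduct @cost; true means the balance is negative afterwards. */
	return raw_atomic64_add_negative(-cost, budget);
}
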
/**
 * raw_atomic64_fetch_add_unless() - atomic add unless value with full ordering
 * @v: pointer to atomic64_t
 * @a: s64 value to add
 * @u: s64 value to compare with
 *
 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_add_unless() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
#if defined(arch_atomic64_fetch_add_unless)
	return arch_atomic64_fetch_add_unless(v, a, u);
#else
	s64 c = raw_atomic64_read(v);

	do {
		if (unlikely(c == u))
			break;
	} while (!raw_atomic64_try_cmpxchg(v, &c, c + a));

	return c;
#endif
}

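/*
 * Illustrative sketch, not emitted by the generator: fetch_add_unless()
 * lets a counter saturate at a sentinel instead of advancing past it. The
 * helper name example_count_capped64() and the cap-as-sentinel convention
 * are hypothetical.
 */
static __always_inline bool
example_count_capped64(atomic64_t *v, s64 cap)
{
	/* Count an event unless @cap was already reached; false once full. */
	return raw_atomic64_fetch_add_unless(v, 1, cap) != cap;
}
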
/**
 * raw_atomic64_add_unless() - atomic add unless value with full ordering
 * @v: pointer to atomic64_t
 * @a: s64 value to add
 * @u: s64 value to compare with
 *
 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_add_unless() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
#if defined(arch_atomic64_add_unless)
	return arch_atomic64_add_unless(v, a, u);
#else
	return raw_atomic64_fetch_add_unless(v, a, u) != u;
#endif
}

/**
 * raw_atomic64_inc_not_zero() - atomic increment unless zero with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_not_zero() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_inc_not_zero(atomic64_t *v)
{
#if defined(arch_atomic64_inc_not_zero)
	return arch_atomic64_inc_not_zero(v);
#else
	return raw_atomic64_add_unless(v, 1, 0);
#endif
}

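/*
 * Illustrative sketch, not emitted by the generator: inc_not_zero() is the
 * lookup side of a reference count, refusing to resurrect an object whose
 * count has already dropped to zero. The helper name example_tryget64() is
 * hypothetical.
 */
static __always_inline bool
example_tryget64(atomic64_t *refs)
{
	/* False means teardown already began; the object must not be used. */
	return raw_atomic64_inc_not_zero(refs);
}
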
/**
 * raw_atomic64_inc_unless_negative() - atomic increment unless negative with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_unless_negative() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_inc_unless_negative(atomic64_t *v)
{
#if defined(arch_atomic64_inc_unless_negative)
	return arch_atomic64_inc_unless_negative(v);
#else
	s64 c = raw_atomic64_read(v);

	do {
		if (unlikely(c < 0))
			return false;
	} while (!raw_atomic64_try_cmpxchg(v, &c, c + 1));

	return true;
#endif
}

/**
 * raw_atomic64_dec_unless_positive() - atomic decrement unless positive with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_unless_positive() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_dec_unless_positive(atomic64_t *v)
{
#if defined(arch_atomic64_dec_unless_positive)
	return arch_atomic64_dec_unless_positive(v);
#else
	s64 c = raw_atomic64_read(v);

	do {
		if (unlikely(c > 0))
			return false;
	} while (!raw_atomic64_try_cmpxchg(v, &c, c - 1));

	return true;
#endif
}

/**
 * raw_atomic64_dec_if_positive() - atomic decrement if positive with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
 * Otherwise, @v is not modified and relaxed ordering is provided.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_if_positive() elsewhere.
 *
 * Return: The old value of (@v - 1), regardless of whether @v was updated.
 */
static __always_inline s64
raw_atomic64_dec_if_positive(atomic64_t *v)
{
#if defined(arch_atomic64_dec_if_positive)
	return arch_atomic64_dec_if_positive(v);
#else
	s64 dec, c = raw_atomic64_read(v);

	do {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
	} while (!raw_atomic64_try_cmpxchg(v, &c, dec));

	return dec;
#endif
}

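/*
 * Illustrative sketch, not emitted by the generator: dec_if_positive()
 * behaves like a trylock-style semaphore down, consuming one unit only when
 * one is available. The helper name example_trydown64() is hypothetical.
 */
static __always_inline bool
example_trydown64(atomic64_t *sem)
{
	/* A non-negative result means one unit was successfully consumed. */
	return raw_atomic64_dec_if_positive(sem) >= 0;
}
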
#endif /* _LINUX_ATOMIC_FALLBACK_H */
// b565db590afeeff0d7c9485ccbca5bb6e155749f