locking/atomic: Move ATOMIC_INIT into linux/types.h
author    Herbert Xu <herbert@gondor.apana.org.au>
          Wed, 29 Jul 2020 12:31:05 +0000 (22:31 +1000)
committer Peter Zijlstra <peterz@infradead.org>
          Wed, 29 Jul 2020 14:14:18 +0000 (16:14 +0200)
This patch moves ATOMIC_INIT from asm/atomic.h into linux/types.h.
This allows users of atomic_t to use ATOMIC_INIT without having to
include atomic.h, since including atomic.h may lead to header loops.
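As a rough sketch of the intended usage (the struct and variable names
below are made up for illustration, not taken from the tree), a header
that only needs to declare and statically initialise an atomic_t can
now get by with linux/types.h alone; the atomic_*() operations
themselves still require atomic.h:

	#include <linux/types.h>	/* atomic_t and, after this patch, ATOMIC_INIT() */

	struct foo_stats {		/* hypothetical example struct */
		atomic_t refs;
	};

	/* static initialisation no longer pulls in atomic.h */
	static struct foo_stats foo_stats = {
		.refs = ATOMIC_INIT(1),
	};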

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Waiman Long <longman@redhat.com>
Link: https://lkml.kernel.org/r/20200729123105.GB7047@gondor.apana.org.au
20 files changed:
arch/alpha/include/asm/atomic.h
arch/arc/include/asm/atomic.h
arch/arm/include/asm/atomic.h
arch/arm64/include/asm/atomic.h
arch/h8300/include/asm/atomic.h
arch/hexagon/include/asm/atomic.h
arch/ia64/include/asm/atomic.h
arch/m68k/include/asm/atomic.h
arch/mips/include/asm/atomic.h
arch/parisc/include/asm/atomic.h
arch/powerpc/include/asm/atomic.h
arch/riscv/include/asm/atomic.h
arch/s390/include/asm/atomic.h
arch/sh/include/asm/atomic.h
arch/sparc/include/asm/atomic_32.h
arch/sparc/include/asm/atomic_64.h
arch/x86/include/asm/atomic.h
arch/xtensa/include/asm/atomic.h
include/asm-generic/atomic.h
include/linux/types.h

index 2144530d1428ca618a599ae3488fef7b5e90ed3b..e2093994fd0de42a4244848160e80169694098cb 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -24,7 +24,6 @@
 #define __atomic_acquire_fence()
 #define __atomic_post_full_fence()
 
-#define ATOMIC_INIT(i)         { (i) }
 #define ATOMIC64_INIT(i)       { (i) }
 
 #define atomic_read(v)         READ_ONCE((v)->counter)
index 7298ce84762e9dda46693e1436719103f1dc7ba7..c614857eb20917cd9a89b30b9a8f2436200c849d 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -14,8 +14,6 @@
 #include <asm/barrier.h>
 #include <asm/smp.h>
 
-#define ATOMIC_INIT(i) { (i) }
-
 #ifndef CONFIG_ARC_PLAT_EZNPS
 
 #define atomic_read(v)  READ_ONCE((v)->counter)
index 75bb2c543e592ad81f05e0ea27bee03f6aa67203..455eb19a5ac14c37f30bd4a93a8e216628f0f3fa 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -15,8 +15,6 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
 
-#define ATOMIC_INIT(i) { (i) }
-
 #ifdef __KERNEL__
 
 /*
index a08890da696c75d39198c9fde8d5042b709d2066..015ddffaf6caa3213813fcdcbb5601b1a3ec95d8 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -99,8 +99,6 @@ static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
        return __lse_ll_sc_body(atomic64_dec_if_positive, v);
 }
 
-#define ATOMIC_INIT(i) { (i) }
-
 #define arch_atomic_read(v)                    __READ_ONCE((v)->counter)
 #define arch_atomic_set(v, i)                  __WRITE_ONCE(((v)->counter), (i))
 
index c6b6a06231b2e2f9a5998385ceda0b2acc841e3e..a990d151f1633a4df92121a972747e82b61182b0 100644
--- a/arch/h8300/include/asm/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
@@ -12,8 +12,6 @@
  * resource counting etc..
  */
 
-#define ATOMIC_INIT(i) { (i) }
-
 #define atomic_read(v)         READ_ONCE((v)->counter)
 #define atomic_set(v, i)       WRITE_ONCE(((v)->counter), (i))
 
index 0231d69c8bf2bf35285a21a3117c5a8f2ab26f1a..4ab895d7111f60c98406335e27f13313aed880fe 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -12,8 +12,6 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 
-#define ATOMIC_INIT(i)         { (i) }
-
 /*  Normal writes in our arch don't clear lock reservations  */
 
 static inline void atomic_set(atomic_t *v, int new)
index 50440f3ddc43845ccdb4f81cbae09410e10e93c9..f267d956458f593cff4242697377b231e14f9ffe 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -19,7 +19,6 @@
 #include <asm/barrier.h>
 
 
-#define ATOMIC_INIT(i)         { (i) }
 #define ATOMIC64_INIT(i)       { (i) }
 
 #define atomic_read(v)         READ_ONCE((v)->counter)
index 47228b0d4163f38002709bb76308d63917f7dca0..756c5cc58f94494b0c5fffc7d85aa2cc554a8bcf 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -16,8 +16,6 @@
  * We do not have SMP m68k systems, so we don't have to deal with that.
  */
 
-#define ATOMIC_INIT(i) { (i) }
-
 #define atomic_read(v)         READ_ONCE((v)->counter)
 #define atomic_set(v, i)       WRITE_ONCE(((v)->counter), (i))
 
index e5ac88392d1f5e7342a25f102eaa657a243601e8..f904084fcb1fd3510101d0ef1b3810031332d488 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -45,7 +45,6 @@ static __always_inline type pfx##_xchg(pfx##_t *v, type n)            \
        return xchg(&v->counter, n);                                    \
 }
 
-#define ATOMIC_INIT(i)         { (i) }
 ATOMIC_OPS(atomic, int)
 
 #ifdef CONFIG_64BIT
index 118953d417634369047dc71bb2db3a400b7719fd..f960e2f32b1b6451b37e88947f78079e5dea30e6 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -136,8 +136,6 @@ ATOMIC_OPS(xor, ^=)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-#define ATOMIC_INIT(i) { (i) }
-
 #ifdef CONFIG_64BIT
 
 #define ATOMIC64_INIT(i) { (i) }
index 498785ffc25f33c0c9656d0596be2daed78c11ad..0311c3c4296062a7616bc28ae76c1e36fd65278e 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -11,8 +11,6 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 
-#define ATOMIC_INIT(i)         { (i) }
-
 /*
  * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
  * a "bne-" instruction at the end, so an isync is enough as a acquire barrier
index 96f95c9ebd97f6cfa3782ab745aa96a8381ff4bc..400a8c8b6de752bc41c4a507a4dbd6ef52a23463 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -19,8 +19,6 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 
-#define ATOMIC_INIT(i) { (i) }
-
 #define __atomic_acquire_fence()                                       \
        __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
 
index 491ad53a0d4e8102f661bd6f1eccbc9cdac0c830..cae473a7b6f77cd56f52677fc26104753d018d80 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -15,8 +15,6 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
 
-#define ATOMIC_INIT(i)  { (i) }
-
 static inline int atomic_read(const atomic_t *v)
 {
        int c;
index f37b95a80232d65d3467b49acb89a80290703752..7c2a8a703b9a25c24a9425551cee715fcdbd67db 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -19,8 +19,6 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 
-#define ATOMIC_INIT(i) { (i) }
-
 #define atomic_read(v)         READ_ONCE((v)->counter)
 #define atomic_set(v,i)                WRITE_ONCE((v)->counter, (i))
 
index 94c930f0bc62a7b3aa9736c310bb2a7024abecc2..efad5532f16992610e632b2428ae1848c0661198 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -18,8 +18,6 @@
 #include <asm/barrier.h>
 #include <asm-generic/atomic64.h>
 
-#define ATOMIC_INIT(i)  { (i) }
-
 int atomic_add_return(int, atomic_t *);
 int atomic_fetch_add(int, atomic_t *);
 int atomic_fetch_and(int, atomic_t *);
index b60448397d4ffd8f69f2d11c4395256791c757f9..6b235d3d1d9db76bcb4541059c79fbc6fbe023ec 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -12,7 +12,6 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 
-#define ATOMIC_INIT(i)         { (i) }
 #define ATOMIC64_INIT(i)       { (i) }
 
 #define atomic_read(v)         READ_ONCE((v)->counter)
index bf35e476a77613547b383f274ff71b403cb4a074..b6cac6e9bb70c41329f4b60d179ed239e31c5648 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -14,8 +14,6 @@
  * resource counting etc..
  */
 
-#define ATOMIC_INIT(i) { (i) }
-
 /**
  * arch_atomic_read - read atomic variable
  * @v: pointer of type atomic_t
index 3e7c6134ed32bacda6e5ea2bb99850d3107d6914..744c2f463845d5c3fb1676b420fb3aded367bafe 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -19,8 +19,6 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 
-#define ATOMIC_INIT(i) { (i) }
-
 /*
  * This Xtensa implementation assumes that the right mechanism
  * for exclusion is for locking interrupts to level EXCM_LEVEL.
index 286867f593d294381a3e8fdc32cc5f77b377c390..11f96f40f4a79f293ffa4fb5766e222b9042d751 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -159,8 +159,6 @@ ATOMIC_OP(xor, ^)
  * resource counting etc..
  */
 
-#define ATOMIC_INIT(i) { (i) }
-
 /**
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
index d3021c87917953be758958fdf0fef4731702224c..a147977602b5ea8b4dc815c602a965c0c543983e 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -167,6 +167,8 @@ typedef struct {
        int counter;
 } atomic_t;
 
+#define ATOMIC_INIT(i) { (i) }
+
 #ifdef CONFIG_64BIT
 typedef struct {
        s64 counter;