locking,arch,xtensa: Fold atomic_ops
author    Peter Zijlstra <peterz@infradead.org>
Wed, 26 Mar 2014 17:31:12 +0000 (18:31 +0100)
committer Ingo Molnar <mingo@kernel.org>
Thu, 14 Aug 2014 10:48:14 +0000 (12:48 +0200)
Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

This also prepares for easy addition of new ops.
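
For illustration (not part of the patch text itself), ATOMIC_OPS(add) on an
S32C1I-capable core expands to roughly the following, with #op pasted into
both the function name and the asm template:

    static inline void atomic_add(int i, atomic_t * v)
    {
            unsigned long tmp;
            int result;

            __asm__ __volatile__(
                            "1:     l32i    %1, %3, 0\n"
                            "       wsr     %1, scompare1\n"
                            "       add     %0, %1, %2\n"
                            "       s32c1i  %0, %3, 0\n"
                            "       bne     %0, %1, 1b\n"
                            : "=&a" (result), "=&a" (tmp)
                            : "a" (i), "a" (v)
                            : "memory"
                            );
    }

plus atomic_add_return(), which differs only in the trailing
"add %0, %0, %2" and in returning result. Adding a new op is then a single
ATOMIC_OPS(op) line.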

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Chris Zankel <chris@zankel.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: linux-xtensa@linux-xtensa.org
Link: http://lkml.kernel.org/r/20140508135852.879575796@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/xtensa/include/asm/atomic.h

index e5103b47a8cefaf98a6e97d5f6bbd987eecb8c91..626676660b80a929ab6b61320b78e4d5f9aa51c9 100644
  */
 #define atomic_set(v,i)                ((v)->counter = (i))
 
-/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static inline void atomic_add(int i, atomic_t * v)
-{
 #if XCHAL_HAVE_S32C1I
-       unsigned long tmp;
-       int result;
-
-       __asm__ __volatile__(
-                       "1:     l32i    %1, %3, 0\n"
-                       "       wsr     %1, scompare1\n"
-                       "       add     %0, %1, %2\n"
-                       "       s32c1i  %0, %3, 0\n"
-                       "       bne     %0, %1, 1b\n"
-                       : "=&a" (result), "=&a" (tmp)
-                       : "a" (i), "a" (v)
-                       : "memory"
-                       );
-#else
-       unsigned int vval;
-
-       __asm__ __volatile__(
-                       "       rsil    a15, "__stringify(LOCKLEVEL)"\n"
-                       "       l32i    %0, %2, 0\n"
-                       "       add     %0, %0, %1\n"
-                       "       s32i    %0, %2, 0\n"
-                       "       wsr     a15, ps\n"
-                       "       rsync\n"
-                       : "=&a" (vval)
-                       : "a" (i), "a" (v)
-                       : "a15", "memory"
-                       );
-#endif
-}
-
-/**
- * atomic_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v.
- */
-static inline void atomic_sub(int i, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-       unsigned long tmp;
-       int result;
-
-       __asm__ __volatile__(
-                       "1:     l32i    %1, %3, 0\n"
-                       "       wsr     %1, scompare1\n"
-                       "       sub     %0, %1, %2\n"
-                       "       s32c1i  %0, %3, 0\n"
-                       "       bne     %0, %1, 1b\n"
-                       : "=&a" (result), "=&a" (tmp)
-                       : "a" (i), "a" (v)
-                       : "memory"
-                       );
-#else
-       unsigned int vval;
-
-       __asm__ __volatile__(
-                       "       rsil    a15, "__stringify(LOCKLEVEL)"\n"
-                       "       l32i    %0, %2, 0\n"
-                       "       sub     %0, %0, %1\n"
-                       "       s32i    %0, %2, 0\n"
-                       "       wsr     a15, ps\n"
-                       "       rsync\n"
-                       : "=&a" (vval)
-                       : "a" (i), "a" (v)
-                       : "a15", "memory"
-                       );
-#endif
+#define ATOMIC_OP(op)                                                  \
+static inline void atomic_##op(int i, atomic_t * v)                    \
+{                                                                      \
+       unsigned long tmp;                                              \
+       int result;                                                     \
+                                                                       \
+       __asm__ __volatile__(                                           \
+                       "1:     l32i    %1, %3, 0\n"                    \
+                       "       wsr     %1, scompare1\n"                \
+                       "       " #op " %0, %1, %2\n"                   \
+                       "       s32c1i  %0, %3, 0\n"                    \
+                       "       bne     %0, %1, 1b\n"                   \
+                       : "=&a" (result), "=&a" (tmp)                   \
+                       : "a" (i), "a" (v)                              \
+                       : "memory"                                      \
+                       );                                              \
+}                                                                      \
+
+#define ATOMIC_OP_RETURN(op)                                           \
+static inline int atomic_##op##_return(int i, atomic_t * v)            \
+{                                                                      \
+       unsigned long tmp;                                              \
+       int result;                                                     \
+                                                                       \
+       __asm__ __volatile__(                                           \
+                       "1:     l32i    %1, %3, 0\n"                    \
+                       "       wsr     %1, scompare1\n"                \
+                       "       " #op " %0, %1, %2\n"                   \
+                       "       s32c1i  %0, %3, 0\n"                    \
+                       "       bne     %0, %1, 1b\n"                   \
+                       "       " #op " %0, %0, %2\n"                   \
+                       : "=&a" (result), "=&a" (tmp)                   \
+                       : "a" (i), "a" (v)                              \
+                       : "memory"                                      \
+                       );                                              \
+                                                                       \
+       return result;                                                  \
 }
 
-/*
- * We use atomic_{add|sub}_return to define other functions.
- */
-
-static inline int atomic_add_return(int i, atomic_t * v)
-{
-#if XCHAL_HAVE_S32C1I
-       unsigned long tmp;
-       int result;
-
-       __asm__ __volatile__(
-                       "1:     l32i    %1, %3, 0\n"
-                       "       wsr     %1, scompare1\n"
-                       "       add     %0, %1, %2\n"
-                       "       s32c1i  %0, %3, 0\n"
-                       "       bne     %0, %1, 1b\n"
-                       "       add     %0, %0, %2\n"
-                       : "=&a" (result), "=&a" (tmp)
-                       : "a" (i), "a" (v)
-                       : "memory"
-                       );
-
-       return result;
-#else
-       unsigned int vval;
-
-       __asm__ __volatile__(
-                       "       rsil    a15,"__stringify(LOCKLEVEL)"\n"
-                       "       l32i    %0, %2, 0\n"
-                       "       add     %0, %0, %1\n"
-                       "       s32i    %0, %2, 0\n"
-                       "       wsr     a15, ps\n"
-                       "       rsync\n"
-                       : "=&a" (vval)
-                       : "a" (i), "a" (v)
-                       : "a15", "memory"
-                       );
-
-       return vval;
-#endif
+#else /* XCHAL_HAVE_S32C1I */
+
+#define ATOMIC_OP(op)                                                  \
+static inline void atomic_##op(int i, atomic_t * v)                    \
+{                                                                      \
+       unsigned int vval;                                              \
+                                                                       \
+       __asm__ __volatile__(                                           \
+                       "       rsil    a15, "__stringify(LOCKLEVEL)"\n"\
+                       "       l32i    %0, %2, 0\n"                    \
+                       "       " #op " %0, %0, %1\n"                   \
+                       "       s32i    %0, %2, 0\n"                    \
+                       "       wsr     a15, ps\n"                      \
+                       "       rsync\n"                                \
+                       : "=&a" (vval)                                  \
+                       : "a" (i), "a" (v)                              \
+                       : "a15", "memory"                               \
+                       );                                              \
+}                                                                      \
+
+#define ATOMIC_OP_RETURN(op)                                           \
+static inline int atomic_##op##_return(int i, atomic_t * v)            \
+{                                                                      \
+       unsigned int vval;                                              \
+                                                                       \
+       __asm__ __volatile__(                                           \
+                       "       rsil    a15,"__stringify(LOCKLEVEL)"\n" \
+                       "       l32i    %0, %2, 0\n"                    \
+                       "       " #op " %0, %0, %1\n"                   \
+                       "       s32i    %0, %2, 0\n"                    \
+                       "       wsr     a15, ps\n"                      \
+                       "       rsync\n"                                \
+                       : "=&a" (vval)                                  \
+                       : "a" (i), "a" (v)                              \
+                       : "a15", "memory"                               \
+                       );                                              \
+                                                                       \
+       return vval;                                                    \
 }
 
-static inline int atomic_sub_return(int i, atomic_t * v)
-{
-#if XCHAL_HAVE_S32C1I
-       unsigned long tmp;
-       int result;
+#endif /* XCHAL_HAVE_S32C1I */
 
-       __asm__ __volatile__(
-                       "1:     l32i    %1, %3, 0\n"
-                       "       wsr     %1, scompare1\n"
-                       "       sub     %0, %1, %2\n"
-                       "       s32c1i  %0, %3, 0\n"
-                       "       bne     %0, %1, 1b\n"
-                       "       sub     %0, %0, %2\n"
-                       : "=&a" (result), "=&a" (tmp)
-                       : "a" (i), "a" (v)
-                       : "memory"
-                       );
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
 
-       return result;
-#else
-       unsigned int vval;
-
-       __asm__ __volatile__(
-                       "       rsil    a15,"__stringify(LOCKLEVEL)"\n"
-                       "       l32i    %0, %2, 0\n"
-                       "       sub     %0, %0, %1\n"
-                       "       s32i    %0, %2, 0\n"
-                       "       wsr     a15, ps\n"
-                       "       rsync\n"
-                       : "=&a" (vval)
-                       : "a" (i), "a" (v)
-                       : "a15", "memory"
-                       );
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
 
-       return vval;
-#endif
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 /**
  * atomic_sub_and_test - subtract value from variable and test result