locking,arch,hexagon: Fold atomic_ops
author     Peter Zijlstra <peterz@infradead.org>
           Sun, 23 Mar 2014 17:20:26 +0000 (18:20 +0100)
committer  Ingo Molnar <mingo@kernel.org>
           Thu, 14 Aug 2014 10:48:06 +0000 (12:48 +0200)
No lines of code are saved in this case, because the !return variants
were already defined in terms of the return ops. Still worth doing,
because it also prepares for the easy addition of new ops.
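
For illustration only (not part of the patch): after the fold, ATOMIC_OPS(add)
generates both atomic_add() and atomic_add_return(), and a new operation becomes
a one-line addition, provided the Hexagon ISA has an instruction whose name
matches the "#op" string. The sketch below assumes the atomic_t definition and
surrounding context of arch/hexagon/include/asm/atomic.h; the "and" op is a
hypothetical example and is not added by this patch.

  /* Roughly what ATOMIC_OP(add) expands to (sketch, not compiler output): */
  static inline void atomic_add(int i, atomic_t *v)
  {
          int output;

          __asm__ __volatile__ (
                  "1:     %0 = memw_locked(%1);\n"   /* load-locked v->counter    */
                  "       %0 = add(%0,%2);\n"        /* apply the operation       */
                  "       memw_locked(%1,P3)=%0;\n"  /* store-conditional, P3=ok  */
                  "       if !P3 jump 1b;\n"         /* retry if the store failed */
                  : "=&r" (output)
                  : "r" (&v->counter), "r" (i)
                  : "memory", "p3"
          );
  }

  /* A hypothetical new op is then a single line at the instantiation site: */
  ATOMIC_OPS(and)   /* assumes the ISA provides a matching and(Rs,Rt) insn */

Keeping the LL/SC loop in one place also means any later fix to the loop (for
example to the clobbers or the retry logic) only has to be made once.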

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Richard Kuo <rkuo@codeaurora.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: linux-hexagon@vger.kernel.org
Link: http://lkml.kernel.org/r/20140508135852.171567636@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/hexagon/include/asm/atomic.h

index de916b11bff520ccadbc8d885787aff053514c62..93d07025f183d65becd620aba67a2d4f9bf34563 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -94,41 +94,47 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
        return __oldval;
 }
 
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-       int output;
-
-       __asm__ __volatile__ (
-               "1:     %0 = memw_locked(%1);\n"
-               "       %0 = add(%0,%2);\n"
-               "       memw_locked(%1,P3)=%0;\n"
-               "       if !P3 jump 1b;\n"
-               : "=&r" (output)
-               : "r" (&v->counter), "r" (i)
-               : "memory", "p3"
-       );
-       return output;
-
+#define ATOMIC_OP(op)                                                  \
+static inline void atomic_##op(int i, atomic_t *v)                     \
+{                                                                      \
+       int output;                                                     \
+                                                                       \
+       __asm__ __volatile__ (                                          \
+               "1:     %0 = memw_locked(%1);\n"                        \
+               "       %0 = "#op "(%0,%2);\n"                          \
+               "       memw_locked(%1,P3)=%0;\n"                       \
+               "       if !P3 jump 1b;\n"                              \
+               : "=&r" (output)                                        \
+               : "r" (&v->counter), "r" (i)                            \
+               : "memory", "p3"                                        \
+       );                                                              \
+}
+
+#define ATOMIC_OP_RETURN(op)                                            \
+static inline int atomic_##op##_return(int i, atomic_t *v)             \
+{                                                                      \
+       int output;                                                     \
+                                                                       \
+       __asm__ __volatile__ (                                          \
+               "1:     %0 = memw_locked(%1);\n"                        \
+               "       %0 = "#op "(%0,%2);\n"                          \
+               "       memw_locked(%1,P3)=%0;\n"                       \
+               "       if !P3 jump 1b;\n"                              \
+               : "=&r" (output)                                        \
+               : "r" (&v->counter), "r" (i)                            \
+               : "memory", "p3"                                        \
+       );                                                              \
+       return output;                                                  \
 }
 
-#define atomic_add(i, v) atomic_add_return(i, (v))
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
 
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-       int output;
-       __asm__ __volatile__ (
-               "1:     %0 = memw_locked(%1);\n"
-               "       %0 = sub(%0,%2);\n"
-               "       memw_locked(%1,P3)=%0\n"
-               "       if !P3 jump 1b;\n"
-               : "=&r" (output)
-               : "r" (&v->counter), "r" (i)
-               : "memory", "p3"
-       );
-       return output;
-}
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
 
-#define atomic_sub(i, v) atomic_sub_return(i, (v))
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 /**
  * __atomic_add_unless - add unless the number is a given value