openrisc: add optimized atomic operations
authorStefan Kristiansson <stefan.kristiansson@saunalahti.fi>
Tue, 13 May 2014 19:30:56 +0000 (22:30 +0300)
committerStafford Horne <shorne@gmail.com>
Fri, 24 Feb 2017 19:14:06 +0000 (04:14 +0900)
Using the l.lwa and l.swa atomic instruction pair.
Most openrisc processor cores provide these instructions now. If the
instructions are not available, emulation is provided.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
[shorne@gmail.com: remove OPENRISC_HAVE_INST_LWA_SWA config suggested by
Alan Cox https://lkml.org/lkml/2014/7/23/666]
[shorne@gmail.com: expand to implement all ops suggested by Peter
Zijlstra https://lkml.org/lkml/2017/2/20/317]
Signed-off-by: Stafford Horne <shorne@gmail.com>
arch/openrisc/include/asm/Kbuild
arch/openrisc/include/asm/atomic.h [new file with mode: 0644]
include/asm-generic/atomic.h

index 15e6ed526453d3d162a99063f60ad8e1529b9027..1cedd6309fa6aedc8714d0552514874b795828de 100644 (file)
@@ -1,7 +1,6 @@
 
 header-y += ucontext.h
 
-generic-y += atomic.h
 generic-y += auxvec.h
 generic-y += barrier.h
 generic-y += bitsperlong.h
diff --git a/arch/openrisc/include/asm/atomic.h b/arch/openrisc/include/asm/atomic.h
new file mode 100644 (file)
index 0000000..146e166
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __ASM_OPENRISC_ATOMIC_H
+#define __ASM_OPENRISC_ATOMIC_H
+
+#include <linux/types.h>
+
+/* Atomically perform op with v->counter and i */
+/*
+ * Generate atomic_<op>(): a load-linked/store-conditional style retry
+ * loop.  l.lwa loads v->counter and sets a reservation on the address;
+ * l.swa stores the updated value only if the reservation is still
+ * intact, setting the flag on success.  l.bnf (branch if no flag)
+ * loops back to retry a failed store; the l.nop fills the branch
+ * delay slot.  "cc" and "memory" are clobbered by the asm.
+ */
+#define ATOMIC_OP(op)                                                  \
+static inline void atomic_##op(int i, atomic_t *v)                     \
+{                                                                      \
+       int tmp;                                                        \
+                                                                       \
+       __asm__ __volatile__(                                           \
+               "1:     l.lwa   %0,0(%1)        \n"                     \
+               "       l." #op " %0,%0,%2      \n"                     \
+               "       l.swa   0(%1),%0        \n"                     \
+               "       l.bnf   1b              \n"                     \
+               "        l.nop                  \n"                     \
+               : "=&r"(tmp)                                            \
+               : "r"(&v->counter), "r"(i)                              \
+               : "cc", "memory");                                      \
+}
+
+/* Atomically perform op with v->counter and i, return the result */
+/*
+ * Generate atomic_<op>_return(): same l.lwa/l.swa retry loop as
+ * ATOMIC_OP, but the freshly computed value is kept in tmp and
+ * returned to the caller (i.e. the *new* value of v->counter).
+ */
+#define ATOMIC_OP_RETURN(op)                                           \
+static inline int atomic_##op##_return(int i, atomic_t *v)             \
+{                                                                      \
+       int tmp;                                                        \
+                                                                       \
+       __asm__ __volatile__(                                           \
+               "1:     l.lwa   %0,0(%1)        \n"                     \
+               "       l." #op " %0,%0,%2      \n"                     \
+               "       l.swa   0(%1),%0        \n"                     \
+               "       l.bnf   1b              \n"                     \
+               "        l.nop                  \n"                     \
+               : "=&r"(tmp)                                            \
+               : "r"(&v->counter), "r"(i)                              \
+               : "cc", "memory");                                      \
+                                                                       \
+       return tmp;                                                     \
+}
+
+/* Atomically perform op with v->counter and i, return orig v->counter */
+/*
+ * Generate atomic_fetch_<op>(): like ATOMIC_OP_RETURN, but the value
+ * read by l.lwa (the counter's contents *before* the operation) is
+ * preserved in 'old' and returned, while the operation's result is
+ * computed into 'tmp' and conditionally stored by l.swa.
+ */
+#define ATOMIC_FETCH_OP(op)                                            \
+static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
+{                                                                      \
+       int tmp, old;                                                   \
+                                                                       \
+       __asm__ __volatile__(                                           \
+               "1:     l.lwa   %0,0(%2)        \n"                     \
+               "       l." #op " %1,%0,%3      \n"                     \
+               "       l.swa   0(%2),%1        \n"                     \
+               "       l.bnf   1b              \n"                     \
+               "        l.nop                  \n"                     \
+               : "=&r"(old), "=&r"(tmp)                                \
+               : "r"(&v->counter), "r"(i)                              \
+               : "cc", "memory");                                      \
+                                                                       \
+       return old;                                                     \
+}
+
+/*
+ * Instantiate the optimized operations.  Note that plain atomic_add()
+ * and atomic_sub() are not generated here -- presumably the
+ * asm-generic/atomic.h included at the bottom of this file supplies
+ * them (TODO: confirm against that header).
+ */
+ATOMIC_OP_RETURN(add)
+ATOMIC_OP_RETURN(sub)
+
+ATOMIC_FETCH_OP(add)
+ATOMIC_FETCH_OP(sub)
+ATOMIC_FETCH_OP(and)
+ATOMIC_FETCH_OP(or)
+ATOMIC_FETCH_OP(xor)
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
+/* The generator macros are local to this header; drop them. */
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+/*
+ * Self-referential defines mark each op as already provided, so that
+ * asm-generic/atomic.h (included below) does not emit its generic
+ * fallback versions for them.
+ */
+#define atomic_add_return      atomic_add_return
+#define atomic_sub_return      atomic_sub_return
+#define atomic_fetch_add       atomic_fetch_add
+#define atomic_fetch_sub       atomic_fetch_sub
+#define atomic_fetch_and       atomic_fetch_and
+#define atomic_fetch_or                atomic_fetch_or
+#define atomic_fetch_xor       atomic_fetch_xor
+#define atomic_and     atomic_and
+#define atomic_or      atomic_or
+#define atomic_xor     atomic_xor
+
+/*
+ * Atomically add a to v->counter as long as v is not already u.
+ * Returns the original value at v->counter.
+ *
+ * This is often used through atomic_inc_not_zero()
+ *
+ * Flow: l.lwa loads the current value with a reservation; l.sfeq/l.bf
+ * bail out to label 2: when it equals u.  The l.add sits in the
+ * branch delay slot, so tmp = old + a is computed even on the
+ * bail-out path, where it is simply unused.  Otherwise l.swa
+ * conditionally stores tmp and l.bnf retries from 1: if the
+ * reservation was lost.  Either way, 'old' holds the originally
+ * loaded value and is what gets returned.
+ */
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int old, tmp;
+
+       __asm__ __volatile__(
+               "1:     l.lwa %0, 0(%2)         \n"
+               "       l.sfeq %0, %4           \n"
+               "       l.bf 2f                 \n"
+               "        l.add %1, %0, %3       \n"
+               "       l.swa 0(%2), %1         \n"
+               "       l.bnf 1b                \n"
+               "        l.nop                  \n"
+               "2:                             \n"
+               : "=&r"(old), "=&r" (tmp)
+               : "r"(&v->counter), "r"(a), "r"(u)
+               : "cc", "memory");
+
+       return old;
+}
+/* Mark as provided so asm-generic/atomic.h skips its fallback. */
+#define __atomic_add_unless    __atomic_add_unless
+
+#include <asm-generic/atomic.h>
+
+#endif /* __ASM_OPENRISC_ATOMIC_H */
index 9ed8b987185b45b1157993abf4b0fe5a6c0b23a8..3f38eb03649c93873c678677964425a1daf09c26 100644 (file)
@@ -223,6 +223,7 @@ static inline void atomic_dec(atomic_t *v)
 #define atomic_xchg(ptr, v)            (xchg(&(ptr)->counter, (v)))
 #define atomic_cmpxchg(v, old, new)    (cmpxchg(&((v)->counter), (old), (new)))
 
+/* Architectures (e.g. openrisc) may supply an optimized
+ * __atomic_add_unless of their own; only provide the generic
+ * cmpxchg-based fallback when they have not. */
+#ifndef __atomic_add_unless
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
        int c, old;
@@ -231,5 +232,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
                c = old;
        return c;
 }
+#endif
 
 #endif /* __ASM_GENERIC_ATOMIC_H */