alpha: Improve atomic_add_unless
author		Richard Henderson <rth@twiddle.net>
		Thu, 11 Jul 2013 14:42:14 +0000 (07:42 -0700)
committer	Matt Turner <mattst88@gmail.com>
		Fri, 19 Jul 2013 20:54:24 +0000 (13:54 -0700)
Use ll/sc (load-locked/store-conditional) loops instead of C loops around cmpxchg.
Update the atomic64_add_unless block comment to match the code.
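
For reference, the "C loop around cmpxchg" pattern being removed has the
semantics sketched below; this is a minimal user-space illustration using
GCC __atomic builtins in place of the kernel's atomic_cmpxchg(), not the
kernel code itself:

	/* Sketch only: the semantics of __atomic_add_unless() written as a
	 * compare-and-swap retry loop. */
	static int add_unless_sketch(int *v, int a, int u)
	{
		int old = __atomic_load_n(v, __ATOMIC_RELAXED);

		while (old != u &&
		       !__atomic_compare_exchange_n(v, &old, old + a, 0,
						    __ATOMIC_SEQ_CST,
						    __ATOMIC_RELAXED))
			;	/* on failure, old is refreshed; retry */

		return old;	/* old value of *v */
	}

On ll/sc machines such as Alpha, each cmpxchg is itself an ll/sc loop, so
the C loop above nests one retry loop inside another; the patch replaces
the whole construct with a single ll/sc loop.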

Reviewed-and-Tested-by: Matt Turner <mattst88@gmail.com>
Signed-off-by: Matt Turner <mattst88@gmail.com>
Signed-off-by: Richard Henderson <rth@twiddle.net>
arch/alpha/include/asm/atomic.h

index c2cbe4fc391cd7d77d319cb0cbeabd19f7e5ecbb..0dc18fc4d9258940fb71e420e8a1ba9eed284c31 100644 (file)
@@ -186,17 +186,24 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
  */
 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
-       int c, old;
-       c = atomic_read(v);
-       for (;;) {
-               if (unlikely(c == (u)))
-                       break;
-               old = atomic_cmpxchg((v), c, c + (a));
-               if (likely(old == c))
-                       break;
-               c = old;
-       }
-       return c;
+       int c, new, old;
+       smp_mb();
+       __asm__ __volatile__(
+       "1:     ldl_l   %[old],%[mem]\n"
+       "       cmpeq   %[old],%[u],%[c]\n"
+       "       addl    %[old],%[a],%[new]\n"
+       "       bne     %[c],2f\n"
+       "       stl_c   %[new],%[mem]\n"
+       "       beq     %[new],3f\n"
+       "2:\n"
+       ".subsection 2\n"
+       "3:     br      1b\n"
+       ".previous"
+       : [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
+       : [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
+       : "memory");
+       smp_mb();
+       return old;
 }
 
 
@@ -207,21 +214,28 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
+ * Returns true iff @v was not @u.
  */
 static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
-       long c, old;
-       c = atomic64_read(v);
-       for (;;) {
-               if (unlikely(c == (u)))
-                       break;
-               old = atomic64_cmpxchg((v), c, c + (a));
-               if (likely(old == c))
-                       break;
-               c = old;
-       }
-       return c != (u);
+       long c, tmp;
+       smp_mb();
+       __asm__ __volatile__(
+       "1:     ldq_l   %[tmp],%[mem]\n"
+       "       cmpeq   %[tmp],%[u],%[c]\n"
+       "       addq    %[tmp],%[a],%[tmp]\n"
+       "       bne     %[c],2f\n"
+       "       stq_c   %[tmp],%[mem]\n"
+       "       beq     %[tmp],3f\n"
+       "2:\n"
+       ".subsection 2\n"
+       "3:     br      1b\n"
+       ".previous"
+       : [tmp] "=&r"(tmp), [c] "=&r"(c)
+       : [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
+       : "memory");
+       smp_mb();
+       return !c;
 }
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
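
The atomic64_inc_not_zero() wrapper above is the usual consumer of the new
boolean return convention; a hypothetical refcount example (illustration
only, not part of this patch):

	/* Take a reference only while the object is still live. Relies on
	 * atomic64_add_unless() returning true iff the add was performed,
	 * i.e. the counter was not zero. */
	static inline int get_ref(atomic64_t *refcount)
	{
		return atomic64_inc_not_zero(refcount);
	}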