locking/atomic, powerpc: Use s64 for atomic64
Author:     Mark Rutland <mark.rutland@arm.com>
AuthorDate: Wed, 22 May 2019 13:22:42 +0000 (14:22 +0100)
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Mon, 3 Jun 2019 10:32:56 +0000 (12:32 +0200)
As a step towards making the atomic64 API use consistent types treewide,
let's have the powerpc atomic64 implementation use s64 as the underlying
type for atomic64_t, rather than long, matching the generated headers.

As atomic64_read() depends on the generic definition of atomic64_t, this
still returns long on 64-bit. This will be converted in a subsequent
patch.
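
For reference, the generic definition in question looked roughly like
this at the time (a sketch of include/linux/types.h; not part of this
patch):

    #ifdef CONFIG_64BIT
    typedef struct {
            long counter;   /* becomes s64 in a subsequent patch */
    } atomic64_t;
    #endif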

Otherwise, there should be no functional change as a result of this
patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Michael Ellerman <mpe@ellerman.id.au>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aou@eecs.berkeley.edu
Cc: arnd@arndb.de
Cc: bp@alien8.de
Cc: catalin.marinas@arm.com
Cc: davem@davemloft.net
Cc: fenghua.yu@intel.com
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: ink@jurassic.park.msu.ru
Cc: jhogan@kernel.org
Cc: linux@armlinux.org.uk
Cc: mattst88@gmail.com
Cc: palmer@sifive.com
Cc: paul.burton@mips.com
Cc: ralf@linux-mips.org
Cc: rth@twiddle.net
Cc: tony.luck@intel.com
Cc: vgupta@synopsys.com
Link: https://lkml.kernel.org/r/20190522132250.26499-11-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/powerpc/include/asm/atomic.h

index 52eafaf74054d2f3d22e0adadb8a57d61a5b89f9..31c231ea56b77c4d81bb248bb38041350fc312ef 100644
@@ -297,24 +297,24 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 
 #define ATOMIC64_INIT(i)       { (i) }
 
-static __inline__ long atomic64_read(const atomic64_t *v)
+static __inline__ s64 atomic64_read(const atomic64_t *v)
 {
-       long t;
+       s64 t;
 
        __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
 
        return t;
 }
 
-static __inline__ void atomic64_set(atomic64_t *v, long i)
+static __inline__ void atomic64_set(atomic64_t *v, s64 i)
 {
        __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
 }
 
 #define ATOMIC64_OP(op, asm_op)                                                \
-static __inline__ void atomic64_##op(long a, atomic64_t *v)            \
+static __inline__ void atomic64_##op(s64 a, atomic64_t *v)             \
 {                                                                      \
-       long t;                                                         \
+       s64 t;                                                          \
                                                                        \
        __asm__ __volatile__(                                           \
 "1:    ldarx   %0,0,%3         # atomic64_" #op "\n"                   \
@@ -327,10 +327,10 @@ static __inline__ void atomic64_##op(long a, atomic64_t *v)               \
 }
 
 #define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)                         \
-static inline long                                                     \
-atomic64_##op##_return_relaxed(long a, atomic64_t *v)                  \
+static inline s64                                                      \
+atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)                   \
 {                                                                      \
-       long t;                                                         \
+       s64 t;                                                          \
                                                                        \
        __asm__ __volatile__(                                           \
 "1:    ldarx   %0,0,%3         # atomic64_" #op "_return_relaxed\n"    \
@@ -345,10 +345,10 @@ atomic64_##op##_return_relaxed(long a, atomic64_t *v)                     \
 }
 
 #define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)                          \
-static inline long                                                     \
-atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)                   \
+static inline s64                                                      \
+atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)                    \
 {                                                                      \
-       long res, t;                                                    \
+       s64 res, t;                                                     \
                                                                        \
        __asm__ __volatile__(                                           \
 "1:    ldarx   %0,0,%4         # atomic64_fetch_" #op "_relaxed\n"     \
@@ -396,7 +396,7 @@ ATOMIC64_OPS(xor, xor)
 
 static __inline__ void atomic64_inc(atomic64_t *v)
 {
-       long t;
+       s64 t;
 
        __asm__ __volatile__(
 "1:    ldarx   %0,0,%2         # atomic64_inc\n\
@@ -409,9 +409,9 @@ static __inline__ void atomic64_inc(atomic64_t *v)
 }
 #define atomic64_inc atomic64_inc
 
-static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
+static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
 {
-       long t;
+       s64 t;
 
        __asm__ __volatile__(
 "1:    ldarx   %0,0,%2         # atomic64_inc_return_relaxed\n"
@@ -427,7 +427,7 @@ static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
 
 static __inline__ void atomic64_dec(atomic64_t *v)
 {
-       long t;
+       s64 t;
 
        __asm__ __volatile__(
 "1:    ldarx   %0,0,%2         # atomic64_dec\n\
@@ -440,9 +440,9 @@ static __inline__ void atomic64_dec(atomic64_t *v)
 }
 #define atomic64_dec atomic64_dec
 
-static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
+static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
 {
-       long t;
+       s64 t;
 
        __asm__ __volatile__(
 "1:    ldarx   %0,0,%2         # atomic64_dec_return_relaxed\n"
@@ -463,9 +463,9 @@ static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
  * Atomically test *v and decrement if it is greater than 0.
  * The function returns the old value of *v minus 1.
  */
-static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
+static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
 {
-       long t;
+       s64 t;
 
        __asm__ __volatile__(
        PPC_ATOMIC_ENTRY_BARRIER
@@ -502,9 +502,9 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
+static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
-       long t;
+       s64 t;
 
        __asm__ __volatile__ (
        PPC_ATOMIC_ENTRY_BARRIER
@@ -534,7 +534,7 @@ static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
  */
 static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
 {
-       long t1, t2;
+       s64 t1, t2;
 
        __asm__ __volatile__ (
        PPC_ATOMIC_ENTRY_BARRIER
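
All of the routines in this file follow the same load-reserve/
store-conditional pattern visible in the hunks above. Expanded,
ATOMIC64_OP(add, add) comes out roughly as the following sketch
(illustrative only; the comments are mine and the constraint list is
reconstructed rather than quoted from the file):

    static __inline__ void atomic64_add(s64 a, atomic64_t *v)
    {
            s64 t;

            __asm__ __volatile__(
    "1:     ldarx   %0,0,%3         # load counter, take reservation\n"
    "       add     %0,%2,%0        # compute the new value\n"
    "       stdcx.  %0,0,%3         # store iff reservation still held\n"
    "       bne-    1b              # reservation lost: retry\n"
            : "=&r" (t), "+m" (v->counter)
            : "r" (a), "r" (&v->counter)
            : "cc");
    }

ldarx loads the counter and establishes a reservation on its cache
line; if another CPU writes the line before the stdcx., the store
fails, CR0.EQ is left clear, and bne- loops back to retry, which is
what makes the read-modify-write atomic.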