s390/spinlock: cleanup spinlock code
author Philipp Hachtmann <phacht@linux.vnet.ibm.com>
Mon, 7 Apr 2014 16:25:23 +0000 (18:25 +0200)
committer Martin Schwidefsky <schwidefsky@de.ibm.com>
Tue, 20 May 2014 06:58:41 +0000 (08:58 +0200)
Improve the spinlock code in several respects:
 - Have _raw_compare_and_swap return true if the operation was
   successful instead of returning the old value (a portable sketch of
   the new semantics follows this list)
 - Remove the "volatile" from arch_spinlock_t and arch_rwlock_t
 - Rename 'owner_cpu' to 'lock'
 - Add helper functions arch_spin_trylock_once / arch_spin_tryrelease_once
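
A portable sketch of the new boolean convention and the two helpers;
the __atomic_compare_exchange_n builtin and my_smp_processor_id() are
stand-ins for the s390 "cs" instruction and the kernel's
smp_processor_id(), not the kernel implementation:

/*
 * Boolean compare-and-swap plus the *_once helpers from the patch,
 * rewritten for user space.
 */
#include <stdio.h>

typedef struct {
        unsigned int lock;
} arch_spinlock_t;

static inline int my_smp_processor_id(void)
{
        return 0;               /* stand-in: pretend we are CPU 0 */
}

/* Return true when *lock was equal to old and has been set to new. */
static inline int _raw_compare_and_swap(unsigned int *lock,
                                        unsigned int old, unsigned int new)
{
        return __atomic_compare_exchange_n(lock, &old, new, 0,
                                           __ATOMIC_SEQ_CST,
                                           __ATOMIC_SEQ_CST);
}

/* One acquisition attempt, 0 -> ~cpu, no spinning. */
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
        unsigned int new = ~my_smp_processor_id();

        return _raw_compare_and_swap(&lp->lock, 0, new);
}

/* One release attempt, ~cpu -> 0; only the owner's value matches. */
static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
{
        unsigned int old = ~my_smp_processor_id();

        return _raw_compare_and_swap(&lp->lock, old, 0);
}

int main(void)
{
        arch_spinlock_t lp = { .lock = 0 };

        printf("trylock: %d\n", arch_spin_trylock_once(&lp));    /* 1 */
        printf("again:   %d\n", arch_spin_trylock_once(&lp));    /* 0 */
        printf("release: %d\n", arch_spin_tryrelease_once(&lp)); /* 1 */
        return 0;
}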

[ Martin Schwidefsky: patch breakdown and code beautification ]

Signed-off-by: Philipp Hachtmann <phacht@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
arch/s390/include/asm/spinlock.h
arch/s390/include/asm/spinlock_types.h
arch/s390/lib/spinlock.c

index 83e5d216105e86a2df2ab1367bcfc3fd1590542a..b60212a02d08d5e7d56ad549459614ce964f3583 100644 (file)
 extern int spin_retry;
 
 static inline int
-_raw_compare_and_swap(volatile unsigned int *lock,
-                     unsigned int old, unsigned int new)
+_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
 {
+       unsigned int old_expected = old;
+
        asm volatile(
                "       cs      %0,%3,%1"
                : "=d" (old), "=Q" (*lock)
                : "0" (old), "d" (new), "Q" (*lock)
                : "cc", "memory" );
-       return old;
+       return old == old_expected;
 }
 
 /*
@@ -34,57 +35,66 @@ _raw_compare_and_swap(volatile unsigned int *lock,
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
-#define arch_spin_unlock_wait(lock) \
-       do { while (arch_spin_is_locked(lock)) \
-                arch_spin_relax(lock); } while (0)
-
-extern void arch_spin_lock_wait(arch_spinlock_t *);
-extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
-extern int arch_spin_trylock_retry(arch_spinlock_t *);
-extern void arch_spin_relax(arch_spinlock_t *lock);
+void arch_spin_lock_wait(arch_spinlock_t *);
+int arch_spin_trylock_retry(arch_spinlock_t *);
+void arch_spin_relax(arch_spinlock_t *);
+void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
 
 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
-       return lock.owner_cpu == 0;
+       return lock.lock == 0;
 }
 
-static inline void arch_spin_lock(arch_spinlock_t *lp)
+static inline int arch_spin_is_locked(arch_spinlock_t *lp)
+{
+       return ACCESS_ONCE(lp->lock) != 0;
+}
+
+static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
 {
-       int old;
+       unsigned int new = ~smp_processor_id();
 
-       old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-       if (likely(old == 0))
-               return;
-       arch_spin_lock_wait(lp);
+       return _raw_compare_and_swap(&lp->lock, 0, new);
 }
 
-static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
-                                        unsigned long flags)
+static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
 {
-       int old;
+       unsigned int old = ~smp_processor_id();
 
-       old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-       if (likely(old == 0))
-               return;
-       arch_spin_lock_wait_flags(lp, flags);
+       return _raw_compare_and_swap(&lp->lock, old, 0);
 }
 
-static inline int arch_spin_trylock(arch_spinlock_t *lp)
+static inline void arch_spin_lock(arch_spinlock_t *lp)
 {
-       int old;
+       if (unlikely(!arch_spin_trylock_once(lp)))
+               arch_spin_lock_wait(lp);
+}
 
-       old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-       if (likely(old == 0))
-               return 1;
-       return arch_spin_trylock_retry(lp);
+static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
+                                       unsigned long flags)
+{
+       if (unlikely(!arch_spin_trylock_once(lp)))
+               arch_spin_lock_wait_flags(lp, flags);
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lp)
+{
+       if (unlikely(!arch_spin_trylock_once(lp)))
+               return arch_spin_trylock_retry(lp);
+       return 1;
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *lp)
 {
-       _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
+       arch_spin_tryrelease_once(lp);
 }
-               
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+       while (arch_spin_is_locked(lock))
+               arch_spin_relax(lock);
+}
+
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
@@ -119,7 +129,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 {
        unsigned int old;
        old = rw->lock & 0x7fffffffU;
-       if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
+       if (!_raw_compare_and_swap(&rw->lock, old, old + 1))
                _raw_read_lock_wait(rw);
 }
 
@@ -127,30 +137,28 @@ static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
        unsigned int old;
        old = rw->lock & 0x7fffffffU;
-       if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
+       if (!_raw_compare_and_swap(&rw->lock, old, old + 1))
                _raw_read_lock_wait_flags(rw, flags);
 }
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-       unsigned int old, cmp;
+       unsigned int old;
 
-       old = rw->lock;
        do {
-               cmp = old;
-               old = _raw_compare_and_swap(&rw->lock, old, old - 1);
-       } while (cmp != old);
+               old = ACCESS_ONCE(rw->lock);
+       } while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
 }
 
 static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-       if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
+       if (unlikely(!_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
                _raw_write_lock_wait(rw);
 }
 
 static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
-       if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
+       if (unlikely(!_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
                _raw_write_lock_wait_flags(rw, flags);
 }
 
@@ -163,14 +171,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
        unsigned int old;
        old = rw->lock & 0x7fffffffU;
-       if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
+       if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1)))
                return 1;
        return _raw_read_trylock_retry(rw);
 }
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-       if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
+       if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
                return 1;
        return _raw_write_trylock_retry(rw);
 }
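
The reworked arch_read_unlock() above reloads the lock word and retries
the compare-and-swap until the decrement lands.  A minimal user-space
sketch of that retry pattern, with C11 atomics standing in for
ACCESS_ONCE and _raw_compare_and_swap (the function names and main()
are illustrative):

/*
 * Reader lock/unlock retry loops modelled on the new arch_read_lock()
 * and arch_read_unlock().  Bit 31 is the writer bit, bits 0-30 count
 * readers.  A failed CAS means someone raced in between; just reload
 * and try again (the kernel's slow path additionally yields).
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int rwlock;

static void read_lock(void)
{
        unsigned int old;

        do {    /* expect no writer: mask off bit 31 */
                old = atomic_load(&rwlock) & 0x7fffffffU;
        } while (!atomic_compare_exchange_weak(&rwlock, &old, old + 1));
}

static void read_unlock(void)
{
        unsigned int old;

        do {    /* mirrors arch_read_unlock(): reload, then CAS */
                old = atomic_load(&rwlock);
        } while (!atomic_compare_exchange_weak(&rwlock, &old, old - 1));
}

int main(void)
{
        read_lock();
        read_lock();
        printf("readers: %u\n", atomic_load(&rwlock));  /* 2 */
        read_unlock();
        read_unlock();
        printf("readers: %u\n", atomic_load(&rwlock));  /* 0 */
        return 0;
}
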
index 9c76656a0af035bf04d55680a03c589ef0e1b0f1..b2cd6ff7c2c5bcdad941bb91297fa64fb8908616 100644 (file)
@@ -6,13 +6,13 @@
 #endif
 
 typedef struct {
-       volatile unsigned int owner_cpu;
+       unsigned int lock;
 } __attribute__ ((aligned (4))) arch_spinlock_t;
 
-#define __ARCH_SPIN_LOCK_UNLOCKED      { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, }
 
 typedef struct {
-       volatile unsigned int lock;
+       unsigned int lock;
 } arch_rwlock_t;
 
 #define __ARCH_RW_LOCK_UNLOCKED                { 0 }
index f709983f41f883ed567aeba87a6c1e15bc196340..4a3b33b2dbb94581f1466bdd333731aeba99739d 100644 (file)
@@ -31,22 +31,21 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
        unsigned int owner;
 
        while (1) {
-               owner = lp->owner_cpu;
+               owner = lp->lock;
                if (!owner || smp_vcpu_scheduled(~owner)) {
                        for (count = spin_retry; count > 0; count--) {
                                if (arch_spin_is_locked(lp))
                                        continue;
-                               if (_raw_compare_and_swap(&lp->owner_cpu, 0,
-                                                         cpu) == 0)
+                               if (_raw_compare_and_swap(&lp->lock, 0, cpu))
                                        return;
                        }
                        if (MACHINE_IS_LPAR)
                                continue;
                }
-               owner = lp->owner_cpu;
+               owner = lp->lock;
                if (owner)
                        smp_yield_cpu(~owner);
-               if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
+               if (_raw_compare_and_swap(&lp->lock, 0, cpu))
                        return;
        }
 }
@@ -60,57 +59,55 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 
        local_irq_restore(flags);
        while (1) {
-               owner = lp->owner_cpu;
+               owner = lp->lock;
                if (!owner || smp_vcpu_scheduled(~owner)) {
                        for (count = spin_retry; count > 0; count--) {
                                if (arch_spin_is_locked(lp))
                                        continue;
                                local_irq_disable();
-                               if (_raw_compare_and_swap(&lp->owner_cpu, 0,
-                                                         cpu) == 0)
+                               if (_raw_compare_and_swap(&lp->lock, 0, cpu))
                                        return;
                                local_irq_restore(flags);
                        }
                        if (MACHINE_IS_LPAR)
                                continue;
                }
-               owner = lp->owner_cpu;
+               owner = lp->lock;
                if (owner)
                        smp_yield_cpu(~owner);
                local_irq_disable();
-               if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
+               if (_raw_compare_and_swap(&lp->lock, 0, cpu))
                        return;
                local_irq_restore(flags);
        }
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);
 
+void arch_spin_relax(arch_spinlock_t *lp)
+{
+       unsigned int cpu = lp->lock;
+       if (cpu != 0) {
+               if (MACHINE_IS_VM || MACHINE_IS_KVM ||
+                   !smp_vcpu_scheduled(~cpu))
+                       smp_yield_cpu(~cpu);
+       }
+}
+EXPORT_SYMBOL(arch_spin_relax);
+
 int arch_spin_trylock_retry(arch_spinlock_t *lp)
 {
-       unsigned int cpu = ~smp_processor_id();
        int count;
 
        for (count = spin_retry; count > 0; count--) {
                if (arch_spin_is_locked(lp))
                        continue;
-               if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
+               if (arch_spin_trylock_once(lp))
                        return 1;
        }
        return 0;
 }
 EXPORT_SYMBOL(arch_spin_trylock_retry);
 
-void arch_spin_relax(arch_spinlock_t *lock)
-{
-       unsigned int cpu = lock->owner_cpu;
-       if (cpu != 0) {
-               if (MACHINE_IS_VM || MACHINE_IS_KVM ||
-                   !smp_vcpu_scheduled(~cpu))
-                       smp_yield_cpu(~cpu);
-       }
-}
-EXPORT_SYMBOL(arch_spin_relax);
-
 void _raw_read_lock_wait(arch_rwlock_t *rw)
 {
        unsigned int old;
@@ -124,7 +121,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
                if (!arch_read_can_lock(rw))
                        continue;
                old = rw->lock & 0x7fffffffU;
-               if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+               if (_raw_compare_and_swap(&rw->lock, old, old + 1))
                        return;
        }
 }
@@ -145,7 +142,7 @@ void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
                        continue;
                old = rw->lock & 0x7fffffffU;
                local_irq_disable();
-               if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+               if (_raw_compare_and_swap(&rw->lock, old, old + 1))
                        return;
        }
 }
@@ -160,7 +157,7 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
                if (!arch_read_can_lock(rw))
                        continue;
                old = rw->lock & 0x7fffffffU;
-               if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+               if (_raw_compare_and_swap(&rw->lock, old, old + 1))
                        return 1;
        }
        return 0;
@@ -178,7 +175,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
                }
                if (!arch_write_can_lock(rw))
                        continue;
-               if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+               if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
                        return;
        }
 }
@@ -197,7 +194,7 @@ void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
                if (!arch_write_can_lock(rw))
                        continue;
                local_irq_disable();
-               if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+               if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
                        return;
        }
 }
@@ -210,7 +207,7 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
        while (count-- > 0) {
                if (!arch_write_can_lock(rw))
                        continue;
-               if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+               if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
                        return 1;
        }
        return 0;
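
The wait loops in lib/spinlock.c follow one strategy: attempt the CAS a
bounded number of times while the owner looks runnable, then yield to
the owning CPU.  A simplified user-space sketch, with sched_yield()
standing in for smp_yield_cpu() and the vCPU-scheduled and LPAR checks
dropped:

/*
 * Shape of arch_spin_lock_wait(): cheap loads in the inner loop, a
 * bounded number of CAS attempts, then a yield.  Not the kernel code.
 */
#include <sched.h>
#include <stdatomic.h>

#define SPIN_RETRY 1000         /* mirrors the kernel's spin_retry knob */

static _Atomic unsigned int lock;       /* 0 == free, ~cpu == owner */

static void spin_lock_wait(unsigned int cpu)
{
        unsigned int expected;
        int count;

        for (;;) {
                for (count = SPIN_RETRY; count > 0; count--) {
                        if (atomic_load_explicit(&lock,
                                                 memory_order_relaxed))
                                continue;       /* still held, re-read */
                        expected = 0;
                        if (atomic_compare_exchange_strong(&lock,
                                                           &expected, ~cpu))
                                return;
                }
                sched_yield();  /* give the owner a chance to run */
        }
}

int main(void)
{
        spin_lock_wait(0);      /* lock starts free: acquired at once */
        return 0;
}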