locking/pvqspinlock: Relax cmpxchg's to improve performance on some architectures
Author:     Waiman Long <longman@redhat.com>
AuthorDate: Mon, 14 Aug 2017 20:07:02 +0000 (16:07 -0400)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 29 Aug 2017 13:14:38 +0000 (15:14 +0200)
All the locking-related cmpxchg()'s in the following functions are
replaced with their _acquire variants:

 - pv_queued_spin_steal_lock()
 - trylock_clear_pending()

This change should help performance on architectures that use LL/SC,
where a fully ordered cmpxchg() needs heavier memory barriers than an
acquire-only one.
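
For illustration only, here is a minimal C11-atomics sketch of a
lock-steal attempt (the try_steal() helper and its single 'locked'
byte are assumptions for the example, not the kernel code): acquire
ordering on a successful CAS is all that is needed, which is cheaper
on LL/SC architectures than the full ordering a plain cmpxchg()
provides.

  #include <stdatomic.h>
  #include <stdbool.h>

  /* Sketch of a lock-steal attempt: acquire on success, relaxed on failure. */
  static bool try_steal(atomic_uchar *locked)
  {
          unsigned char unlocked = 0;

          /*
           * A successful CAS only needs acquire semantics so that the
           * critical section cannot be reordered before it; a failed
           * CAS took no lock and needs no ordering at all.
           */
          return atomic_compare_exchange_strong_explicit(locked, &unlocked,
                        1, memory_order_acquire, memory_order_relaxed);
  }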

The cmpxchg() in pv_kick_node() is replaced with a relaxed version
plus an explicit memory barrier, so that the write of next->locked
and the read of pn->state are fully ordered whether the cmpxchg()
succeeds or fails, without affecting performance on non-LL/SC
architectures.
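
A hedged C11 sketch of that ordering pattern (the field and state
names mirror the kernel code, but the helper itself is illustrative,
not the kernel implementation): a full fence before a relaxed CAS
orders the earlier store even when the CAS fails, which a plain
kernel cmpxchg() does not guarantee on failure.

  #include <stdatomic.h>

  enum vcpu_state { vcpu_running, vcpu_halted, vcpu_hashed };

  /* Illustrative analogue of the pv_kick_node() ordering, not kernel code. */
  static void kick_node_sketch(atomic_int *next_locked, atomic_int *pn_state)
  {
          /* Hand the MCS lock to the next waiter (arch_mcs_spin_unlock_contended()). */
          atomic_store_explicit(next_locked, 1, memory_order_release);

          /*
           * Full fence: the store above must be ordered before the read
           * half of the CAS below, whether the CAS succeeds or fails.
           */
          atomic_thread_fence(memory_order_seq_cst);

          int expected = vcpu_halted;
          if (!atomic_compare_exchange_strong_explicit(pn_state, &expected,
                        vcpu_hashed, memory_order_relaxed, memory_order_relaxed))
                  return;        /* vCPU was not halted; nothing to do. */

          /* ... hash the lock and mark it _Q_SLOW_VAL (elided) ... */
  }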

On a 2-socket, 12-core, 96-thread Power8 system with pvqspinlock
explicitly enabled, the performance of a locking microbenchmark
with and without this patch on a 4.13-rc4 kernel with Xinhui's PPC
qspinlock patch was as follows:

  # of threads    w/o patch     with patch      % Change
  ------------    ---------     ----------      --------
        8        5054.8 Mop/s  5209.4 Mop/s      +3.1%
       16        3985.0 Mop/s  4015.0 Mop/s      +0.8%
       32        2378.2 Mop/s  2396.0 Mop/s      +0.7%

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrea Parri <parri.andrea@gmail.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Pan Xinhui <xinhui@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Link: http://lkml.kernel.org/r/1502741222-24360-1-git-send-email-longman@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/locking/qspinlock_paravirt.h

index 4ccfcaae5b89da53ae688893eddbc717c5124317..43555681c40b496dd6494a0f52240f932b7bef38 100644
@@ -72,7 +72,7 @@ static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
        struct __qspinlock *l = (void *)lock;
 
        if (!(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
-           (cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0)) {
+           (cmpxchg_acquire(&l->locked, 0, _Q_LOCKED_VAL) == 0)) {
                qstat_inc(qstat_pv_lock_stealing, true);
                return true;
        }
@@ -101,16 +101,16 @@ static __always_inline void clear_pending(struct qspinlock *lock)
 
 /*
  * The pending bit check in pv_queued_spin_steal_lock() isn't a memory
- * barrier. Therefore, an atomic cmpxchg() is used to acquire the lock
- * just to be sure that it will get it.
+ * barrier. Therefore, an atomic cmpxchg_acquire() is used to acquire the
+ * lock just to be sure that it will get it.
  */
 static __always_inline int trylock_clear_pending(struct qspinlock *lock)
 {
        struct __qspinlock *l = (void *)lock;
 
        return !READ_ONCE(l->locked) &&
-              (cmpxchg(&l->locked_pending, _Q_PENDING_VAL, _Q_LOCKED_VAL)
-                       == _Q_PENDING_VAL);
+              (cmpxchg_acquire(&l->locked_pending, _Q_PENDING_VAL,
+                               _Q_LOCKED_VAL) == _Q_PENDING_VAL);
 }
 #else /* _Q_PENDING_BITS == 8 */
 static __always_inline void set_pending(struct qspinlock *lock)
@@ -138,7 +138,7 @@ static __always_inline int trylock_clear_pending(struct qspinlock *lock)
                 */
                old = val;
                new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
-               val = atomic_cmpxchg(&lock->val, old, new);
+               val = atomic_cmpxchg_acquire(&lock->val, old, new);
 
                if (val == old)
                        return 1;
@@ -362,8 +362,18 @@ static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
         * observe its next->locked value and advance itself.
         *
         * Matches with smp_store_mb() and cmpxchg() in pv_wait_node()
+        *
+        * The write to next->locked in arch_mcs_spin_unlock_contended()
+        * must be ordered before the read of pn->state in the cmpxchg()
+        * below for the code to work correctly. To guarantee full ordering
+        * irrespective of the success or failure of the cmpxchg(),
+        * a relaxed version with explicit barrier is used. The control
+        * dependency will order the reading of pn->state before any
+        * subsequent writes.
         */
-       if (cmpxchg(&pn->state, vcpu_halted, vcpu_hashed) != vcpu_halted)
+       smp_mb__before_atomic();
+       if (cmpxchg_relaxed(&pn->state, vcpu_halted, vcpu_hashed)
+           != vcpu_halted)
                return;
 
        /*