diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 82bb4a9e9009270c1cf2008e5dbddc60a1f9e950..38c49202d532b3b96554756b706b0fc64127c7aa 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
  * Authors: Waiman Long <waiman.long@hp.com>
  *          Peter Zijlstra <peterz@infradead.org>
  */
+
+#ifndef _GEN_PV_LOCK_SLOWPATH
+
 #include <linux/smp.h>
 #include <linux/bug.h>
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
 #include <linux/mutex.h>
+#include <asm/byteorder.h>
 #include <asm/qspinlock.h>
 
 /*
  * node; thereby avoiding the need to carry a node from lock to unlock, and
  * preserving existing lock API. This also makes the unlock code simpler and
  * faster.
+ *
+ * N.B. The current implementation only supports architectures that allow
+ *      atomic operations on the smaller (8-bit and 16-bit) data types.
+ *
  */
 
 #include "mcs_spinlock.h"
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define MAX_NODES      8
+#else
+#define MAX_NODES      4
+#endif
+
 /*
  * Per-CPU queue node structures; we can never have more than 4 nested
  * contexts: task, softirq, hardirq, nmi.
  *
  * Exactly fits one 64-byte cacheline on a 64-bit architecture.
+ *
+ * PV doubles the storage and uses the second cacheline for PV state.
  */
-static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[4]);
+static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);
 
 /*
  * We must be able to distinguish between no-tail and the tail at 0:0,
@@ -96,6 +112,75 @@ static inline struct mcs_spinlock *decode_tail(u32 tail)
 
 #define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
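
(Notation note: the transition annotations throughout this file describe the
lock word as a (queue tail, pending, locked) triple, so a comment like
"*,1,0 -> *,0,1" reads: tail unchanged, pending bit cleared, locked byte set.)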
 
+/*
+ * By using the whole 2nd least significant byte for the pending bit, we
+ * can allow better optimization of the lock acquisition for the pending
+ * bit holder.
+ *
+ * This internal structure is also used by the set_locked() function, which
+ * is not restricted to _Q_PENDING_BITS == 8.
+ */
+struct __qspinlock {
+       union {
+               atomic_t val;
+#ifdef __LITTLE_ENDIAN
+               struct {
+                       u8      locked;
+                       u8      pending;
+               };
+               struct {
+                       u16     locked_pending;
+                       u16     tail;
+               };
+#else
+               struct {
+                       u16     tail;
+                       u16     locked_pending;
+               };
+               struct {
+                       u8      reserved[2];
+                       u8      pending;
+                       u8      locked;
+               };
+#endif
+       };
+};
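
The effect of this overlay is easiest to see standalone. Below is a minimal
user-space sketch (hypothetical demo_* names, little-endian half only, C11
anonymous unions) that mirrors the layout and shows why a single 16-bit store
to locked_pending can clear the pending bit and set the locked byte at once:

        #include <assert.h>
        #include <stdint.h>

        /* Illustrative stand-in for struct __qspinlock (little-endian). */
        struct demo_qspinlock {
                union {
                        uint32_t val;
                        struct {
                                uint8_t  locked;         /* bits  0-7  */
                                uint8_t  pending;        /* bits  8-15 */
                        };
                        struct {
                                uint16_t locked_pending; /* bits  0-15 */
                                uint16_t tail;           /* bits 16-31 */
                        };
                };
        };

        int main(void)
        {
                struct demo_qspinlock l = { .val = 0 };

                l.pending = 1;           /* 0,1,0 */
                assert(l.val == 0x0100);

                l.locked_pending = 1;    /* 0,0,1: pending off, locked on */
                assert(l.val == 0x0001);
                return 0;
        }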
+
+#if _Q_PENDING_BITS == 8
+/**
+ * clear_pending_set_locked - take ownership and clear the pending bit.
+ * @lock: Pointer to queued spinlock structure
+ *
+ * *,1,0 -> *,0,1
+ *
+ * Lock stealing is not allowed if this function is used.
+ */
+static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
+{
+       struct __qspinlock *l = (void *)lock;
+
+       WRITE_ONCE(l->locked_pending, _Q_LOCKED_VAL);
+}
+
+/**
+ * xchg_tail - Put in the new queue tail code word & retrieve previous one
+ * @lock : Pointer to queued spinlock structure
+ * @tail : The new queue tail code word
+ * Return: The previous queue tail code word
+ *
+ * xchg(lock, tail)
+ *
+ * p,*,* -> n,*,* ; prev = xchg(lock, node)
+ */
+static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
+{
+       struct __qspinlock *l = (void *)lock;
+
+       return (u32)xchg(&l->tail, tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
+}
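
For contrast, a hedged sketch of what the #else branch below must do when the
architecture has no 16-bit xchg(): retry a compare-and-swap over the whole
word, preserving the locked/pending bits on every iteration. In C11 terms
(the demo_* names and mask are illustrative, not the kernel API):

        #include <stdatomic.h>
        #include <stdint.h>

        #define DEMO_TAIL_MASK  0xffff0000u     /* stands in for _Q_TAIL_MASK */

        /* Swap in a new tail code word, keeping bits 0-15 intact. */
        static uint32_t demo_xchg_tail_fallback(_Atomic uint32_t *val,
                                                uint32_t tail)
        {
                uint32_t old = atomic_load_explicit(val, memory_order_relaxed);

                for (;;) {
                        uint32_t new = (old & ~DEMO_TAIL_MASK) | tail;

                        /* On failure, 'old' is refreshed automatically. */
                        if (atomic_compare_exchange_weak_explicit(val, &old,
                                        new, memory_order_acq_rel,
                                        memory_order_relaxed))
                                return old;     /* previous word; caller masks */
                }
        }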
+
+#else /* _Q_PENDING_BITS == 8 */
+
 /**
  * clear_pending_set_locked - take ownership and clear the pending bit.
  * @lock: Pointer to queued spinlock structure
@@ -131,6 +216,46 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
        }
        return old;
 }
+#endif /* _Q_PENDING_BITS == 8 */
+
+/**
+ * set_locked - Set the lock bit and own the lock
+ * @lock: Pointer to queued spinlock structure
+ *
+ * *,*,0 -> *,0,1
+ */
+static __always_inline void set_locked(struct qspinlock *lock)
+{
+       struct __qspinlock *l = (void *)lock;
+
+       WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
+}
+
+/*
+ * Generate the native code for queued_spin_lock_slowpath(); provide NOPs for
+ * all the PV callbacks.
+ */
+
+static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
+
+static __always_inline void __pv_wait_head(struct qspinlock *lock,
+                                          struct mcs_spinlock *node) { }
+
+#define pv_enabled()           false
+
+#define pv_init_node           __pv_init_node
+#define pv_wait_node           __pv_wait_node
+#define pv_kick_node           __pv_kick_node
+#define pv_wait_head           __pv_wait_head
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define queued_spin_lock_slowpath      native_queued_spin_lock_slowpath
+#endif
+
+#endif /* _GEN_PV_LOCK_SLOWPATH */
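
The _GEN_PV_LOCK_SLOWPATH guard closed here enables a self-include trick: at
the bottom of the file, the same .c file is included a second time with the
pv_* hooks redefined, so a single slowpath body compiles into both a native
and a paravirt variant. A minimal sketch of the pattern (hypothetical file
template.c, demo names):

        /* template.c - compile one function body twice, with different hooks */
        #ifndef GEN_VARIANT_B

        #include <stdio.h>

        #define hook()          do { } while (0)        /* variant A: no-op */
        #define slowpath        slowpath_a

        #endif /* GEN_VARIANT_B */

        void slowpath(void)
        {
                hook();                 /* expands differently per variant */
                puts("common slowpath body");
        }

        #ifndef GEN_VARIANT_B
        #define GEN_VARIANT_B

        #undef  hook
        #define hook()          puts("variant B prologue")
        #undef  slowpath
        #define slowpath        slowpath_b

        #include "template.c"           /* emits slowpath_b */
        #endif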
 
 /**
  * queued_spin_lock_slowpath - acquire the queued spinlock
@@ -161,6 +286,12 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 
        BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
 
+       if (pv_enabled())
+               goto queue;
+
+       if (virt_queued_spin_lock(lock))
+               return;
+
        /*
         * wait for in-progress pending->locked hand-overs
         *
@@ -205,8 +336,13 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
         * we're pending, wait for the owner to go away.
         *
         * *,1,1 -> *,1,0
+        *
+        * this wait loop must use a load-acquire such that we match the
+        * store-release that clears the locked bit and create lock
+        * sequentiality; this is because not all clear_pending_set_locked()
+        * implementations imply full barriers.
         */
-       while ((val = atomic_read(&lock->val)) & _Q_LOCKED_MASK)
+       while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_MASK)
                cpu_relax();
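
The pairing this new comment demands can be sketched with C11 atomics
(illustrative stand-ins; the kernel uses smp_load_acquire() against a
store-release in the unlock path): the release store that clears the locked
byte must synchronize with this acquire load, so every write the previous
owner made inside its critical section is visible before we take over.

        #include <stdatomic.h>

        #define DEMO_LOCKED_MASK 0xffu  /* stands in for _Q_LOCKED_MASK */

        static _Atomic unsigned int demo_val;

        static void demo_unlock(void)
        {
                /* Store-release: critical-section writes happen-before... */
                atomic_fetch_and_explicit(&demo_val, ~DEMO_LOCKED_MASK,
                                          memory_order_release);
        }

        static void demo_wait_for_owner(void)
        {
                /* ...this acquire load that sees the locked byte cleared. */
                while (atomic_load_explicit(&demo_val, memory_order_acquire) &
                       DEMO_LOCKED_MASK)
                        ;               /* cpu_relax() stand-in */
        }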
 
        /*
@@ -229,6 +365,7 @@ queue:
        node += idx;
        node->locked = 0;
        node->next = NULL;
+       pv_init_node(node);
 
        /*
         * We touched a (possibly) cold cacheline in the per-cpu queue node;
@@ -254,6 +391,7 @@ queue:
                prev = decode_tail(old);
                WRITE_ONCE(prev->next, node);
 
+               pv_wait_node(node);
                arch_mcs_spin_lock_contended(&node->locked);
        }
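
This append-and-spin step is the heart of MCS queueing: each waiter spins on
its own node's "locked" flag rather than on the shared lock word. A compact
user-space rendition (C11, demo_* names; the kernel additionally packs the
tail pointer into 16 bits via encode_tail()/decode_tail()):

        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stddef.h>

        struct demo_mcs {
                struct demo_mcs *_Atomic next;
                _Atomic bool locked;    /* true once the lock is handed to us */
        };

        static struct demo_mcs *_Atomic demo_tail_ptr;

        static void demo_mcs_queue(struct demo_mcs *node)
        {
                struct demo_mcs *prev;

                atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
                atomic_store_explicit(&node->locked, false,
                                      memory_order_relaxed);

                /* Publish ourselves as the new tail; the old tail, if any,
                 * is our predecessor in the queue. */
                prev = atomic_exchange_explicit(&demo_tail_ptr, node,
                                                memory_order_acq_rel);
                if (prev) {
                        /* Link in behind prev, then spin locally. */
                        atomic_store_explicit(&prev->next, node,
                                              memory_order_release);
                        while (!atomic_load_explicit(&node->locked,
                                                     memory_order_acquire))
                                ;       /* cpu_relax() stand-in */
                }
                /* Queue head: go contend for the real lock word. */
        }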
 
@@ -262,8 +400,15 @@ queue:
         * go away.
         *
         * *,x,y -> *,0,0
+        *
+        * this wait loop must use a load-acquire such that we match the
+        * store-release that clears the locked bit and create lock
+        * sequentiality; this is because the set_locked() function below
+        * does not imply a full barrier.
         */
-       while ((val = atomic_read(&lock->val)) & _Q_LOCKED_PENDING_MASK)
+       pv_wait_head(lock, node);
+       while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_PENDING_MASK)
                cpu_relax();
 
        /*
@@ -271,15 +416,19 @@ queue:
         *
         * n,0,0 -> 0,0,1 : lock, uncontended
         * *,0,0 -> *,0,1 : lock, contended
+        *
+        * If the queue head is the only one in the queue (lock value == tail),
+        * clear the tail code and grab the lock. Otherwise, we only need
+        * to grab the lock.
         */
        for (;;) {
-               new = _Q_LOCKED_VAL;
-               if (val != tail)
-                       new |= val;
-
-               old = atomic_cmpxchg(&lock->val, val, new);
-               if (old == val)
+               if (val != tail) {
+                       set_locked(lock);
                        break;
+               }
+               old = atomic_cmpxchg(&lock->val, val, _Q_LOCKED_VAL);
+               if (old == val)
+                       goto release;   /* No contention */
 
                val = old;
        }
@@ -287,12 +436,11 @@ queue:
        /*
         * contended path; wait for next, release.
         */
-       if (new != _Q_LOCKED_VAL) {
-               while (!(next = READ_ONCE(node->next)))
-                       cpu_relax();
+       while (!(next = READ_ONCE(node->next)))
+               cpu_relax();
 
-               arch_mcs_spin_unlock_contended(&next->locked);
-       }
+       arch_mcs_spin_unlock_contended(&next->locked);
+       pv_kick_node(next);
 
 release:
        /*
@@ -301,3 +449,25 @@ release:
        this_cpu_dec(mcs_nodes[0].count);
 }
 EXPORT_SYMBOL(queued_spin_lock_slowpath);
+
+/*
+ * Generate the paravirt code for __pv_queued_spin_lock_slowpath().
+ */
+#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+#define _GEN_PV_LOCK_SLOWPATH
+
+#undef  pv_enabled
+#define pv_enabled()   true
+
+#undef pv_init_node
+#undef pv_wait_node
+#undef pv_kick_node
+#undef pv_wait_head
+
+#undef  queued_spin_lock_slowpath
+#define queued_spin_lock_slowpath      __pv_queued_spin_lock_slowpath
+
+#include "qspinlock_paravirt.h"
+#include "qspinlock.c"
+
+#endif