locking/pvqspinlock, x86: Enable PV qspinlock for Xen
Author:     David Vrabel <david.vrabel@citrix.com>
AuthorDate: Fri, 24 Apr 2015 18:56:40 +0000 (14:56 -0400)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Fri, 8 May 2015 10:37:18 +0000 (12:37 +0200)
This patch adds the Xen-specific code needed to support the CPU
halting and kicking operations required by the queued spinlock PV
code.
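
For reference, here is a rough sketch of the contract the two new
hooks fill in.  This is illustrative pseudo-code, not the actual slow
path in kernel/locking/qspinlock_paravirt.h; the loop structure and
the use of SPIN_THRESHOLD are assumptions made for the example:

  /*
   * Sketch: how the PV queued spinlock slow path is expected to
   * drive the wait/kick hooks that this patch implements for Xen.
   */
  static void example_pv_wait_loop(u8 *byte, u8 val)
  {
          int loop;

          for (;;) {
                  /* Spin for a while, hoping the byte changes soon. */
                  for (loop = SPIN_THRESHOLD; loop; loop--) {
                          if (READ_ONCE(*byte) != val)
                                  return;
                          cpu_relax();
                  }
                  /*
                   * Give the vCPU back to the host.  pv_lock_ops.wait
                   * is xen_qlock_wait() below; it re-checks *byte
                   * after clearing the pending kicker IRQ, so a
                   * concurrent kick (xen_qlock_kick() via
                   * pv_lock_ops.kick) sent by the unlocker is not
                   * lost.
                   */
                  pv_lock_ops.wait(byte, val);
          }
  }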

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Daniel J Blueman <daniel@numascale.com>
Cc: Douglas Hatch <doug.hatch@hp.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paolo Bonzini <paolo.bonzini@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1429901803-29771-12-git-send-email-Waiman.Long@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 arch/x86/xen/spinlock.c
 kernel/Kconfig.locks

diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 956374c1edbc31e4c1eb50c3fb29cb8828ad44b5..af907a90fb19ffb3a4990d8554a109ddd15f5e0d 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -17,6 +17,56 @@
 #include "xen-ops.h"
 #include "debugfs.h"
 
+static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
+static DEFINE_PER_CPU(char *, irq_name);
+static bool xen_pvspin = true;
+
+#ifdef CONFIG_QUEUED_SPINLOCK
+
+#include <asm/qspinlock.h>
+
+static void xen_qlock_kick(int cpu)
+{
+       xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+}
+
+/*
+ * Halt the current CPU & release it back to the host
+ */
+static void xen_qlock_wait(u8 *byte, u8 val)
+{
+       int irq = __this_cpu_read(lock_kicker_irq);
+
+       /* If kicker interrupts not initialized yet, just spin */
+       if (irq == -1)
+               return;
+
+       /* clear pending */
+       xen_clear_irq_pending(irq);
+       barrier();
+
+       /*
+        * We check the byte value after clearing pending IRQ to make sure
+        * that we won't miss a wakeup event because of the clearing.
+        *
+        * The sync_clear_bit() call in xen_clear_irq_pending() is atomic.
+        * So it is effectively a memory barrier for x86.
+        */
+       if (READ_ONCE(*byte) != val)
+               return;
+
+       /*
+        * If an interrupt happens here, it will leave the wakeup irq
+        * pending, which will cause xen_poll_irq() to return
+        * immediately.
+        */
+
+       /* Block until irq becomes pending (or perhaps a spurious wakeup) */
+       xen_poll_irq(irq);
+}
+
+#else /* CONFIG_QUEUED_SPINLOCK */
+
 enum xen_contention_stat {
        TAKEN_SLOW,
        TAKEN_SLOW_PICKUP,
@@ -100,12 +150,9 @@ struct xen_lock_waiting {
        __ticket_t want;
 };
 
-static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
-static DEFINE_PER_CPU(char *, irq_name);
 static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
 static cpumask_t waiting_cpus;
 
-static bool xen_pvspin = true;
 __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 {
        int irq = __this_cpu_read(lock_kicker_irq);
@@ -217,6 +264,7 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
                }
        }
 }
+#endif /* CONFIG_QUEUED_SPINLOCK */
 
 static irqreturn_t dummy_handler(int irq, void *dev_id)
 {
@@ -280,8 +328,16 @@ void __init xen_init_spinlocks(void)
                return;
        }
        printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
+#ifdef CONFIG_QUEUED_SPINLOCK
+       __pv_init_lock_hash();
+       pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+       pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+       pv_lock_ops.wait = xen_qlock_wait;
+       pv_lock_ops.kick = xen_qlock_kick;
+#else
        pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
        pv_lock_ops.unlock_kick = xen_unlock_kick;
+#endif
 }
 
 /*
@@ -310,7 +366,7 @@ static __init int xen_parse_nopvspin(char *arg)
 }
 early_param("xen_nopvspin", xen_parse_nopvspin);
 
-#ifdef CONFIG_XEN_DEBUG_FS
+#if defined(CONFIG_XEN_DEBUG_FS) && !defined(CONFIG_QUEUED_SPINLOCK)
 
 static struct dentry *d_spin_debug;
 
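The subtle part above is the ordering in xen_qlock_wait(): the pending
kicker IRQ must be cleared before the lock byte is re-checked.  An
illustrative interleaving (not kernel code; the CPU numbering is made
up for the example):

  /*
   * Interleaving handled by the clear-then-recheck ordering:
   *
   *   waiter (CPU0)                 unlocker (CPU1)
   *   -------------                 ---------------
   *   sees *byte == val
   *                                 writes *byte
   *                                 xen_qlock_kick(0)
   *                                   -> kicker IRQ now pending on CPU0
   *   xen_clear_irq_pending(irq)    (swallows that kick ...)
   *   READ_ONCE(*byte) != val       (... but the re-check observes the
   *                                  write, so the waiter returns
   *                                  instead of halting)
   *
   * A kick that arrives after the re-check instead leaves the IRQ
   * pending, and the subsequent xen_poll_irq() returns immediately.
   */
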
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 4379eef9334d16bb44d9591630675aeea31ae822..95dd7587ec342e375ad66bcc8ed0b262edbaae8f 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -240,7 +240,7 @@ config ARCH_USE_QUEUED_SPINLOCK
 
 config QUEUED_SPINLOCK
        def_bool y if ARCH_USE_QUEUED_SPINLOCK
-       depends on SMP && (!PARAVIRT_SPINLOCKS || !XEN)
+       depends on SMP
 
 config ARCH_USE_QUEUE_RWLOCK
        bool
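
Note that the existing xen_nopvspin boot parameter (registered with
early_param() in the spinlock.c hunk above) remains an escape hatch:
when it is set, xen_init_spinlocks() returns before installing any of
the PV hooks and the guest runs the native queued spinlock code.  For
example (the rest of the command line is illustrative):

  linux /boot/vmlinuz root=/dev/xvda1 ro quiet xen_nopvspin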