// SPDX-License-Identifier: GPL-2.0
/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in an FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "debugfs.h"
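
/*
 * Per-cpu state for the pv qspinlock: the event channel irq used to
 * kick a waiting vCPU, and the name under which that irq was bound
 * (kept so it can be freed again in xen_uninit_lock_cpu()).
 */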
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
static bool xen_pvspin = true;

#include <asm/qspinlock.h>
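
/*
 * Kick the target cpu out of xen_poll_irq() by making its kicker
 * event channel irq pending.
 */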
static void xen_qlock_kick(int cpu)
{
	int irq = per_cpu(lock_kicker_irq, cpu);

	/* Don't kick if the target's kicker interrupt is not initialized. */
	if (irq == -1)
		return;

	xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
}

/*
 * Halt the current CPU & release it back to the host
 */
static void xen_qlock_wait(u8 *byte, u8 val)
{
	int irq = __this_cpu_read(lock_kicker_irq);

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return;

	/* Clear any kick left pending from an earlier wait. */
	xen_clear_irq_pending(irq);
	barrier();

	/*
	 * We check the byte value after clearing the pending IRQ to make
	 * sure that we won't miss a wakeup event because of the clearing.
	 *
	 * The sync_clear_bit() call in xen_clear_irq_pending() is atomic.
	 * So it is effectively a memory barrier for x86.
	 */
	if (READ_ONCE(*byte) != val)
		return;

	/*
	 * If an interrupt happens here, it will leave the wakeup irq
	 * pending, which will cause xen_poll_irq() to return immediately.
	 */

	/* Block until irq becomes pending (or perhaps a spurious wakeup) */
	xen_poll_irq(irq);
}
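
/*
 * The kicker irq is disabled as soon as it is bound, so this handler
 * should never actually run; one is required by
 * bind_ipi_to_irqhandler() nevertheless.
 */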
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}

void xen_init_lock_cpu(int cpu)
{
	int irq;
	char *name;

	if (!xen_pvspin)
		return;

	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
	     cpu, per_cpu(lock_kicker_irq, cpu));

	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
				     IRQF_PERCPU|IRQF_NOBALANCING,
				     name,
				     NULL);

	if (irq >= 0) {
		disable_irq(irq); /* make sure it's never delivered */
		per_cpu(lock_kicker_irq, cpu) = irq;
		per_cpu(irq_name, cpu) = name;
	}

	printk("cpu %d spinlock event irq %d\n", cpu, irq);
}

void xen_uninit_lock_cpu(int cpu)
{
	if (!xen_pvspin)
		return;

	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
	per_cpu(lock_kicker_irq, cpu) = -1;
	kfree(per_cpu(irq_name, cpu));
	per_cpu(irq_name, cpu) = NULL;
}
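
/*
 * Emit a callee-saved thunk for xen_vcpu_stolen() so that it can be
 * installed as the vcpu_is_preempted() hook in xen_init_spinlocks()
 * below.
 */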
PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);

/*
 * Our init of PV spinlocks is split into two init functions because we
 * use paravirt patching and jump label patching, and all of this has to
 * be done before the SMP code is invoked.
 *
 * The paravirt patching needs to be done _before_ the alternative asm code
 * is started, otherwise we would not patch the core kernel code.
 */
void __init xen_init_spinlocks(void)
{
	if (!xen_pvspin) {
		printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
		return;
	}
	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");

	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = xen_qlock_wait;
	pv_lock_ops.kick = xen_qlock_kick;
	pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
}
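
/* Handle the "xen_nopvspin" kernel command line parameter. */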
static __init int xen_parse_nopvspin(char *arg)
{
	xen_pvspin = false;
	return 0;
}
early_param("xen_nopvspin", xen_parse_nopvspin);