// SPDX-License-Identifier: GPL-2.0
/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "debugfs.h"

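/*
 * Per-vCPU state for the wait/kick protocol: the IRQ number of this
 * CPU's "kicker" event channel (-1 until xen_init_lock_cpu() has bound
 * it), and the kasprintf()'d name the event channel was bound under.
 */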
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
static bool xen_pvspin = true;

#include <asm/qspinlock.h>
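
/*
 * Kick hook for the paravirt qspinlock slowpath: make the kicker event
 * channel of the target vCPU pending, so that a waiter blocked in
 * xen_qlock_wait() on @cpu returns from xen_poll_irq().
 */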
static void xen_qlock_kick(int cpu)
{
	int irq = per_cpu(lock_kicker_irq, cpu);

	/* Don't kick if the target's kicker interrupt is not initialized. */
	if (irq == -1)
		return;

	xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
}

/*
 * Halt the current CPU & release it back to the host
 */
static void xen_qlock_wait(u8 *byte, u8 val)
{
	int irq = __this_cpu_read(lock_kicker_irq);

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return;

	/* clear pending */
	xen_clear_irq_pending(irq);
	barrier();

	/*
	 * We check the byte value after clearing pending IRQ to make sure
	 * that we won't miss a wakeup event because of the clearing.
	 *
	 * The sync_clear_bit() call in xen_clear_irq_pending() is atomic.
	 * So it is effectively a memory barrier for x86.
	 */
	if (READ_ONCE(*byte) != val)
		return;

	/*
	 * If an interrupt happens here, it will leave the wakeup irq
	 * pending, which will cause xen_poll_irq() to return
	 * immediately.
	 */

	/* Block until irq becomes pending (or perhaps a spurious wakeup) */
	xen_poll_irq(irq);
}
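
/*
 * Why clear-then-check rather than check-then-clear?  An illustrative
 * interleaving (a sketch, not taken from the generic qspinlock code)
 * of the lost-wakeup race the ordering above avoids:
 *
 *   waiter                         kicker
 *   ------                         ------
 *   READ_ONCE(*byte) == val
 *                                  *byte updated by lock holder
 *                                  xen_qlock_kick(cpu)
 *   xen_clear_irq_pending(irq)     <- wakeup discarded
 *   xen_poll_irq(irq)              <- blocks with nothing pending
 *
 * Clearing the pending bit first and re-checking the byte afterwards
 * closes this window: a kick sent after the clear stays pending and
 * makes xen_poll_irq() return immediately.
 */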
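/*
 * The kicker IRQ is bound with this handler but is immediately
 * disabled in xen_init_lock_cpu() and only ever accessed through
 * xen_clear_irq_pending()/xen_poll_irq(), so it must never fire
 * as a regular interrupt.
 */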
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}

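/*
 * Bind the per-CPU "kicker" IPI event channel for @cpu and record its
 * IRQ number.  The IRQ is disabled right away: the channel is used
 * purely as a pollable wakeup flag, never delivered as an interrupt.
 * Note that if the binding fails, the kasprintf()'d name is not freed
 * here; that failure path is not expected to be hit in practice.
 */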
void xen_init_lock_cpu(int cpu)
{
	int irq;
	char *name;

	if (!xen_pvspin)
		return;

	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
	     cpu, per_cpu(lock_kicker_irq, cpu));

	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
				     IRQF_PERCPU|IRQF_NOBALANCING,
				     name,
				     NULL);

	if (irq >= 0) {
		disable_irq(irq); /* make sure it's never delivered */
		per_cpu(lock_kicker_irq, cpu) = irq;
		per_cpu(irq_name, cpu) = name;
	}

	printk("cpu %d spinlock event irq %d\n", cpu, irq);
}

void xen_uninit_lock_cpu(int cpu)
{
	if (!xen_pvspin)
		return;

	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
	per_cpu(lock_kicker_irq, cpu) = -1;
	kfree(per_cpu(irq_name, cpu));
	per_cpu(irq_name, cpu) = NULL;
}

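/*
 * xen_vcpu_stolen() reports whether a vCPU is runnable but not
 * currently running, i.e. its time is being stolen by the hypervisor.
 * The thunk below preserves the registers a C call would clobber, so
 * the function can be installed via PV_CALLEE_SAVE() as the
 * vcpu_is_preempted hook and called with the lightweight callee-save
 * paravirt calling convention.
 */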
PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);

/*
 * Our init of PV spinlocks is split in two init functions due to us
 * using paravirt patching and jump labels patching and having to do
 * all of this before SMP code is invoked.
 *
 * The paravirt patching needs to be done _before_ the alternative asm code
 * is started, otherwise we would not patch the core kernel code.
 */
void __init xen_init_spinlocks(void)
{
	if (!xen_pvspin) {
		printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
		return;
	}
	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");

	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = xen_qlock_wait;
	pv_lock_ops.kick = xen_qlock_kick;
	pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
}

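/*
 * Booting with "xen_nopvspin" on the guest command line disables the
 * paravirt slow paths above and leaves the native qspinlock code in
 * place.
 */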
static __init int xen_parse_nopvspin(char *arg)
{
	xen_pvspin = false;
	return 0;
}
early_param("xen_nopvspin", xen_parse_nopvspin);