// SPDX-License-Identifier: GPL-2.0
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "smp.h"

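/*
 * Per-CPU IPI/VIRQ bindings.  An .irq of -1 marks a binding as not yet
 * allocated, which lets xen_smp_intr_free() skip it safely.
 */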
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/*
 * Reschedule callback.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
        inc_irq_stat(irq_resched_count);
        scheduler_ipi();

        return IRQ_HANDLED;
}

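/*
 * Tear down this CPU's IPI/VIRQ bindings and free the irqaction names.
 * Safe to call on a partially initialized CPU: entries still at -1 are
 * skipped, which is what the error path in xen_smp_intr_init() relies on.
 */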
void xen_smp_intr_free(unsigned int cpu)
{
        if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
                per_cpu(xen_resched_irq, cpu).irq = -1;
                kfree(per_cpu(xen_resched_irq, cpu).name);
                per_cpu(xen_resched_irq, cpu).name = NULL;
        }
        if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
                per_cpu(xen_callfunc_irq, cpu).irq = -1;
                kfree(per_cpu(xen_callfunc_irq, cpu).name);
                per_cpu(xen_callfunc_irq, cpu).name = NULL;
        }
        if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
                per_cpu(xen_debug_irq, cpu).irq = -1;
                kfree(per_cpu(xen_debug_irq, cpu).name);
                per_cpu(xen_debug_irq, cpu).name = NULL;
        }
        if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
                                       NULL);
                per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
                kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
                per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
        }
}

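/*
 * Bind the reschedule, call-function, debug and call-function-single
 * event sources for @cpu.  Returns 0 on success; on failure every
 * binding set up so far is unwound via xen_smp_intr_free().
 */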
int xen_smp_intr_init(unsigned int cpu)
{
        int rc;
        char *resched_name, *callfunc_name, *debug_name;

        resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
        if (!resched_name)
                goto fail_mem;
        rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
                                    cpu,
                                    xen_reschedule_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    resched_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_resched_irq, cpu).irq = rc;
        per_cpu(xen_resched_irq, cpu).name = resched_name;

        callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
        if (!callfunc_name)
                goto fail_mem;
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
                                    cpu,
                                    xen_call_function_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfunc_irq, cpu).irq = rc;
        per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;

        debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
        if (!debug_name)
                goto fail_mem;
        rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
                                     IRQF_PERCPU | IRQF_NOBALANCING,
                                     debug_name, NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_debug_irq, cpu).irq = rc;
        per_cpu(xen_debug_irq, cpu).name = debug_name;

        callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
        if (!callfunc_name)
                goto fail_mem;
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
                                    cpu,
                                    xen_call_function_single_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
        per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;

        return 0;

 fail_mem:
        rc = -ENOMEM;
 fail:
        xen_smp_intr_free(cpu);
        return rc;
}

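/*
 * Late SMP bringup fixup: without vcpu_info placement, only the first
 * MAX_VIRT_CPUS vCPUs can use the shared_info page, so any CPU beyond
 * that limit is taken back offline here.
 */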
void __init xen_smp_cpus_done(unsigned int max_cpus)
{
        int cpu, rc, count = 0;

        if (xen_hvm_domain())
                native_smp_cpus_done(max_cpus);
        else
                calculate_max_logical_packages();

        if (xen_have_vcpu_info_placement)
                return;

        for_each_online_cpu(cpu) {
                if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS)
                        continue;

                rc = cpu_down(cpu);

                if (rc == 0) {
                        /*
                         * Reset vcpu_info so this cpu cannot be onlined again.
                         */
                        xen_vcpu_info_reset(cpu);
                        count++;
                } else {
                        pr_warn("%s: failed to bring CPU %d down, error %d\n",
                                __func__, cpu, rc);
                }
        }
        WARN(count, "%s: brought %d CPUs offline\n", __func__, count);
}

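/* Kick a single remote vCPU; handled by xen_reschedule_interrupt() above. */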
void xen_smp_send_reschedule(int cpu)
{
        xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

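/* Deliver @vector to every online CPU in @mask, one event channel at a time. */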
static void __xen_send_IPI_mask(const struct cpumask *mask,
                                int vector)
{
        unsigned int cpu;

        for_each_cpu_and(cpu, mask, cpu_online_mask)
                xen_send_IPI_one(cpu, vector);
}

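/*
 * Cross-call to a set of CPUs.  If any target vCPU has been preempted by
 * the hypervisor, yield so it gets a chance to run the queued function.
 */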
void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
        int cpu;

        __xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

        /* Make sure other vcpus get a chance to run if they need to. */
        for_each_cpu(cpu, mask) {
                if (xen_vcpu_stolen(cpu)) {
                        HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                        break;
                }
        }
}

void xen_smp_send_call_function_single_ipi(int cpu)
{
        __xen_send_IPI_mask(cpumask_of(cpu),
                            XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

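/*
 * Translate a native x86 IPI vector into the corresponding Xen IPI
 * vector, or return -1 (after logging an error) if there is no mapping.
 */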
static inline int xen_map_vector(int vector)
{
        int xen_vector;

        switch (vector) {
        case RESCHEDULE_VECTOR:
                xen_vector = XEN_RESCHEDULE_VECTOR;
                break;
        case CALL_FUNCTION_VECTOR:
                xen_vector = XEN_CALL_FUNCTION_VECTOR;
                break;
        case CALL_FUNCTION_SINGLE_VECTOR:
                xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
                break;
        case IRQ_WORK_VECTOR:
                xen_vector = XEN_IRQ_WORK_VECTOR;
                break;
#ifdef CONFIG_X86_64
        case NMI_VECTOR:
        case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
                xen_vector = XEN_NMI_VECTOR;
                break;
#endif
        default:
                xen_vector = -1;
                pr_err("xen: vector 0x%x is not implemented\n", vector);
        }

        return xen_vector;
}

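/*
 * Generic IPI entry points: these take native x86 vectors and remap them
 * onto Xen event channels via xen_map_vector() before sending.
 */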
void xen_send_IPI_mask(const struct cpumask *mask,
                       int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                __xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                __xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                xen_send_IPI_one(smp_processor_id(), xen_vector);
}

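/*
 * Send @vector to every online CPU in @mask except the calling CPU.
 * Nothing to do when this is the only online CPU or @vector is unmapped.
 */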
void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
                                  int vector)
{
        unsigned int cpu;
        unsigned int this_cpu = smp_processor_id();
        int xen_vector = xen_map_vector(vector);

        if (num_online_cpus() <= 1 || xen_vector < 0)
                return;

        for_each_cpu_and(cpu, mask, cpu_online_mask) {
                if (this_cpu == cpu)
                        continue;

                xen_send_IPI_one(cpu, xen_vector);
        }
}

void xen_send_IPI_allbutself(int vector)
{
        xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

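/*
 * IPI handlers for the two call-function vectors: run the pending
 * cross-CPU function(s) and account the interrupt.
 */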
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_single_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}