// SPDX-License-Identifier: GPL-2.0
/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops. SMP under Xen is
 * very straightforward. Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of. As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/nmi.h>
#include <linux/cpuhotplug.h>
#include <linux/stackprotector.h>
#include <linux/pgtable.h>

#include <asm/paravirt.h>
#include <asm/idtentry.h>
#include <asm/desc.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/io_apic.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/xenpmu.h>

#include <asm/spec-ctrl.h>
#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"
#include "smp.h"
#include "pmu.h"

cpumask_var_t xen_cpu_initialized_map;

static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_pmu_irq) = { .irq = -1 };

static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);

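/*
 * First C code run on a freshly started vCPU: Xen enters the new CPU in
 * cpu_bringup_and_idle(), which calls this to do the per-CPU setup.
 */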
static void cpu_bringup(void)
{
	int cpu;

	cr4_init();
	cpuhp_ap_sync_alive();
	cpu_init();
	fpu__init_cpu();
	touch_softlockup_watchdog();

	/* PVH runs in ring 0 and allows us to do native syscalls. Yay! */
	if (!xen_feature(XENFEAT_supervisor_mode_kernel)) {
		xen_enable_sysenter();
		xen_enable_syscall();
	}
	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);
	set_cpu_sibling_map(cpu);

	speculative_store_bypass_ht_init();

	xen_setup_cpu_clockevents();

	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);

	smp_mb();

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();
}

asmlinkage __visible void cpu_bringup_and_idle(void)
{
	cpu_bringup();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

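/*
 * Tear down the PV-specific per-CPU interrupts (the irq_work IPI and the
 * PMU VIRQ). Also serves as the error path of xen_smp_intr_init_pv().
 */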
void xen_smp_intr_free_pv(unsigned int cpu)
{
	kfree(per_cpu(xen_irq_work, cpu).name);
	per_cpu(xen_irq_work, cpu).name = NULL;
	if (per_cpu(xen_irq_work, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
		per_cpu(xen_irq_work, cpu).irq = -1;
	}

	kfree(per_cpu(xen_pmu_irq, cpu).name);
	per_cpu(xen_pmu_irq, cpu).name = NULL;
	if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
		per_cpu(xen_pmu_irq, cpu).irq = -1;
	}
}

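/*
 * Bind the PV-specific per-CPU event channels: the irq_work IPI and, if
 * the Xen PMU is active, the VIRQ_XENPMU interrupt.
 */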
int xen_smp_intr_init_pv(unsigned int cpu)
{
	int rc;
	char *callfunc_name, *pmu_name;

	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
	per_cpu(xen_irq_work, cpu).name = callfunc_name;
	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
				    cpu,
				    xen_irq_work_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_irq_work, cpu).irq = rc;

	if (is_xen_pmu) {
		pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
		per_cpu(xen_pmu_irq, cpu).name = pmu_name;
		rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
					     xen_pmu_irq_handler,
					     IRQF_PERCPU|IRQF_NOBALANCING,
					     pmu_name, NULL);
		if (rc < 0)
			goto fail;
		per_cpu(xen_pmu_irq, cpu).irq = rc;
	}

	return 0;

fail:
	xen_smp_intr_free_pv(cpu);
	return rc;
}

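/*
 * Register a flat set of fake APIC IDs, one per possible CPU: vCPUs have
 * no useful physical topology (see the comment at the top of this file).
 */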
static void __init xen_pv_smp_config(void)
{
	u32 apicid = 0;
	int i;

	topology_register_boot_apic(apicid++);

	for (i = 1; i < nr_cpu_ids; i++)
		topology_register_apic(apicid++, CPU_ACPIID_INVALID, true);

	/* Pretend to be a proper enumerated system */
	smp_found_config = 1;
}

static void __init xen_pv_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	if (!xen_feature(XENFEAT_writable_page_tables))
		/* We've switched to the "real" per-cpu gdt, so make
		 * sure the old memory can be recycled. */
		make_lowmem_page_readwrite(xen_initial_gdt);

	xen_setup_vcpu_info_placement();

	/*
	 * The alternative logic (which patches the unlock/lock) runs before
	 * the SMP bootup code is activated. Hence we need to set this up
	 * before the core kernel is patched. Otherwise we will have only
	 * modules patched but not core code.
	 */
	xen_init_spinlocks();
}

static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;

	if (ioapic_is_disabled) {
		char *m = (max_cpus == 0) ?
			"The nosmp parameter is incompatible with Xen; " \
			"use Xen dom0_max_vcpus=1 parameter" :
			"The noapic parameter is incompatible with Xen";

		xen_raw_printk(m);
		panic(m);
	}
	xen_init_lock_cpu(0);

	smp_prepare_cpus_common();

	speculative_store_bypass_ht_init();

	xen_pmu_init(0);

	if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0))
		BUG();

	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		set_cpu_possible(cpu, false);
	}

	for_each_possible_cpu(cpu)
		set_cpu_present(cpu, true);
}

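/*
 * Build the initial register state, GDT and callback entry points for a
 * new vCPU and hand them to Xen via VCPUOP_initialise.
 */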
static int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL) {
		cpumask_clear_cpu(cpu, xen_cpu_initialized_map);
		return -ENOMEM;
	}

	gdt = get_cpu_gdt_rw(cpu);

	/*
	 * Bring up the CPU in cpu_bringup_and_idle() with the stack
	 * pointing just below where pt_regs would be if it were a normal
	 * kernel entry.
	 */
	ctxt->user_regs.eip = (unsigned long)asm_cpu_bringup_and_idle;
	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
	ctxt->user_regs.ds = __USER_DS;
	ctxt->user_regs.es = __USER_DS;
	ctxt->user_regs.ss = __KERNEL_DS;
	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = (unsigned long)task_pt_regs(idle);

	xen_copy_trap_info(ctxt->trap_ctxt);

	BUG_ON((unsigned long)gdt & ~PAGE_MASK);

	gdt_mfn = arbitrary_virt_to_mfn(gdt);
	make_lowmem_page_readonly(gdt);
	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

	ctxt->gdt_frames[0] = gdt_mfn;
	ctxt->gdt_ents = GDT_ENTRIES;

	/*
	 * Set SS:SP that Xen will use when entering guest kernel mode
	 * from guest user mode. Subsequent calls to load_sp0() can
	 * change this value.
	 */
	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = task_top_of_stack(idle);

	ctxt->gs_base_kernel = per_cpu_offset(cpu);
	ctxt->event_callback_eip =
		(unsigned long)xen_asm_exc_xen_hypervisor_callback;
	ctxt->failsafe_callback_eip =
		(unsigned long)xen_failsafe_callback;
	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);

	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}

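/*
 * Bring a secondary CPU up: initialize the vCPU context once, then ask
 * Xen to start it running with VCPUOP_up.
 */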
static int xen_pv_kick_ap(unsigned int cpu, struct task_struct *idle)
{
	int rc;

	rc = common_cpu_up(cpu, idle);
	if (rc)
		return rc;

	xen_setup_runstate_info(cpu);

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	xen_pmu_init(cpu);

	/*
	 * Why is this a BUG? If the hypercall fails then everything can be
	 * rolled back?
	 */
	BUG_ON(HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL));

	return 0;
}

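/*
 * Called while polling for a CPU state change: yield this vCPU instead
 * of spinning.
 */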
static void xen_pv_poll_sync_state(void)
{
	HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
}

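/*
 * CPU hotplug: a vCPU is taken offline with VCPUOP_down and can later be
 * brought back up again, with its context preserved by Xen.
 */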
#ifdef CONFIG_HOTPLUG_CPU
static int xen_pv_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	if (cpu == 0)
		return -EBUSY;

	cpu_disable_common();

	load_cr3(swapper_pg_dir);
	return 0;
}

static void xen_pv_cpu_die(unsigned int cpu)
{
	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, xen_vcpu_nr(cpu), NULL)) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ/10);
	}
}

static void xen_pv_cleanup_dead_cpu(unsigned int cpu)
{
	xen_smp_intr_free(cpu);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);
	xen_pmu_finish(cpu);
}

static void __noreturn xen_pv_play_dead(void) /* used only with HOTPLUG_CPU */
{
	play_dead_common();
	HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(smp_processor_id()), NULL);
	xen_cpu_bringup_again((unsigned long)task_pt_regs(current));
	BUG();
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_pv_cpu_disable(void)
{
	return -ENOSYS;
}

static void xen_pv_cpu_die(unsigned int cpu)
{
	BUG();
}

static void xen_pv_cleanup_dead_cpu(unsigned int cpu)
{
	BUG();
}

static void __noreturn xen_pv_play_dead(void)
{
	BUG();
}
#endif

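/*
 * IPI callback for stop_other_cpus(): mark the CPU offline and take the
 * underlying vCPU down.
 */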
static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);
	/* should set up a minimal gdt */

	set_cpu_online(cpu, false);

	HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL);
	BUG();
}

static void xen_pv_stop_other_cpus(int wait)
{
	smp_call_function(stop_self, NULL, wait);
}

static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
	irq_work_run();
	inc_irq_stat(apic_irq_work_irqs);

	return IRQ_HANDLED;
}

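/*
 * Count the vCPUs provided by the hypervisor by probing each ID with
 * VCPUOP_is_up until the hypercall fails, then shrink nr_cpu_ids to match.
 */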
void __init xen_smp_count_cpus(void)
{
	unsigned int cpus;

	for (cpus = 0; cpus < nr_cpu_ids; cpus++) {
		if (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpus, NULL) < 0)
			break;
	}

	pr_info("Xen PV: Detected %u vCPUs\n", cpus);
	if (cpus < nr_cpu_ids)
		set_nr_cpu_ids(cpus);
}

static const struct smp_ops xen_smp_ops __initconst = {
	.smp_prepare_boot_cpu = xen_pv_smp_prepare_boot_cpu,
	.smp_prepare_cpus = xen_pv_smp_prepare_cpus,
	.smp_cpus_done = xen_smp_cpus_done,

	.kick_ap_alive = xen_pv_kick_ap,
	.cpu_die = xen_pv_cpu_die,
	.cleanup_dead_cpu = xen_pv_cleanup_dead_cpu,
	.poll_sync_state = xen_pv_poll_sync_state,
	.cpu_disable = xen_pv_cpu_disable,
	.play_dead = xen_pv_play_dead,

	.stop_other_cpus = xen_pv_stop_other_cpus,
	.smp_send_reschedule = xen_smp_send_reschedule,

	.send_call_func_ipi = xen_smp_send_call_function_ipi,
	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
	smp_ops = xen_smp_ops;

	/* Avoid searching for BIOS MP tables */
	x86_init.mpparse.find_mptable = x86_init_noop;
	x86_init.mpparse.early_parse_smp_cfg = x86_init_noop;

	/* Xen PV Dom0 has halfway sane topology information via CPUID/MADT */
	if (xen_initial_domain())
		x86_init.mpparse.parse_smp_cfg = x86_init_noop;
	else
		x86_init.mpparse.parse_smp_cfg = xen_pv_smp_config;
}