Revert "hwmon: (sch56xx-common) Add DMI override table"
[linux-block.git] / kernel / smp.c
CommitLineData
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>
#include <linux/sched/clock.h>
#include <linux/nmi.h>
#include <linux/sched/debug.h>
#include <linux/jump_label.h>

#include <trace/events/ipi.h>
#define CREATE_TRACE_POINTS
#include <trace/events/csd.h>
#undef CREATE_TRACE_POINTS

#include "smpboot.h"
#include "sched/smp.h"

#define CSD_TYPE(_csd)  ((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)

struct call_function_data {
        call_single_data_t __percpu *csd;
        cpumask_var_t           cpumask;
        cpumask_var_t           cpumask_ipi;
};

static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static DEFINE_PER_CPU(atomic_t, trigger_backtrace) = ATOMIC_INIT(1);

static void __flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                     cpu_to_node(cpu)))
                return -ENOMEM;
        if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
                                     cpu_to_node(cpu))) {
                free_cpumask_var(cfd->cpumask);
                return -ENOMEM;
        }
        cfd->csd = alloc_percpu(call_single_data_t);
        if (!cfd->csd) {
                free_cpumask_var(cfd->cpumask);
                free_cpumask_var(cfd->cpumask_ipi);
                return -ENOMEM;
        }

        return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        free_cpumask_var(cfd->cpumask);
        free_cpumask_var(cfd->cpumask_ipi);
        free_percpu(cfd->csd);
        return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
        /*
         * The IPIs for the smp-call-function callbacks queued by other
         * CPUs might arrive late, either due to hardware latencies or
         * because this CPU disabled interrupts (inside stop-machine)
         * before the IPIs were sent. So flush out any pending callbacks
         * explicitly (without waiting for the IPIs to arrive), to
         * ensure that the outgoing CPU doesn't go offline with work
         * still pending.
         */
        __flush_smp_call_function_queue(false);
        irq_work_run();
        return 0;
}

void __init call_function_init(void)
{
        int i;

        for_each_possible_cpu(i)
                init_llist_head(&per_cpu(call_single_queue, i));

        smpcfd_prepare_cpu(smp_processor_id());
}

static __always_inline void
send_call_function_single_ipi(int cpu)
{
        if (call_function_single_prep_ipi(cpu)) {
                trace_ipi_send_cpu(cpu, _RET_IP_,
                                   generic_smp_call_function_single_interrupt);
                arch_send_call_function_single_ipi(cpu);
        }
}

static __always_inline void
send_call_function_ipi_mask(struct cpumask *mask)
{
        trace_ipi_send_cpumask(mask, _RET_IP_,
                               generic_smp_call_function_single_interrupt);
        arch_send_call_function_ipi_mask(mask);
}

static __always_inline void
csd_do_func(smp_call_func_t func, void *info, struct __call_single_data *csd)
{
        trace_csd_function_entry(func, csd);
        func(info);
        trace_csd_function_exit(func, csd);
}

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG

static DEFINE_STATIC_KEY_MAYBE(CONFIG_CSD_LOCK_WAIT_DEBUG_DEFAULT, csdlock_debug_enabled);

/*
 * Parse the csdlock_debug= kernel boot parameter.
 *
 * If you need to restore the old "ext" value that once provided
 * additional debugging information, reapply the following commits:
 *
 * de7b09ef658d ("locking/csd_lock: Prepare more CSD lock debugging")
 * a5aabace5fb8 ("locking/csd_lock: Add more data to CSD lock debugging")
 */
static int __init csdlock_debug(char *str)
{
        int ret;
        unsigned int val = 0;

        ret = get_option(&str, &val);
        if (ret) {
                if (val)
                        static_branch_enable(&csdlock_debug_enabled);
                else
                        static_branch_disable(&csdlock_debug_enabled);
        }

        return 1;
}
__setup("csdlock_debug=", csdlock_debug);

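/*
 * Example (illustrative): with CONFIG_CSD_LOCK_WAIT_DEBUG enabled,
 * booting with "csdlock_debug=1" on the kernel command line turns the
 * static branch above on, and "csdlock_debug=0" turns it off again.
 */
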
static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
static DEFINE_PER_CPU(void *, cur_csd_info);

static ulong csd_lock_timeout = 5000; /* CSD lock timeout in milliseconds. */
module_param(csd_lock_timeout, ulong, 0444);

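/*
 * Example (illustrative): as this code is built in, the parameter is
 * set on the kernel command line with the KBUILD_MODNAME prefix, e.g.
 * "smp.csd_lock_timeout=10000" for a ten-second timeout. A value of 0
 * disables the timeout check in csd_lock_wait_toolong() entirely.
 */
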
static atomic_t csd_bug_count = ATOMIC_INIT(0);

/* Record current CSD work for current CPU, NULL to erase. */
static void __csd_lock_record(struct __call_single_data *csd)
{
        if (!csd) {
                smp_mb(); /* NULL cur_csd after unlock. */
                __this_cpu_write(cur_csd, NULL);
                return;
        }
        __this_cpu_write(cur_csd_func, csd->func);
        __this_cpu_write(cur_csd_info, csd->info);
        smp_wmb(); /* func and info before csd. */
        __this_cpu_write(cur_csd, csd);
        smp_mb(); /* Update cur_csd before function call. */
        /* Or before unlock, as the case may be. */
}

static __always_inline void csd_lock_record(struct __call_single_data *csd)
{
        if (static_branch_unlikely(&csdlock_debug_enabled))
                __csd_lock_record(csd);
}

static int csd_lock_wait_getcpu(struct __call_single_data *csd)
{
        unsigned int csd_type;

        csd_type = CSD_TYPE(csd);
        if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
                return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */
        return -1;
}

/*
 * Complain if too much time spent waiting. Note that only
 * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
 * so waiting on other types gets much less information.
 */
static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
{
        int cpu = -1;
        int cpux;
        bool firsttime;
        u64 ts2, ts_delta;
        call_single_data_t *cpu_cur_csd;
        unsigned int flags = READ_ONCE(csd->node.u_flags);
        unsigned long long csd_lock_timeout_ns = csd_lock_timeout * NSEC_PER_MSEC;

        if (!(flags & CSD_FLAG_LOCK)) {
                if (!unlikely(*bug_id))
                        return true;
                cpu = csd_lock_wait_getcpu(csd);
                pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
                         *bug_id, raw_smp_processor_id(), cpu);
                return true;
        }

        ts2 = sched_clock();
        ts_delta = ts2 - *ts1;
        if (likely(ts_delta <= csd_lock_timeout_ns || csd_lock_timeout_ns == 0))
                return false;

        firsttime = !*bug_id;
        if (firsttime)
                *bug_id = atomic_inc_return(&csd_bug_count);
        cpu = csd_lock_wait_getcpu(csd);
        if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
                cpux = 0;
        else
                cpux = cpu;
        cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
        pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
                 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0,
                 cpu, csd->func, csd->info);
        if (cpu_cur_csd && csd != cpu_cur_csd) {
                pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
                         *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
                         READ_ONCE(per_cpu(cur_csd_info, cpux)));
        } else {
                pr_alert("\tcsd: CSD lock (#%d) %s.\n",
                         *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
        }
        if (cpu >= 0) {
                if (atomic_cmpxchg_acquire(&per_cpu(trigger_backtrace, cpu), 1, 0))
                        dump_cpu_task(cpu);
                if (!cpu_cur_csd) {
                        pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
                        arch_send_call_function_single_ipi(cpu);
                }
        }
        if (firsttime)
                dump_stack();
        *ts1 = ts2;

        return false;
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void __csd_lock_wait(struct __call_single_data *csd)
{
        int bug_id = 0;
        u64 ts0, ts1;

        ts1 = ts0 = sched_clock();
        for (;;) {
                if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id))
                        break;
                cpu_relax();
        }
        smp_acquire__after_ctrl_dep();
}

static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
        if (static_branch_unlikely(&csdlock_debug_enabled)) {
                __csd_lock_wait(csd);
                return;
        }

        smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}
#else
static void csd_lock_record(struct __call_single_data *csd)
{
}

static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
        smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}
#endif

static __always_inline void csd_lock(struct __call_single_data *csd)
{
        csd_lock_wait(csd);
        csd->node.u_flags |= CSD_FLAG_LOCK;

        /*
         * prevent CPU from reordering the above assignment
         * to ->flags with any subsequent assignments to other
         * fields of the specified call_single_data_t structure:
         */
        smp_wmb();
}

static __always_inline void csd_unlock(struct __call_single_data *csd)
{
        WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));

        /*
         * ensure we're all done before releasing data:
         */
        smp_store_release(&csd->node.u_flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

void __smp_call_single_queue(int cpu, struct llist_node *node)
{
        /*
         * We have to check the type of the CSD before queueing it, because
         * once queued it can have its flags cleared by
         *   flush_smp_call_function_queue()
         * even if we haven't sent the smp_call IPI yet (e.g. the stopper
         * executes migration_cpu_stop() on the remote CPU).
         */
        if (trace_csd_queue_cpu_enabled()) {
                call_single_data_t *csd;
                smp_call_func_t func;

                csd = container_of(node, call_single_data_t, node.llist);
                func = CSD_TYPE(csd) == CSD_TYPE_TTWU ?
                        sched_ttwu_pending : csd->func;

                trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);
        }

        /*
         * The list addition should be visible to the target CPU when it pops
         * the head of the list to pull the entry off it in the IPI handler
         * because of normal cache coherency rules implied by the underlying
         * llist ops.
         *
         * If IPIs can go out of order with respect to the cache coherency
         * protocol in an architecture, sufficient synchronisation should be
         * added to arch code to make it appear to obey cache coherency WRT
         * locking and barrier primitives. Generic code isn't really
         * equipped to do the right thing...
         */
        if (llist_add(node, &per_cpu(call_single_queue, cpu)))
                send_call_function_single_ipi(cpu);
}

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct __call_single_data *csd)
{
        if (cpu == smp_processor_id()) {
                smp_call_func_t func = csd->func;
                void *info = csd->info;
                unsigned long flags;

                /*
                 * We can unlock early even for the synchronous on-stack case,
                 * since we're doing this from the same CPU..
                 */
                csd_lock_record(csd);
                csd_unlock(csd);
                local_irq_save(flags);
                csd_do_func(func, info, NULL);
                csd_lock_record(NULL);
                local_irq_restore(flags);
                return 0;
        }

        if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
                csd_unlock(csd);
                return -ENXIO;
        }

        __smp_call_single_queue(cpu, &csd->node.llist);

        return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
        __flush_smp_call_function_queue(true);
}

/**
 * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *                    offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void __flush_smp_call_function_queue(bool warn_cpu_offline)
{
        call_single_data_t *csd, *csd_next;
        struct llist_node *entry, *prev;
        struct llist_head *head;
        static bool warned;
        atomic_t *tbt;

        lockdep_assert_irqs_disabled();

        /* Allow waiters to send backtrace NMI from here onwards */
        tbt = this_cpu_ptr(&trigger_backtrace);
        atomic_set_release(tbt, 1);

        head = this_cpu_ptr(&call_single_queue);
        entry = llist_del_all(head);
        entry = llist_reverse_order(entry);

        /* There shouldn't be any pending callbacks on an offline CPU. */
        if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
                     !warned && entry != NULL)) {
                warned = true;
                WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

                /*
                 * We don't have to use the _safe() variant here
                 * because we are not invoking the IPI handlers yet.
                 */
                llist_for_each_entry(csd, entry, node.llist) {
                        switch (CSD_TYPE(csd)) {
                        case CSD_TYPE_ASYNC:
                        case CSD_TYPE_SYNC:
                        case CSD_TYPE_IRQ_WORK:
                                pr_warn("IPI callback %pS sent to offline CPU\n",
                                        csd->func);
                                break;

                        case CSD_TYPE_TTWU:
                                pr_warn("IPI task-wakeup sent to offline CPU\n");
                                break;

                        default:
                                pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
                                        CSD_TYPE(csd));
                                break;
                        }
                }
        }

        /*
         * First; run all SYNC callbacks, people are waiting for us.
         */
        prev = NULL;
        llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
                /* Do we wait until *after* callback? */
                if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
                        smp_call_func_t func = csd->func;
                        void *info = csd->info;

                        if (prev) {
                                prev->next = &csd_next->node.llist;
                        } else {
                                entry = &csd_next->node.llist;
                        }

                        csd_lock_record(csd);
                        csd_do_func(func, info, csd);
                        csd_unlock(csd);
                        csd_lock_record(NULL);
                } else {
                        prev = &csd->node.llist;
                }
        }

        if (!entry)
                return;

        /*
         * Second; run all !SYNC callbacks.
         */
        prev = NULL;
        llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
                int type = CSD_TYPE(csd);

                if (type != CSD_TYPE_TTWU) {
                        if (prev) {
                                prev->next = &csd_next->node.llist;
                        } else {
                                entry = &csd_next->node.llist;
                        }

                        if (type == CSD_TYPE_ASYNC) {
                                smp_call_func_t func = csd->func;
                                void *info = csd->info;

                                csd_lock_record(csd);
                                csd_unlock(csd);
                                csd_do_func(func, info, csd);
                                csd_lock_record(NULL);
                        } else if (type == CSD_TYPE_IRQ_WORK) {
                                irq_work_single(csd);
                        }

                } else {
                        prev = &csd->node.llist;
                }
        }

        /*
         * Third; only CSD_TYPE_TTWU is left, issue those.
         */
        if (entry) {
                csd = llist_entry(entry, typeof(*csd), node.llist);
                csd_do_func(sched_ttwu_pending, entry, csd);
        }
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *                                 from task context (idle, migration thread)
 *
 * When TIF_POLLING_NRFLAG is supported and a CPU is in idle and has it
 * set, then remote CPUs can avoid sending IPIs and wake the idle CPU by
 * setting TIF_NEED_RESCHED. The idle task on the woken up CPU has to
 * handle queued SMP function calls before scheduling.
 *
 * The migration thread has to ensure that an eventually pending wakeup has
 * been handled before it migrates a task.
 */
void flush_smp_call_function_queue(void)
{
        unsigned int was_pending;
        unsigned long flags;

        if (llist_empty(this_cpu_ptr(&call_single_queue)))
                return;

        local_irq_save(flags);
        /* Get the already pending soft interrupts for RT enabled kernels */
        was_pending = local_softirq_pending();
        __flush_smp_call_function_queue(true);
        if (local_softirq_pending())
                do_softirq_post_smp_call_flush(was_pending);

        local_irq_restore(flags);
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run @func on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
                             int wait)
{
        call_single_data_t *csd;
        call_single_data_t csd_stack = {
                .node = { .u_flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC, },
        };
        int this_cpu;
        int err;

        /*
         * prevent preemption and reschedule on another processor,
         * as well as CPU removal
         */
        this_cpu = get_cpu();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow CPUs that are not yet online though, as no one else can
         * send an smp call function interrupt to this CPU and as such
         * deadlocks can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress);

        /*
         * When @wait we can deadlock when we interrupt between llist_add() and
         * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
         * csd_lock() because the interrupt context uses the same csd
         * storage.
         */
        WARN_ON_ONCE(!in_task());

        csd = &csd_stack;
        if (!wait) {
                csd = this_cpu_ptr(&csd_data);
                csd_lock(csd);
        }

        csd->func = func;
        csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
        csd->node.src = smp_processor_id();
        csd->node.dst = cpu;
#endif

        err = generic_exec_single(cpu, csd);

        if (wait)
                csd_lock_wait(csd);

        put_cpu();

        return err;
}
EXPORT_SYMBOL(smp_call_function_single);

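/*
 * Usage sketch (illustrative, not part of this file): run a fast,
 * non-blocking callback on CPU 2 and wait for it to complete. The
 * callback and variable names are hypothetical.
 *
 *      static void my_remote_work(void *info)
 *      {
 *              int *ret = info;
 *
 *              *ret = smp_processor_id();      // runs on the target CPU
 *      }
 *
 *      int val, err;
 *
 *      err = smp_call_function_single(2, my_remote_work, &val, 1);
 *      if (!err)
 *              pr_info("callback ran on CPU %d\n", val);
 */
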
/**
 * smp_call_function_single_async() - Run an asynchronous function on a
 *                                    specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * If the function is called with one csd which has not yet been
 * processed by a previous call to smp_call_function_single_async(), the
 * function will return immediately with -EBUSY showing that the csd
 * object is still in progress.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 *
 * Return: %0 on success or negative errno value on error
 */
int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
{
        int err = 0;

        preempt_disable();

        if (csd->node.u_flags & CSD_FLAG_LOCK) {
                err = -EBUSY;
                goto out;
        }

        csd->node.u_flags = CSD_FLAG_LOCK;
        smp_wmb();

        err = generic_exec_single(cpu, csd);

out:
        preempt_enable();

        return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);

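/*
 * Usage sketch (illustrative, not part of this file): embed the csd in
 * a long-lived object so the same storage is reused for each request;
 * struct my_dev and my_dev_kick() are hypothetical.
 *
 *      struct my_dev {
 *              call_single_data_t csd;         // must outlive the IPI
 *              ...
 *      };
 *
 *      // once, at init time:
 *      INIT_CSD(&dev->csd, my_dev_kick, dev);
 *
 *      // later, possibly with interrupts disabled:
 *      err = smp_call_function_single_async(cpu, &dev->csd);
 *      // -EBUSY: the previous request on this csd is still in flight
 */
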
/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *      1) current cpu if in @mask
 *      2) any cpu of current node if in @mask
 *      3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
                          smp_call_func_t func, void *info, int wait)
{
        unsigned int cpu;
        const struct cpumask *nodemask;
        int ret;

        /* Try for same CPU (cheapest) */
        cpu = get_cpu();
        if (cpumask_test_cpu(cpu, mask))
                goto call;

        /* Try for same node. */
        nodemask = cpumask_of_node(cpu_to_node(cpu));
        for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
             cpu = cpumask_next_and(cpu, nodemask, mask)) {
                if (cpu_online(cpu))
                        goto call;
        }

        /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
        cpu = cpumask_any_and(mask, cpu_online_mask);
call:
        ret = smp_call_function_single(cpu, func, info, wait);
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);

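/*
 * Usage sketch (illustrative): run a callback on whichever CPU of a
 * core's sibling mask is cheapest to reach, preferring the local CPU;
 * my_read_sensor() and data are hypothetical.
 *
 *      err = smp_call_function_any(topology_core_cpumask(cpu),
 *                                  my_read_sensor, &data, 1);
 */
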
/*
 * Flags to be used as scf_flags argument of smp_call_function_many_cond().
 *
 * %SCF_WAIT:           Wait until function execution is completed
 * %SCF_RUN_LOCAL:      Run also locally if local cpu is set in cpumask
 */
#define SCF_WAIT        (1U << 0)
#define SCF_RUN_LOCAL   (1U << 1)

static void smp_call_function_many_cond(const struct cpumask *mask,
                                        smp_call_func_t func, void *info,
                                        unsigned int scf_flags,
                                        smp_cond_func_t cond_func)
{
        int cpu, last_cpu, this_cpu = smp_processor_id();
        struct call_function_data *cfd;
        bool wait = scf_flags & SCF_WAIT;
        int nr_cpus = 0;
        bool run_remote = false;
        bool run_local = false;

        lockdep_assert_preemption_disabled();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow CPUs that are not yet online though, as no one else can
         * send an smp call function interrupt to this CPU and as such
         * deadlocks can't happen.
         */
        if (cpu_online(this_cpu) && !oops_in_progress &&
            !early_boot_irqs_disabled)
                lockdep_assert_irqs_enabled();

        /*
         * When @wait we can deadlock when we interrupt between llist_add() and
         * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
         * csd_lock() because the interrupt context uses the same csd
         * storage.
         */
        WARN_ON_ONCE(!in_task());

        /* Check if we need local execution. */
        if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask))
                run_local = true;

        /* Check if we need remote execution, i.e., any CPU excluding this one. */
        cpu = cpumask_first_and(mask, cpu_online_mask);
        if (cpu == this_cpu)
                cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
        if (cpu < nr_cpu_ids)
                run_remote = true;

        if (run_remote) {
                cfd = this_cpu_ptr(&cfd_data);
                cpumask_and(cfd->cpumask, mask, cpu_online_mask);
                __cpumask_clear_cpu(this_cpu, cfd->cpumask);

                cpumask_clear(cfd->cpumask_ipi);
                for_each_cpu(cpu, cfd->cpumask) {
                        call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

                        if (cond_func && !cond_func(cpu, info)) {
                                __cpumask_clear_cpu(cpu, cfd->cpumask);
                                continue;
                        }

                        csd_lock(csd);
                        if (wait)
                                csd->node.u_flags |= CSD_TYPE_SYNC;
                        csd->func = func;
                        csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
                        csd->node.src = smp_processor_id();
                        csd->node.dst = cpu;
#endif
                        trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);

                        if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
                                __cpumask_set_cpu(cpu, cfd->cpumask_ipi);
                                nr_cpus++;
                                last_cpu = cpu;
                        }
                }

                /*
                 * Choose the most efficient way to send an IPI. Note that the
                 * number of CPUs might be zero due to concurrent changes to the
                 * provided mask.
                 */
                if (nr_cpus == 1)
                        send_call_function_single_ipi(last_cpu);
                else if (likely(nr_cpus > 1))
                        send_call_function_ipi_mask(cfd->cpumask_ipi);
        }

        if (run_local && (!cond_func || cond_func(this_cpu, info))) {
                unsigned long flags;

                local_irq_save(flags);
                csd_do_func(func, info, NULL);
                local_irq_restore(flags);
        }

        if (run_remote && wait) {
                for_each_cpu(cpu, cfd->cpumask) {
                        call_single_data_t *csd;

                        csd = per_cpu_ptr(cfd->csd, cpu);
                        csd_lock_wait(csd);
                }
        }
}

/**
 * smp_call_function_many(): Run a function on a set of CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: Bitmask that controls the operation. If %SCF_WAIT is set, wait
 *        (atomically) until function has completed on other CPUs. If
 *        %SCF_RUN_LOCAL is set, the function will also be run locally
 *        if the local CPU is set in the @cpumask.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
                            smp_call_func_t func, void *info, bool wait)
{
        smp_call_function_many_cond(mask, func, info, wait * SCF_WAIT, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);
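
/*
 * Usage sketch (illustrative): invalidate a per-CPU cache on all other
 * online CPUs and wait; my_invalidate_local() is hypothetical.
 *
 *      get_cpu();              // preemption must be disabled
 *      smp_call_function_many(cpu_online_mask, my_invalidate_local,
 *                             NULL, true);
 *      put_cpu();
 */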

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
        preempt_disable();
        smp_call_function_many(cpu_online_mask, func, info, wait);
        preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak __init arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
        setup_max_cpus = 0;
        arch_disable_smp_support();

        return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
        int nr_cpus;

        if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
                set_nr_cpu_ids(nr_cpus);

        return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
        get_option(&str, &setup_max_cpus);
        if (setup_max_cpus == 0)
                arch_disable_smp_support();

        return 0;
}

early_param("maxcpus", maxcpus);

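/*
 * Example (illustrative): booting with "nr_cpus=4" caps the number of
 * possible CPU ids at four (a hard limit), whereas "maxcpus=2" only
 * limits how many CPUs are brought up during boot; the remaining CPUs
 * can still be onlined later via sysfs.
 */
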
#if (NR_CPUS > 1) && !defined(CONFIG_FORCE_NR_CPUS)
/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);
#endif

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
        set_nr_cpu_ids(find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1);
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
        int num_nodes, num_cpus;

        idle_threads_init();
        cpuhp_threads_init();

        pr_info("Bringing up secondary CPUs ...\n");

        bringup_nonboot_cpus(setup_max_cpus);

        num_nodes = num_online_nodes();
        num_cpus  = num_online_cpus();
        pr_info("Brought up %d node%s, %d CPU%s\n",
                num_nodes, (num_nodes > 1 ? "s" : ""),
                num_cpus,  (num_cpus  > 1 ? "s" : ""));

        /* Any cleanup work */
        smp_cpus_done(setup_max_cpus);
}

/*
 * on_each_cpu_cond_mask(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:  A callback function that is passed a cpu id and
 *              the info parameter. The function is called
 *              with preemption disabled. The function should
 *              return a boolean value indicating whether to IPI
 *              the specified CPU.
 * @func:       The function to run on all applicable CPUs.
 *              This must be fast and non-blocking.
 * @info:       An arbitrary pointer to pass to both functions.
 * @wait:       If true, wait (atomically) until function has
 *              completed on other CPUs.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
                           void *info, bool wait, const struct cpumask *mask)
{
        unsigned int scf_flags = SCF_RUN_LOCAL;

        if (wait)
                scf_flags |= SCF_WAIT;

        preempt_disable();
        smp_call_function_many_cond(mask, func, info, scf_flags, cond_func);
        preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);

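/*
 * Usage sketch (illustrative): IPI only the CPUs whose per-CPU buffer
 * is dirty and wait for the flushes; both callbacks and the per-CPU
 * variable are hypothetical.
 *
 *      static bool my_cpu_is_dirty(int cpu, void *info)
 *      {
 *              return per_cpu(my_buf_dirty, cpu);
 *      }
 *
 *      on_each_cpu_cond_mask(my_cpu_is_dirty, my_flush_local, NULL,
 *                            true, cpu_online_mask);
 */
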
static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
        /* Make sure the change is visible before we kick the cpus */
        smp_mb();
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Wake every online CPU that is in an idle state, including CPUs that
 * are idle-polling; CPUs that are not idle are left alone.
 */
void wake_up_all_idle_cpus(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                preempt_disable();
                if (cpu != smp_processor_id() && cpu_online(cpu))
                        wake_up_if_idle(cpu);
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * struct smp_call_on_cpu_struct - Call a function on a specific CPU
 * @work: &work_struct
 * @done: &completion to signal
 * @func: function to call
 * @data: function's data argument
 * @ret: return value from @func
 * @cpu: target CPU (%-1 for any CPU)
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
        struct work_struct      work;
        struct completion       done;
        int                     (*func)(void *);
        void                    *data;
        int                     ret;
        int                     cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
        struct smp_call_on_cpu_struct *sscs;

        sscs = container_of(work, struct smp_call_on_cpu_struct, work);
        if (sscs->cpu >= 0)
                hypervisor_pin_vcpu(sscs->cpu);
        sscs->ret = sscs->func(sscs->data);
        if (sscs->cpu >= 0)
                hypervisor_pin_vcpu(-1);

        complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
        struct smp_call_on_cpu_struct sscs = {
                .done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
                .func = func,
                .data = par,
                .cpu  = phys ? cpu : -1,
        };

        INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

        if (cpu >= nr_cpu_ids || !cpu_online(cpu))
                return -ENXIO;

        queue_work_on(cpu, system_wq, &sscs.work);
        wait_for_completion(&sscs.done);

        return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
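
/*
 * Usage sketch (illustrative): unlike smp_call_function_single(), the
 * callback here runs from a workqueue, so it may sleep and can return
 * a value; my_slow_probe() and state are hypothetical.
 *
 *      static int my_slow_probe(void *data)
 *      {
 *              msleep(10);     // sleeping is fine in this context
 *              return 0;
 *      }
 *
 *      ret = smp_call_on_cpu(3, my_slow_probe, &state, false);
 */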