// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>
#include <linux/sched/clock.h>
#include <linux/nmi.h>
#include <linux/sched/debug.h>
#include <linux/jump_label.h>
#include <linux/string_choices.h>

#include <trace/events/ipi.h>
#define CREATE_TRACE_POINTS
#include <trace/events/csd.h>
#undef CREATE_TRACE_POINTS

#include "smpboot.h"
#include "sched/smp.h"

#define CSD_TYPE(_csd)	((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)

struct call_function_data {
	call_single_data_t	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static DEFINE_PER_CPU(atomic_t, trigger_backtrace) = ATOMIC_INIT(1);

static void __flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->csd = alloc_percpu(call_single_data_t);
	if (!cfd->csd) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->csd);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	__flush_smp_call_function_queue(false);
	irq_work_run();
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

static __always_inline void
send_call_function_single_ipi(int cpu)
{
	if (call_function_single_prep_ipi(cpu)) {
		trace_ipi_send_cpu(cpu, _RET_IP_,
				   generic_smp_call_function_single_interrupt);
		arch_send_call_function_single_ipi(cpu);
	}
}

static __always_inline void
send_call_function_ipi_mask(struct cpumask *mask)
{
	trace_ipi_send_cpumask(mask, _RET_IP_,
			       generic_smp_call_function_single_interrupt);
	arch_send_call_function_ipi_mask(mask);
}

static __always_inline void
csd_do_func(smp_call_func_t func, void *info, call_single_data_t *csd)
{
	trace_csd_function_entry(func, csd);
	func(info);
	trace_csd_function_exit(func, csd);
}

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG

static DEFINE_STATIC_KEY_MAYBE(CONFIG_CSD_LOCK_WAIT_DEBUG_DEFAULT, csdlock_debug_enabled);

/*
 * Parse the csdlock_debug= kernel boot parameter.
 *
 * If you need to restore the old "ext" value that once provided
 * additional debugging information, reapply the following commits:
 *
 * de7b09ef658d ("locking/csd_lock: Prepare more CSD lock debugging")
 * a5aabace5fb8 ("locking/csd_lock: Add more data to CSD lock debugging")
 */
static int __init csdlock_debug(char *str)
{
	int ret;
	unsigned int val = 0;

	ret = get_option(&str, &val);
	if (ret) {
		if (val)
			static_branch_enable(&csdlock_debug_enabled);
		else
			static_branch_disable(&csdlock_debug_enabled);
	}

	return 1;
}
__setup("csdlock_debug=", csdlock_debug);
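
/*
 * Example (editor's illustration, not part of the original file): on a
 * kernel built with CONFIG_CSD_LOCK_WAIT_DEBUG=y, the debugging can be
 * switched at boot time from the kernel command line:
 *
 *	csdlock_debug=1		enable CSD lock wait debugging
 *	csdlock_debug=0		disable it (useful when
 *				CONFIG_CSD_LOCK_WAIT_DEBUG_DEFAULT makes it
 *				default-on)
 */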

static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
static DEFINE_PER_CPU(void *, cur_csd_info);

static ulong csd_lock_timeout = 5000;	/* CSD lock timeout in milliseconds. */
module_param(csd_lock_timeout, ulong, 0644);
static int panic_on_ipistall;	/* CSD panic timeout in milliseconds, 300000 for five minutes. */
module_param(panic_on_ipistall, int, 0644);

static atomic_t csd_bug_count = ATOMIC_INIT(0);

/* Record current CSD work for current CPU, NULL to erase. */
static void __csd_lock_record(call_single_data_t *csd)
{
	if (!csd) {
		smp_mb(); /* NULL cur_csd after unlock. */
		__this_cpu_write(cur_csd, NULL);
		return;
	}
	__this_cpu_write(cur_csd_func, csd->func);
	__this_cpu_write(cur_csd_info, csd->info);
	smp_wmb(); /* func and info before csd. */
	__this_cpu_write(cur_csd, csd);
	smp_mb(); /* Update cur_csd before function call. */
		  /* Or before unlock, as the case may be. */
}

static __always_inline void csd_lock_record(call_single_data_t *csd)
{
	if (static_branch_unlikely(&csdlock_debug_enabled))
		__csd_lock_record(csd);
}

static int csd_lock_wait_getcpu(call_single_data_t *csd)
{
	unsigned int csd_type;

	csd_type = CSD_TYPE(csd);
	if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
		return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */
	return -1;
}

static atomic_t n_csd_lock_stuck;

/**
 * csd_lock_is_stuck - Has a CSD-lock acquisition been stuck too long?
 *
 * Returns @true if a CSD-lock acquisition is stuck and has been stuck
 * long enough for a "non-responsive CSD lock" message to be printed.
 */
bool csd_lock_is_stuck(void)
{
	return !!atomic_read(&n_csd_lock_stuck);
}

/*
 * Complain if too much time spent waiting. Note that only
 * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
 * so waiting on other types gets much less information.
 */
static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id, unsigned long *nmessages)
{
	int cpu = -1;
	int cpux;
	bool firsttime;
	u64 ts2, ts_delta;
	call_single_data_t *cpu_cur_csd;
	unsigned int flags = READ_ONCE(csd->node.u_flags);
	unsigned long long csd_lock_timeout_ns = csd_lock_timeout * NSEC_PER_MSEC;

	if (!(flags & CSD_FLAG_LOCK)) {
		if (!unlikely(*bug_id))
			return true;
		cpu = csd_lock_wait_getcpu(csd);
		pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
			 *bug_id, raw_smp_processor_id(), cpu);
		atomic_dec(&n_csd_lock_stuck);
		return true;
	}

	ts2 = ktime_get_mono_fast_ns();
	/* How long since we last checked for a stuck CSD lock. */
	ts_delta = ts2 - *ts1;
	if (likely(ts_delta <= csd_lock_timeout_ns * (*nmessages + 1) *
			       (!*nmessages ? 1 : (ilog2(num_online_cpus()) / 2 + 1)) ||
		   csd_lock_timeout_ns == 0))
		return false;

	if (ts0 > ts2) {
		/* Our own sched_clock went backward; don't blame another CPU. */
		ts_delta = ts0 - ts2;
		pr_alert("sched_clock on CPU %d went backward by %llu ns\n", raw_smp_processor_id(), ts_delta);
		*ts1 = ts2;
		return false;
	}

	firsttime = !*bug_id;
	if (firsttime)
		*bug_id = atomic_inc_return(&csd_bug_count);
	cpu = csd_lock_wait_getcpu(csd);
	if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
		cpux = 0;
	else
		cpux = cpu;
	cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
	/* How long since this CSD lock was stuck. */
	ts_delta = ts2 - ts0;
	pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %lld ns for CPU#%02d %pS(%ps).\n",
		 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), (s64)ts_delta,
		 cpu, csd->func, csd->info);
	(*nmessages)++;
	if (firsttime)
		atomic_inc(&n_csd_lock_stuck);
	/*
	 * If the CSD lock is still stuck after 5 minutes, it is unlikely
	 * to become unstuck. Use a signed comparison to avoid triggering
	 * on underflows when the TSC is out of sync between sockets.
	 */
	BUG_ON(panic_on_ipistall > 0 && (s64)ts_delta > ((s64)panic_on_ipistall * NSEC_PER_MSEC));
	if (cpu_cur_csd && csd != cpu_cur_csd) {
		pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
			 *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
			 READ_ONCE(per_cpu(cur_csd_info, cpux)));
	} else {
		pr_alert("\tcsd: CSD lock (#%d) %s.\n",
			 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
	}
	if (cpu >= 0) {
		if (atomic_cmpxchg_acquire(&per_cpu(trigger_backtrace, cpu), 1, 0))
			dump_cpu_task(cpu);
		if (!cpu_cur_csd) {
			pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
			arch_send_call_function_single_ipi(cpu);
		}
	}
	if (firsttime)
		dump_stack();
	*ts1 = ts2;

	return false;
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void __csd_lock_wait(call_single_data_t *csd)
{
	unsigned long nmessages = 0;
	int bug_id = 0;
	u64 ts0, ts1;

	ts1 = ts0 = ktime_get_mono_fast_ns();
	for (;;) {
		if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id, &nmessages))
			break;
		cpu_relax();
	}
	smp_acquire__after_ctrl_dep();
}

static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
	if (static_branch_unlikely(&csdlock_debug_enabled)) {
		__csd_lock_wait(csd);
		return;
	}

	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}
#else
static void csd_lock_record(call_single_data_t *csd)
{
}

static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}
#endif

static __always_inline void csd_lock(call_single_data_t *csd)
{
	csd_lock_wait(csd);
	csd->node.u_flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data_t structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(call_single_data_t *csd)
{
	WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->node.u_flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

void __smp_call_single_queue(int cpu, struct llist_node *node)
{
	/*
	 * We have to check the type of the CSD before queueing it, because
	 * once queued it can have its flags cleared by
	 * flush_smp_call_function_queue()
	 * even if we haven't sent the smp_call IPI yet (e.g. the stopper
	 * executes migration_cpu_stop() on the remote CPU).
	 */
	if (trace_csd_queue_cpu_enabled()) {
		call_single_data_t *csd;
		smp_call_func_t func;

		csd = container_of(node, call_single_data_t, node.llist);
		func = CSD_TYPE(csd) == CSD_TYPE_TTWU ?
			sched_ttwu_pending : csd->func;

		trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);
	}

	/*
	 * The list addition should be visible to the target CPU when it pops
	 * the head of the list to pull the entry off it in the IPI handler
	 * because of normal cache coherency rules implied by the underlying
	 * llist ops.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(node, &per_cpu(call_single_queue, cpu)))
		send_call_function_single_ipi(cpu);
}

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. The csd must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, call_single_data_t *csd)
{
	if (cpu == smp_processor_id()) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU.
		 */
		csd_lock_record(csd);
		csd_unlock(csd);
		local_irq_save(flags);
		csd_do_func(func, info, NULL);
		csd_lock_record(NULL);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	__smp_call_single_queue(cpu, &csd->node.llist);

	return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	__flush_smp_call_function_queue(true);
}

/**
 * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void __flush_smp_call_function_queue(bool warn_cpu_offline)
{
	call_single_data_t *csd, *csd_next;
	struct llist_node *entry, *prev;
	struct llist_head *head;
	static bool warned;
	atomic_t *tbt;

	lockdep_assert_irqs_disabled();

	/* Allow waiters to send backtrace NMI from here onwards */
	tbt = this_cpu_ptr(&trigger_backtrace);
	atomic_set_release(tbt, 1);

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && entry != NULL)) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, node.llist) {
			switch (CSD_TYPE(csd)) {
			case CSD_TYPE_ASYNC:
			case CSD_TYPE_SYNC:
			case CSD_TYPE_IRQ_WORK:
				pr_warn("IPI callback %pS sent to offline CPU\n",
					csd->func);
				break;

			case CSD_TYPE_TTWU:
				pr_warn("IPI task-wakeup sent to offline CPU\n");
				break;

			default:
				pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
					CSD_TYPE(csd));
				break;
			}
		}
	}

	/*
	 * First; run all SYNC callbacks, people are waiting for us.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
		/* Do we wait until *after* callback? */
		if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
			smp_call_func_t func = csd->func;
			void *info = csd->info;

			if (prev) {
				prev->next = &csd_next->node.llist;
			} else {
				entry = &csd_next->node.llist;
			}

			csd_lock_record(csd);
			csd_do_func(func, info, csd);
			csd_unlock(csd);
			csd_lock_record(NULL);
		} else {
			prev = &csd->node.llist;
		}
	}

	if (!entry)
		return;

	/*
	 * Second; run all !SYNC callbacks.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
		int type = CSD_TYPE(csd);

		if (type != CSD_TYPE_TTWU) {
			if (prev) {
				prev->next = &csd_next->node.llist;
			} else {
				entry = &csd_next->node.llist;
			}

			if (type == CSD_TYPE_ASYNC) {
				smp_call_func_t func = csd->func;
				void *info = csd->info;

				csd_lock_record(csd);
				csd_unlock(csd);
				csd_do_func(func, info, csd);
				csd_lock_record(NULL);
			} else if (type == CSD_TYPE_IRQ_WORK) {
				irq_work_single(csd);
			}

		} else {
			prev = &csd->node.llist;
		}
	}

	/*
	 * Third; only CSD_TYPE_TTWU is left, issue those.
	 */
	if (entry) {
		csd = llist_entry(entry, typeof(*csd), node.llist);
		csd_do_func(sched_ttwu_pending, entry, csd);
	}
}


/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *				   from task context (idle, migration thread)
 *
 * When TIF_POLLING_NRFLAG is supported and a CPU is in idle and has it
 * set, then remote CPUs can avoid sending IPIs and wake the idle CPU by
 * setting TIF_NEED_RESCHED. The idle task on the woken up CPU has to
 * handle queued SMP function calls before scheduling.
 *
 * The migration thread has to ensure that an eventually pending wakeup has
 * been handled before it migrates a task.
 */
void flush_smp_call_function_queue(void)
{
	unsigned int was_pending;
	unsigned long flags;

	if (llist_empty(this_cpu_ptr(&call_single_queue)))
		return;

	local_irq_save(flags);
	/* Get the already pending soft interrupts for RT enabled kernels */
	was_pending = local_softirq_pending();
	__flush_smp_call_function_queue(true);
	if (local_softirq_pending())
		do_softirq_post_smp_call_flush(was_pending);

	local_irq_restore(flags);
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run the function on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	call_single_data_t *csd;
	call_single_data_t csd_stack = {
		.node = { .u_flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC, },
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	csd->func = func;
	csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	csd->node.src = smp_processor_id();
	csd->node.dst = cpu;
#endif

	err = generic_exec_single(cpu, csd);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
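
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a typical synchronous cross-call from process context. read_cpu_id() and
 * example_read_cpu_id() are hypothetical names; only
 * smp_call_function_single() itself is from this file. The callback runs
 * with interrupts disabled on the target CPU, so it must be fast and must
 * not sleep.
 *
 *	static void read_cpu_id(void *info)
 *	{
 *		*(int *)info = raw_smp_processor_id();
 *	}
 *
 *	static int example_read_cpu_id(int cpu)
 *	{
 *		int id = -1;
 *		int err = smp_call_function_single(cpu, read_cpu_id, &id, 1);
 *
 *		return err ? err : id;
 *	}
 */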

/**
 * smp_call_function_single_async() - Run an asynchronous function on a
 *				      specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * If the function is called with one csd which has not yet been
 * processed by a previous call to smp_call_function_single_async(), the
 * function will return immediately with -EBUSY showing that the csd
 * object is still in progress.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 *
 * Return: %0 on success or negative errno value on error
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
	int err = 0;

	preempt_disable();

	if (csd->node.u_flags & CSD_FLAG_LOCK) {
		err = -EBUSY;
		goto out;
	}

	csd->node.u_flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd);

out:
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
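
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * an asynchronous kick using a caller-owned csd embedded in an object, as
 * the comment above requires. struct my_dev and its functions are
 * hypothetical; INIT_CSD() is from <linux/smp.h>. The csd must not be
 * reused before the previous call has been processed, which is exactly the
 * -EBUSY case above.
 *
 *	struct my_dev {
 *		call_single_data_t csd;
 *	};
 *
 *	static void my_dev_remote_kick(void *info)
 *	{
 *		// info points at the owning struct my_dev;
 *		// runs in IPI (interrupt) context on the target CPU.
 *	}
 *
 *	static void my_dev_init(struct my_dev *dev)
 *	{
 *		INIT_CSD(&dev->csd, my_dev_remote_kick, dev);
 *	}
 *
 *	static int my_dev_kick(struct my_dev *dev, int cpu)
 *	{
 *		return smp_call_function_single_async(cpu, &dev->csd);
 *	}
 */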

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
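
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * running a callback on whichever CPU of a mask is cheapest to reach,
 * preferring the current CPU per the selection order above. do_sample()
 * and sample_on_mask() are hypothetical names.
 *
 *	static void do_sample(void *info)
 *	{
 *		*(int *)info = raw_smp_processor_id();
 *	}
 *
 *	static int sample_on_mask(const struct cpumask *mask)
 *	{
 *		int where = -1;
 *		int err = smp_call_function_any(mask, do_sample, &where, 1);
 *
 *		return err ? err : where;
 *	}
 */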

/*
 * Flags to be used as scf_flags argument of smp_call_function_many_cond().
 *
 * %SCF_WAIT:		Wait until function execution is completed
 * %SCF_RUN_LOCAL:	Run also locally if local cpu is set in cpumask
 */
#define SCF_WAIT	(1U << 0)
#define SCF_RUN_LOCAL	(1U << 1)

static void smp_call_function_many_cond(const struct cpumask *mask,
					smp_call_func_t func, void *info,
					unsigned int scf_flags,
					smp_cond_func_t cond_func)
{
	int cpu, last_cpu, this_cpu = smp_processor_id();
	struct call_function_data *cfd;
	bool wait = scf_flags & SCF_WAIT;
	int nr_cpus = 0;
	bool run_remote = false;
	bool run_local = false;

	lockdep_assert_preemption_disabled();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	if (cpu_online(this_cpu) && !oops_in_progress &&
	    !early_boot_irqs_disabled)
		lockdep_assert_irqs_enabled();

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	/* Check if we need local execution. */
	if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask) &&
	    (!cond_func || cond_func(this_cpu, info)))
		run_local = true;

	/* Check if we need remote execution, i.e., any CPU excluding this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (cpu < nr_cpu_ids)
		run_remote = true;

	if (run_remote) {
		cfd = this_cpu_ptr(&cfd_data);
		cpumask_and(cfd->cpumask, mask, cpu_online_mask);
		__cpumask_clear_cpu(this_cpu, cfd->cpumask);

		cpumask_clear(cfd->cpumask_ipi);
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

			if (cond_func && !cond_func(cpu, info)) {
				__cpumask_clear_cpu(cpu, cfd->cpumask);
				continue;
			}

			csd_lock(csd);
			if (wait)
				csd->node.u_flags |= CSD_TYPE_SYNC;
			csd->func = func;
			csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
			csd->node.src = smp_processor_id();
			csd->node.dst = cpu;
#endif
			trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);

			if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
				__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
				nr_cpus++;
				last_cpu = cpu;
			}
		}

		/*
		 * Choose the most efficient way to send an IPI. Note that the
		 * number of CPUs might be zero due to concurrent changes to the
		 * provided mask.
		 */
		if (nr_cpus == 1)
			send_call_function_single_ipi(last_cpu);
		else if (likely(nr_cpus > 1))
			send_call_function_ipi_mask(cfd->cpumask_ipi);
	}

	if (run_local) {
		unsigned long flags;

		local_irq_save(flags);
		csd_do_func(func, info, NULL);
		local_irq_restore(flags);
	}

	if (run_remote && wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}

/**
 * smp_call_function_many(): Run a function on a set of CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: Bitmask that controls the operation. If %SCF_WAIT is set, wait
 *        (atomically) until function has completed on other CPUs. If
 *        %SCF_RUN_LOCAL is set, the function will also be run locally
 *        if the local CPU is set in the @cpumask.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	smp_call_function_many_cond(mask, func, info, wait * SCF_WAIT, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);
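
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * invalidating a hypothetical per-CPU cache on every other online CPU and
 * waiting for completion. Preemption must be disabled across the call, as
 * the comment above requires; invalidate_local_cache() is hypothetical.
 *
 *	static void invalidate_local_cache(void *info)
 *	{
 *		// Runs in IPI context on each selected CPU.
 *	}
 *
 *	static void invalidate_all_other_cpus(void)
 *	{
 *		preempt_disable();
 *		smp_call_function_many(cpu_online_mask, invalidate_local_cache,
 *				       NULL, true);
 *		preempt_enable();
 *	}
 */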

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);


/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak __init arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		set_nr_cpu_ids(nr_cpus);

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

#if (NR_CPUS > 1) && !defined(CONFIG_FORCE_NR_CPUS)
/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);
#endif

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	set_nr_cpu_ids(find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1);
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	bringup_nonboot_cpus(setup_max_cpus);

	num_nodes = num_online_nodes();
	num_cpus  = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, str_plural(num_nodes), num_cpus, str_plural(num_cpus));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}

/*
 * on_each_cpu_cond_mask(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @mask:	The set of CPUs to consider for the call.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	unsigned int scf_flags = SCF_RUN_LOCAL;

	if (wait)
		scf_flags |= SCF_WAIT;

	preempt_disable();
	smp_call_function_many_cond(mask, func, info, scf_flags, cond_func);
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);
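
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the usual "cond_func filters, func does the work" split, sending an IPI
 * only to CPUs whose per-CPU generation counter is stale. The per-CPU
 * variable and both callbacks are hypothetical.
 *
 *	static DEFINE_PER_CPU(unsigned long, seen_gen);
 *	static unsigned long cur_gen;
 *
 *	static bool gen_is_stale(int cpu, void *info)
 *	{
 *		return per_cpu(seen_gen, cpu) != *(unsigned long *)info;
 *	}
 *
 *	static void update_gen(void *info)
 *	{
 *		this_cpu_write(seen_gen, *(unsigned long *)info);
 *	}
 *
 *	static void sync_all_cpus(void)
 *	{
 *		on_each_cpu_cond_mask(gen_is_stale, update_gen, &cur_gen,
 *				      true, cpu_online_mask);
 *	}
 */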

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Wakes every CPU that is currently idle, including CPUs that are idle
 * polling; CPUs that are not idle are left alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		preempt_disable();
		if (cpu != smp_processor_id() && cpu_online(cpu))
			wake_up_if_idle(cpu);
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * struct smp_call_on_cpu_struct - Call a function on a specific CPU
 * @work: &work_struct
 * @done: &completion to signal
 * @func: function to call
 * @data: function's data argument
 * @ret: return value from @func
 * @cpu: target CPU (%-1 for any CPU)
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);
	destroy_work_on_stack(&sscs.work);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
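
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * unlike the IPI-based helpers above, smp_call_on_cpu() runs the function
 * from a kworker queued on the requested CPU, so it may sleep and its
 * return value is passed back to the caller. read_sensor() and
 * read_sensor_on_cpu() are hypothetical names.
 *
 *	static int read_sensor(void *arg)
 *	{
 *		// Process context on the target CPU; sleeping is allowed.
 *		return 42;
 *	}
 *
 *	static int read_sensor_on_cpu(unsigned int cpu)
 *	{
 *		return smp_call_on_cpu(cpu, read_sensor, NULL, false);
 *	}
 */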