kernel/smp.c (linux-block.git)
457c8996 1// SPDX-License-Identifier: GPL-2.0-only
3d442233
JA
2/*
3 * Generic helpers for smp ipi calls
4 *
5 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
3d442233 6 */
ca7dfdbb
ME
7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
47885016 10#include <linux/irq_work.h>
3d442233 11#include <linux/rcupdate.h>
59190f42 12#include <linux/rculist.h>
641cd4cf 13#include <linux/kernel.h>
9984de1a 14#include <linux/export.h>
0b13fda1
IM
15#include <linux/percpu.h>
16#include <linux/init.h>
f9d34595 17#include <linux/interrupt.h>
5a0e3ad6 18#include <linux/gfp.h>
3d442233 19#include <linux/smp.h>
8969a5ed 20#include <linux/cpu.h>
c6f4459f 21#include <linux/sched.h>
4c822698 22#include <linux/sched/idle.h>
47ae4b05 23#include <linux/hypervisor.h>
35feb604
PM
24#include <linux/sched/clock.h>
25#include <linux/nmi.h>
26#include <linux/sched/debug.h>
8d0968cc 27#include <linux/jump_label.h>
3d442233 28
3bb5d2ee 29#include "smpboot.h"
1f8db415 30#include "sched/smp.h"
3bb5d2ee 31
545b8c8d 32#define CSD_TYPE(_csd) ((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)
3d442233 33
a5aabace
JG
34#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
35union cfd_seq_cnt {
36 u64 val;
37 struct {
38 u64 src:16;
39 u64 dst:16;
40#define CFD_SEQ_NOCPU 0xffff
41 u64 type:4;
42#define CFD_SEQ_QUEUE 0
43#define CFD_SEQ_IPI 1
44#define CFD_SEQ_NOIPI 2
45#define CFD_SEQ_PING 3
46#define CFD_SEQ_PINGED 4
47#define CFD_SEQ_HANDLE 5
48#define CFD_SEQ_DEQUEUE 6
49#define CFD_SEQ_IDLE 7
50#define CFD_SEQ_GOTIPI 8
51#define CFD_SEQ_HDLEND 9
52 u64 cnt:28;
53 } u;
54};
55
56static char *seq_type[] = {
57 [CFD_SEQ_QUEUE] = "queue",
58 [CFD_SEQ_IPI] = "ipi",
59 [CFD_SEQ_NOIPI] = "noipi",
60 [CFD_SEQ_PING] = "ping",
61 [CFD_SEQ_PINGED] = "pinged",
62 [CFD_SEQ_HANDLE] = "handle",
63 [CFD_SEQ_DEQUEUE] = "dequeue (src CPU 0 == empty)",
64 [CFD_SEQ_IDLE] = "idle",
65 [CFD_SEQ_GOTIPI] = "gotipi",
66 [CFD_SEQ_HDLEND] = "hdlend (src CPU 0 == early)",
67};
68
69struct cfd_seq_local {
70 u64 ping;
71 u64 pinged;
72 u64 handle;
73 u64 dequeue;
74 u64 idle;
75 u64 gotipi;
76 u64 hdlend;
77};
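/*
 * Editorial note, not part of kernel/smp.c: the union above packs a source
 * CPU, destination CPU, event type and a 28-bit counter into a single u64,
 * so one READ_ONCE()/cmpxchg() can snapshot or update the whole record.  A
 * hypothetical decode helper for a raw value might look like this sketch.
 */
static void __maybe_unused example_decode_cfd_seq(u64 val)
{
	union cfd_seq_cnt seq = { .val = val };

	/* Same field layout the debug code prints below. */
	pr_info("csd: cnt(%07x): %04x->%04x type=%u\n",
		(unsigned int)seq.u.cnt, (unsigned int)seq.u.src,
		(unsigned int)seq.u.dst, (unsigned int)seq.u.type);
}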
78#endif
79
de7b09ef
JG
80struct cfd_percpu {
81 call_single_data_t csd;
a5aabace
JG
82#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
83 u64 seq_queue;
84 u64 seq_ipi;
85 u64 seq_noipi;
86#endif
de7b09ef
JG
87};
88
3d442233 89struct call_function_data {
de7b09ef 90 struct cfd_percpu __percpu *pcpu;
0b13fda1 91 cpumask_var_t cpumask;
3fc5b3b6 92 cpumask_var_t cpumask_ipi;
3d442233
JA
93};
94
a22793c7 95static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);
e03bcb68 96
6897fc22 97static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
8969a5ed 98
16bf5a5e 99static void __flush_smp_call_function_queue(bool warn_cpu_offline);
8d056c48 100
31487f83 101int smpcfd_prepare_cpu(unsigned int cpu)
8969a5ed 102{
8969a5ed
PZ
103 struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
104
31487f83
RW
105 if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
106 cpu_to_node(cpu)))
107 return -ENOMEM;
3fc5b3b6
AL
108 if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
109 cpu_to_node(cpu))) {
110 free_cpumask_var(cfd->cpumask);
111 return -ENOMEM;
112 }
de7b09ef
JG
113 cfd->pcpu = alloc_percpu(struct cfd_percpu);
114 if (!cfd->pcpu) {
8969a5ed 115 free_cpumask_var(cfd->cpumask);
3fc5b3b6 116 free_cpumask_var(cfd->cpumask_ipi);
31487f83
RW
117 return -ENOMEM;
118 }
119
120 return 0;
8969a5ed
PZ
121}
122
31487f83
RW
123int smpcfd_dead_cpu(unsigned int cpu)
124{
125 struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
126
127 free_cpumask_var(cfd->cpumask);
3fc5b3b6 128 free_cpumask_var(cfd->cpumask_ipi);
de7b09ef 129 free_percpu(cfd->pcpu);
31487f83
RW
130 return 0;
131}
132
133int smpcfd_dying_cpu(unsigned int cpu)
134{
135 /*
136 * The IPIs for the smp-call-function callbacks queued by other
137 * CPUs might arrive late, either due to hardware latencies or
138 * because this CPU disabled interrupts (inside stop-machine)
139 * before the IPIs were sent. So flush out any pending callbacks
140 * explicitly (without waiting for the IPIs to arrive), to
141 * ensure that the outgoing CPU doesn't go offline with work
142 * still pending.
143 */
16bf5a5e 144 __flush_smp_call_function_queue(false);
afaa653c 145 irq_work_run();
31487f83
RW
146 return 0;
147}
8969a5ed 148
d8ad7d11 149void __init call_function_init(void)
3d442233
JA
150{
151 int i;
152
6897fc22
CH
153 for_each_possible_cpu(i)
154 init_llist_head(&per_cpu(call_single_queue, i));
8969a5ed 155
31487f83 156 smpcfd_prepare_cpu(smp_processor_id());
3d442233
JA
157}
158
35feb604
PM
159#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
160
8d0968cc 161static DEFINE_STATIC_KEY_FALSE(csdlock_debug_enabled);
a5aabace 162static DEFINE_STATIC_KEY_FALSE(csdlock_debug_extended);
8d0968cc
JG
163
164static int __init csdlock_debug(char *str)
165{
166 unsigned int val = 0;
167
a5aabace
JG
168 if (str && !strcmp(str, "ext")) {
169 val = 1;
170 static_branch_enable(&csdlock_debug_extended);
171 } else
172 get_option(&str, &val);
173
8d0968cc
JG
174 if (val)
175 static_branch_enable(&csdlock_debug_enabled);
176
9c9b26b0 177 return 1;
8d0968cc 178}
9c9b26b0 179__setup("csdlock_debug=", csdlock_debug);
8d0968cc 180
35feb604
PM
181static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
182static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
183static DEFINE_PER_CPU(void *, cur_csd_info);
a5aabace 184static DEFINE_PER_CPU(struct cfd_seq_local, cfd_seq_local);
35feb604 185
3791a223
PM
186static ulong csd_lock_timeout = 5000; /* CSD lock timeout in milliseconds. */
187module_param(csd_lock_timeout, ulong, 0444);
188
2b722160 189static atomic_t csd_bug_count = ATOMIC_INIT(0);
a5aabace
JG
190static u64 cfd_seq;
191
192#define CFD_SEQ(s, d, t, c) \
193 (union cfd_seq_cnt){ .u.src = s, .u.dst = d, .u.type = t, .u.cnt = c }
194
195static u64 cfd_seq_inc(unsigned int src, unsigned int dst, unsigned int type)
196{
197 union cfd_seq_cnt new, old;
198
199 new = CFD_SEQ(src, dst, type, 0);
200
201 do {
202 old.val = READ_ONCE(cfd_seq);
203 new.u.cnt = old.u.cnt + 1;
204 } while (cmpxchg(&cfd_seq, old.val, new.val) != old.val);
205
206 return old.val;
207}
208
209#define cfd_seq_store(var, src, dst, type) \
210 do { \
211 if (static_branch_unlikely(&csdlock_debug_extended)) \
212 var = cfd_seq_inc(src, dst, type); \
213 } while (0)
35feb604
PM
214
215/* Record current CSD work for current CPU, NULL to erase. */
1139aeb1 216static void __csd_lock_record(struct __call_single_data *csd)
35feb604
PM
217{
218 if (!csd) {
219 smp_mb(); /* NULL cur_csd after unlock. */
220 __this_cpu_write(cur_csd, NULL);
221 return;
222 }
223 __this_cpu_write(cur_csd_func, csd->func);
224 __this_cpu_write(cur_csd_info, csd->info);
225 smp_wmb(); /* func and info before csd. */
226 __this_cpu_write(cur_csd, csd);
227 smp_mb(); /* Update cur_csd before function call. */
228 /* Or before unlock, as the case may be. */
229}
230
1139aeb1 231static __always_inline void csd_lock_record(struct __call_single_data *csd)
8d0968cc
JG
232{
233 if (static_branch_unlikely(&csdlock_debug_enabled))
234 __csd_lock_record(csd);
235}
236
1139aeb1 237static int csd_lock_wait_getcpu(struct __call_single_data *csd)
35feb604
PM
238{
239 unsigned int csd_type;
240
241 csd_type = CSD_TYPE(csd);
242 if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
a787bdaf 243 return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */
35feb604
PM
244 return -1;
245}
246
a5aabace
JG
247static void cfd_seq_data_add(u64 val, unsigned int src, unsigned int dst,
248 unsigned int type, union cfd_seq_cnt *data,
249 unsigned int *n_data, unsigned int now)
250{
251 union cfd_seq_cnt new[2];
252 unsigned int i, j, k;
253
254 new[0].val = val;
255 new[1] = CFD_SEQ(src, dst, type, new[0].u.cnt + 1);
256
257 for (i = 0; i < 2; i++) {
258 if (new[i].u.cnt <= now)
259 new[i].u.cnt |= 0x80000000U;
260 for (j = 0; j < *n_data; j++) {
261 if (new[i].u.cnt == data[j].u.cnt) {
262 /* Direct read value trumps generated one. */
263 if (i == 0)
264 data[j].val = new[i].val;
265 break;
266 }
267 if (new[i].u.cnt < data[j].u.cnt) {
268 for (k = *n_data; k > j; k--)
269 data[k].val = data[k - 1].val;
270 data[j].val = new[i].val;
271 (*n_data)++;
272 break;
273 }
274 }
275 if (j == *n_data) {
276 data[j].val = new[i].val;
277 (*n_data)++;
278 }
279 }
280}
281
282static const char *csd_lock_get_type(unsigned int type)
283{
284 return (type >= ARRAY_SIZE(seq_type)) ? "?" : seq_type[type];
285}
286
1139aeb1 287static void csd_lock_print_extended(struct __call_single_data *csd, int cpu)
a5aabace
JG
288{
289 struct cfd_seq_local *seq = &per_cpu(cfd_seq_local, cpu);
290 unsigned int srccpu = csd->node.src;
291 struct call_function_data *cfd = per_cpu_ptr(&cfd_data, srccpu);
292 struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
293 unsigned int now;
294 union cfd_seq_cnt data[2 * ARRAY_SIZE(seq_type)];
295 unsigned int n_data = 0, i;
296
297 data[0].val = READ_ONCE(cfd_seq);
298 now = data[0].u.cnt;
299
300 cfd_seq_data_add(pcpu->seq_queue, srccpu, cpu, CFD_SEQ_QUEUE, data, &n_data, now);
301 cfd_seq_data_add(pcpu->seq_ipi, srccpu, cpu, CFD_SEQ_IPI, data, &n_data, now);
302 cfd_seq_data_add(pcpu->seq_noipi, srccpu, cpu, CFD_SEQ_NOIPI, data, &n_data, now);
303
304 cfd_seq_data_add(per_cpu(cfd_seq_local.ping, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PING, data, &n_data, now);
305 cfd_seq_data_add(per_cpu(cfd_seq_local.pinged, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED, data, &n_data, now);
306
307 cfd_seq_data_add(seq->idle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_IDLE, data, &n_data, now);
308 cfd_seq_data_add(seq->gotipi, CFD_SEQ_NOCPU, cpu, CFD_SEQ_GOTIPI, data, &n_data, now);
309 cfd_seq_data_add(seq->handle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HANDLE, data, &n_data, now);
310 cfd_seq_data_add(seq->dequeue, CFD_SEQ_NOCPU, cpu, CFD_SEQ_DEQUEUE, data, &n_data, now);
311 cfd_seq_data_add(seq->hdlend, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HDLEND, data, &n_data, now);
312
313 for (i = 0; i < n_data; i++) {
314 pr_alert("\tcsd: cnt(%07x): %04x->%04x %s\n",
315 data[i].u.cnt & ~0x80000000U, data[i].u.src,
316 data[i].u.dst, csd_lock_get_type(data[i].u.type));
317 }
318 pr_alert("\tcsd: cnt now: %07x\n", now);
319}
320
35feb604
PM
321/*
322 * Complain if too much time spent waiting. Note that only
323 * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
324 * so waiting on other types gets much less information.
325 */
1139aeb1 326static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
35feb604
PM
327{
328 int cpu = -1;
329 int cpux;
330 bool firsttime;
331 u64 ts2, ts_delta;
332 call_single_data_t *cpu_cur_csd;
545b8c8d 333 unsigned int flags = READ_ONCE(csd->node.u_flags);
3791a223 334 unsigned long long csd_lock_timeout_ns = csd_lock_timeout * NSEC_PER_MSEC;
35feb604
PM
335
336 if (!(flags & CSD_FLAG_LOCK)) {
337 if (!unlikely(*bug_id))
338 return true;
339 cpu = csd_lock_wait_getcpu(csd);
340 pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
341 *bug_id, raw_smp_processor_id(), cpu);
342 return true;
343 }
344
345 ts2 = sched_clock();
346 ts_delta = ts2 - *ts1;
3791a223 347 if (likely(ts_delta <= csd_lock_timeout_ns || csd_lock_timeout_ns == 0))
35feb604
PM
348 return false;
349
350 firsttime = !*bug_id;
351 if (firsttime)
352 *bug_id = atomic_inc_return(&csd_bug_count);
353 cpu = csd_lock_wait_getcpu(csd);
354 if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
355 cpux = 0;
356 else
357 cpux = cpu;
358 cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
359 pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
360 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0,
361 cpu, csd->func, csd->info);
362 if (cpu_cur_csd && csd != cpu_cur_csd) {
363 pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
364 *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
365 READ_ONCE(per_cpu(cur_csd_info, cpux)));
366 } else {
367 pr_alert("\tcsd: CSD lock (#%d) %s.\n",
368 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
369 }
370 if (cpu >= 0) {
a5aabace
JG
371 if (static_branch_unlikely(&csdlock_debug_extended))
372 csd_lock_print_extended(csd, cpu);
e73dfe30 373 dump_cpu_task(cpu);
35feb604
PM
374 if (!cpu_cur_csd) {
375 pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
376 arch_send_call_function_single_ipi(cpu);
377 }
378 }
379 dump_stack();
380 *ts1 = ts2;
381
382 return false;
383}
384
8969a5ed
PZ
385/*
386 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
387 *
0b13fda1
IM
388 * For non-synchronous ipi calls the csd can still be in use by the
 389 * previous function call. For multi-CPU calls it's even more interesting
390 * as we'll have to ensure no other cpu is observing our csd.
8969a5ed 391 */
1139aeb1 392static void __csd_lock_wait(struct __call_single_data *csd)
35feb604
PM
393{
394 int bug_id = 0;
395 u64 ts0, ts1;
396
397 ts1 = ts0 = sched_clock();
398 for (;;) {
399 if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id))
400 break;
401 cpu_relax();
402 }
403 smp_acquire__after_ctrl_dep();
404}
405
1139aeb1 406static __always_inline void csd_lock_wait(struct __call_single_data *csd)
8d0968cc
JG
407{
408 if (static_branch_unlikely(&csdlock_debug_enabled)) {
409 __csd_lock_wait(csd);
410 return;
411 }
412
413 smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
414}
a5aabace
JG
415
416static void __smp_call_single_queue_debug(int cpu, struct llist_node *node)
417{
418 unsigned int this_cpu = smp_processor_id();
419 struct cfd_seq_local *seq = this_cpu_ptr(&cfd_seq_local);
420 struct call_function_data *cfd = this_cpu_ptr(&cfd_data);
421 struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
422
423 cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
424 if (llist_add(node, &per_cpu(call_single_queue, cpu))) {
425 cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
426 cfd_seq_store(seq->ping, this_cpu, cpu, CFD_SEQ_PING);
427 send_call_function_single_ipi(cpu);
428 cfd_seq_store(seq->pinged, this_cpu, cpu, CFD_SEQ_PINGED);
429 } else {
430 cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
431 }
432}
35feb604 433#else
a5aabace
JG
434#define cfd_seq_store(var, src, dst, type)
435
1139aeb1 436static void csd_lock_record(struct __call_single_data *csd)
35feb604
PM
437{
438}
439
1139aeb1 440static __always_inline void csd_lock_wait(struct __call_single_data *csd)
8969a5ed 441{
545b8c8d 442 smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
6e275637 443}
35feb604 444#endif
6e275637 445
1139aeb1 446static __always_inline void csd_lock(struct __call_single_data *csd)
6e275637 447{
e1d12f32 448 csd_lock_wait(csd);
545b8c8d 449 csd->node.u_flags |= CSD_FLAG_LOCK;
8969a5ed
PZ
450
451 /*
0b13fda1
IM
 452 * prevent the CPU from reordering the above assignment
453 * to ->flags with any subsequent assignments to other
966a9671 454 * fields of the specified call_single_data_t structure:
8969a5ed 455 */
8053871d 456 smp_wmb();
8969a5ed
PZ
457}
458
1139aeb1 459static __always_inline void csd_unlock(struct __call_single_data *csd)
8969a5ed 460{
545b8c8d 461 WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));
0b13fda1 462
8969a5ed 463 /*
0b13fda1 464 * ensure we're all done before releasing data:
8969a5ed 465 */
545b8c8d 466 smp_store_release(&csd->node.u_flags, 0);
3d442233
JA
467}
468
966a9671 469static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
8b28499a 470
4b44a21d
PZ
471void __smp_call_single_queue(int cpu, struct llist_node *node)
472{
a5aabace
JG
473#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
474 if (static_branch_unlikely(&csdlock_debug_extended)) {
475 unsigned int type;
476
477 type = CSD_TYPE(container_of(node, call_single_data_t,
478 node.llist));
479 if (type == CSD_TYPE_SYNC || type == CSD_TYPE_ASYNC) {
480 __smp_call_single_queue_debug(cpu, node);
481 return;
482 }
483 }
484#endif
485
4b44a21d
PZ
486 /*
487 * The list addition should be visible before sending the IPI
488 * handler locks the list to pull the entry off it because of
489 * normal cache coherency rules implied by spinlocks.
490 *
491 * If IPIs can go out of order to the cache coherency protocol
492 * in an architecture, sufficient synchronisation should be added
493 * to arch code to make it appear to obey cache coherency WRT
494 * locking and barrier primitives. Generic code isn't really
495 * equipped to do the right thing...
496 */
497 if (llist_add(node, &per_cpu(call_single_queue, cpu)))
498 send_call_function_single_ipi(cpu);
499}
500
3d442233 501/*
966a9671 502 * Insert a previously allocated call_single_data_t element
0b13fda1
IM
 503 * for execution on the given CPU. The csd must already have
504 * ->func, ->info, and ->flags set.
3d442233 505 */
1139aeb1 506static int generic_exec_single(int cpu, struct __call_single_data *csd)
3d442233 507{
8b28499a 508 if (cpu == smp_processor_id()) {
4b44a21d
PZ
509 smp_call_func_t func = csd->func;
510 void *info = csd->info;
8053871d
LT
511 unsigned long flags;
512
513 /*
514 * We can unlock early even for the synchronous on-stack case,
515 * since we're doing this from the same CPU..
516 */
35feb604 517 csd_lock_record(csd);
8053871d 518 csd_unlock(csd);
8b28499a
FW
519 local_irq_save(flags);
520 func(info);
35feb604 521 csd_lock_record(NULL);
8b28499a
FW
522 local_irq_restore(flags);
523 return 0;
524 }
525
5224b961
LT
526 if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
527 csd_unlock(csd);
8b28499a 528 return -ENXIO;
5224b961 529 }
8b28499a 530
545b8c8d 531 __smp_call_single_queue(cpu, &csd->node.llist);
3d442233 532
8b28499a 533 return 0;
3d442233
JA
534}
535
8d056c48
SB
536/**
537 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
538 *
539 * Invoked by arch to handle an IPI for call function single.
540 * Must be called with interrupts disabled.
3d442233
JA
541 */
542void generic_smp_call_function_single_interrupt(void)
543{
a5aabace
JG
544 cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->gotipi, CFD_SEQ_NOCPU,
545 smp_processor_id(), CFD_SEQ_GOTIPI);
16bf5a5e 546 __flush_smp_call_function_queue(true);
8d056c48
SB
547}
548
549/**
16bf5a5e 550 * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks
8d056c48
SB
551 *
552 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
553 * offline CPU. Skip this check if set to 'false'.
554 *
555 * Flush any pending smp-call-function callbacks queued on this CPU. This is
556 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
557 * to ensure that all pending IPI callbacks are run before it goes completely
558 * offline.
559 *
560 * Loop through the call_single_queue and run all the queued callbacks.
561 * Must be called with interrupts disabled.
562 */
16bf5a5e 563static void __flush_smp_call_function_queue(bool warn_cpu_offline)
8d056c48 564{
966a9671 565 call_single_data_t *csd, *csd_next;
52103be0
PZ
566 struct llist_node *entry, *prev;
567 struct llist_head *head;
a219ccf4
SB
568 static bool warned;
569
83efcbd0 570 lockdep_assert_irqs_disabled();
8d056c48 571
bb964a92 572 head = this_cpu_ptr(&call_single_queue);
a5aabace
JG
573 cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->handle, CFD_SEQ_NOCPU,
574 smp_processor_id(), CFD_SEQ_HANDLE);
8d056c48 575 entry = llist_del_all(head);
a5aabace
JG
576 cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->dequeue,
577 /* Special meaning of source cpu: 0 == queue empty */
578 entry ? CFD_SEQ_NOCPU : 0,
579 smp_processor_id(), CFD_SEQ_DEQUEUE);
a219ccf4 580 entry = llist_reverse_order(entry);
3d442233 581
8d056c48
SB
582 /* There shouldn't be any pending callbacks on an offline CPU. */
583 if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
9e949a38 584 !warned && entry != NULL)) {
a219ccf4
SB
585 warned = true;
586 WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
587
588 /*
589 * We don't have to use the _safe() variant here
590 * because we are not invoking the IPI handlers yet.
591 */
545b8c8d 592 llist_for_each_entry(csd, entry, node.llist) {
4b44a21d
PZ
593 switch (CSD_TYPE(csd)) {
594 case CSD_TYPE_ASYNC:
595 case CSD_TYPE_SYNC:
596 case CSD_TYPE_IRQ_WORK:
597 pr_warn("IPI callback %pS sent to offline CPU\n",
598 csd->func);
599 break;
600
a1488664
PZ
601 case CSD_TYPE_TTWU:
602 pr_warn("IPI task-wakeup sent to offline CPU\n");
603 break;
604
4b44a21d
PZ
605 default:
606 pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
607 CSD_TYPE(csd));
608 break;
609 }
610 }
a219ccf4 611 }
3d442233 612
52103be0
PZ
613 /*
614 * First; run all SYNC callbacks, people are waiting for us.
615 */
616 prev = NULL;
545b8c8d 617 llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
8053871d 618 /* Do we wait until *after* callback? */
4b44a21d
PZ
619 if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
620 smp_call_func_t func = csd->func;
621 void *info = csd->info;
622
52103be0 623 if (prev) {
545b8c8d 624 prev->next = &csd_next->node.llist;
52103be0 625 } else {
545b8c8d 626 entry = &csd_next->node.llist;
52103be0 627 }
4b44a21d 628
35feb604 629 csd_lock_record(csd);
8053871d
LT
630 func(info);
631 csd_unlock(csd);
35feb604 632 csd_lock_record(NULL);
8053871d 633 } else {
545b8c8d 634 prev = &csd->node.llist;
8053871d 635 }
3d442233 636 }
47885016 637
a5aabace
JG
638 if (!entry) {
639 cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend,
640 0, smp_processor_id(),
641 CFD_SEQ_HDLEND);
a1488664 642 return;
a5aabace 643 }
a1488664 644
47885016 645 /*
52103be0 646 * Second; run all !SYNC callbacks.
47885016 647 */
a1488664 648 prev = NULL;
545b8c8d 649 llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
4b44a21d 650 int type = CSD_TYPE(csd);
52103be0 651
a1488664
PZ
652 if (type != CSD_TYPE_TTWU) {
653 if (prev) {
545b8c8d 654 prev->next = &csd_next->node.llist;
a1488664 655 } else {
545b8c8d 656 entry = &csd_next->node.llist;
a1488664 657 }
4b44a21d 658
a1488664
PZ
659 if (type == CSD_TYPE_ASYNC) {
660 smp_call_func_t func = csd->func;
661 void *info = csd->info;
662
35feb604 663 csd_lock_record(csd);
a1488664
PZ
664 csd_unlock(csd);
665 func(info);
35feb604 666 csd_lock_record(NULL);
a1488664
PZ
667 } else if (type == CSD_TYPE_IRQ_WORK) {
668 irq_work_single(csd);
669 }
670
671 } else {
545b8c8d 672 prev = &csd->node.llist;
4b44a21d 673 }
52103be0 674 }
a1488664
PZ
675
676 /*
677 * Third; only CSD_TYPE_TTWU is left, issue those.
678 */
679 if (entry)
680 sched_ttwu_pending(entry);
a5aabace
JG
681
682 cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend, CFD_SEQ_NOCPU,
683 smp_processor_id(), CFD_SEQ_HDLEND);
3d442233
JA
684}
685
16bf5a5e
TG
686
687/**
688 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
689 * from task context (idle, migration thread)
690 *
691 * When TIF_POLLING_NRFLAG is supported and a CPU is in idle and has it
692 * set, then remote CPUs can avoid sending IPIs and wake the idle CPU by
693 * setting TIF_NEED_RESCHED. The idle task on the woken up CPU has to
694 * handle queued SMP function calls before scheduling.
695 *
696 * The migration thread has to ensure that an eventually pending wakeup has
697 * been handled before it migrates a task.
698 */
699void flush_smp_call_function_queue(void)
b2a02fc4 700{
1a90bfd2 701 unsigned int was_pending;
b2a02fc4
PZ
702 unsigned long flags;
703
704 if (llist_empty(this_cpu_ptr(&call_single_queue)))
705 return;
706
a5aabace
JG
707 cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->idle, CFD_SEQ_NOCPU,
708 smp_processor_id(), CFD_SEQ_IDLE);
b2a02fc4 709 local_irq_save(flags);
1a90bfd2
SAS
710 /* Get the already pending soft interrupts for RT enabled kernels */
711 was_pending = local_softirq_pending();
16bf5a5e 712 __flush_smp_call_function_queue(true);
f9d34595 713 if (local_softirq_pending())
1a90bfd2 714 do_softirq_post_smp_call_flush(was_pending);
f9d34595 715
b2a02fc4 716 local_irq_restore(flags);
3d442233
JA
717}
718
719/*
720 * smp_call_function_single - Run a function on a specific CPU
721 * @func: The function to run. This must be fast and non-blocking.
722 * @info: An arbitrary pointer to pass to the function.
3d442233
JA
723 * @wait: If true, wait until function has completed on other CPUs.
724 *
72f279b2 725 * Returns 0 on success, else a negative status code.
3d442233 726 */
3a5f65df 727int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
8691e5a8 728 int wait)
3d442233 729{
966a9671
YH
730 call_single_data_t *csd;
731 call_single_data_t csd_stack = {
545b8c8d 732 .node = { .u_flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC, },
966a9671 733 };
0b13fda1 734 int this_cpu;
8b28499a 735 int err;
3d442233 736
0b13fda1
IM
737 /*
738 * prevent preemption and reschedule on another processor,
739 * as well as CPU removal
740 */
741 this_cpu = get_cpu();
742
269c861b
SS
743 /*
744 * Can deadlock when called with interrupts disabled.
 745 * We allow CPUs that are not yet online though, as no one else can
 746 * send an smp-call-function interrupt to this CPU, and as such deadlocks
747 * can't happen.
748 */
749 WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
750 && !oops_in_progress);
3d442233 751
19dbdcb8
PZ
752 /*
753 * When @wait we can deadlock when we interrupt between llist_add() and
754 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
755 * csd_lock() on because the interrupt context uses the same csd
756 * storage.
757 */
758 WARN_ON_ONCE(!in_task());
759
8053871d
LT
760 csd = &csd_stack;
761 if (!wait) {
762 csd = this_cpu_ptr(&csd_data);
763 csd_lock(csd);
764 }
765
4b44a21d
PZ
766 csd->func = func;
767 csd->info = info;
35feb604 768#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
545b8c8d
PZ
769 csd->node.src = smp_processor_id();
770 csd->node.dst = cpu;
e48c15b7 771#endif
4b44a21d
PZ
772
773 err = generic_exec_single(cpu, csd);
8053871d
LT
774
775 if (wait)
776 csd_lock_wait(csd);
3d442233
JA
777
778 put_cpu();
0b13fda1 779
f73be6de 780 return err;
3d442233
JA
781}
782EXPORT_SYMBOL(smp_call_function_single);
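/*
 * Editorial note, not part of kernel/smp.c: a minimal usage sketch for
 * smp_call_function_single().  The callback runs on the target CPU with
 * interrupts disabled, so it must be fast and non-blocking; with wait == 1
 * the call returns only after the callback has completed.  read_remote_cpu()
 * and example_query_cpu() are hypothetical names used for illustration only.
 */
static void read_remote_cpu(void *info)
{
	*(int *)info = raw_smp_processor_id();	/* executes on the target CPU */
}

static int example_query_cpu(int cpu)
{
	int observed = -1;
	int err;

	err = smp_call_function_single(cpu, read_remote_cpu, &observed, 1);
	return err ? err : observed;
}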
783
d7877c03 784/**
49b3bd21 785 * smp_call_function_single_async() - Run an asynchronous function on a
c46fff2a 786 * specific CPU.
d7877c03
FW
787 * @cpu: The CPU to run on.
788 * @csd: Pre-allocated and setup data structure
d7877c03 789 *
c46fff2a
FW
 790 * Like smp_call_function_single(), but the call is asynchronous and
791 * can thus be done from contexts with disabled interrupts.
792 *
 793 * The caller passes its own pre-allocated data structure
 794 * (i.e., embedded in an object) and is responsible for synchronizing it
795 * such that the IPIs performed on the @csd are strictly serialized.
796 *
5a18ceca
PX
 797 * If the function is called with a csd which has not yet been
 798 * processed by a previous call to smp_call_function_single_async(), the
 799 * function will return immediately with -EBUSY to indicate that the csd
 800 * object is still being processed.
801 *
c46fff2a
FW
802 * NOTE: Be careful, there is unfortunately no current debugging facility to
803 * validate the correctness of this serialization.
49b3bd21
RD
804 *
805 * Return: %0 on success or negative errno value on error
d7877c03 806 */
1139aeb1 807int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
d7877c03
FW
808{
809 int err = 0;
d7877c03 810
fce8ad15 811 preempt_disable();
8053871d 812
545b8c8d 813 if (csd->node.u_flags & CSD_FLAG_LOCK) {
5a18ceca
PX
814 err = -EBUSY;
815 goto out;
816 }
8053871d 817
545b8c8d 818 csd->node.u_flags = CSD_FLAG_LOCK;
8053871d
LT
819 smp_wmb();
820
4b44a21d 821 err = generic_exec_single(cpu, csd);
5a18ceca
PX
822
823out:
fce8ad15 824 preempt_enable();
d7877c03
FW
825
826 return err;
827}
c46fff2a 828EXPORT_SYMBOL_GPL(smp_call_function_single_async);
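/*
 * Editorial note, not part of kernel/smp.c: a sketch of the asynchronous
 * pattern described above, with the csd embedded in a caller-owned object.
 * struct example_dev, example_dev_poke() and example_dev_kick() are
 * hypothetical.  The caller must serialize uses of its csd itself; a second
 * call while the previous one is still pending returns -EBUSY.
 */
struct example_dev {
	call_single_data_t csd;		/* pre-allocated, lives as long as the object */
	int target_cpu;
};

static void example_dev_poke(void *info)
{
	/* runs on dev->target_cpu, in interrupt context */
}

static int example_dev_kick(struct example_dev *dev)
{
	dev->csd.func = example_dev_poke;
	dev->csd.info = dev;

	/* Does not wait, and may be called with interrupts disabled. */
	return smp_call_function_single_async(dev->target_cpu, &dev->csd);
}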
d7877c03 829
2ea6dec4
RR
830/*
831 * smp_call_function_any - Run a function on any of the given cpus
832 * @mask: The mask of cpus it can run on.
833 * @func: The function to run. This must be fast and non-blocking.
834 * @info: An arbitrary pointer to pass to the function.
835 * @wait: If true, wait until function has completed.
836 *
837 * Returns 0 on success, else a negative status code (if no cpus were online).
2ea6dec4
RR
838 *
839 * Selection preference:
840 * 1) current cpu if in @mask
841 * 2) any cpu of current node if in @mask
842 * 3) any other online cpu in @mask
843 */
844int smp_call_function_any(const struct cpumask *mask,
3a5f65df 845 smp_call_func_t func, void *info, int wait)
2ea6dec4
RR
846{
847 unsigned int cpu;
848 const struct cpumask *nodemask;
849 int ret;
850
851 /* Try for same CPU (cheapest) */
852 cpu = get_cpu();
853 if (cpumask_test_cpu(cpu, mask))
854 goto call;
855
856 /* Try for same node. */
af2422c4 857 nodemask = cpumask_of_node(cpu_to_node(cpu));
2ea6dec4
RR
858 for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
859 cpu = cpumask_next_and(cpu, nodemask, mask)) {
860 if (cpu_online(cpu))
861 goto call;
862 }
863
864 /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
865 cpu = cpumask_any_and(mask, cpu_online_mask);
866call:
867 ret = smp_call_function_single(cpu, func, info, wait);
868 put_cpu();
869 return ret;
870}
871EXPORT_SYMBOL_GPL(smp_call_function_any);
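/*
 * Editorial note, not part of kernel/smp.c: smp_call_function_any() picks
 * the cheapest CPU from the mask (current CPU, then same node, then any
 * other online CPU in the mask) and runs the callback there.
 * drain_example_cache() and example_drain_on_package() are hypothetical.
 */
static void drain_example_cache(void *info)
{
	/* runs on whichever CPU of the mask was selected */
}

static int example_drain_on_package(const struct cpumask *package_mask)
{
	/* wait == 1: return only after the callback has run */
	return smp_call_function_any(package_mask, drain_example_cache, NULL, 1);
}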
872
a32a4d8a
NA
873/*
874 * Flags to be used as scf_flags argument of smp_call_function_many_cond().
875 *
876 * %SCF_WAIT: Wait until function execution is completed
877 * %SCF_RUN_LOCAL: Run also locally if local cpu is set in cpumask
878 */
879#define SCF_WAIT (1U << 0)
880#define SCF_RUN_LOCAL (1U << 1)
881
67719ef2
SAS
882static void smp_call_function_many_cond(const struct cpumask *mask,
883 smp_call_func_t func, void *info,
a32a4d8a
NA
884 unsigned int scf_flags,
885 smp_cond_func_t cond_func)
3d442233 886{
a32a4d8a 887 int cpu, last_cpu, this_cpu = smp_processor_id();
e1d12f32 888 struct call_function_data *cfd;
a32a4d8a
NA
889 bool wait = scf_flags & SCF_WAIT;
890 bool run_remote = false;
891 bool run_local = false;
892 int nr_cpus = 0;
893
894 lockdep_assert_preemption_disabled();
3d442233 895
269c861b
SS
896 /*
897 * Can deadlock when called with interrupts disabled.
 898 * We allow CPUs that are not yet online though, as no one else can
 899 * send an smp-call-function interrupt to this CPU, and as such deadlocks
900 * can't happen.
901 */
a32a4d8a
NA
902 if (cpu_online(this_cpu) && !oops_in_progress &&
903 !early_boot_irqs_disabled)
904 lockdep_assert_irqs_enabled();
3d442233 905
19dbdcb8
PZ
906 /*
907 * When @wait we can deadlock when we interrupt between llist_add() and
908 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
 910 * csd_lock(), because the interrupt context uses the same csd
910 * storage.
911 */
912 WARN_ON_ONCE(!in_task());
913
a32a4d8a
NA
914 /* Check if we need local execution. */
915 if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask))
916 run_local = true;
917
918 /* Check if we need remote execution, i.e., any CPU excluding this one. */
54b11e6d 919 cpu = cpumask_first_and(mask, cpu_online_mask);
0b13fda1 920 if (cpu == this_cpu)
54b11e6d 921 cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
a32a4d8a
NA
922 if (cpu < nr_cpu_ids)
923 run_remote = true;
0b13fda1 924
a32a4d8a
NA
925 if (run_remote) {
926 cfd = this_cpu_ptr(&cfd_data);
927 cpumask_and(cfd->cpumask, mask, cpu_online_mask);
928 __cpumask_clear_cpu(this_cpu, cfd->cpumask);
45a57919 929
a32a4d8a
NA
930 cpumask_clear(cfd->cpumask_ipi);
931 for_each_cpu(cpu, cfd->cpumask) {
a500fc91
IM
932 struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
933 call_single_data_t *csd = &pcpu->csd;
9a46ad6d 934
a32a4d8a
NA
935 if (cond_func && !cond_func(cpu, info))
936 continue;
67719ef2 937
a32a4d8a
NA
938 csd_lock(csd);
939 if (wait)
940 csd->node.u_flags |= CSD_TYPE_SYNC;
941 csd->func = func;
942 csd->info = info;
35feb604 943#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
a32a4d8a
NA
944 csd->node.src = smp_processor_id();
945 csd->node.dst = cpu;
e48c15b7 946#endif
a500fc91 947 cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
a32a4d8a
NA
948 if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
949 __cpumask_set_cpu(cpu, cfd->cpumask_ipi);
950 nr_cpus++;
951 last_cpu = cpu;
723aae25 952
a500fc91
IM
953 cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
954 } else {
955 cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
a32a4d8a
NA
956 }
957 }
958
a500fc91 959 cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->ping, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PING);
9a46ad6d 960
a32a4d8a
NA
961 /*
962 * Choose the most efficient way to send an IPI. Note that the
963 * number of CPUs might be zero due to concurrent changes to the
964 * provided mask.
965 */
966 if (nr_cpus == 1)
d43f17a1 967 send_call_function_single_ipi(last_cpu);
a32a4d8a
NA
968 else if (likely(nr_cpus > 1))
969 arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
67719ef2 970
a500fc91 971 cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->pinged, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED);
9a46ad6d 972 }
561920a0 973
a32a4d8a
NA
974 if (run_local && (!cond_func || cond_func(this_cpu, info))) {
975 unsigned long flags;
976
977 local_irq_save(flags);
978 func(info);
979 local_irq_restore(flags);
980 }
3d442233 981
a32a4d8a 982 if (run_remote && wait) {
e1d12f32 983 for_each_cpu(cpu, cfd->cpumask) {
966a9671 984 call_single_data_t *csd;
e1d12f32 985
de7b09ef 986 csd = &per_cpu_ptr(cfd->pcpu, cpu)->csd;
9a46ad6d
SL
987 csd_lock_wait(csd);
988 }
989 }
3d442233 990}
67719ef2
SAS
991
992/**
a32a4d8a 993 * smp_call_function_many(): Run a function on a set of CPUs.
67719ef2
SAS
994 * @mask: The set of cpus to run on (only runs on online subset).
995 * @func: The function to run. This must be fast and non-blocking.
996 * @info: An arbitrary pointer to pass to the function.
49b3bd21 997 * @wait: If true, wait (atomically) until the function has completed
a5aabace
NA
 998 * on other CPUs. (Internally this maps to %SCF_WAIT; %SCF_RUN_LOCAL is
 999 * not set, so the function is not run on the local CPU even if the
 1000 * local CPU is set in @mask.)
67719ef2
SAS
1001 *
1002 * If @wait is true, then returns once @func has returned.
1003 *
1004 * You must not call this function with disabled interrupts or from a
1005 * hardware interrupt handler or from a bottom half handler. Preemption
1006 * must be disabled when calling this function.
1007 */
1008void smp_call_function_many(const struct cpumask *mask,
1009 smp_call_func_t func, void *info, bool wait)
1010{
a32a4d8a 1011 smp_call_function_many_cond(mask, func, info, wait * SCF_WAIT, NULL);
67719ef2 1012}
54b11e6d 1013EXPORT_SYMBOL(smp_call_function_many);
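/*
 * Editorial note, not part of kernel/smp.c: smp_call_function_many() must be
 * called with preemption disabled and only targets the *other* online CPUs
 * in the mask.  do_example_invalidate() and example_invalidate_others() are
 * hypothetical.
 */
static void do_example_invalidate(void *info)
{
	/* runs on every other online CPU in the mask, in interrupt context */
}

static void example_invalidate_others(const struct cpumask *mask, void *info)
{
	preempt_disable();		/* required by smp_call_function_many() */
	smp_call_function_many(mask, do_example_invalidate, info, true);
	preempt_enable();
}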
3d442233
JA
1014
1015/**
1016 * smp_call_function(): Run a function on all other CPUs.
1017 * @func: The function to run. This must be fast and non-blocking.
1018 * @info: An arbitrary pointer to pass to the function.
0b13fda1
IM
1019 * @wait: If true, wait (atomically) until function has completed
1020 * on other CPUs.
3d442233 1021 *
54b11e6d 1022 * Returns 0.
3d442233
JA
1023 *
1024 * If @wait is true, then returns once @func has returned; otherwise
72f279b2 1025 * it returns just before the target cpu calls @func.
3d442233
JA
1026 *
1027 * You must not call this function with disabled interrupts or from a
1028 * hardware interrupt handler or from a bottom half handler.
1029 */
caa75932 1030void smp_call_function(smp_call_func_t func, void *info, int wait)
3d442233 1031{
3d442233 1032 preempt_disable();
54b11e6d 1033 smp_call_function_many(cpu_online_mask, func, info, wait);
3d442233 1034 preempt_enable();
3d442233
JA
1035}
1036EXPORT_SYMBOL(smp_call_function);
351f8f8e 1037
34db18a0
AW
1038/* Setup configured maximum number of CPUs to activate */
1039unsigned int setup_max_cpus = NR_CPUS;
1040EXPORT_SYMBOL(setup_max_cpus);
1041
1042
1043/*
1044 * Setup routine for controlling SMP activation
1045 *
1046 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
1047 * activation entirely (the MPS table probe still happens, though).
1048 *
1049 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
1050 * greater than 0, limits the maximum number of CPUs activated in
1051 * SMP mode to <NUM>.
1052 */
1053
1054void __weak arch_disable_smp_support(void) { }
1055
1056static int __init nosmp(char *str)
1057{
1058 setup_max_cpus = 0;
1059 arch_disable_smp_support();
1060
1061 return 0;
1062}
1063
1064early_param("nosmp", nosmp);
1065
1066/* this is hard limit */
1067static int __init nrcpus(char *str)
1068{
1069 int nr_cpus;
1070
58934356 1071 if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
38bef8e5 1072 set_nr_cpu_ids(nr_cpus);
34db18a0
AW
1073
1074 return 0;
1075}
1076
1077early_param("nr_cpus", nrcpus);
1078
1079static int __init maxcpus(char *str)
1080{
1081 get_option(&str, &setup_max_cpus);
1082 if (setup_max_cpus == 0)
1083 arch_disable_smp_support();
1084
1085 return 0;
1086}
1087
1088early_param("maxcpus", maxcpus);
1089
6f9c07be 1090#if (NR_CPUS > 1) && !defined(CONFIG_FORCE_NR_CPUS)
34db18a0 1091/* Setup number of possible processor ids */
9b130ad5 1092unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
34db18a0 1093EXPORT_SYMBOL(nr_cpu_ids);
53fc190c 1094#endif
34db18a0
AW
1095
1096/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
1097void __init setup_nr_cpu_ids(void)
1098{
38bef8e5 1099 set_nr_cpu_ids(find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1);
34db18a0
AW
1100}
1101
1102/* Called by boot processor to activate the rest. */
1103void __init smp_init(void)
1104{
92b23278 1105 int num_nodes, num_cpus;
34db18a0 1106
3bb5d2ee 1107 idle_threads_init();
4cb28ced 1108 cpuhp_threads_init();
3bb5d2ee 1109
51111dce
ME
1110 pr_info("Bringing up secondary CPUs ...\n");
1111
b99a2659 1112 bringup_nonboot_cpus(setup_max_cpus);
34db18a0 1113
92b23278
ME
1114 num_nodes = num_online_nodes();
1115 num_cpus = num_online_cpus();
1116 pr_info("Brought up %d node%s, %d CPU%s\n",
1117 num_nodes, (num_nodes > 1 ? "s" : ""),
1118 num_cpus, (num_cpus > 1 ? "s" : ""));
1119
34db18a0 1120 /* Any cleanup work */
34db18a0
AW
1121 smp_cpus_done(setup_max_cpus);
1122}
1123
b3a7e98e
GBY
1124/*
 1125 * on_each_cpu_cond_mask(): Call a function on each processor for which
1126 * the supplied function cond_func returns true, optionally waiting
1127 * for all the required CPUs to finish. This may include the local
1128 * processor.
1129 * @cond_func: A callback function that is passed a cpu id and
7b7b8a2c 1130 * the info parameter. The function is called
b3a7e98e
GBY
1131 * with preemption disabled. The function should
 1132 * return a boolean value indicating whether to IPI
1133 * the specified CPU.
1134 * @func: The function to run on all applicable CPUs.
1135 * This must be fast and non-blocking.
1136 * @info: An arbitrary pointer to pass to both functions.
1137 * @wait: If true, wait (atomically) until function has
1138 * completed on other CPUs.
b3a7e98e
GBY
1139 *
 1140 * Preemption is disabled to protect against CPUs going offline, but not online:
 1141 * CPUs coming online during the call will not be seen or sent an IPI.
1142 *
1143 * You must not call this function with disabled interrupts or
1144 * from a hardware interrupt handler or from a bottom half handler.
1145 */
5671d814 1146void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
cb923159 1147 void *info, bool wait, const struct cpumask *mask)
b3a7e98e 1148{
a32a4d8a 1149 unsigned int scf_flags = SCF_RUN_LOCAL;
67719ef2 1150
a32a4d8a
NA
1151 if (wait)
1152 scf_flags |= SCF_WAIT;
67719ef2 1153
a32a4d8a
NA
1154 preempt_disable();
1155 smp_call_function_many_cond(mask, func, info, scf_flags, cond_func);
1156 preempt_enable();
b3a7e98e 1157}
7d49b28a
RR
1158EXPORT_SYMBOL(on_each_cpu_cond_mask);
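/*
 * Editorial note, not part of kernel/smp.c: a sketch of the conditional
 * variant, where the cond_func filters which CPUs get the callback (and an
 * IPI).  The per-CPU variable example_gen and the helpers below are
 * hypothetical.
 */
static DEFINE_PER_CPU(u64, example_gen);

static bool example_gen_is_stale(int cpu, void *info)
{
	return per_cpu(example_gen, cpu) != *(u64 *)info;	/* evaluated with preemption disabled */
}

static void example_gen_update(void *info)
{
	this_cpu_write(example_gen, *(u64 *)info);		/* runs on each selected CPU */
}

static void example_gen_sync(u64 new_gen)
{
	/* Also runs locally if the local CPU passes the condition. */
	on_each_cpu_cond_mask(example_gen_is_stale, example_gen_update,
			      &new_gen, true, cpu_online_mask);
}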
1159
f37f435f
TG
1160static void do_nothing(void *unused)
1161{
1162}
1163
1164/**
1165 * kick_all_cpus_sync - Force all cpus out of idle
1166 *
 1167 * Used to synchronize the update of the pm_idle function pointer. It's
 1168 * called after the pointer is updated and returns after the dummy
 1169 * callback function has been executed on all CPUs. The execution of
 1170 * the function can only happen on the remote CPUs after they have
 1171 * left the idle function which had been called via the pm_idle function
 1172 * pointer. So it's guaranteed that nothing uses the previous pointer
 1173 * anymore.
1174 */
1175void kick_all_cpus_sync(void)
1176{
1177 /* Make sure the change is visible before we kick the cpus */
1178 smp_mb();
1179 smp_call_function(do_nothing, NULL, 1);
1180}
1181EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
c6f4459f
CL
1182
1183/**
 1184 * wake_up_all_idle_cpus - break all CPUs out of idle
 1185 * wake_up_all_idle_cpus tries to break all CPUs that are in the idle state,
 1186 * including CPUs that are polling in idle; for non-idle CPUs, nothing
 1187 * is done.
1188 */
1189void wake_up_all_idle_cpus(void)
1190{
1191 int cpu;
1192
96611c26
PZ
1193 for_each_possible_cpu(cpu) {
1194 preempt_disable();
1195 if (cpu != smp_processor_id() && cpu_online(cpu))
1196 wake_up_if_idle(cpu);
1197 preempt_enable();
c6f4459f 1198 }
c6f4459f
CL
1199}
1200EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
df8ce9d7
JG
1201
1202/**
49b3bd21
RD
1203 * struct smp_call_on_cpu_struct - Call a function on a specific CPU
1204 * @work: &work_struct
1205 * @done: &completion to signal
1206 * @func: function to call
1207 * @data: function's data argument
1208 * @ret: return value from @func
1209 * @cpu: target CPU (%-1 for any CPU)
df8ce9d7
JG
1210 *
1211 * Used to call a function on a specific cpu and wait for it to return.
1212 * Optionally make sure the call is done on a specified physical cpu via vcpu
1213 * pinning in order to support virtualized environments.
1214 */
1215struct smp_call_on_cpu_struct {
1216 struct work_struct work;
1217 struct completion done;
1218 int (*func)(void *);
1219 void *data;
1220 int ret;
1221 int cpu;
1222};
1223
1224static void smp_call_on_cpu_callback(struct work_struct *work)
1225{
1226 struct smp_call_on_cpu_struct *sscs;
1227
1228 sscs = container_of(work, struct smp_call_on_cpu_struct, work);
1229 if (sscs->cpu >= 0)
1230 hypervisor_pin_vcpu(sscs->cpu);
1231 sscs->ret = sscs->func(sscs->data);
1232 if (sscs->cpu >= 0)
1233 hypervisor_pin_vcpu(-1);
1234
1235 complete(&sscs->done);
1236}
1237
1238int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
1239{
1240 struct smp_call_on_cpu_struct sscs = {
df8ce9d7
JG
1241 .done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
1242 .func = func,
1243 .data = par,
1244 .cpu = phys ? cpu : -1,
1245 };
1246
8db54949
PZ
1247 INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);
1248
df8ce9d7
JG
1249 if (cpu >= nr_cpu_ids || !cpu_online(cpu))
1250 return -ENXIO;
1251
1252 queue_work_on(cpu, system_wq, &sscs.work);
1253 wait_for_completion(&sscs.done);
1254
1255 return sscs.ret;
1256}
1257EXPORT_SYMBOL_GPL(smp_call_on_cpu);
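/*
 * Editorial note, not part of kernel/smp.c: unlike the IPI-based helpers
 * above, smp_call_on_cpu() runs the function from a workqueue on the target
 * CPU, so the callback may sleep; with phys == true the vCPU is also pinned
 * on virtualized systems.  example_read_board_reg() and example_read_reg_on()
 * are hypothetical.
 */
static int example_read_board_reg(void *data)
{
	*(u32 *)data = 0;	/* placeholder for a read that must run on that CPU */
	return 0;
}

static int example_read_reg_on(unsigned int cpu)
{
	u32 val;
	int ret;

	ret = smp_call_on_cpu(cpu, example_read_board_reg, &val, false);
	return ret ? ret : (int)val;
}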