// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>
#include <linux/sched/clock.h>
#include <linux/nmi.h>
#include <linux/sched/debug.h>
#include <linux/jump_label.h>

#include "smpboot.h"
#include "sched/smp.h"

#define CSD_TYPE(_csd)	((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
union cfd_seq_cnt {
	u64		val;
	struct {
		u64	src:16;
		u64	dst:16;
#define CFD_SEQ_NOCPU	0xffff
		u64	type:4;
#define CFD_SEQ_QUEUE	0
#define CFD_SEQ_IPI	1
#define CFD_SEQ_NOIPI	2
#define CFD_SEQ_PING	3
#define CFD_SEQ_PINGED	4
#define CFD_SEQ_HANDLE	5
#define CFD_SEQ_DEQUEUE	6
#define CFD_SEQ_IDLE	7
#define CFD_SEQ_GOTIPI	8
#define CFD_SEQ_HDLEND	9
		u64	cnt:28;
	} u;
};
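
/*
 * Layout note (illustrative only): the union packs a source CPU, a
 * destination CPU, an event type and a 28-bit counter into one 64-bit
 * value, so a snapshot can be read or updated atomically (see
 * cfd_seq_inc() below, which bumps ->u.cnt with a cmpxchg loop).  A
 * hypothetical decoder for one snapshot:
 */
#if 0
static void example_decode_cfd_seq(u64 snap)
{
	union cfd_seq_cnt seq = { .val = snap };

	pr_info("cnt=%07llx %04llx->%04llx type=%llu\n",
		(unsigned long long)seq.u.cnt,
		(unsigned long long)seq.u.src,
		(unsigned long long)seq.u.dst,
		(unsigned long long)seq.u.type);
}
#endif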

static char *seq_type[] = {
	[CFD_SEQ_QUEUE]		= "queue",
	[CFD_SEQ_IPI]		= "ipi",
	[CFD_SEQ_NOIPI]		= "noipi",
	[CFD_SEQ_PING]		= "ping",
	[CFD_SEQ_PINGED]	= "pinged",
	[CFD_SEQ_HANDLE]	= "handle",
	[CFD_SEQ_DEQUEUE]	= "dequeue (src CPU 0 == empty)",
	[CFD_SEQ_IDLE]		= "idle",
	[CFD_SEQ_GOTIPI]	= "gotipi",
	[CFD_SEQ_HDLEND]	= "hdlend (src CPU 0 == early)",
};

struct cfd_seq_local {
	u64	ping;
	u64	pinged;
	u64	handle;
	u64	dequeue;
	u64	idle;
	u64	gotipi;
	u64	hdlend;
};
#endif

struct cfd_percpu {
	call_single_data_t	csd;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	u64	seq_queue;
	u64	seq_ipi;
	u64	seq_noipi;
#endif
};

struct call_function_data {
	struct cfd_percpu	__percpu *pcpu;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void __flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->pcpu = alloc_percpu(struct cfd_percpu);
	if (!cfd->pcpu) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->pcpu);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	__flush_smp_call_function_queue(false);
	irq_work_run();
	return 0;
}
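
/*
 * Orientation note: the three hooks above are registered in the CPU-hotplug
 * state table (see CPUHP_SMPCFD_PREPARE and CPUHP_AP_SMPCFD_DYING in
 * kernel/cpu.c).  The prepare/dead pair is conceptually equivalent to this
 * hypothetical dynamic registration (sketch only, not how mainline wires it):
 */
#if 0
static int __init example_register_smpcfd(void)
{
	int ret;

	/* startup runs before a CPU comes up, teardown after it is dead */
	ret = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "example/smpcfd:prepare",
				smpcfd_prepare_cpu, smpcfd_dead_cpu);
	return ret < 0 ? ret : 0;
}
#endif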

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG

static DEFINE_STATIC_KEY_FALSE(csdlock_debug_enabled);
static DEFINE_STATIC_KEY_FALSE(csdlock_debug_extended);

static int __init csdlock_debug(char *str)
{
	unsigned int val = 0;

	if (str && !strcmp(str, "ext")) {
		val = 1;
		static_branch_enable(&csdlock_debug_extended);
	} else
		get_option(&str, &val);

	if (val)
		static_branch_enable(&csdlock_debug_enabled);

	return 1;
}
__setup("csdlock_debug=", csdlock_debug);
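
/*
 * Usage note (derived from the parser above): booting with "csdlock_debug=1"
 * enables the basic CSD-lock wait diagnostics; "csdlock_debug=ext" also
 * enables the extended sequence tracing guarded by csdlock_debug_extended.
 */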

static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
static DEFINE_PER_CPU(void *, cur_csd_info);
static DEFINE_PER_CPU(struct cfd_seq_local, cfd_seq_local);

static ulong csd_lock_timeout = 5000;	/* CSD lock timeout in milliseconds. */
module_param(csd_lock_timeout, ulong, 0444);

static atomic_t csd_bug_count = ATOMIC_INIT(0);
static u64 cfd_seq;

#define CFD_SEQ(s, d, t, c) \
	(union cfd_seq_cnt){ .u.src = s, .u.dst = d, .u.type = t, .u.cnt = c }

static u64 cfd_seq_inc(unsigned int src, unsigned int dst, unsigned int type)
{
	union cfd_seq_cnt new, old;

	new = CFD_SEQ(src, dst, type, 0);

	do {
		old.val = READ_ONCE(cfd_seq);
		new.u.cnt = old.u.cnt + 1;
	} while (cmpxchg(&cfd_seq, old.val, new.val) != old.val);

	return old.val;
}

#define cfd_seq_store(var, src, dst, type)				\
	do {								\
		if (static_branch_unlikely(&csdlock_debug_extended))	\
			var = cfd_seq_inc(src, dst, type);		\
	} while (0)
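
/*
 * Minimal usage sketch (names hypothetical): each instrumented event stamps
 * its slot with the pre-increment snapshot returned by cfd_seq_inc(), so a
 * later report can sort the recorded events into one global order:
 */
#if 0
static u64 example_seq_queue;	/* one slot per tracked event */

static void example_trace_queue(unsigned int this_cpu, unsigned int dst_cpu)
{
	/* a static-branch NOP unless booted with csdlock_debug=ext */
	cfd_seq_store(example_seq_queue, this_cpu, dst_cpu, CFD_SEQ_QUEUE);
}
#endif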

/* Record current CSD work for current CPU, NULL to erase. */
static void __csd_lock_record(struct __call_single_data *csd)
{
	if (!csd) {
		smp_mb(); /* NULL cur_csd after unlock. */
		__this_cpu_write(cur_csd, NULL);
		return;
	}
	__this_cpu_write(cur_csd_func, csd->func);
	__this_cpu_write(cur_csd_info, csd->info);
	smp_wmb(); /* func and info before csd. */
	__this_cpu_write(cur_csd, csd);
	smp_mb(); /* Update cur_csd before function call. */
		  /* Or before unlock, as the case may be. */
}

static __always_inline void csd_lock_record(struct __call_single_data *csd)
{
	if (static_branch_unlikely(&csdlock_debug_enabled))
		__csd_lock_record(csd);
}

static int csd_lock_wait_getcpu(struct __call_single_data *csd)
{
	unsigned int csd_type;

	csd_type = CSD_TYPE(csd);
	if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
		return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */
	return -1;
}

static void cfd_seq_data_add(u64 val, unsigned int src, unsigned int dst,
			     unsigned int type, union cfd_seq_cnt *data,
			     unsigned int *n_data, unsigned int now)
{
	union cfd_seq_cnt new[2];
	unsigned int i, j, k;

	new[0].val = val;
	new[1] = CFD_SEQ(src, dst, type, new[0].u.cnt + 1);

	for (i = 0; i < 2; i++) {
		if (new[i].u.cnt <= now)
			new[i].u.cnt |= 0x80000000U;
		for (j = 0; j < *n_data; j++) {
			if (new[i].u.cnt == data[j].u.cnt) {
				/* Direct read value trumps generated one. */
				if (i == 0)
					data[j].val = new[i].val;
				break;
			}
			if (new[i].u.cnt < data[j].u.cnt) {
				for (k = *n_data; k > j; k--)
					data[k].val = data[k - 1].val;
				data[j].val = new[i].val;
				(*n_data)++;
				break;
			}
		}
		if (j == *n_data) {
			data[j].val = new[i].val;
			(*n_data)++;
		}
	}
}

static const char *csd_lock_get_type(unsigned int type)
{
	return (type >= ARRAY_SIZE(seq_type)) ? "?" : seq_type[type];
}

static void csd_lock_print_extended(struct __call_single_data *csd, int cpu)
{
	struct cfd_seq_local *seq = &per_cpu(cfd_seq_local, cpu);
	unsigned int srccpu = csd->node.src;
	struct call_function_data *cfd = per_cpu_ptr(&cfd_data, srccpu);
	struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
	unsigned int now;
	union cfd_seq_cnt data[2 * ARRAY_SIZE(seq_type)];
	unsigned int n_data = 0, i;

	data[0].val = READ_ONCE(cfd_seq);
	now = data[0].u.cnt;

	cfd_seq_data_add(pcpu->seq_queue, srccpu, cpu, CFD_SEQ_QUEUE, data, &n_data, now);
	cfd_seq_data_add(pcpu->seq_ipi, srccpu, cpu, CFD_SEQ_IPI, data, &n_data, now);
	cfd_seq_data_add(pcpu->seq_noipi, srccpu, cpu, CFD_SEQ_NOIPI, data, &n_data, now);

	cfd_seq_data_add(per_cpu(cfd_seq_local.ping, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PING, data, &n_data, now);
	cfd_seq_data_add(per_cpu(cfd_seq_local.pinged, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED, data, &n_data, now);

	cfd_seq_data_add(seq->idle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_IDLE, data, &n_data, now);
	cfd_seq_data_add(seq->gotipi, CFD_SEQ_NOCPU, cpu, CFD_SEQ_GOTIPI, data, &n_data, now);
	cfd_seq_data_add(seq->handle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HANDLE, data, &n_data, now);
	cfd_seq_data_add(seq->dequeue, CFD_SEQ_NOCPU, cpu, CFD_SEQ_DEQUEUE, data, &n_data, now);
	cfd_seq_data_add(seq->hdlend, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HDLEND, data, &n_data, now);

	for (i = 0; i < n_data; i++) {
		pr_alert("\tcsd: cnt(%07x): %04x->%04x %s\n",
			 data[i].u.cnt & ~0x80000000U, data[i].u.src,
			 data[i].u.dst, csd_lock_get_type(data[i].u.type));
	}
	pr_alert("\tcsd: cnt now: %07x\n", now);
}

/*
 * Complain if too much time spent waiting. Note that only
 * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
 * so waiting on other types gets much less information.
 */
static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
{
	int cpu = -1;
	int cpux;
	bool firsttime;
	u64 ts2, ts_delta;
	call_single_data_t *cpu_cur_csd;
	unsigned int flags = READ_ONCE(csd->node.u_flags);
	unsigned long long csd_lock_timeout_ns = csd_lock_timeout * NSEC_PER_MSEC;

	if (!(flags & CSD_FLAG_LOCK)) {
		if (!unlikely(*bug_id))
			return true;
		cpu = csd_lock_wait_getcpu(csd);
		pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
			 *bug_id, raw_smp_processor_id(), cpu);
		return true;
	}

	ts2 = sched_clock();
	ts_delta = ts2 - *ts1;
	if (likely(ts_delta <= csd_lock_timeout_ns || csd_lock_timeout_ns == 0))
		return false;

	firsttime = !*bug_id;
	if (firsttime)
		*bug_id = atomic_inc_return(&csd_bug_count);
	cpu = csd_lock_wait_getcpu(csd);
	if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
		cpux = 0;
	else
		cpux = cpu;
	cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
	pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
		 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0,
		 cpu, csd->func, csd->info);
	if (cpu_cur_csd && csd != cpu_cur_csd) {
		pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
			 *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
			 READ_ONCE(per_cpu(cur_csd_info, cpux)));
	} else {
		pr_alert("\tcsd: CSD lock (#%d) %s.\n",
			 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
	}
	if (cpu >= 0) {
		if (static_branch_unlikely(&csdlock_debug_extended))
			csd_lock_print_extended(csd, cpu);
		if (!trigger_single_cpu_backtrace(cpu))
			dump_cpu_task(cpu);
		if (!cpu_cur_csd) {
			pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
			arch_send_call_function_single_ipi(cpu);
		}
	}
	dump_stack();
	*ts1 = ts2;

	return false;
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void __csd_lock_wait(struct __call_single_data *csd)
{
	int bug_id = 0;
	u64 ts0, ts1;

	ts1 = ts0 = sched_clock();
	for (;;) {
		if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id))
			break;
		cpu_relax();
	}
	smp_acquire__after_ctrl_dep();
}

static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
	if (static_branch_unlikely(&csdlock_debug_enabled)) {
		__csd_lock_wait(csd);
		return;
	}

	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}

static void __smp_call_single_queue_debug(int cpu, struct llist_node *node)
{
	unsigned int this_cpu = smp_processor_id();
	struct cfd_seq_local *seq = this_cpu_ptr(&cfd_seq_local);
	struct call_function_data *cfd = this_cpu_ptr(&cfd_data);
	struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);

	cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
	if (llist_add(node, &per_cpu(call_single_queue, cpu))) {
		cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
		cfd_seq_store(seq->ping, this_cpu, cpu, CFD_SEQ_PING);
		send_call_function_single_ipi(cpu);
		cfd_seq_store(seq->pinged, this_cpu, cpu, CFD_SEQ_PINGED);
	} else {
		cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
	}
}
#else
#define cfd_seq_store(var, src, dst, type)

static void csd_lock_record(struct __call_single_data *csd)
{
}

static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}
#endif

static __always_inline void csd_lock(struct __call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->node.u_flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data_t structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(struct __call_single_data *csd)
{
	WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->node.u_flags, 0);
}
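
/*
 * Lifecycle sketch of the CSD_FLAG_LOCK handshake implemented above
 * (commentary only; the steps mirror csd_lock(), csd_unlock() and
 * csd_lock_wait()):
 *
 *	sender:	csd_lock(csd)	- wait for CSD_FLAG_LOCK to clear, set it;
 *				  smp_wmb() orders the flag store before the
 *				  subsequent ->func/->info stores
 *		queue csd, send IPI
 *	target:	run csd->func(csd->info)
 *		csd_unlock(csd)	- smp_store_release() clears CSD_FLAG_LOCK
 *	sender:	csd_lock_wait()	- smp_cond_load_acquire() observes the
 *				  release, so the callback's writes are visible
 */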

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

void __smp_call_single_queue(int cpu, struct llist_node *node)
{
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	if (static_branch_unlikely(&csdlock_debug_extended)) {
		unsigned int type;

		type = CSD_TYPE(container_of(node, call_single_data_t,
					     node.llist));
		if (type == CSD_TYPE_SYNC || type == CSD_TYPE_ASYNC) {
			__smp_call_single_queue_debug(cpu, node);
			return;
		}
	}
#endif

	/*
	 * The list addition should be visible before we send the IPI: the
	 * handler locks the list to pull the entry off it, relying on the
	 * normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(node, &per_cpu(call_single_queue, cpu)))
		send_call_function_single_ipi(cpu);
}

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. The csd must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct __call_single_data *csd)
{
	if (cpu == smp_processor_id()) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU..
		 */
		csd_lock_record(csd);
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		csd_lock_record(NULL);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	__smp_call_single_queue(cpu, &csd->node.llist);

	return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->gotipi, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_GOTIPI);
	__flush_smp_call_function_queue(true);
}

/**
 * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void __flush_smp_call_function_queue(bool warn_cpu_offline)
{
	call_single_data_t *csd, *csd_next;
	struct llist_node *entry, *prev;
	struct llist_head *head;
	static bool warned;

	lockdep_assert_irqs_disabled();

	head = this_cpu_ptr(&call_single_queue);
	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->handle, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_HANDLE);
	entry = llist_del_all(head);
	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->dequeue,
		      /* Special meaning of source cpu: 0 == queue empty */
		      entry ? CFD_SEQ_NOCPU : 0,
		      smp_processor_id(), CFD_SEQ_DEQUEUE);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && entry != NULL)) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, node.llist) {
			switch (CSD_TYPE(csd)) {
			case CSD_TYPE_ASYNC:
			case CSD_TYPE_SYNC:
			case CSD_TYPE_IRQ_WORK:
				pr_warn("IPI callback %pS sent to offline CPU\n",
					csd->func);
				break;

			case CSD_TYPE_TTWU:
				pr_warn("IPI task-wakeup sent to offline CPU\n");
				break;

			default:
				pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
					CSD_TYPE(csd));
				break;
			}
		}
	}

	/*
	 * First; run all SYNC callbacks, people are waiting for us.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
		/* Do we wait until *after* callback? */
		if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
			smp_call_func_t func = csd->func;
			void *info = csd->info;

			if (prev) {
				prev->next = &csd_next->node.llist;
			} else {
				entry = &csd_next->node.llist;
			}

			csd_lock_record(csd);
			func(info);
			csd_unlock(csd);
			csd_lock_record(NULL);
		} else {
			prev = &csd->node.llist;
		}
	}

	if (!entry) {
		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend,
			      0, smp_processor_id(),
			      CFD_SEQ_HDLEND);
		return;
	}

	/*
	 * Second; run all !SYNC callbacks.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
		int type = CSD_TYPE(csd);

		if (type != CSD_TYPE_TTWU) {
			if (prev) {
				prev->next = &csd_next->node.llist;
			} else {
				entry = &csd_next->node.llist;
			}

			if (type == CSD_TYPE_ASYNC) {
				smp_call_func_t func = csd->func;
				void *info = csd->info;

				csd_lock_record(csd);
				csd_unlock(csd);
				func(info);
				csd_lock_record(NULL);
			} else if (type == CSD_TYPE_IRQ_WORK) {
				irq_work_single(csd);
			}

		} else {
			prev = &csd->node.llist;
		}
	}

	/*
	 * Third; only CSD_TYPE_TTWU is left, issue those.
	 */
	if (entry)
		sched_ttwu_pending(entry);

	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_HDLEND);
}


/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *				   from task context (idle, migration thread)
 *
 * When TIF_POLLING_NRFLAG is supported and a CPU is in idle and has it
 * set, then remote CPUs can avoid sending IPIs and wake the idle CPU by
 * setting TIF_NEED_RESCHED. The idle task on the woken up CPU has to
 * handle queued SMP function calls before scheduling.
 *
 * The migration thread has to ensure that an eventually pending wakeup has
 * been handled before it migrates a task.
 */
void flush_smp_call_function_queue(void)
{
	unsigned int was_pending;
	unsigned long flags;

	if (llist_empty(this_cpu_ptr(&call_single_queue)))
		return;

	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->idle, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_IDLE);
	local_irq_save(flags);
	/* Get the already pending soft interrupts for RT enabled kernels */
	was_pending = local_softirq_pending();
	__flush_smp_call_function_queue(true);
	if (local_softirq_pending())
		do_softirq_post_smp_call_flush(was_pending);

	local_irq_restore(flags);
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	call_single_data_t *csd;
	call_single_data_t csd_stack = {
		.node = { .u_flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC, },
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock on
	 * csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	csd->func = func;
	csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	csd->node.src = smp_processor_id();
	csd->node.dst = cpu;
#endif

	err = generic_exec_single(cpu, csd);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
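
/*
 * Usage sketch (illustrative only; the callback and variable names are
 * hypothetical): synchronously run a fast, non-blocking function on one
 * remote CPU and collect a result through @info.
 */
#if 0
static void example_read_clock(void *info)
{
	/* Runs on the target CPU, in IPI context with interrupts disabled. */
	*(u64 *)info = sched_clock();
}

static void example_single_call(int target_cpu)
{
	u64 remote_ns = 0;

	/* wait == 1: only returns after example_read_clock() completed. */
	if (!smp_call_function_single(target_cpu, example_read_clock,
				      &remote_ns, 1))
		pr_info("CPU%d sched_clock: %llu ns\n", target_cpu, remote_ns);
}
#endif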

/**
 * smp_call_function_single_async() - Run an asynchronous function on a
 *				      specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * If the function is called with one csd which has not yet been
 * processed by a previous call to smp_call_function_single_async(), the
 * function will return immediately with -EBUSY showing that the csd
 * object is still in progress.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 *
 * Return: %0 on success or negative errno value on error
 */
int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
{
	int err = 0;

	preempt_disable();

	if (csd->node.u_flags & CSD_FLAG_LOCK) {
		err = -EBUSY;
		goto out;
	}

	csd->node.u_flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd);

out:
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
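
/*
 * Usage sketch (hypothetical names): the caller owns the csd, typically
 * embedded in a longer-lived object, and must not reuse it until the
 * previous call has been processed (-EBUSY signals exactly that).
 */
#if 0
struct example_dev {
	call_single_data_t	csd;	/* owned by us, reused across calls */
};

static void example_dev_kick(void *info)
{
	struct example_dev *dev = info;

	/* ... fast, non-blocking work on the target CPU ... */
}

static void example_dev_init(struct example_dev *dev)
{
	INIT_CSD(&dev->csd, example_dev_kick, dev);	/* one-time setup */
}

static int example_async_kick(struct example_dev *dev, int target_cpu)
{
	/* May be called with interrupts disabled; does not wait. */
	return smp_call_function_single_async(target_cpu, &dev->csd);
}
#endif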

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
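
/*
 * Usage sketch (hypothetical callback): useful when the work must run on
 * some CPU of @mask but the caller does not care which one, e.g. touching
 * a node-local resource on any CPU of a given node.
 */
#if 0
static void example_any_fn(void *info)
{
	*(int *)info = smp_processor_id();	/* report where we ran */
}

static void example_any_call(const struct cpumask *mask)
{
	int ran_on = -1;

	if (!smp_call_function_any(mask, example_any_fn, &ran_on, 1))
		pr_info("ran on CPU%d\n", ran_on);
}
#endif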

/*
 * Flags to be used as scf_flags argument of smp_call_function_many_cond().
 *
 * %SCF_WAIT:		Wait until function execution is completed
 * %SCF_RUN_LOCAL:	Run also locally if local cpu is set in cpumask
 */
#define SCF_WAIT	(1U << 0)
#define SCF_RUN_LOCAL	(1U << 1)

static void smp_call_function_many_cond(const struct cpumask *mask,
					smp_call_func_t func, void *info,
					unsigned int scf_flags,
					smp_cond_func_t cond_func)
{
	int cpu, last_cpu, this_cpu = smp_processor_id();
	struct call_function_data *cfd;
	bool wait = scf_flags & SCF_WAIT;
	bool run_remote = false;
	bool run_local = false;
	int nr_cpus = 0;

	lockdep_assert_preemption_disabled();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	if (cpu_online(this_cpu) && !oops_in_progress &&
	    !early_boot_irqs_disabled)
		lockdep_assert_irqs_enabled();

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock on
	 * csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	/* Check if we need local execution. */
	if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask))
		run_local = true;

	/* Check if we need remote execution, i.e., any CPU excluding this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (cpu < nr_cpu_ids)
		run_remote = true;

	if (run_remote) {
		cfd = this_cpu_ptr(&cfd_data);
		cpumask_and(cfd->cpumask, mask, cpu_online_mask);
		__cpumask_clear_cpu(this_cpu, cfd->cpumask);

		cpumask_clear(cfd->cpumask_ipi);
		for_each_cpu(cpu, cfd->cpumask) {
			struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
			call_single_data_t *csd = &pcpu->csd;

			if (cond_func && !cond_func(cpu, info))
				continue;

			csd_lock(csd);
			if (wait)
				csd->node.u_flags |= CSD_TYPE_SYNC;
			csd->func = func;
			csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
			csd->node.src = smp_processor_id();
			csd->node.dst = cpu;
#endif
			cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
			if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
				__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
				nr_cpus++;
				last_cpu = cpu;

				cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
			} else {
				cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
			}
		}

		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->ping, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PING);

		/*
		 * Choose the most efficient way to send an IPI. Note that the
		 * number of CPUs might be zero due to concurrent changes to the
		 * provided mask.
		 */
		if (nr_cpus == 1)
			send_call_function_single_ipi(last_cpu);
		else if (likely(nr_cpus > 1))
			arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->pinged, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED);
	}

	if (run_local && (!cond_func || cond_func(this_cpu, info))) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}

	if (run_remote && wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd;

			csd = &per_cpu_ptr(cfd->pcpu, cpu)->csd;
			csd_lock_wait(csd);
		}
	}
}

/**
 * smp_call_function_many(): Run a function on a set of CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *	  on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	smp_call_function_many_cond(mask, func, info, wait * SCF_WAIT, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);
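
/*
 * Usage sketch (hypothetical): broadcast a drain-style operation to every
 * other online CPU and wait for completion.  Preemption must be disabled
 * around the call, as documented above.
 */
#if 0
static void example_drain_fn(void *info)
{
	/* ... drain a per-CPU cache; must be fast and non-blocking ... */
}

static void example_drain_all(void)
{
	preempt_disable();	/* pins us and the online-mask snapshot */
	smp_call_function_many(cpu_online_mask, example_drain_fn, NULL, true);
	preempt_enable();
}
#endif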

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *	  on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);


/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);
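
/*
 * Usage note (summarizing the three early_param() handlers above):
 *
 *	nosmp		- boot with a single CPU; SMP activation is disabled
 *	nr_cpus=<N>	- hard-cap nr_cpu_ids, limiting possible CPUs to N
 *	maxcpus=<N>	- bring up at most N CPUs at boot (0 behaves like
 *			  nosmp); remaining CPUs can typically still be
 *			  onlined later via CPU hotplug
 */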

/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	bringup_nonboot_cpus(setup_max_cpus);

	num_nodes = num_online_nodes();
	num_cpus = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus, (num_cpus > 1 ? "s" : ""));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}

/*
 * on_each_cpu_cond_mask(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @mask:	The mask of CPUs to consider for the call.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	unsigned int scf_flags = SCF_RUN_LOCAL;

	if (wait)
		scf_flags |= SCF_WAIT;

	preempt_disable();
	smp_call_function_many_cond(mask, func, info, scf_flags, cond_func);
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);
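
/*
 * Usage sketch (hypothetical predicate): IPI only those CPUs of @mask that
 * actually have work pending, e.g. a non-empty per-CPU counter.
 */
#if 0
static DEFINE_PER_CPU(unsigned int, example_pending);

static bool example_cpu_has_work(int cpu, void *info)
{
	return per_cpu(example_pending, cpu) != 0;	/* IPI only if true */
}

static void example_do_work(void *info)
{
	this_cpu_write(example_pending, 0);	/* runs on each chosen CPU */
}

static void example_flush_pending(void)
{
	on_each_cpu_cond_mask(example_cpu_has_work, example_do_work,
			      NULL, true, cpu_online_mask);
}
#endif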

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Wake up all CPUs that are in the idle state, including CPUs that are
 * idle-polling; non-idle CPUs are left alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		preempt_disable();
		if (cpu != smp_processor_id() && cpu_online(cpu))
			wake_up_if_idle(cpu);
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * struct smp_call_on_cpu_struct - Call a function on a specific CPU
 * @work: &work_struct
 * @done: &completion to signal
 * @func: function to call
 * @data: function's data argument
 * @ret: return value from @func
 * @cpu: target CPU (%-1 for any CPU)
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
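
/*
 * Usage sketch (hypothetical function): unlike the IPI-based helpers above,
 * smp_call_on_cpu() runs @func from a workqueue worker on the target CPU,
 * so @func may sleep; the caller blocks until it returns.
 */
#if 0
static int example_slow_read(void *data)
{
	/* Runs in process context on the target CPU; may sleep. */
	*(u64 *)data = 42;	/* stand-in for a per-CPU resource read */
	return 0;
}

static int example_call_on_cpu2(void)
{
	u64 val = 0;
	int ret;

	/* phys == false: no hypervisor vCPU pinning requested. */
	ret = smp_call_on_cpu(2, example_slow_read, &val, false);
	if (!ret)
		pr_info("CPU2 returned %llu\n", (unsigned long long)val);
	return ret;
}
#endif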