kernel/smp.c
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include "smpboot.h"

enum {
	CSD_FLAG_LOCK		= 0x01,
	CSD_FLAG_WAIT		= 0x02,
};

struct call_function_data {
	struct call_single_data	__percpu *csd;
	cpumask_var_t		cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		cfd->csd = alloc_percpu(struct call_single_data);
		if (!cfd->csd) {
			free_cpumask_var(cfd->cpumask);
			return notifier_from_errno(-ENOMEM);
		}
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		/* Fall-through to the CPU_DEAD[_FROZEN] case. */

	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		free_percpu(cfd->csd);
		break;

	case CPU_DYING:
	case CPU_DYING_FROZEN:
		/*
		 * The IPIs for the smp-call-function callbacks queued by other
		 * CPUs might arrive late, either due to hardware latencies or
		 * because this CPU disabled interrupts (inside stop-machine)
		 * before the IPIs were sent. So flush out any pending callbacks
		 * explicitly (without waiting for the IPIs to arrive), to
		 * ensure that the outgoing CPU doesn't go offline with work
		 * still pending.
		 */
		flush_smp_call_function_queue(false);
		break;
#endif
	};

	return NOTIFY_OK;
}

static struct notifier_block hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};

void __init call_function_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *csd)
{
	while (csd->flags & CSD_FLAG_LOCK)
		cpu_relax();
}

static void csd_lock(struct call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_mb();
}

static void csd_unlock(struct call_single_data *csd)
{
	WARN_ON((csd->flags & CSD_FLAG_WAIT) && !(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_mb();

	csd->flags &= ~CSD_FLAG_LOCK;
}

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct call_single_data *csd,
			       smp_call_func_t func, void *info, int wait)
{
	struct call_single_data csd_stack = { .flags = 0 };
	unsigned long flags;

	if (cpu == smp_processor_id()) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	if (!csd) {
		csd = &csd_stack;
		if (!wait)
			csd = this_cpu_ptr(&csd_data);
	}

	csd_lock(csd);

	csd->func = func;
	csd->info = info;

	if (wait)
		csd->flags |= CSD_FLAG_WAIT;

	/*
	 * The list addition should be visible before we send the IPI:
	 * the handler locks the list to pull the entry off it, relying
	 * on the normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol in an architecture, sufficient synchronisation should be
	 * added to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_lock_wait(csd);

	return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	struct llist_head *head;
	struct llist_node *entry;
	struct call_single_data *csd, *csd_next;
	static bool warned;

	WARN_ON(!irqs_disabled());

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist)
			pr_warn("IPI callback %pS sent to offline CPU\n",
				csd->func);
	}

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		csd->func(csd->info);
		csd_unlock(csd);
	}

	/*
	 * Handle irq works queued remotely by irq_work_queue_on().
	 * Smp functions above are typically synchronous so they
	 * better run first since some other CPUs may be busy waiting
	 * for them.
	 */
	irq_work_run();
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpus that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	err = generic_exec_single(cpu, NULL, func, info, wait);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
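
/*
 * Illustrative sketch (not part of this file), with a hypothetical
 * callback and per-cpu counter: run a fast handler on a remote CPU and
 * wait for it. The callback runs with interrupts disabled on the target.
 *
 *	static void read_remote_counter(void *info)
 *	{
 *		*(u64 *)info = this_cpu_read(my_counter);
 *	}
 *
 *	u64 val;
 *	int err = smp_call_function_single(cpu, read_remote_counter, &val, 1);
 *	if (err)
 *		pr_warn("CPU %d not online\n", cpu);
 */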

/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *				     specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, struct call_single_data *csd)
{
	int err = 0;

	preempt_disable();
	err = generic_exec_single(cpu, csd, csd->func, csd->info, 0);
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
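
/*
 * Illustrative sketch (not part of this file): a csd embedded in a
 * hypothetical driver object. The caller must ensure the previous IPI
 * on this csd has completed before reusing it.
 *
 *	struct my_dev {
 *		struct call_single_data csd;
 *		...
 *	};
 *
 *	dev->csd.func = my_dev_poke;	(hypothetical handler)
 *	dev->csd.info = dev;
 *	smp_call_function_single_async(target_cpu, &dev->csd);
 */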

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
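
/*
 * Illustrative sketch (not part of this file): reach whichever CPU of a
 * device's (hypothetical) affinity mask is cheapest, preferring the
 * local CPU, then the local node:
 *
 *	err = smp_call_function_any(dev->affinity_mask, my_func, dev, 1);
 */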
373
3d442233 374/**
54b11e6d
RR
375 * smp_call_function_many(): Run a function on a set of other CPUs.
376 * @mask: The set of cpus to run on (only runs on online subset).
3d442233
JA
377 * @func: The function to run. This must be fast and non-blocking.
378 * @info: An arbitrary pointer to pass to the function.
0b13fda1
IM
379 * @wait: If true, wait (atomically) until function has completed
380 * on other CPUs.
3d442233 381 *
72f279b2 382 * If @wait is true, then returns once @func has returned.
3d442233
JA
383 *
384 * You must not call this function with disabled interrupts or from a
385 * hardware interrupt handler or from a bottom half handler. Preemption
386 * must be disabled when calling this function.
387 */
54b11e6d 388void smp_call_function_many(const struct cpumask *mask,
3a5f65df 389 smp_call_func_t func, void *info, bool wait)
3d442233 390{
e1d12f32 391 struct call_function_data *cfd;
9a46ad6d 392 int cpu, next_cpu, this_cpu = smp_processor_id();
3d442233 393
269c861b
SS
394 /*
395 * Can deadlock when called with interrupts disabled.
396 * We allow cpu's that are not yet online though, as no one else can
397 * send smp call function interrupt to this cpu and as such deadlocks
398 * can't happen.
399 */
400 WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
bd924e8c 401 && !oops_in_progress && !early_boot_irqs_disabled);
3d442233 402
723aae25 403 /* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
54b11e6d 404 cpu = cpumask_first_and(mask, cpu_online_mask);
0b13fda1 405 if (cpu == this_cpu)
54b11e6d 406 cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
0b13fda1 407
54b11e6d
RR
408 /* No online cpus? We're done. */
409 if (cpu >= nr_cpu_ids)
410 return;
411
412 /* Do we have another CPU which isn't us? */
413 next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
0b13fda1 414 if (next_cpu == this_cpu)
54b11e6d
RR
415 next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
416
417 /* Fastpath: do that cpu by itself. */
418 if (next_cpu >= nr_cpu_ids) {
419 smp_call_function_single(cpu, func, info, wait);
420 return;
3d442233
JA
421 }
422
bb964a92 423 cfd = this_cpu_ptr(&cfd_data);
45a57919 424
e1d12f32
AM
425 cpumask_and(cfd->cpumask, mask, cpu_online_mask);
426 cpumask_clear_cpu(this_cpu, cfd->cpumask);
723aae25
MM
427
428 /* Some callers race with other cpus changing the passed mask */
e1d12f32 429 if (unlikely(!cpumask_weight(cfd->cpumask)))
723aae25 430 return;
3d442233 431
e1d12f32
AM
432 for_each_cpu(cpu, cfd->cpumask) {
433 struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
9a46ad6d
SL
434
435 csd_lock(csd);
436 csd->func = func;
437 csd->info = info;
6897fc22 438 llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
9a46ad6d 439 }
561920a0 440
3d442233 441 /* Send a message to all CPUs in the map */
73f94550 442 arch_send_call_function_ipi_mask(cfd->cpumask);
3d442233 443
9a46ad6d 444 if (wait) {
e1d12f32
AM
445 for_each_cpu(cpu, cfd->cpumask) {
446 struct call_single_data *csd;
447
448 csd = per_cpu_ptr(cfd->csd, cpu);
9a46ad6d
SL
449 csd_lock_wait(csd);
450 }
451 }
3d442233 452}
54b11e6d 453EXPORT_SYMBOL(smp_call_function_many);
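
/*
 * Illustrative sketch (not part of this file): broadcast to a mask and
 * then run locally when the mask includes this CPU, which
 * smp_call_function_many() itself skips. my_flush and flags are
 * hypothetical.
 *
 *	preempt_disable();
 *	smp_call_function_many(mask, my_flush, NULL, 1);
 *	if (cpumask_test_cpu(smp_processor_id(), mask)) {
 *		local_irq_save(flags);
 *		my_flush(NULL);
 *		local_irq_restore(flags);
 *	}
 *	preempt_enable();
 *
 * on_each_cpu_mask() below wraps exactly this pattern.
 */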

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);


/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is the hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);
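
/*
 * Illustrative boot-parameter examples (not part of this file) for the
 * handlers above:
 *
 *	nosmp		- bring up only the boot CPU
 *	maxcpus=2	- activate at most 2 CPUs at boot
 *	nr_cpus=4	- hard-limit nr_cpu_ids to 4
 */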

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

void __weak smp_announce(void)
{
	printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	unsigned int cpu;

	idle_threads_init();

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	/* Any cleanup work */
	smp_announce();
	smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors. May be used during early boot while
 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
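
/*
 * Illustrative sketch (not part of this file): flush a hypothetical
 * per-cpu cache on every CPU, including this one, and wait for all of
 * them to finish:
 *
 *	static void flush_local_cache(void *unused)
 *	{
 *		...
 *	}
 *
 *	on_each_cpu(flush_local_cache, NULL, 1);
 */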

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		      void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
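
/*
 * Illustrative sketch (not part of this file): restrict the callback to
 * one NUMA node's CPUs; the mask may include the local CPU, which
 * on_each_cpu_mask() handles itself:
 *
 *	on_each_cpu_mask(cpumask_of_node(nid), my_func, info, 1);
 */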

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate a non-atomic
 * allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
			smp_call_func_t func, void *info, bool wait,
			gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
							       info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);
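
/*
 * Illustrative sketch (not part of this file): IPI only the CPUs whose
 * (hypothetical) per-cpu queue is non-empty, allocating the internal
 * cpumask with GFP_KERNEL so the allocation may sleep.
 *
 *	static bool queue_nonempty(int cpu, void *info)
 *	{
 *		return !llist_empty(&per_cpu(my_queue, cpu));
 *	}
 *
 *	on_each_cpu_cond(queue_nonempty, drain_queue, NULL, 1, GFP_KERNEL);
 */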

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
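
/*
 * Illustrative sketch (not part of this file), following the comment
 * above: publish a new idle-path function pointer, then make sure no
 * CPU still runs the old one. new_idle_fn is hypothetical.
 *
 *	pm_idle = new_idle_fn;
 *	kick_all_cpus_sync();
 *	... from here on, no CPU references the old pointer
 */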

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Breaks all cpus that are in idle state, even cpus that are
 * idle-polling; non-idle cpus are left alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);