// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 */
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>

/*
 * Structure to determine completion condition and record errors. May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;

	raw_spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
	unsigned long		caller;
	cpu_stop_fn_t		fn;
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

void print_stop_info(const char *log_lvl, struct task_struct *task)
{
	/*
	 * If @task is a stopper task, it cannot migrate and task_cpu() is
	 * stable.
	 */
	struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task));

	if (task != stopper->thread)
		return;

	printk("%sStopper: %pS <- %pS\n", log_lvl, stopper->fn, (void *)stopper->caller);
}

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
	if (atomic_dec_and_test(&done->nr_todo))
		complete(&done->completion);
}

static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
				  struct cpu_stop_work *work,
				  struct wake_q_head *wakeq)
{
	list_add_tail(&work->list, &stopper->works);
	wake_q_add(wakeq, stopper->thread);
}

/* queue @work to @stopper. if offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	DEFINE_WAKE_Q(wakeq);
	unsigned long flags;
	bool enabled;

	preempt_disable();
	raw_spin_lock_irqsave(&stopper->lock, flags);
	enabled = stopper->enabled;
	if (enabled)
		__cpu_stop_queue_work(stopper, work, &wakeq);
	else if (work->done)
		cpu_stop_signal_done(work->done);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);

	wake_up_q(&wakeq);
	preempt_enable();

	return enabled;
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu. @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it. This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes. If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus. @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done, .caller = _RET_IP_ };

	cpu_stop_init_done(&done, 1);
	if (!cpu_stop_queue_work(cpu, &work))
		return -ENOENT;
	/*
	 * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
	 * cycle by doing a preemption:
	 */
	cond_resched();
	wait_for_completion(&done.completion);
	return done.ret;
}

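/*
 * Usage sketch (editor's illustration, not part of the original file):
 * running a short, non-sleeping callback on one particular CPU.  The
 * names apply_freq(), write_cpu_freq() and struct freq_request are
 * hypothetical.  The callback runs at stopper priority with the CPU
 * monopolized, so it must not sleep; the preempt-count WARN_ONCE in
 * cpu_stopper_thread() below catches callbacks that do.
 *
 *	static int apply_freq(void *arg)
 *	{
 *		struct freq_request *req = arg;
 *
 *		return write_cpu_freq(req->val);
 *	}
 *
 * A caller would then do:
 *
 *	ret = stop_one_cpu(cpu, apply_freq, &req);
 *
 * where -ENOENT means @cpu was offline and apply_freq() never ran;
 * anything else is apply_freq()'s own return value.
 */
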
/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

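/*
 * Editor's note, derived from the code below: every participating stopper
 * thread runs multi_cpu_stop() and steps through these states in order.
 * set_state() re-arms thread_ack to num_threads for each state, and the
 * last thread to ack_state() advances msdata->state, so no CPU can reach
 * MULTI_STOP_RUN before all of them have disabled interrupts in
 * MULTI_STOP_DISABLE_IRQ.  With num_threads == 2, for example:
 *
 *	PREPARE:     both threads arrive,      ack 2 -> 1 -> 0, advance
 *	DISABLE_IRQ: both disable interrupts,  ack 2 -> 1 -> 0, advance
 *	RUN:         active CPU(s) call ->fn,  ack 2 -> 1 -> 0, advance
 *	EXIT:        the loop terminates on every CPU
 */
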
struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	WRITE_ONCE(msdata->state, newstate);
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}

notrace void __weak stop_machine_yield(const struct cpumask *cpumask)
{
	cpu_relax();
}

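/*
 * Editor's note: the __weak default above only does cpu_relax(); an
 * architecture may override stop_machine_yield() when busy-waiting CPUs
 * should back off harder (s390, for instance, yields the virtual CPU to
 * the hypervisor while spinning here).
 */
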
/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	const struct cpumask *cpumask;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled. Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus) {
		cpumask = cpu_online_mask;
		is_active = cpu == cpumask_first(cpumask);
	} else {
		cpumask = msdata->active_cpus;
		is_active = cpumask_test_cpu(cpu, cpumask);
	}

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		stop_machine_yield(cpumask);
		newstate = READ_ONCE(msdata->state);
		if (newstate != curstate) {
			curstate = newstate;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		} else if (curstate > MULTI_STOP_PREPARE) {
			/*
			 * At this stage all other CPUs we depend on must spin
			 * in the same loop. Any reason for hard-lockup should
			 * be detected and reported on their side.
			 */
			touch_nmi_watchdog();
		}
		rcu_momentary_dyntick_idle();
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
	DEFINE_WAKE_Q(wakeq);
	int err;

retry:
	/*
	 * The waking up of stopper threads has to happen in the same
	 * scheduling context as the queueing. Otherwise, there is a
	 * possibility of one of the above stoppers being woken up by another
	 * CPU, and preempting us. This will cause us to not wake up the other
	 * stopper forever.
	 */
	preempt_disable();
	raw_spin_lock_irq(&stopper1->lock);
	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

	if (!stopper1->enabled || !stopper2->enabled) {
		err = -ENOENT;
		goto unlock;
	}

	/*
	 * Ensure that if we race with __stop_cpus() the stoppers won't get
	 * queued up in reverse order, leading to system deadlock.
	 *
	 * We can't miss stop_cpus_in_progress if queue_stop_cpus_work() has
	 * queued a work on cpu1 but not on cpu2: we hold both locks.
	 *
	 * It can be a false positive, but it is safe to spin until it is
	 * cleared: queue_stop_cpus_work() does everything under
	 * preempt_disable().
	 */
	if (unlikely(stop_cpus_in_progress)) {
		err = -EDEADLK;
		goto unlock;
	}

	err = 0;
	__cpu_stop_queue_work(stopper1, work1, &wakeq);
	__cpu_stop_queue_work(stopper2, work2, &wakeq);

unlock:
	raw_spin_unlock(&stopper2->lock);
	raw_spin_unlock_irq(&stopper1->lock);

	if (unlikely(err == -EDEADLK)) {
		preempt_enable();

		while (stop_cpus_in_progress)
			cpu_relax();

		goto retry;
	}

	wake_up_q(&wakeq);
	preempt_enable();

	return err;
}
/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both the current and specified CPU and runs @fn on one of them.
 *
 * returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done,
		.caller = _RET_IP_,
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	if (cpu1 > cpu2)
		swap(cpu1, cpu2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
		return -ENOENT;

	wait_for_completion(&done.completion);
	return done.ret;
}

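/*
 * Usage sketch (editor's illustration): the scheduler's migrate_swap() is
 * a real caller, using stop_two_cpus() to atomically swap two tasks
 * between their CPUs.  A simplified shape of such a call, with
 * swap_tasks(), do_the_swap() and struct swap_arg as hypothetical names:
 *
 *	static int swap_tasks(void *arg)
 *	{
 *		struct swap_arg *sa = arg;
 *
 *		return do_the_swap(sa);
 *	}
 *
 *	ret = stop_two_cpus(src_cpu, dst_cpu, swap_tasks, &sa);
 *
 * Both CPUs spin in multi_cpu_stop() with interrupts disabled; because
 * active_cpus is cpumask_of(cpu1), swap_tasks() runs on @cpu1 only while
 * @cpu2 is merely held quiescent.
 */
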
/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion. The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, .caller = _RET_IP_, };
	return cpu_stop_queue_work(cpu, work_buf);
}

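/*
 * Usage sketch (editor's illustration): this fire-and-forget variant suits
 * callers that cannot sleep, and @work_buf must stay alive until the
 * stopper thread has picked the work up, so it is typically embedded in a
 * longer-lived object.  my_stop_fn() and struct my_ctx are hypothetical;
 * a real equivalent is the scheduler queueing active_load_balance_cpu_stop()
 * with &rq->active_balance_work.
 *
 *	struct my_ctx {
 *		struct cpu_stop_work work;
 *	};
 *
 *	bool queued = stop_one_cpu_nowait(cpu, my_stop_fn, ctx, &ctx->work);
 *
 * A false return means the target CPU's stopper was disabled (the CPU is
 * offline), and my_stop_fn() will never be called.
 */
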
static bool queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;
	bool queued = false;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	preempt_disable();
	stop_cpus_in_progress = true;
	barrier();
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		work->caller = _RET_IP_;
		if (cpu_stop_queue_work(cpu, work))
			queued = true;
	}
	barrier();
	stop_cpus_in_progress = false;
	preempt_enable();

	return queued;
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
		return -ENOENT;
	wait_for_completion(&done.completion);
	return done.ret;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask. On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it. This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes. If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus. @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized, making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, or any non-zero return value if any returned non-zero.
 */
static int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;

repeat:
	work = NULL;
	raw_spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	raw_spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		int ret;

		/* cpu stop callbacks must not sleep, make in_atomic() == T */
		stopper->caller = work->caller;
		stopper->fn = fn;
		preempt_count_inc();
		ret = fn(arg);
		if (done) {
			if (ret)
				done->ret = ret;
			cpu_stop_signal_done(done);
		}
		preempt_count_dec();
		stopper->fn = NULL;
		stopper->caller = 0;
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
		goto repeat;
	}
}

void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	/*
	 * Lockless. cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks, until then it is fine to queue
	 * the new works.
	 */
	stopper->enabled = false;
	kthread_park(stopper->thread);
}

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	stopper->enabled = true;
	kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.park			= cpu_stop_park,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		raw_spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_unpark(raw_smp_processor_id());
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
			    const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	lockdep_assert_cpus_held();

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot, before the stopper infrastructure
		 * has been initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	cpus_read_lock();
	ret = stop_machine_cpuslocked(fn, data, cpus);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);

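/*
 * Usage sketch (editor's illustration): stop_machine() is the heavy hammer
 * for rare machine-wide transitions, such as live code patching, where no
 * other CPU may run while the change is made.  do_patch() and
 * patch_text_words() are hypothetical names:
 *
 *	static int do_patch(void *arg)
 *	{
 *		struct patch *p = arg;
 *
 *		return patch_text_words(p);
 *	}
 *
 *	ret = stop_machine(do_patch, &p, NULL);
 *
 * With @cpus == NULL, do_patch() runs on the first online CPU while every
 * other online CPU spins in multi_cpu_stop() with interrupts disabled.
 */
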
#ifdef CONFIG_SCHED_SMT
int stop_core_cpuslocked(unsigned int cpu, cpu_stop_fn_t fn, void *data)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);

	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = cpumask_weight(smt_mask),
		.active_cpus = smt_mask,
	};

	lockdep_assert_cpus_held();

	/* Set the initial state and stop all CPUs in the core. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(smt_mask, multi_cpu_stop, &msdata);
}
EXPORT_SYMBOL_GPL(stop_core_cpuslocked);
#endif

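/*
 * Editor's note: this core-scoped variant quiesces only the SMT siblings
 * of @cpu; its users (the Intel IFS driver, for example) need a whole core
 * idle while a test runs on one sibling.  The _cpuslocked suffix and the
 * lockdep_assert_cpus_held() above reflect that the caller must already
 * hold the CPU hotplug read lock.
 */
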
/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active. The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly on the
 * local CPU.
 *
 * CONTEXT:
 * Local CPU is inactive. Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, or any non-zero return value if
 * any returned non-zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				   const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}