/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 and any later version.
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;

	raw_spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
	if (atomic_dec_and_test(&done->nr_todo))
		complete(&done->completion);
}

static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
				  struct cpu_stop_work *work,
				  struct wake_q_head *wakeq)
{
	list_add_tail(&work->list, &stopper->works);
	wake_q_add(wakeq, stopper->thread);
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	DEFINE_WAKE_Q(wakeq);
	unsigned long flags;
	bool enabled;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	enabled = stopper->enabled;
	if (enabled)
		__cpu_stop_queue_work(stopper, work, &wakeq);
	else if (work->done)
		cpu_stop_signal_done(work->done);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);

	wake_up_q(&wakeq);

	return enabled;
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	if (!cpu_stop_queue_work(cpu, &work))
		return -ENOENT;
	/*
	 * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
	 * cycle by doing a preemption:
	 */
	cond_resched();
	wait_for_completion(&done.completion);
	return done.ret;
}
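
/*
 * Usage sketch (illustrative, not part of the original file): run a
 * hypothetical callback on one CPU and observe where it ran.  The
 * callback executes in the stopper thread with preemption counted as
 * atomic, so it must not sleep.
 */
#if 0	/* usage sketch, not compiled */
static int where_am_i(void *arg)	/* hypothetical callback */
{
	*(unsigned int *)arg = smp_processor_id();
	return 0;
}

static int example_stop_one(unsigned int cpu)
{
	unsigned int ran_on;
	int ret = stop_one_cpu(cpu, where_am_i, &ran_on);

	/* On success ran_on == cpu, unless @cpu went down meanwhile. */
	return ret;
}
#endif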

/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	msdata->state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled.  Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus)
		is_active = cpu == cpumask_first(cpu_online_mask);
	else
		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		cpu_relax_yield();
		if (msdata->state != curstate) {
			curstate = msdata->state;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		} else if (curstate > MULTI_STOP_PREPARE) {
			/*
			 * At this stage all other CPUs we depend on must spin
			 * in the same loop.  Any reason for hard-lockup should
			 * be detected and reported on their side.
			 */
			touch_nmi_watchdog();
		}
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}
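
/*
 * Illustrative trace (added for exposition, not in the original file):
 * with num_threads == 2 the stopper threads advance in lockstep; the
 * last CPU to ack each state is the one that bumps msdata->state.  One
 * possible interleaving, with @fn targeted at CPU1:
 *
 *	CPU0				CPU1
 *	ack PREPARE			ack PREPARE -> DISABLE_IRQ
 *	local_irq_disable()		local_irq_disable()
 *	ack DISABLE_IRQ			ack DISABLE_IRQ -> RUN
 *	spin, touch_nmi_watchdog()	err = msdata->fn(msdata->data)
 *	ack RUN				ack RUN -> EXIT
 *	local_irq_restore()		local_irq_restore()
 */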

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
	DEFINE_WAKE_Q(wakeq);
	int err;

retry:
	/*
	 * The waking up of stopper threads has to happen in the same
	 * scheduling context as the queueing.  Otherwise, there is a
	 * possibility of one of the above stoppers being woken up by another
	 * CPU, and preempting us.  This will cause us to not wake up the
	 * other stopper forever.
	 */
	preempt_disable();
	raw_spin_lock_irq(&stopper1->lock);
	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

	if (!stopper1->enabled || !stopper2->enabled) {
		err = -ENOENT;
		goto unlock;
	}

	/*
	 * Ensure that if we race with __stop_cpus() the stoppers won't get
	 * queued up in reverse order leading to system deadlock.
	 *
	 * We can't miss stop_cpus_in_progress if queue_stop_cpus_work() has
	 * queued a work on cpu1 but not on cpu2: we hold both locks.
	 *
	 * It can be spuriously true but it is safe to spin until it is
	 * cleared; queue_stop_cpus_work() does everything under
	 * preempt_disable().
	 */
	if (unlikely(stop_cpus_in_progress)) {
		err = -EDEADLK;
		goto unlock;
	}

	err = 0;
	__cpu_stop_queue_work(stopper1, work1, &wakeq);
	__cpu_stop_queue_work(stopper2, work2, &wakeq);

unlock:
	raw_spin_unlock(&stopper2->lock);
	raw_spin_unlock_irq(&stopper1->lock);

	if (unlikely(err == -EDEADLK)) {
		preempt_enable();

		while (stop_cpus_in_progress)
			cpu_relax();

		goto retry;
	}

	wake_up_q(&wakeq);
	preempt_enable();

	return err;
}

/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both the current and specified CPU and runs @fn on one of them.
 *
 * Returns when both have completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	if (cpu1 > cpu2)
		swap(cpu1, cpu2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
		return -ENOENT;

	wait_for_completion(&done.completion);
	return done.ret;
}
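
/*
 * Usage sketch (illustrative): the scheduler uses stop_two_cpus() to
 * atomically swap two tasks between CPUs.  A minimal hypothetical
 * caller, with all names below invented for the example:
 */
#if 0	/* usage sketch, not compiled */
struct swap_args { int *a, *b; };

static int swap_ints(void *arg)
{
	struct swap_args *s = arg;

	/* Both CPUs spin with irqs disabled while this runs on cpu1. */
	swap(*s->a, *s->b);
	return 0;
}

static int example_swap(unsigned int cpu1, unsigned int cpu2, int *a, int *b)
{
	struct swap_args s = { .a = a, .b = b };

	return stop_two_cpus(cpu1, cpu2, swap_ints, &s);
}
#endif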

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	return cpu_stop_queue_work(cpu, work_buf);
}
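
/*
 * Usage sketch (illustrative): since the call doesn't wait, @work_buf
 * must stay valid until the stopper runs, e.g. a static per-cpu buffer
 * as the scheduler keeps for active balancing.  Names are hypothetical.
 */
#if 0	/* usage sketch, not compiled */
static int poke_fn(void *arg)		/* hypothetical no-op callback */
{
	return 0;
}

static DEFINE_PER_CPU(struct cpu_stop_work, poke_work);

static void poke_cpu(unsigned int cpu)
{
	/* Fire and forget; usable from atomic context ("Don't care"). */
	stop_one_cpu_nowait(cpu, poke_fn, NULL, &per_cpu(poke_work, cpu));
}
#endif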

static bool queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;
	bool queued = false;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	preempt_disable();
	stop_cpus_in_progress = true;
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		if (cpu_stop_queue_work(cpu, work))
			queued = true;
	}
	stop_cpus_in_progress = false;
	preempt_enable();

	return queued;
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
		return -ENOENT;
	wait_for_completion(&done.completion);
	return done.ret;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, or any non-zero return value if any returned non-zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, or any
 * non-zero return value if any returned non-zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
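
/*
 * Usage sketch (illustrative): try the non-blocking variant first and
 * fall back to the serialized one.  sync_state() is a hypothetical
 * callback.
 */
#if 0	/* usage sketch, not compiled */
static int sync_state(void *arg)
{
	return 0;
}

static int example_stop_set(const struct cpumask *mask, void *arg)
{
	int ret = try_stop_cpus(mask, sync_state, arg);

	if (ret == -EAGAIN)
		ret = stop_cpus(mask, sync_state, arg);	/* block instead */
	return ret;
}
#endif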

static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;

repeat:
	work = NULL;
	raw_spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	raw_spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		int ret;

		/* cpu stop callbacks must not sleep, make in_atomic() == T */
		preempt_count_inc();
		ret = fn(arg);
		if (done) {
			if (ret)
				done->ret = ret;
			cpu_stop_signal_done(done);
		}
		preempt_count_dec();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %pf(%p) leaked preempt count\n", fn, arg);
		goto repeat;
	}
}

void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	/*
	 * Lockless. cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks, until then it is fine to queue
	 * the new works.
	 */
	stopper->enabled = false;
	kthread_park(stopper->thread);
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	stopper->enabled = true;
	kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.park			= cpu_stop_park,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		raw_spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_unpark(raw_smp_processor_id());
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
			    const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	lockdep_assert_cpus_held();

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot before stop_machine() has been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	cpus_read_lock();
	ret = stop_machine_cpuslocked(fn, data, cpus);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
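
/*
 * Usage sketch (illustrative): the classic stop_machine() use is to
 * mutate state that no other CPU may observe mid-update, e.g. patching
 * a shared table.  Everything below is hypothetical.
 */
#if 0	/* usage sketch, not compiled */
static int apply_update(void *arg)
{
	/*
	 * Every other online CPU spins in multi_cpu_stop() with irqs
	 * disabled while this runs, so the update appears atomic.
	 */
	write_global_table(arg);		/* hypothetical helper */
	return 0;
}

static int example_patch(void *table)
{
	/* With @cpus == NULL, @fn runs on the first online CPU. */
	return stop_machine(apply_update, table, NULL);
}
#endif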

/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non-zero return value if any
 * returned non-zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				   const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}
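
/*
 * Usage note (added for exposition): the intended pattern is a
 * rendezvous run from a CPU that is coming up and not yet active;
 * x86 MTRR state replication is the known in-tree caller, roughly:
 *
 *	stop_machine_from_inactive_cpu(mtrr_rendezvous_handler,
 *				       &data, cpu_callout_mask);
 *
 * (Names recalled from the x86 tree; shown for illustration only.)
 */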