// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common SMP CPU bringup/teardown functions
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/smpboot.h>

#include "smpboot.h"

#ifdef CONFIG_SMP

#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
/*
 * For the hotplug case we keep the task structs around and reuse
 * them.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);

struct task_struct *idle_thread_get(unsigned int cpu)
{
        struct task_struct *tsk = per_cpu(idle_threads, cpu);

        if (!tsk)
                return ERR_PTR(-ENOMEM);
        return tsk;
}

void __init idle_thread_set_boot_cpu(void)
{
        per_cpu(idle_threads, smp_processor_id()) = current;
}

/**
 * idle_init - Initialize the idle thread for a cpu
 * @cpu: The cpu for which the idle thread should be initialized
 *
 * Creates the thread if it does not exist.
 */
static __always_inline void idle_init(unsigned int cpu)
{
        struct task_struct *tsk = per_cpu(idle_threads, cpu);

        if (!tsk) {
                tsk = fork_idle(cpu);
                if (IS_ERR(tsk))
                        pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
                else
                        per_cpu(idle_threads, cpu) = tsk;
        }
}

/**
 * idle_threads_init - Initialize idle threads for all cpus
 */
void __init idle_threads_init(void)
{
        unsigned int cpu, boot_cpu;

        boot_cpu = smp_processor_id();

        for_each_possible_cpu(cpu) {
                if (cpu != boot_cpu)
                        idle_init(cpu);
        }
}
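
/*
 * Usage sketch (callers live elsewhere; shown here for orientation
 * only): generic boot code records the boot CPU's idle task with
 * idle_thread_set_boot_cpu() and forks the remaining idle tasks via
 * idle_threads_init(), while the CPU hotplug core hands the cached
 * task to the arch-specific bringup hook via idle_thread_get(),
 * roughly as follows:
 *
 *	struct task_struct *idle = idle_thread_get(cpu);
 *
 *	if (IS_ERR(idle))
 *		return PTR_ERR(idle);
 *	ret = __cpu_up(cpu, idle);	// arch-specific bringup
 */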
#endif

#endif /* #ifdef CONFIG_SMP */

static LIST_HEAD(hotplug_threads);
static DEFINE_MUTEX(smpboot_threads_lock);

struct smpboot_thread_data {
        unsigned int cpu;
        unsigned int status;
        struct smp_hotplug_thread *ht;
};

enum {
        HP_THREAD_NONE = 0,
        HP_THREAD_ACTIVE,
        HP_THREAD_PARKED,
};
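
/*
 * Lifecycle of smpboot_thread_data::status, as driven by
 * smpboot_thread_fn() below: a thread starts out as HP_THREAD_NONE,
 * becomes HP_THREAD_ACTIVE once its setup() callback has run, and
 * thereafter toggles between HP_THREAD_PARKED and HP_THREAD_ACTIVE as
 * its CPU goes offline and comes back online.
 */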

/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data: thread data pointer
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Returns 0 when the thread should exit.
 */
static int smpboot_thread_fn(void *data)
{
        struct smpboot_thread_data *td = data;
        struct smp_hotplug_thread *ht = td->ht;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                preempt_disable();
                if (kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        preempt_enable();
                        /* cleanup must mirror setup */
                        if (ht->cleanup && td->status != HP_THREAD_NONE)
                                ht->cleanup(td->cpu, cpu_online(td->cpu));
                        kfree(td);
                        return 0;
                }

                if (kthread_should_park()) {
                        __set_current_state(TASK_RUNNING);
                        preempt_enable();
                        if (ht->park && td->status == HP_THREAD_ACTIVE) {
                                BUG_ON(td->cpu != smp_processor_id());
                                ht->park(td->cpu);
                                td->status = HP_THREAD_PARKED;
                        }
                        kthread_parkme();
                        /* We might have been woken for stop */
                        continue;
                }

                BUG_ON(td->cpu != smp_processor_id());

                /* Check for state change setup */
                switch (td->status) {
                case HP_THREAD_NONE:
                        __set_current_state(TASK_RUNNING);
                        preempt_enable();
                        if (ht->setup)
                                ht->setup(td->cpu);
                        td->status = HP_THREAD_ACTIVE;
                        continue;

                case HP_THREAD_PARKED:
                        __set_current_state(TASK_RUNNING);
                        preempt_enable();
                        if (ht->unpark)
                                ht->unpark(td->cpu);
                        td->status = HP_THREAD_ACTIVE;
                        continue;
                }

                if (!ht->thread_should_run(td->cpu)) {
                        preempt_enable_no_resched();
                        schedule();
                } else {
                        __set_current_state(TASK_RUNNING);
                        preempt_enable();
                        ht->thread_fn(td->cpu);
                }
        }
}

static int
__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
        struct smpboot_thread_data *td;

        if (tsk)
                return 0;

        td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
        if (!td)
                return -ENOMEM;
        td->cpu = cpu;
        td->ht = ht;

        tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
                                    ht->thread_comm);
        if (IS_ERR(tsk)) {
                kfree(td);
                return PTR_ERR(tsk);
        }
        kthread_set_per_cpu(tsk, cpu);
        /*
         * Park the thread so that it can start right on the CPU
         * when it becomes available.
         */
        kthread_park(tsk);
        get_task_struct(tsk);
        *per_cpu_ptr(ht->store, cpu) = tsk;
        if (ht->create) {
                /*
                 * Make sure that the task has actually scheduled out
                 * into park position, before calling the create
                 * callback. At least the migration thread callback
                 * requires that the task is off the runqueue.
                 */
                if (!wait_task_inactive(tsk, TASK_PARKED))
                        WARN_ON(1);
                else
                        ht->create(cpu);
        }
        return 0;
}

int smpboot_create_threads(unsigned int cpu)
{
        struct smp_hotplug_thread *cur;
        int ret = 0;

        mutex_lock(&smpboot_threads_lock);
        list_for_each_entry(cur, &hotplug_threads, list) {
                ret = __smpboot_create_thread(cur, cpu);
                if (ret)
                        break;
        }
        mutex_unlock(&smpboot_threads_lock);
        return ret;
}

static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

        if (!ht->selfparking)
                kthread_unpark(tsk);
}

int smpboot_unpark_threads(unsigned int cpu)
{
        struct smp_hotplug_thread *cur;

        mutex_lock(&smpboot_threads_lock);
        list_for_each_entry(cur, &hotplug_threads, list)
                smpboot_unpark_thread(cur, cpu);
        mutex_unlock(&smpboot_threads_lock);
        return 0;
}

static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

        if (tsk && !ht->selfparking)
                kthread_park(tsk);
}

int smpboot_park_threads(unsigned int cpu)
{
        struct smp_hotplug_thread *cur;

        mutex_lock(&smpboot_threads_lock);
        list_for_each_entry_reverse(cur, &hotplug_threads, list)
                smpboot_park_thread(cur, cpu);
        mutex_unlock(&smpboot_threads_lock);
        return 0;
}
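
/*
 * Note that unparking above walks hotplug_threads in registration
 * order while parking walks it in reverse, so threads are taken down
 * in the opposite order to the one they were brought up in.
 */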

static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
{
        unsigned int cpu;

        /* We need to destroy the parked threads of offline cpus as well */
        for_each_possible_cpu(cpu) {
                struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

                if (tsk) {
                        kthread_stop(tsk);
                        put_task_struct(tsk);
                        *per_cpu_ptr(ht->store, cpu) = NULL;
                }
        }
}

/**
 * smpboot_register_percpu_thread - Register a per_cpu thread related to hotplug
 * @plug_thread: Hotplug thread descriptor
 *
 * Creates and starts the threads on all online cpus.
 */
int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
        unsigned int cpu;
        int ret = 0;

        cpus_read_lock();
        mutex_lock(&smpboot_threads_lock);
        for_each_online_cpu(cpu) {
                ret = __smpboot_create_thread(plug_thread, cpu);
                if (ret) {
                        smpboot_destroy_threads(plug_thread);
                        goto out;
                }
                smpboot_unpark_thread(plug_thread, cpu);
        }
        list_add(&plug_thread->list, &hotplug_threads);
out:
        mutex_unlock(&smpboot_threads_lock);
        cpus_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
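
/*
 * Usage sketch (illustrative only, not part of this file): a minimal
 * client of this interface, modeled on existing users such as
 * ksoftirqd. All demo_* names are hypothetical; @store must point to a
 * per-cpu task_struct pointer owned by the client, and the descriptor
 * must stay alive for as long as it is registered.
 *
 *	static DEFINE_PER_CPU(struct task_struct *, demo_task);
 *
 *	static int demo_should_run(unsigned int cpu)
 *	{
 *		return ...;	// nonzero when this CPU has work pending
 *	}
 *
 *	static void demo_fn(unsigned int cpu)
 *	{
 *		// runs in thread context with preemption enabled
 *	}
 *
 *	static struct smp_hotplug_thread demo_threads = {
 *		.store			= &demo_task,
 *		.thread_should_run	= demo_should_run,
 *		.thread_fn		= demo_fn,
 *		.thread_comm		= "demo/%u",
 *	};
 *
 *	// from an init path:
 *	ret = smpboot_register_percpu_thread(&demo_threads);
 */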

/**
 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
 * @plug_thread: Hotplug thread descriptor
 *
 * Stops all threads on all possible cpus.
 */
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
        cpus_read_lock();
        mutex_lock(&smpboot_threads_lock);
        list_del(&plug_thread->list);
        smpboot_destroy_threads(plug_thread);
        mutex_unlock(&smpboot_threads_lock);
        cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);

static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);

/*
 * Called to poll the specified CPU's state, for example, when waiting
 * for a CPU to come online.
 */
int cpu_report_state(int cpu)
{
        return atomic_read(&per_cpu(cpu_hotplug_state, cpu));
}

/*
 * If the CPU has died properly, set its state to CPU_UP_PREPARE and
 * return success. Otherwise, return -EBUSY if the CPU died after
 * cpu_wait_death() timed out, or -EAGAIN if cpu_wait_death() timed out
 * and the CPU still hasn't gotten around to dying. In the latter two
 * cases, the CPU might not be set up properly, but it is up to the
 * arch-specific code to decide. Finally, -EIO indicates an
 * unanticipated problem.
 *
 * Note that it is permissible to omit this call entirely, as is
 * done in architectures that do no CPU-hotplug error checking.
 */
int cpu_check_up_prepare(int cpu)
{
        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
                atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
                return 0;
        }

        switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) {

        case CPU_POST_DEAD:

                /* The CPU died properly, so just start it up again. */
                atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
                return 0;

        case CPU_DEAD_FROZEN:

                /*
                 * Timeout during CPU death, so let caller know.
                 * The outgoing CPU completed its processing, but after
                 * cpu_wait_death() timed out and reported the error. The
                 * caller is free to proceed, in which case the state
                 * will be reset properly by cpu_set_state_online().
                 * Proceeding despite this -EBUSY return makes sense
                 * for systems where the outgoing CPUs take themselves
                 * offline, with no post-death manipulation required from
                 * a surviving CPU.
                 */
                return -EBUSY;

        case CPU_BROKEN:

                /*
                 * The most likely reason we got here is that there was
                 * a timeout during CPU death, and the outgoing CPU never
                 * did complete its processing. This could happen on
                 * a virtualized system if the outgoing VCPU gets preempted
                 * for more than five seconds, and the user attempts to
                 * immediately online that same CPU. Trying again later
                 * might return -EBUSY above, hence -EAGAIN.
                 */
                return -EAGAIN;

        case CPU_UP_PREPARE:
                /*
                 * Timeout while waiting for the CPU to show up. Allow to
                 * try again later.
                 */
                return 0;

        default:

                /* Should not happen. Famous last words. */
                return -EIO;
        }
}

/*
 * Mark the specified CPU online.
 *
 * Note that it is permissible to omit this call entirely, as is
 * done in architectures that do no CPU-hotplug error checking.
 */
void cpu_set_state_online(int cpu)
{
        (void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE);
}
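
/*
 * Usage sketch (hypothetical arch code, not part of this file): an
 * architecture that opts into this error checking would order the
 * calls roughly as follows when onlining a CPU; arch_kick_cpu() is a
 * stand-in for the arch-specific wakeup mechanism.
 *
 *	int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 *	{
 *		int ret = cpu_check_up_prepare(cpu);
 *
 *		if (ret && ret != -EBUSY)
 *			return ret;	// -EAGAIN or -EIO: don't proceed
 *		return arch_kick_cpu(cpu, tidle);
 *	}
 *
 * The incoming CPU then calls cpu_set_state_online() early in its own
 * startup path, which also repairs the state after a tolerated -EBUSY.
 */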

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Wait for the specified CPU to exit the idle loop and die.
 */
bool cpu_wait_death(unsigned int cpu, int seconds)
{
        int jf_left = seconds * HZ;
        int oldstate;
        bool ret = true;
        int sleep_jf = 1;

        might_sleep();

        /* The outgoing CPU will normally get done quite quickly. */
        if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD)
                goto update_state_early;
        udelay(5);

        /* But if the outgoing CPU dawdles, wait increasingly long times. */
        while (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) != CPU_DEAD) {
                schedule_timeout_uninterruptible(sleep_jf);
                jf_left -= sleep_jf;
                if (jf_left <= 0)
                        break;
                /* Back off by 10% per iteration. */
                sleep_jf = DIV_ROUND_UP(sleep_jf * 11, 10);
        }
update_state_early:
        oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
update_state:
        if (oldstate == CPU_DEAD) {
                /* The outgoing CPU died normally, update state. */
                smp_mb(); /* atomic_read() before update. */
                atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD);
        } else {
                /* The outgoing CPU still hasn't died, set state accordingly. */
                if (!atomic_try_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
                                        &oldstate, CPU_BROKEN))
                        goto update_state;
                ret = false;
        }
        return ret;
}

/*
 * Called by the outgoing CPU to report its successful death. Return
 * false if this report follows the surviving CPU's timing out.
 *
 * A separate "CPU_DEAD_FROZEN" state is used when the surviving CPU
 * timed out. This approach allows architectures to omit calls to
 * cpu_check_up_prepare() and cpu_set_state_online() without defeating
 * the next cpu_wait_death()'s polling loop.
 */
bool cpu_report_death(void)
{
        int oldstate;
        int newstate;
        int cpu = smp_processor_id();

        oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
        do {
                if (oldstate != CPU_BROKEN)
                        newstate = CPU_DEAD;
                else
                        newstate = CPU_DEAD_FROZEN;
        } while (!atomic_try_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
                                     &oldstate, newstate));
        return newstate == CPU_DEAD;
}
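
/*
 * Usage sketch (hypothetical arch code, not part of this file): the
 * two sides of the death handshake. The surviving CPU waits:
 *
 *	void __cpu_die(unsigned int cpu)
 *	{
 *		if (!cpu_wait_death(cpu, 5))
 *			pr_err("CPU %u didn't die\n", cpu);
 *	}
 *
 * while the outgoing CPU reports in from its idle/play-dead path
 * before halting for good:
 *
 *	(void)cpu_report_death();
 *	arch_halt_self();	// stand-in for the low-level halt
 */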

#endif /* #ifdef CONFIG_HOTPLUG_CPU */