// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/core.c
 *
 *  Core kernel CPU scheduler code
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *  Copyright (C) 1998-2024  Ingo Molnar, Red Hat
 */
#include <linux/highmem.h>
#include <linux/hrtimer_api.h>
#include <linux/ktime_api.h>
#include <linux/sched/signal.h>
#include <linux/syscalls_api.h>
#include <linux/debug_locks.h>
#include <linux/prefetch.h>
#include <linux/capability.h>
#include <linux/pgtable_api.h>
#include <linux/wait_bit.h>
#include <linux/jiffies.h>
#include <linux/spinlock_api.h>
#include <linux/cpumask_api.h>
#include <linux/lockdep_api.h>
#include <linux/hardirq.h>
#include <linux/softirq.h>
#include <linux/refcount_api.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
#include <linux/sched/cond_resched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/rseq_api.h>
#include <linux/sched/rt.h>

#include <linux/blkdev.h>
#include <linux/context_tracking.h>
#include <linux/cpuset.h>
#include <linux/delayacct.h>
#include <linux/init_task.h>
#include <linux/interrupt.h>
#include <linux/ioprio.h>
#include <linux/kallsyms.h>
#include <linux/kcov.h>
#include <linux/kprobes.h>
#include <linux/llist_api.h>
#include <linux/mmu_context.h>
#include <linux/mmzone.h>
#include <linux/mutex_api.h>
#include <linux/nmi.h>
#include <linux/nospec.h>
#include <linux/perf_event_api.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/rcuwait_api.h>
#include <linux/rseq.h>
#include <linux/sched/wake_q.h>
#include <linux/scs.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/vtime.h>
#include <linux/wait_api.h>
#include <linux/workqueue_api.h>
#include <linux/livepatch_sched.h>

#ifdef CONFIG_PREEMPT_DYNAMIC
# ifdef CONFIG_GENERIC_ENTRY
#  include <linux/entry-common.h>
# endif
#endif

#include <uapi/linux/sched/types.h>

#include <asm/irq_regs.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>

#define CREATE_TRACE_POINTS
#include <linux/sched/rseq_api.h>
#include <trace/events/sched.h>
#include <trace/events/ipi.h>
#undef CREATE_TRACE_POINTS

#include "sched.h"
#include "stats.h"

#include "autogroup.h"
#include "pelt.h"
#include "smp.h"

#include "../workqueue_internal.h"
#include "../../io_uring/io-wq.h"
#include "../smpboot.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);

/*
 * Export tracepoints that act as a bare tracehook (ie: have no trace event
 * associated with them) to allow external modules to probe them.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_hw_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

/*
 * Debugging: various feature bits
 *
 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
 * sysctl_sched_features, defined in sched.h, to allow constants propagation
 * at compile time and compiler optimization based on features default.
 */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
__read_mostly unsigned int sysctl_sched_features =
#include "features.h"
	0;
#undef SCHED_FEAT
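/*
 * Illustrative sketch (not part of the original file): with a features.h
 * containing, say, "SCHED_FEAT(PLACE_LAG, true)" and
 * "SCHED_FEAT(RUN_TO_PARITY, true)", the block above expands roughly to:
 *
 *	__read_mostly unsigned int sysctl_sched_features =
 *		(1UL << __SCHED_FEAT_PLACE_LAG) * true |
 *		(1UL << __SCHED_FEAT_RUN_TO_PARITY) * true |
 *		0;
 *
 * i.e. each enabled feature contributes its bit to the default mask.
 */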

/*
 * Print a warning if need_resched is set for the given duration (if
 * LATENCY_WARN is enabled).
 *
 * If sysctl_resched_latency_warn_once is set, only one warning will be shown
 * per boot.
 */
__read_mostly int sysctl_resched_latency_warn_ms = 100;
__read_mostly int sysctl_resched_latency_warn_once = 1;

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
__read_mostly unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;

__read_mostly int scheduler_running;

#ifdef CONFIG_SCHED_CORE

DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);

/* kernel prio, less is more */
static inline int __task_prio(const struct task_struct *p)
{
	if (p->sched_class == &stop_sched_class) /* trumps deadline */
		return -2;

	if (p->dl_server)
		return -1; /* deadline */

	if (rt_or_dl_prio(p->prio))
		return p->prio; /* [-1, 99] */

	if (p->sched_class == &idle_sched_class)
		return MAX_RT_PRIO + NICE_WIDTH; /* 140 */

	if (task_on_scx(p))
		return MAX_RT_PRIO + MAX_NICE + 1; /* 120, squash ext */

	return MAX_RT_PRIO + MAX_NICE; /* 119, squash fair */
}
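/*
 * Worked example (illustrative, not part of the original file): with the
 * mapping above, a stop-class task yields -2, a task served by a DL server
 * yields -1, an RT task with p->prio == 10 yields 10, every fair task is
 * squashed to 119, every sched_ext task to 120, and the idle task maps
 * to 140.
 */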

/*
 * l(a,b)
 * le(a,b) := !l(b,a)
 * g(a,b)  := l(b,a)
 * ge(a,b) := !l(a,b)
 */

/* real prio, less is less */
static inline bool prio_less(const struct task_struct *a,
			     const struct task_struct *b, bool in_fi)
{

	int pa = __task_prio(a), pb = __task_prio(b);

	if (-pa < -pb)
		return true;

	if (-pb < -pa)
		return false;

	if (pa == -1) { /* dl_prio() doesn't work because of stop_class above */
		const struct sched_dl_entity *a_dl, *b_dl;

		a_dl = &a->dl;
		/*
		 * Since 'a' and 'b' can be CFS tasks served by DL server,
		 * __task_prio() can return -1 (for DL) even for those. In that
		 * case, get to the dl_server's DL entity.
		 */
		if (a->dl_server)
			a_dl = a->dl_server;

		b_dl = &b->dl;
		if (b->dl_server)
			b_dl = b->dl_server;

		return !dl_time_before(a_dl->deadline, b_dl->deadline);
	}

	if (pa == MAX_RT_PRIO + MAX_NICE)	/* fair */
		return cfs_prio_less(a, b, in_fi);

#ifdef CONFIG_SCHED_CLASS_EXT
	if (pa == MAX_RT_PRIO + MAX_NICE + 1)	/* ext */
		return scx_prio_less(a, b, in_fi);
#endif

	return false;
}

static inline bool __sched_core_less(const struct task_struct *a,
				     const struct task_struct *b)
{
	if (a->core_cookie < b->core_cookie)
		return true;

	if (a->core_cookie > b->core_cookie)
		return false;

	/* flip prio, so high prio is leftmost */
	if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
		return true;

	return false;
}

#define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)

static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
{
	return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
}

static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
{
	const struct task_struct *p = __node_2_sc(node);
	unsigned long cookie = (unsigned long)key;

	if (cookie < p->core_cookie)
		return -1;

	if (cookie > p->core_cookie)
		return 1;

	return 0;
}

void sched_core_enqueue(struct rq *rq, struct task_struct *p)
{
	if (p->se.sched_delayed)
		return;

	rq->core->core_task_seq++;

	if (!p->core_cookie)
		return;

	rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
}

void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->se.sched_delayed)
		return;

	rq->core->core_task_seq++;

	if (sched_core_enqueued(p)) {
		rb_erase(&p->core_node, &rq->core_tree);
		RB_CLEAR_NODE(&p->core_node);
	}

	/*
	 * Migrating the last task off the cpu, with the cpu in forced idle
	 * state. Reschedule to create an accounting edge for forced idle,
	 * and re-examine whether the core is still in forced idle state.
	 */
	if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
	    rq->core->core_forceidle_count && rq->curr == rq->idle)
		resched_curr(rq);
}

static int sched_task_is_throttled(struct task_struct *p, int cpu)
{
	if (p->sched_class->task_is_throttled)
		return p->sched_class->task_is_throttled(p, cpu);

	return 0;
}

static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
{
	struct rb_node *node = &p->core_node;
	int cpu = task_cpu(p);

	do {
		node = rb_next(node);
		if (!node)
			return NULL;

		p = __node_2_sc(node);
		if (p->core_cookie != cookie)
			return NULL;

	} while (sched_task_is_throttled(p, cpu));

	return p;
}

/*
 * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
 * If no suitable task is found, NULL will be returned.
 */
static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
{
	struct task_struct *p;
	struct rb_node *node;

	node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
	if (!node)
		return NULL;

	p = __node_2_sc(node);
	if (!sched_task_is_throttled(p, rq->cpu))
		return p;

	return sched_core_next(p, cookie);
}

/*
 * Magic required such that:
 *
 *	raw_spin_rq_lock(rq);
 *	...
 *	raw_spin_rq_unlock(rq);
 *
 * ends up locking and unlocking the _same_ lock, and all CPUs
 * always agree on what rq has what lock.
 *
 * XXX entirely possible to selectively enable cores, don't bother for now.
 */

static DEFINE_MUTEX(sched_core_mutex);
static atomic_t sched_core_count;
static struct cpumask sched_core_mask;

static void sched_core_lock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t, i = 0;

	local_irq_save(*flags);
	for_each_cpu(t, smt_mask)
		raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
}

static void sched_core_unlock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t;

	for_each_cpu(t, smt_mask)
		raw_spin_unlock(&cpu_rq(t)->__lock);
	local_irq_restore(*flags);
}

static void __sched_core_flip(bool enabled)
{
	unsigned long flags;
	int cpu, t;

	cpus_read_lock();

	/*
	 * Toggle the online cores, one by one.
	 */
	cpumask_copy(&sched_core_mask, cpu_online_mask);
	for_each_cpu(cpu, &sched_core_mask) {
		const struct cpumask *smt_mask = cpu_smt_mask(cpu);

		sched_core_lock(cpu, &flags);

		for_each_cpu(t, smt_mask)
			cpu_rq(t)->core_enabled = enabled;

		cpu_rq(cpu)->core->core_forceidle_start = 0;

		sched_core_unlock(cpu, &flags);

		cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
	}

	/*
	 * Toggle the offline CPUs.
	 */
	for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
		cpu_rq(cpu)->core_enabled = enabled;

	cpus_read_unlock();
}

static void sched_core_assert_empty(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
}

static void __sched_core_enable(void)
{
	static_branch_enable(&__sched_core_enabled);
	/*
	 * Ensure all previous instances of raw_spin_rq_*lock() have finished
	 * and future ones will observe !sched_core_disabled().
	 */
	synchronize_rcu();
	__sched_core_flip(true);
	sched_core_assert_empty();
}

static void __sched_core_disable(void)
{
	sched_core_assert_empty();
	__sched_core_flip(false);
	static_branch_disable(&__sched_core_enabled);
}

void sched_core_get(void)
{
	if (atomic_inc_not_zero(&sched_core_count))
		return;

	mutex_lock(&sched_core_mutex);
	if (!atomic_read(&sched_core_count))
		__sched_core_enable();

	smp_mb__before_atomic();
	atomic_inc(&sched_core_count);
	mutex_unlock(&sched_core_mutex);
}

static void __sched_core_put(struct work_struct *work)
{
	if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
		__sched_core_disable();
		mutex_unlock(&sched_core_mutex);
	}
}

void sched_core_put(void)
{
	static DECLARE_WORK(_work, __sched_core_put);

	/*
	 * "There can be only one"
	 *
	 * Either this is the last one, or we don't actually need to do any
	 * 'work'. If it is the last *again*, we rely on
	 * WORK_STRUCT_PENDING_BIT.
	 */
	if (!atomic_add_unless(&sched_core_count, -1, 1))
		schedule_work(&_work);
}
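/*
 * Usage sketch (illustrative, not part of the original file): users of core
 * scheduling, e.g. the prctl() cookie interface, bracket their lifetime with
 * the refcount above:
 *
 *	sched_core_get();	// first user flips every rq to core-wide locking
 *	...			// tasks carry a non-zero core_cookie
 *	sched_core_put();	// last user schedules the disable via a work item
 *
 * The put side is deferred to a workqueue so it can be called from contexts
 * that cannot take sched_core_mutex directly.
 */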

#else /* !CONFIG_SCHED_CORE */

static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
static inline void
sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }

#endif /* CONFIG_SCHED_CORE */

/* need a wrapper since we may need to trace from modules */
EXPORT_TRACEPOINT_SYMBOL(sched_set_state_tp);

/* Call via the helper macro trace_set_current_state. */
void __trace_set_current_state(int state_value)
{
	trace_sched_set_state_tp(current, state_value);
}
EXPORT_SYMBOL(__trace_set_current_state);

/*
 * Serialization rules:
 *
 * Lock order:
 *
 *   p->pi_lock
 *     rq->lock
 *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
 *
 *  rq1->lock
 *    rq2->lock  where: rq1 < rq2
 *
 * Regular state:
 *
 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
 * local CPU's rq->lock, it optionally removes the task from the runqueue and
 * always looks at the local rq data structures to find the most eligible task
 * to run next.
 *
 * Task enqueue is also under rq->lock, possibly taken from another CPU.
 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
 * the local CPU to avoid bouncing the runqueue state around [ see
 * ttwu_queue_wakelist() ]
 *
 * Task wakeup, specifically wakeups that involve migration, are horribly
 * complicated to avoid having to take two rq->locks.
 *
 * Special state:
 *
 * System-calls and anything external will use task_rq_lock() which acquires
 * both p->pi_lock and rq->lock. As a consequence the state they change is
 * stable while holding either lock:
 *
 *  - sched_setaffinity()/
 *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
 *  - set_user_nice():		p->se.load, p->*prio
 *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
 *				p->se.load, p->rt_priority,
 *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
 *  - sched_setnuma():		p->numa_preferred_nid
 *  - sched_move_task():	p->sched_task_group
 *  - uclamp_update_active()	p->uclamp*
 *
 * p->state <- TASK_*:
 *
 *   is changed locklessly using set_current_state(), __set_current_state() or
 *   set_special_state(), see their respective comments, or by
 *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
 *   concurrent self.
 *
 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
 *
 *   is set by activate_task() and cleared by deactivate_task(), under
 *   rq->lock. Non-zero indicates the task is runnable, the special
 *   ON_RQ_MIGRATING state is used for migration without holding both
 *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
 *
 *   Additionally it is possible to be ->on_rq but still be considered not
 *   runnable when p->se.sched_delayed is true. These tasks are on the runqueue
 *   but will be dequeued as soon as they get picked again. See the
 *   task_is_runnable() helper.
 *
 * p->on_cpu <- { 0, 1 }:
 *
 *   is set by prepare_task() and cleared by finish_task() such that it will be
 *   set before p is scheduled-in and cleared after p is scheduled-out, both
 *   under rq->lock. Non-zero indicates the task is running on its CPU.
 *
 *   [ The astute reader will observe that it is possible for two tasks on one
 *     CPU to have ->on_cpu = 1 at the same time. ]
 *
 * task_cpu(p): is changed by set_task_cpu(), the rules are:
 *
 *  - Don't call set_task_cpu() on a blocked task:
 *
 *    We don't care what CPU we're not running on, this simplifies hotplug,
 *    the CPU assignment of blocked tasks isn't required to be valid.
 *
 *  - for try_to_wake_up(), called under p->pi_lock:
 *
 *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
 *
 *  - for migration called under rq->lock:
 *    [ see task_on_rq_migrating() in task_rq_lock() ]
 *
 *    o move_queued_task()
 *    o detach_task()
 *
 *  - for migration called under double_rq_lock():
 *
 *    o __migrate_swap_task()
 *    o push_rt_task() / pull_rt_task()
 *    o push_dl_task() / pull_dl_task()
 *    o dl_task_offline_migration()
 *
 */

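/*
 * Illustrative sketch (assumed caller pattern, not part of the original
 * file): a slow-path caller that wants both locks typically looks like
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);
 *	// p->policy, p->prio, p->sched_class etc. are stable here
 *	task_rq_unlock(rq, p, &rf);
 *
 * which matches the "Special state" rules above.
 */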
void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
{
	raw_spinlock_t *lock;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		raw_spin_lock_nested(&rq->__lock, subclass);
		/* preempt_count *MUST* be > 1 */
		preempt_enable_no_resched();
		return;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		raw_spin_lock_nested(lock, subclass);
		if (likely(lock == __rq_lockp(rq))) {
			/* preempt_count *MUST* be > 1 */
			preempt_enable_no_resched();
			return;
		}
		raw_spin_unlock(lock);
	}
}

bool raw_spin_rq_trylock(struct rq *rq)
{
	raw_spinlock_t *lock;
	bool ret;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		ret = raw_spin_trylock(&rq->__lock);
		preempt_enable();
		return ret;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		ret = raw_spin_trylock(lock);
		if (!ret || (likely(lock == __rq_lockp(rq)))) {
			preempt_enable();
			return ret;
		}
		raw_spin_unlock(lock);
	}
}

void raw_spin_rq_unlock(struct rq *rq)
{
	raw_spin_unlock(rq_lockp(rq));
}

#ifdef CONFIG_SMP
/*
 * double_rq_lock - safely lock two runqueues
 */
void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
	lockdep_assert_irqs_disabled();

	if (rq_order_less(rq2, rq1))
		swap(rq1, rq2);

	raw_spin_rq_lock(rq1);
	if (__rq_lockp(rq1) != __rq_lockp(rq2))
		raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);

	double_rq_clock_clear_update(rq1, rq2);
}
#endif

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old CPU in task_rq_lock(), the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new CPU in task_rq_lock(), the address
		 * dependency headed by '[L] rq = task_rq()' and the acquire
		 * will pair with the WMB to ensure we then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);
		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * RQ-clock updating methods:
 */

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
	s64 __maybe_unused steal = 0, irq_delta = 0;

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	if (irqtime_enabled()) {
		irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

		/*
		 * Since irq_time is only updated on {soft,}irq_exit, we might run into
		 * this case when a previous update_rq_clock() happened inside a
		 * {soft,}IRQ region.
		 *
		 * When this happens, we stop ->clock_task and only update the
		 * prev_irq_time stamp to account for the part that fit, so that a next
		 * update will consume the rest. This ensures ->clock_task is
		 * monotonic.
		 *
		 * It does however cause some slight mis-attribution of {soft,}IRQ
		 * time, a more accurate solution would be to update the irq_time using
		 * the current rq->clock timestamp, except that would require using
		 * atomic ops.
		 */
		if (irq_delta > delta)
			irq_delta = delta;

		rq->prev_irq_time += irq_delta;
		delta -= irq_delta;
		delayacct_irq(rq->curr, irq_delta);
	}
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		u64 prev_steal;

		steal = prev_steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq = prev_steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		update_irq_load_avg(rq, irq_delta + steal);
#endif
	update_rq_clock_pelt(rq, delta);
}

void update_rq_clock(struct rq *rq)
{
	s64 delta;
	u64 clock;

	lockdep_assert_rq_held(rq);

	if (rq->clock_update_flags & RQCF_ACT_SKIP)
		return;

	if (sched_feat(WARN_DOUBLE_CLOCK))
		WARN_ON_ONCE(rq->clock_update_flags & RQCF_UPDATED);
	rq->clock_update_flags |= RQCF_UPDATED;

	clock = sched_clock_cpu(cpu_of(rq));
	scx_rq_clock_update(rq, clock);

	delta = clock - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;

	update_rq_clock_task(rq, delta);
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
	struct rq_flags rf;

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	rq_lock(rq, &rf);
	update_rq_clock(rq);
	rq->donor->sched_class->task_tick(rq, rq->curr, 1);
	rq_unlock(rq, &rf);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = rq->hrtick_time;

	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;
	struct rq_flags rf;

	rq_lock(rq, &rf);
	__hrtick_restart(rq);
	rq_unlock(rq, &rf);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and IRQs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);

	if (rq == this_rq())
		__hrtick_restart(rq);
	else
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
}

#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and IRQs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED_HARD);
}

#endif /* CONFIG_SMP */

static void hrtick_rq_init(struct rq *rq)
{
#ifdef CONFIG_SMP
	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
#endif
	hrtimer_setup(&rq->hrtick_timer, hrtick, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void hrtick_rq_init(struct rq *rq)
{
}
#endif /* CONFIG_SCHED_HRTICK */

/*
 * try_cmpxchg based fetch_or() macro so it works for different integer types:
 */
#define fetch_or(ptr, mask)						\
	({								\
		typeof(ptr) _ptr = (ptr);				\
		typeof(mask) _mask = (mask);				\
		typeof(*_ptr) _val = *_ptr;				\
									\
		do {							\
		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
	_val;								\
})
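/*
 * Illustrative example (not part of the original file): fetch_or() atomically
 * ORs @mask into *@ptr and returns the *old* value, e.g.
 *
 *	unsigned long flags = fetch_or(&ti->flags, _TIF_NEED_RESCHED);
 *	if (!(flags & _TIF_POLLING_NRFLAG))
 *		;	// the flag was not previously set; an IPI may be needed
 *
 * which is exactly how set_nr_and_not_polling() below uses it.
 */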

#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
{
	return !(fetch_or(&ti->flags, 1 << tif) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) val = READ_ONCE(ti->flags);

	do {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
	} while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));

	return true;
}

#else
static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
{
	set_ti_thread_flag(ti, tif);
	return true;
}

#ifdef CONFIG_SMP
static inline bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * it's already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * In order to ensure that a pending wakeup will observe our pending
	 * state, even in the failed case, an explicit smp_mb() must be used.
	 */
	smp_mb__before_atomic();
	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
		return false;

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
	return true;
}

/**
 * wake_q_add() - queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	if (__wake_q_add(head, task))
		get_task_struct(task);
}

/**
 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 *
 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
 * that already hold reference to @task can call the 'safe' version and trust
 * wake_q to do the right thing depending whether or not the @task is already
 * queued for wakeup.
 */
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
{
	if (!__wake_q_add(head, task))
		put_task_struct(task);
}

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		node = node->next;
		/* pairs with cmpxchg_relaxed() in __wake_q_add() */
		WRITE_ONCE(task->wake_q.next, NULL);
		/* Task can safely be re-inserted now. */

		/*
		 * wake_up_process() executes a full barrier, which pairs with
		 * the queueing in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}
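/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * caller batches wakeups while holding a lock and issues them after
 * dropping it, e.g.
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	spin_lock(&some_lock);
 *	wake_q_add(&wake_q, task);	// takes a task reference
 *	spin_unlock(&some_lock);
 *	wake_up_q(&wake_q);		// wakes and drops the references
 *
 * "some_lock" is a placeholder; the point is that wake_up_q() runs outside
 * the critical section.
 */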

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
static void __resched_curr(struct rq *rq, int tif)
{
	struct task_struct *curr = rq->curr;
	struct thread_info *cti = task_thread_info(curr);
	int cpu;

	lockdep_assert_rq_held(rq);

	/*
	 * Always immediately preempt the idle task; no point in delaying doing
	 * actual work.
	 */
	if (is_idle_task(curr) && tif == TIF_NEED_RESCHED_LAZY)
		tif = TIF_NEED_RESCHED;

	if (cti->flags & ((1 << tif) | _TIF_NEED_RESCHED))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_ti_thread_flag(cti, tif);
		if (tif == TIF_NEED_RESCHED)
			set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(cti, tif)) {
		if (tif == TIF_NEED_RESCHED)
			smp_send_reschedule(cpu);
	} else {
		trace_sched_wake_idle_without_ipi(cpu);
	}
}

void resched_curr(struct rq *rq)
{
	__resched_curr(rq, TIF_NEED_RESCHED);
}

#ifdef CONFIG_PREEMPT_DYNAMIC
static DEFINE_STATIC_KEY_FALSE(sk_dynamic_preempt_lazy);
static __always_inline bool dynamic_preempt_lazy(void)
{
	return static_branch_unlikely(&sk_dynamic_preempt_lazy);
}
#else
static __always_inline bool dynamic_preempt_lazy(void)
{
	return IS_ENABLED(CONFIG_PREEMPT_LAZY);
}
#endif

static __always_inline int get_lazy_tif_bit(void)
{
	if (dynamic_preempt_lazy())
		return TIF_NEED_RESCHED_LAZY;

	return TIF_NEED_RESCHED;
}

void resched_curr_lazy(struct rq *rq)
{
	__resched_curr(rq, get_lazy_tif_bit());
}
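/*
 * Note (illustrative, not part of the original file): the only difference
 * between the two helpers above is which TIF bit ends up set, e.g.
 *
 *	resched_curr(rq);	// always TIF_NEED_RESCHED: preempt ASAP
 *	resched_curr_lazy(rq);	// TIF_NEED_RESCHED_LAZY when lazy preemption
 *				// is enabled, so the preemption can be deferred
 *
 * How far the lazy bit defers the preemption is decided by the generic
 * need_resched()/TIF handling, not here.
 */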

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_rq_lock_irqsave(rq, flags);
	if (cpu_online(cpu) || cpu == smp_processor_id())
		resched_curr(rq);
	raw_spin_rq_unlock_irqrestore(rq, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy CPU for migrating timers
 * from an idle CPU. This is good for power-savings.
 *
 * We don't do similar optimization for completely idle system, as
 * selecting an idle CPU will add more delays to the timers than intended
 * (as that CPU's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id(), default_cpu = -1;
	struct sched_domain *sd;
	const struct cpumask *hk_mask;

	if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE)) {
		if (!idle_cpu(cpu))
			return cpu;
		default_cpu = cpu;
	}

	hk_mask = housekeeping_cpumask(HK_TYPE_KERNEL_NOISE);

	guard(rcu)();

	for_each_domain(cpu, sd) {
		for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
			if (cpu == i)
				continue;

			if (!idle_cpu(i))
				return i;
		}
	}

	if (default_cpu == -1)
		default_cpu = housekeeping_any_cpu(HK_TYPE_KERNEL_NOISE);

	return default_cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	/*
	 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling
	 * part of the idle loop. This forces an exit from the idle loop
	 * and a round trip to schedule(). Now this could be optimized
	 * because a simple new idle loop iteration is enough to
	 * re-evaluate the next tick. Provided some re-ordering of tick
	 * nohz functions that would need to follow TIF_NR_POLLING
	 * clearing:
	 *
	 * - On most architectures, a simple fetch_or on ti::flags with a
	 *   "0" value would be enough to know if an IPI needs to be sent.
	 *
	 * - x86 needs to perform a last need_resched() check between
	 *   monitor and mwait which doesn't take timers into account.
	 *   There a dedicated TIF_TIMER flag would be required to
	 *   fetch_or here and be checked along with TIF_NEED_RESCHED
	 *   before mwait().
	 *
	 * However, remote timer enqueue is not such a frequent event
	 * and testing of the above solutions didn't appear to report
	 * much benefits.
	 */
	if (set_nr_and_not_polling(task_thread_info(rq->idle), TIF_NEED_RESCHED))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (cpu_is_offline(cpu))
		return true;	/* Don't try to wake offline CPUs. */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

/*
 * Wake up the specified CPU. If the CPU is going offline, it is the
 * caller's responsibility to deal with the lost wakeup, for example,
 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
 */
void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static void nohz_csd_func(void *info)
{
	struct rq *rq = info;
	int cpu = cpu_of(rq);
	unsigned int flags;

	/*
	 * Release the rq::nohz_csd.
	 */
	flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
	WARN_ON(!(flags & NOHZ_KICK_MASK));

	rq->idle_balance = idle_cpu(cpu);
	if (rq->idle_balance) {
		rq->nohz_idle_balance = flags;
		__raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
{
	if (rq->nr_running != 1)
		return false;

	if (p->sched_class != &fair_sched_class)
		return false;

	if (!task_on_rq_queued(p))
		return false;

	return true;
}

bool sched_can_stop_tick(struct rq *rq)
{
	int fifo_nr_running;

	/* Deadline tasks, even if single, need the tick */
	if (rq->dl.dl_nr_running)
		return false;

	/*
	 * If there are more than one RR tasks, we need the tick to affect the
	 * actual RR behaviour.
	 */
	if (rq->rt.rr_nr_running) {
		if (rq->rt.rr_nr_running == 1)
			return true;
		else
			return false;
	}

	/*
	 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
	 * forced preemption between FIFO tasks.
	 */
	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
	if (fifo_nr_running)
		return true;

	/*
	 * If there are no DL, RR/FIFO tasks, there must only be CFS or SCX tasks
	 * left. For CFS, if there's more than one we need the tick for
	 * involuntary preemption. For SCX, ask.
	 */
	if (scx_enabled() && !scx_can_stop_tick(rq))
		return false;

	if (rq->cfs.h_nr_queued > 1)
		return false;

	/*
	 * If there is one task and it has CFS runtime bandwidth constraints
	 * and it's on the cpu now we don't want to stop the tick.
	 * This check prevents clearing the bit if a newly enqueued task here is
	 * dequeued by migrating while the constrained task continues to run.
	 * E.g. going from 2->1 without going through pick_next_task().
	 */
	if (__need_bw_check(rq, rq->curr)) {
		if (cfs_task_bw_constrained(rq->curr))
			return false;
	}

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */
#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
		      tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif
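/*
 * Usage sketch (illustrative, not part of the original file): a walker that
 * only cares about the "down" direction passes tg_nop() for @up, e.g.
 *
 *	rcu_read_lock();
 *	walk_tg_tree_from(&root_task_group, tg_visit_something, tg_nop, NULL);
 *	rcu_read_unlock();
 *
 * where tg_visit_something() is a hypothetical tg_visitor returning 0 to
 * keep walking or non-zero to abort the walk early.
 */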

void set_load_weight(struct task_struct *p, bool update_load)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight lw;

	if (task_has_idle_policy(p)) {
		lw.weight = scale_load(WEIGHT_IDLEPRIO);
		lw.inv_weight = WMULT_IDLEPRIO;
	} else {
		lw.weight = scale_load(sched_prio_to_weight[prio]);
		lw.inv_weight = sched_prio_to_wmult[prio];
	}

	/*
	 * SCHED_OTHER tasks have to update their load when changing their
	 * weight
	 */
	if (update_load && p->sched_class->reweight_task)
		p->sched_class->reweight_task(task_rq(p), p, &lw);
	else
		p->se.load = lw;
}
PB
1448/*
1449 * Serializes updates of utilization clamp values
1450 *
1451 * The (slow-path) user-space triggers utilization clamp value updates which
1452 * can require updates on (fast-path) scheduler's data structures used to
1453 * support enqueue/dequeue operations.
1454 * While the per-CPU rq lock protects fast-path update operations, user-space
1455 * requests are serialized using a mutex to reduce the risk of conflicting
1456 * updates or API abuses.
1457 */
23f1178a 1458static __maybe_unused DEFINE_MUTEX(uclamp_mutex);
2480c093 1459
e8f14172 1460/* Max allowed minimum utilization */
494dcdf4 1461static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
e8f14172
PB
1462
1463/* Max allowed maximum utilization */
494dcdf4 1464static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
e8f14172 1465
13685c4a
QY
1466/*
1467 * By default RT tasks run at the maximum performance point/capacity of the
1468 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
1469 * SCHED_CAPACITY_SCALE.
1470 *
1471 * This knob allows admins to change the default behavior when uclamp is being
1472 * used. In battery powered devices, particularly, running at the maximum
1473 * capacity and frequency will increase energy consumption and shorten the
1474 * battery life.
1475 *
1476 * This knob only affects RT tasks that their uclamp_se->user_defined == false.
1477 *
1478 * This knob will not override the system default sched_util_clamp_min defined
1479 * above.
1480 */
04746ed8 1481unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;
13685c4a 1482
e8f14172
PB
1483/* All clamps are required to be less or equal than these values */
1484static struct uclamp_se uclamp_default[UCLAMP_CNT];
69842cba 1485
46609ce2
QY
1486/*
1487 * This static key is used to reduce the uclamp overhead in the fast path. It
1488 * primarily disables the call to uclamp_rq_{inc, dec}() in
1489 * enqueue/dequeue_task().
1490 *
1491 * This allows users to continue to enable uclamp in their kernel config with
1492 * minimum uclamp overhead in the fast path.
1493 *
1494 * As soon as userspace modifies any of the uclamp knobs, the static key is
1495 * enabled, since we have an actual users that make use of uclamp
1496 * functionality.
1497 *
1498 * The knobs that would enable this static key are:
1499 *
1500 * * A task modifying its uclamp value with sched_setattr().
1501 * * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
1502 * * An admin modifying the cgroup cpu.uclamp.{min, max}
1503 */
1504DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
1505
e496187d 1506static inline unsigned int
0413d7f3 1507uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
e496187d
PB
1508 unsigned int clamp_value)
1509{
1510 /*
1511 * Avoid blocked utilization pushing up the frequency when we go
1512 * idle (which drops the max-clamp) by retaining the last known
1513 * max-clamp.
1514 */
1515 if (clamp_id == UCLAMP_MAX) {
1516 rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
1517 return clamp_value;
1518 }
1519
1520 return uclamp_none(UCLAMP_MIN);
1521}
1522
0413d7f3 1523static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
e496187d
PB
1524 unsigned int clamp_value)
1525{
1526 /* Reset max-clamp retention only on idle exit */
1527 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1528 return;
1529
24422603 1530 uclamp_rq_set(rq, clamp_id, clamp_value);
e496187d
PB
1531}
1532
69842cba 1533static inline
7763baac 1534unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
0413d7f3 1535 unsigned int clamp_value)
69842cba
PB
1536{
1537 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
1538 int bucket_id = UCLAMP_BUCKETS - 1;
1539
1540 /*
1541 * Since both min and max clamps are max aggregated, find the
1542 * top most bucket with tasks in.
1543 */
1544 for ( ; bucket_id >= 0; bucket_id--) {
1545 if (!bucket[bucket_id].tasks)
1546 continue;
1547 return bucket[bucket_id].value;
1548 }
1549
1550 /* No tasks -- default clamp values */
e496187d 1551 return uclamp_idle_value(rq, clamp_id, clamp_value);
69842cba
PB
1552}
1553
13685c4a
QY
1554static void __uclamp_update_util_min_rt_default(struct task_struct *p)
1555{
1556 unsigned int default_util_min;
1557 struct uclamp_se *uc_se;
1558
1559 lockdep_assert_held(&p->pi_lock);
1560
1561 uc_se = &p->uclamp_req[UCLAMP_MIN];
1562
1563 /* Only sync if user didn't override the default */
1564 if (uc_se->user_defined)
1565 return;
1566
1567 default_util_min = sysctl_sched_uclamp_util_min_rt_default;
1568 uclamp_se_set(uc_se, default_util_min, false);
1569}
1570
1571static void uclamp_update_util_min_rt_default(struct task_struct *p)
1572{
13685c4a
QY
1573 if (!rt_task(p))
1574 return;
1575
1576 /* Protect updates to p->uclamp_* */
0e34600a 1577 guard(task_rq_lock)(p);
13685c4a 1578 __uclamp_update_util_min_rt_default(p);
13685c4a
QY
1579}
1580
3eac870a 1581static inline struct uclamp_se
0413d7f3 1582uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
3eac870a 1583{
0213b708 1584 /* Copy by value as we could modify it */
3eac870a
PB
1585 struct uclamp_se uc_req = p->uclamp_req[clamp_id];
1586#ifdef CONFIG_UCLAMP_TASK_GROUP
0213b708 1587 unsigned int tg_min, tg_max, value;
3eac870a
PB
1588
1589 /*
1590 * Tasks in autogroups or root task group will be
1591 * restricted by system defaults.
1592 */
1593 if (task_group_is_autogroup(task_group(p)))
1594 return uc_req;
1595 if (task_group(p) == &root_task_group)
1596 return uc_req;
1597
0213b708
QY
1598 tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
1599 tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
1600 value = uc_req.value;
1601 value = clamp(value, tg_min, tg_max);
1602 uclamp_se_set(&uc_req, value, false);
3eac870a
PB
1603#endif
1604
1605 return uc_req;
1606}
1607
e8f14172
PB
1608/*
1609 * The effective clamp bucket index of a task depends on, by increasing
1610 * priority:
1611 * - the task specific clamp value, when explicitly requested from userspace
3eac870a
PB
1612 * - the task group effective clamp value, for tasks not either in the root
1613 * group or in an autogroup
e8f14172
PB
1614 * - the system default clamp value, defined by the sysadmin
1615 */
1616static inline struct uclamp_se
0413d7f3 1617uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
e8f14172 1618{
3eac870a 1619 struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
e8f14172
PB
1620 struct uclamp_se uc_max = uclamp_default[clamp_id];
1621
1622 /* System default restrictions always apply */
1623 if (unlikely(uc_req.value > uc_max.value))
1624 return uc_max;
1625
1626 return uc_req;
1627}
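/*
 * Worked example (illustrative, not part of the original file): a task
 * requesting UCLAMP_MIN = 512 while its cgroup allows [0, 256] and the
 * system default caps UCLAMP_MIN at 1024 ends up with an effective value
 * of 256: the cgroup range is applied first by uclamp_tg_restrict(), then
 * the system default is applied as an upper restriction here.
 */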
1628
686516b5 1629unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
9d20ad7d
PB
1630{
1631 struct uclamp_se uc_eff;
1632
1633 /* Task currently refcounted: use back-annotated (effective) value */
1634 if (p->uclamp[clamp_id].active)
686516b5 1635 return (unsigned long)p->uclamp[clamp_id].value;
9d20ad7d
PB
1636
1637 uc_eff = uclamp_eff_get(p, clamp_id);
1638
686516b5 1639 return (unsigned long)uc_eff.value;
9d20ad7d
PB
1640}
1641
69842cba
PB
1642/*
1643 * When a task is enqueued on a rq, the clamp bucket currently defined by the
1644 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
1645 * updates the rq's clamp value if required.
60daf9c1
PB
1646 *
1647 * Tasks can have a task-specific value requested from user-space, track
1648 * within each bucket the maximum value for tasks refcounted in it.
1649 * This "local max aggregation" allows to track the exact "requested" value
1650 * for each bucket when all its RUNNABLE tasks require the same clamp.
69842cba
PB
1651 */
1652static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
0413d7f3 1653 enum uclamp_id clamp_id)
69842cba
PB
1654{
1655 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1656 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1657 struct uclamp_bucket *bucket;
1658
5cb9eaa3 1659 lockdep_assert_rq_held(rq);
69842cba 1660
e8f14172
PB
1661 /* Update task effective clamp */
1662 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
1663
69842cba
PB
1664 bucket = &uc_rq->bucket[uc_se->bucket_id];
1665 bucket->tasks++;
e8f14172 1666 uc_se->active = true;
69842cba 1667
e496187d
PB
1668 uclamp_idle_reset(rq, clamp_id, uc_se->value);
1669
60daf9c1
PB
1670 /*
1671 * Local max aggregation: rq buckets always track the max
1672 * "requested" clamp value of its RUNNABLE tasks.
1673 */
1674 if (bucket->tasks == 1 || uc_se->value > bucket->value)
1675 bucket->value = uc_se->value;
1676
24422603
QY
1677 if (uc_se->value > uclamp_rq_get(rq, clamp_id))
1678 uclamp_rq_set(rq, clamp_id, uc_se->value);
69842cba
PB
1679}
1680
1681/*
1682 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
1683 * is released. If this is the last task reference counting the rq's max
1684 * active clamp value, then the rq's clamp value is updated.
1685 *
1686 * Both refcounted tasks and rq's cached clamp values are expected to be
1687 * always valid. If it's detected they are not, as defensive programming,
1688 * enforce the expected state and warn.
1689 */
1690static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
0413d7f3 1691 enum uclamp_id clamp_id)
69842cba
PB
1692{
1693 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1694 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1695 struct uclamp_bucket *bucket;
e496187d 1696 unsigned int bkt_clamp;
69842cba
PB
1697 unsigned int rq_clamp;
1698
5cb9eaa3 1699 lockdep_assert_rq_held(rq);
69842cba 1700
46609ce2
QY
1701 /*
1702 * If sched_uclamp_used was enabled after task @p was enqueued,
1703 * we could end up with unbalanced call to uclamp_rq_dec_id().
1704 *
1705 * In this case the uc_se->active flag should be false since no uclamp
1706 * accounting was performed at enqueue time and we can just return
1707 * here.
1708 *
b19a888c 1709 * Need to be careful of the following enqueue/dequeue ordering
46609ce2
QY
1710 * problem too
1711 *
1712 * enqueue(taskA)
1713 * // sched_uclamp_used gets enabled
1714 * enqueue(taskB)
1715 * dequeue(taskA)
b19a888c 1716 * // Must not decrement bucket->tasks here
46609ce2
QY
1717 * dequeue(taskB)
1718 *
1719 * where we could end up with stale data in uc_se and
1720 * bucket[uc_se->bucket_id].
1721 *
1722 * The following check here eliminates the possibility of such race.
1723 */
1724 if (unlikely(!uc_se->active))
1725 return;
1726
69842cba 1727 bucket = &uc_rq->bucket[uc_se->bucket_id];
46609ce2 1728
f7d2728c 1729 WARN_ON_ONCE(!bucket->tasks);
69842cba
PB
1730 if (likely(bucket->tasks))
1731 bucket->tasks--;
46609ce2 1732
e8f14172 1733 uc_se->active = false;
69842cba 1734
60daf9c1
PB
1735 /*
1736 * Keep "local max aggregation" simple and accept that we may (possibly)
1737 * overboost some RUNNABLE tasks in the same bucket.
1738 * The rq clamp bucket value is reset to its base value whenever
1739 * there are no more RUNNABLE tasks refcounting it.
1740 */
69842cba
PB
1741 if (likely(bucket->tasks))
1742 return;
1743
24422603 1744 rq_clamp = uclamp_rq_get(rq, clamp_id);
69842cba
PB
1745 /*
1746 * Defensive programming: this should never happen. If it happens,
402de7fc 1747 * e.g. due to future modification, warn and fix up the expected value.
69842cba 1748 */
f7d2728c 1749 WARN_ON_ONCE(bucket->value > rq_clamp);
e496187d
PB
1750 if (bucket->value >= rq_clamp) {
1751 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
24422603 1752 uclamp_rq_set(rq, clamp_id, bkt_clamp);
e496187d 1753 }
69842cba
PB
1754}
1755
90ca9410 1756static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p, int flags)
69842cba 1757{
0413d7f3 1758 enum uclamp_id clamp_id;
69842cba 1759
46609ce2
QY
1760 /*
1761 * Avoid any overhead until uclamp is actually used by userspace.
1762 *
1763 * The condition is constructed such that a NOP is generated when
1764 * sched_uclamp_used is disabled.
1765 */
5fca5a4c 1766 if (!uclamp_is_used())
46609ce2
QY
1767 return;
1768
69842cba
PB
1769 if (unlikely(!p->sched_class->uclamp_enabled))
1770 return;
1771
90ca9410
XY
1772 /* Only inc a delayed task that is being woken up. */
1773 if (p->se.sched_delayed && !(flags & ENQUEUE_DELAYED))
dfa0a574
PZ
1774 return;
1775
69842cba
PB
1776 for_each_clamp_id(clamp_id)
1777 uclamp_rq_inc_id(rq, p, clamp_id);
e496187d
PB
1778
1779 /* Reset clamp idle holding when there is one RUNNABLE task */
1780 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1781 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
69842cba
PB
1782}
1783
1784static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
1785{
0413d7f3 1786 enum uclamp_id clamp_id;
69842cba 1787
46609ce2
QY
1788 /*
1789 * Avoid any overhead until uclamp is actually used by userspace.
1790 *
1791 * The condition is constructed such that a NOP is generated when
1792 * sched_uclamp_used is disabled.
1793 */
5fca5a4c 1794 if (!uclamp_is_used())
46609ce2
QY
1795 return;
1796
69842cba
PB
1797 if (unlikely(!p->sched_class->uclamp_enabled))
1798 return;
1799
dfa0a574
PZ
1800 if (p->se.sched_delayed)
1801 return;
1802
69842cba
PB
1803 for_each_clamp_id(clamp_id)
1804 uclamp_rq_dec_id(rq, p, clamp_id);
1805}
1806
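/*
 * Illustrative sketch only (not kernel code): the toy_* names below are
 * invented to model the "local max aggregation" described above as a
 * self-contained userspace program. Enqueue refcounts a requested clamp
 * value in its bucket and raises the rq-wide max; dequeue drops the
 * refcount and, when the bucket empties, recomputes the max from the
 * remaining non-empty buckets. Bucket-id resolution and the idle-flag
 * handling are deliberately omitted.
 */
#include <stdio.h>

#define TOY_BUCKETS	5

struct toy_bucket { unsigned int tasks, value; };
struct toy_rq { struct toy_bucket bucket[TOY_BUCKETS]; unsigned int clamp; };

static void toy_enqueue(struct toy_rq *rq, unsigned int id, unsigned int value)
{
	struct toy_bucket *b = &rq->bucket[id];

	b->tasks++;
	/* Local max aggregation: the bucket tracks the max requested value. */
	if (b->tasks == 1 || value > b->value)
		b->value = value;
	if (value > rq->clamp)
		rq->clamp = value;
}

static void toy_dequeue(struct toy_rq *rq, unsigned int id)
{
	struct toy_bucket *b = &rq->bucket[id];
	unsigned int max = 0, i;

	if (b->tasks)
		b->tasks--;
	if (b->tasks)
		return;

	/* Last task left the bucket: recompute the rq-wide clamp value. */
	for (i = 0; i < TOY_BUCKETS; i++)
		if (rq->bucket[i].tasks && rq->bucket[i].value > max)
			max = rq->bucket[i].value;
	rq->clamp = max;
}

int main(void)
{
	struct toy_rq rq = { .clamp = 0 };

	toy_enqueue(&rq, 3, 768);		/* high boost request */
	toy_enqueue(&rq, 1, 256);		/* low boost request  */
	printf("rq clamp = %u\n", rq.clamp);	/* prints 768 */
	toy_dequeue(&rq, 3);
	printf("rq clamp = %u\n", rq.clamp);	/* prints 256 */
	return 0;
}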
ca4984a7
QP
1807static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
1808 enum uclamp_id clamp_id)
1809{
1810 if (!p->uclamp[clamp_id].active)
1811 return;
1812
1813 uclamp_rq_dec_id(rq, p, clamp_id);
1814 uclamp_rq_inc_id(rq, p, clamp_id);
1815
1816 /*
1817 * Make sure to clear the idle flag if we've transiently reached 0
1818 * active tasks on rq.
1819 */
1820 if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1821 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1822}
1823
babbe170 1824static inline void
0213b708 1825uclamp_update_active(struct task_struct *p)
babbe170 1826{
0213b708 1827 enum uclamp_id clamp_id;
babbe170
PB
1828 struct rq_flags rf;
1829 struct rq *rq;
1830
1831 /*
1832 * Lock the task and the rq where the task is (or was) queued.
1833 *
1834 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
1835 * price to pay to safely serialize util_{min,max} updates with
1836 * enqueues, dequeues and migration operations.
1837 * This is the same locking schema used by __set_cpus_allowed_ptr().
1838 */
1839 rq = task_rq_lock(p, &rf);
1840
1841 /*
1842 * Setting the clamp bucket is serialized by task_rq_lock().
1843 * If the task is not yet RUNNABLE and its task_struct is not
1844 * affecting a valid clamp bucket, the next time it's enqueued,
1845 * it will already see the updated clamp bucket value.
1846 */
ca4984a7
QP
1847 for_each_clamp_id(clamp_id)
1848 uclamp_rq_reinc_id(rq, p, clamp_id);
babbe170
PB
1849
1850 task_rq_unlock(rq, p, &rf);
1851}
1852
e3b8b6a0 1853#ifdef CONFIG_UCLAMP_TASK_GROUP
babbe170 1854static inline void
0213b708 1855uclamp_update_active_tasks(struct cgroup_subsys_state *css)
babbe170
PB
1856{
1857 struct css_task_iter it;
1858 struct task_struct *p;
babbe170
PB
1859
1860 css_task_iter_start(css, 0, &it);
0213b708
QY
1861 while ((p = css_task_iter_next(&it)))
1862 uclamp_update_active(p);
babbe170
PB
1863 css_task_iter_end(&it);
1864}
1865
7274a5c1 1866static void cpu_util_update_eff(struct cgroup_subsys_state *css);
494dcdf4
Y
1867#endif
1868
1869#ifdef CONFIG_SYSCTL
494dcdf4 1870#ifdef CONFIG_UCLAMP_TASK_GROUP
7274a5c1
PB
1871static void uclamp_update_root_tg(void)
1872{
1873 struct task_group *tg = &root_task_group;
1874
1875 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
1876 sysctl_sched_uclamp_util_min, false);
1877 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
1878 sysctl_sched_uclamp_util_max, false);
1879
0e34600a 1880 guard(rcu)();
7274a5c1 1881 cpu_util_update_eff(&root_task_group.css);
7274a5c1
PB
1882}
1883#else
1884static void uclamp_update_root_tg(void) { }
1885#endif
1886
494dcdf4
Y
1887static void uclamp_sync_util_min_rt_default(void)
1888{
1889 struct task_struct *g, *p;
1890
1891 /*
1892 * copy_process() sysctl_uclamp
1893 * uclamp_min_rt = X;
1894 * write_lock(&tasklist_lock) read_lock(&tasklist_lock)
1895 * // link thread smp_mb__after_spinlock()
1896 * write_unlock(&tasklist_lock) read_unlock(&tasklist_lock);
1897 * sched_post_fork() for_each_process_thread()
1898 * __uclamp_sync_rt() __uclamp_sync_rt()
1899 *
1900 * Ensures that either sched_post_fork() will observe the new
1901 * uclamp_min_rt or for_each_process_thread() will observe the new
1902 * task.
1903 */
1904 read_lock(&tasklist_lock);
1905 smp_mb__after_spinlock();
1906 read_unlock(&tasklist_lock);
1907
0e34600a 1908 guard(rcu)();
494dcdf4
Y
1909 for_each_process_thread(g, p)
1910 uclamp_update_util_min_rt_default(p);
494dcdf4
Y
1911}
1912
78eb4ea2 1913static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write,
32927393 1914 void *buffer, size_t *lenp, loff_t *ppos)
e8f14172 1915{
7274a5c1 1916 bool update_root_tg = false;
13685c4a 1917 int old_min, old_max, old_min_rt;
e8f14172
PB
1918 int result;
1919
0f92cdf3
PZ
1920 guard(mutex)(&uclamp_mutex);
1921
e8f14172
PB
1922 old_min = sysctl_sched_uclamp_util_min;
1923 old_max = sysctl_sched_uclamp_util_max;
13685c4a 1924 old_min_rt = sysctl_sched_uclamp_util_min_rt_default;
e8f14172
PB
1925
1926 result = proc_dointvec(table, write, buffer, lenp, ppos);
1927 if (result)
1928 goto undo;
1929 if (!write)
0f92cdf3 1930 return 0;
e8f14172
PB
1931
1932 if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
13685c4a
QY
1933 sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE ||
1934 sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {
1935
e8f14172
PB
1936 result = -EINVAL;
1937 goto undo;
1938 }
1939
1940 if (old_min != sysctl_sched_uclamp_util_min) {
1941 uclamp_se_set(&uclamp_default[UCLAMP_MIN],
a509a7cd 1942 sysctl_sched_uclamp_util_min, false);
7274a5c1 1943 update_root_tg = true;
e8f14172
PB
1944 }
1945 if (old_max != sysctl_sched_uclamp_util_max) {
1946 uclamp_se_set(&uclamp_default[UCLAMP_MAX],
a509a7cd 1947 sysctl_sched_uclamp_util_max, false);
7274a5c1 1948 update_root_tg = true;
e8f14172
PB
1949 }
1950
46609ce2 1951 if (update_root_tg) {
4bc45824 1952 sched_uclamp_enable();
7274a5c1 1953 uclamp_update_root_tg();
46609ce2 1954 }
7274a5c1 1955
13685c4a 1956 if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
4bc45824 1957 sched_uclamp_enable();
13685c4a
QY
1958 uclamp_sync_util_min_rt_default();
1959 }
7274a5c1 1960
e8f14172 1961 /*
7274a5c1
PB
1962 * We update all RUNNABLE tasks only when task groups are in use.
1963 * Otherwise, keep it simple and do just a lazy update at each next
1964 * task enqueue time.
e8f14172 1965 */
0f92cdf3 1966 return 0;
e8f14172
PB
1967
1968undo:
1969 sysctl_sched_uclamp_util_min = old_min;
1970 sysctl_sched_uclamp_util_max = old_max;
13685c4a 1971 sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
e8f14172
PB
1972 return result;
1973}
494dcdf4 1974#endif
e8f14172
PB
1975
1976static void uclamp_fork(struct task_struct *p)
1977{
0413d7f3 1978 enum uclamp_id clamp_id;
e8f14172 1979
13685c4a
QY
1980 /*
1981 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
1982 * as the task is still at its early fork stages.
1983 */
e8f14172
PB
1984 for_each_clamp_id(clamp_id)
1985 p->uclamp[clamp_id].active = false;
a87498ac
PB
1986
1987 if (likely(!p->sched_reset_on_fork))
1988 return;
1989
1990 for_each_clamp_id(clamp_id) {
eaf5a92e
QP
1991 uclamp_se_set(&p->uclamp_req[clamp_id],
1992 uclamp_none(clamp_id), false);
a87498ac 1993 }
e8f14172
PB
1994}
1995
13685c4a
QY
1996static void uclamp_post_fork(struct task_struct *p)
1997{
1998 uclamp_update_util_min_rt_default(p);
1999}
2000
d81ae8aa
QY
2001static void __init init_uclamp_rq(struct rq *rq)
2002{
2003 enum uclamp_id clamp_id;
2004 struct uclamp_rq *uc_rq = rq->uclamp;
2005
2006 for_each_clamp_id(clamp_id) {
2007 uc_rq[clamp_id] = (struct uclamp_rq) {
2008 .value = uclamp_none(clamp_id)
2009 };
2010 }
2011
315c4f88 2012 rq->uclamp_flags = UCLAMP_FLAG_IDLE;
d81ae8aa
QY
2013}
2014
69842cba
PB
2015static void __init init_uclamp(void)
2016{
e8f14172 2017 struct uclamp_se uc_max = {};
0413d7f3 2018 enum uclamp_id clamp_id;
69842cba
PB
2019 int cpu;
2020
d81ae8aa
QY
2021 for_each_possible_cpu(cpu)
2022 init_uclamp_rq(cpu_rq(cpu));
69842cba 2023
69842cba 2024 for_each_clamp_id(clamp_id) {
e8f14172 2025 uclamp_se_set(&init_task.uclamp_req[clamp_id],
a509a7cd 2026 uclamp_none(clamp_id), false);
69842cba 2027 }
e8f14172
PB
2028
2029 /* System defaults allow max clamp values for both indexes */
a509a7cd 2030 uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
2480c093 2031 for_each_clamp_id(clamp_id) {
e8f14172 2032 uclamp_default[clamp_id] = uc_max;
2480c093
PB
2033#ifdef CONFIG_UCLAMP_TASK_GROUP
2034 root_task_group.uclamp_req[clamp_id] = uc_max;
0b60ba2d 2035 root_task_group.uclamp[clamp_id] = uc_max;
2480c093
PB
2036#endif
2037 }
69842cba
PB
2038}
2039
8cec3dd9 2040#else /* !CONFIG_UCLAMP_TASK */
90ca9410 2041static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p, int flags) { }
69842cba 2042static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
e8f14172 2043static inline void uclamp_fork(struct task_struct *p) { }
13685c4a 2044static inline void uclamp_post_fork(struct task_struct *p) { }
69842cba
PB
2045static inline void init_uclamp(void) { }
2046#endif /* CONFIG_UCLAMP_TASK */
2047
a1dfb631
MT
2048bool sched_task_on_rq(struct task_struct *p)
2049{
2050 return task_on_rq_queued(p);
2051}
2052
42a20f86
KC
2053unsigned long get_wchan(struct task_struct *p)
2054{
2055 unsigned long ip = 0;
2056 unsigned int state;
2057
2058 if (!p || p == current)
2059 return 0;
2060
2061 /* Only get wchan if task is blocked and we can keep it that way. */
2062 raw_spin_lock_irq(&p->pi_lock);
2063 state = READ_ONCE(p->__state);
2064 smp_rmb(); /* see try_to_wake_up() */
2065 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
2066 ip = __get_wchan(p);
2067 raw_spin_unlock_irq(&p->pi_lock);
2068
2069 return ip;
2070}
2071
04746ed8 2072void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2087a1ad 2073{
0a67d1ee
PZ
2074 if (!(flags & ENQUEUE_NOCLOCK))
2075 update_rq_clock(rq);
2076
dfa0a574 2077 /*
90ca9410
XY
2078 * uclamp_rq_inc() can run before ->enqueue_task() because uclamp
2079 * considers the ENQUEUE_DELAYED task before its ->sched_delayed gets
2080 * cleared in ->enqueue_task().
dfa0a574 2081 */
90ca9410
XY
2082 uclamp_rq_inc(rq, p, flags);
2083
2084 p->sched_class->enqueue_task(rq, p, flags);
8a311c74 2085
1a615101
JW
2086 psi_enqueue(p, flags);
2087
2088 if (!(flags & ENQUEUE_RESTORE))
c6508124 2089 sched_info_enqueue(rq, p);
c6508124 2090
8a311c74
PZ
2091 if (sched_core_enabled(rq))
2092 sched_core_enqueue(rq, p);
71f8bd46
IM
2093}
2094
863ccdbb
PZ
2095/*
2096 * Must only return false when DEQUEUE_SLEEP.
2097 */
2098inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
71f8bd46 2099{
8a311c74 2100 if (sched_core_enabled(rq))
4feee7d1 2101 sched_core_dequeue(rq, p, flags);
8a311c74 2102
0a67d1ee
PZ
2103 if (!(flags & DEQUEUE_NOCLOCK))
2104 update_rq_clock(rq);
2105
1a615101 2106 if (!(flags & DEQUEUE_SAVE))
4e29fb70 2107 sched_info_dequeue(rq, p);
1a615101
JW
2108
2109 psi_dequeue(p, flags);
0a67d1ee 2110
dfa0a574
PZ
2111 /*
2112 * Must be before ->dequeue_task() because ->dequeue_task() can 'fail'
2113 * and mark the task ->sched_delayed.
2114 */
69842cba 2115 uclamp_rq_dec(rq, p);
863ccdbb 2116 return p->sched_class->dequeue_task(rq, p, flags);
71f8bd46
IM
2117}
2118
029632fb 2119void activate_task(struct rq *rq, struct task_struct *p, int flags)
1e3c88bd 2120{
a53ce18c
VG
2121 if (task_on_rq_migrating(p))
2122 flags |= ENQUEUE_MIGRATED;
223baf9d
MD
2123 if (flags & ENQUEUE_MIGRATED)
2124 sched_mm_cid_migrate_to(rq, p);
a53ce18c 2125
371fd7e7 2126 enqueue_task(rq, p, flags);
7dd77884 2127
d6111cf4
PM
2128 WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
2129 ASSERT_EXCLUSIVE_WRITER(p->on_rq);
1e3c88bd
PZ
2130}
2131
029632fb 2132void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
1e3c88bd 2133{
f7d2728c 2134 WARN_ON_ONCE(flags & DEQUEUE_SLEEP);
abc158c8 2135
e8901061 2136 WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
d6111cf4 2137 ASSERT_EXCLUSIVE_WRITER(p->on_rq);
7dd77884 2138
e8901061
PZ
2139 /*
2140 * Code explicitly relies on TASK_ON_RQ_MIGRATING being set *before*
2141 * dequeue_task() and cleared *after* enqueue_task().
2142 */
2143
371fd7e7 2144 dequeue_task(rq, p, flags);
1e3c88bd
PZ
2145}
2146
e8901061
PZ
2147static void block_task(struct rq *rq, struct task_struct *p, int flags)
2148{
2149 if (dequeue_task(rq, p, DEQUEUE_SLEEP | flags))
2150 __block_task(rq, p);
2151}
2152
1da177e4
LT
2153/**
2154 * task_curr - is this task currently executing on a CPU?
2155 * @p: the task in question.
e69f6186
YB
2156 *
2157 * Return: 1 if the task is currently executing. 0 otherwise.
1da177e4 2158 */
36c8b586 2159inline int task_curr(const struct task_struct *p)
1da177e4
LT
2160{
2161 return cpu_curr(task_cpu(p)) == p;
2162}
2163
d8c7bc2e
TH
2164/*
2165 * ->switching_to() is called with the pi_lock and rq_lock held and must not
2166 * mess with locking.
2167 */
2168void check_class_changing(struct rq *rq, struct task_struct *p,
2169 const struct sched_class *prev_class)
2170{
2171 if (prev_class != p->sched_class && p->sched_class->switching_to)
2172 p->sched_class->switching_to(rq, p);
2173}
2174
67dfa1b7 2175/*
4c9a4bc8
PZ
2176 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
2177 * use the balance_callback list if you want balancing.
2178 *
2179 * This means any call to check_class_changed() must be followed by a call to
2180 * balance_callback().
67dfa1b7 2181 */
04746ed8
IM
2182void check_class_changed(struct rq *rq, struct task_struct *p,
2183 const struct sched_class *prev_class,
2184 int oldprio)
cb469845
SR
2185{
2186 if (prev_class != p->sched_class) {
2187 if (prev_class->switched_from)
da7a735e 2188 prev_class->switched_from(rq, p);
4c9a4bc8 2189
da7a735e 2190 p->sched_class->switched_to(rq, p);
2d3d891d 2191 } else if (oldprio != p->prio || dl_task(p))
da7a735e 2192 p->sched_class->prio_changed(rq, p, oldprio);
cb469845
SR
2193}
2194
e23edc86 2195void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
1e5a7405 2196{
af0c8b2b
PZ
2197 struct task_struct *donor = rq->donor;
2198
2199 if (p->sched_class == donor->sched_class)
2200 donor->sched_class->wakeup_preempt(rq, p, flags);
2201 else if (sched_class_above(p->sched_class, donor->sched_class))
aa93cd53 2202 resched_curr(rq);
1e5a7405
PZ
2203
2204 /*
2205 * A queue event has occurred, and we're going to schedule. In
2206 * this case, we can save a useless back to back clock update.
2207 */
af0c8b2b 2208 if (task_on_rq_queued(donor) && test_tsk_need_resched(rq->curr))
adcc8da8 2209 rq_clock_skip_update(rq);
1e5a7405
PZ
2210}
2211
1c069187
PZ
2212static __always_inline
2213int __task_state_match(struct task_struct *p, unsigned int state)
2214{
2215 if (READ_ONCE(p->__state) & state)
2216 return 1;
2217
1c069187
PZ
2218 if (READ_ONCE(p->saved_state) & state)
2219 return -1;
fbaa6a18 2220
1c069187
PZ
2221 return 0;
2222}
2223
2224static __always_inline
2225int task_state_match(struct task_struct *p, unsigned int state)
2226{
1c069187 2227 /*
8f0eed4a
EB
2228 * Serialize against current_save_and_set_rtlock_wait_state(),
2229 * current_restore_rtlock_saved_state(), and __refrigerator().
1c069187 2230 */
0e34600a 2231 guard(raw_spinlock_irq)(&p->pi_lock);
1c069187 2232 return __task_state_match(p, state);
1c069187
PZ
2233}
2234
d5e15866
PZ
2235/*
2236 * wait_task_inactive - wait for a thread to unschedule.
2237 *
2238 * Wait for the thread to block in any of the states set in @match_state.
2239 * If it changes, i.e. @p might have woken up, then return zero. When we
2240 * succeed in waiting for @p to be off its CPU, we return a positive number
2241 * (its total switch count). If a second call a short while later returns the
2242 * same number, the caller can be sure that @p has remained unscheduled the
2243 * whole time.
2244 *
2245 * The caller must ensure that the task *will* unschedule sometime soon,
2246 * else this function might spin for a *long* time. This function can't
2247 * be called with interrupts off, or it may introduce deadlock with
2248 * smp_call_function() if an IPI is sent by the same process we are
2249 * waiting to become inactive.
2250 */
2251unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
2252{
1c069187 2253 int running, queued, match;
d5e15866
PZ
2254 struct rq_flags rf;
2255 unsigned long ncsw;
2256 struct rq *rq;
2257
2258 for (;;) {
2259 /*
2260 * We do the initial early heuristics without holding
2261 * any task-queue locks at all. We'll only try to get
2262 * the runqueue lock when things look like they will
2263 * work out!
2264 */
2265 rq = task_rq(p);
2266
2267 /*
2268 * If the task is actively running on another CPU
2269 * still, just relax and busy-wait without holding
2270 * any locks.
2271 *
2272 * NOTE! Since we don't hold any locks, it's not
2273 * even certain that "rq" stays as the right runqueue!
2274 * But we don't care, since "task_on_cpu()" will
2275 * return false if the runqueue has changed and p
2276 * is actually now running somewhere else!
2277 */
2278 while (task_on_cpu(rq, p)) {
1c069187 2279 if (!task_state_match(p, match_state))
d5e15866
PZ
2280 return 0;
2281 cpu_relax();
2282 }
2283
2284 /*
2285 * Ok, time to look more closely! We need the rq
2286 * lock now, to be *sure*. If we're wrong, we'll
2287 * just go back and repeat.
2288 */
2289 rq = task_rq_lock(p, &rf);
b7ca5743
JS
2290 /*
2291 * If the task is sched_delayed, force-dequeue it to avoid always
2292 * hitting the tick timeout in the queued case.
2293 */
2294 if (p->se.sched_delayed)
2295 dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
d5e15866
PZ
2296 trace_sched_wait_task(p);
2297 running = task_on_cpu(rq, p);
2298 queued = task_on_rq_queued(p);
2299 ncsw = 0;
1c069187
PZ
2300 if ((match = __task_state_match(p, match_state))) {
2301 /*
2302 * When matching on p->saved_state, consider this task
2303 * still queued so it will wait.
2304 */
2305 if (match < 0)
2306 queued = 1;
d5e15866 2307 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
1c069187 2308 }
d5e15866
PZ
2309 task_rq_unlock(rq, p, &rf);
2310
2311 /*
2312 * If it changed from the expected state, bail out now.
2313 */
2314 if (unlikely(!ncsw))
2315 break;
2316
2317 /*
2318 * Was it really running after all now that we
2319 * checked with the proper locks actually held?
2320 *
2321 * Oops. Go back and try again..
2322 */
2323 if (unlikely(running)) {
2324 cpu_relax();
2325 continue;
2326 }
2327
2328 /*
2329 * It's not enough that it's not actively running,
2330 * it must be off the runqueue _entirely_, and not
2331 * preempted!
2332 *
2333 * So if it was still runnable (but just not actively
2334 * running right now), it's preempted, and we should
2335 * yield - it could be a while.
2336 */
2337 if (unlikely(queued)) {
2338 ktime_t to = NSEC_PER_SEC / HZ;
2339
2340 set_current_state(TASK_UNINTERRUPTIBLE);
2341 schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
2342 continue;
2343 }
2344
2345 /*
2346 * Ahh, all good. It wasn't running, and it wasn't
2347 * runnable, which means that it will never become
2348 * running in the future either. We're all done!
2349 */
2350 break;
2351 }
2352
2353 return ncsw;
2354}
2355
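/*
 * Illustrative sketch only: a hypothetical caller of the idiom documented
 * above wait_task_inactive(). Sample the switch count once, inspect the
 * blocked task, then call again and compare; a matching non-zero count
 * guarantees @p never got to run in between. This helper does not exist in
 * the kernel and is shown purely to make the two-call pattern concrete.
 */
static bool toy_task_stayed_blocked(struct task_struct *p)
{
	unsigned long ncsw;

	ncsw = wait_task_inactive(p, TASK_UNINTERRUPTIBLE);
	if (!ncsw)
		return false;	/* state changed; @p may have woken up */

	/* ... inspect @p here while it is known to be off its CPU ... */

	/* Same non-zero count => @p has not been scheduled since. */
	return wait_task_inactive(p, TASK_UNINTERRUPTIBLE) == ncsw;
}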
1da177e4 2356#ifdef CONFIG_SMP
175f0e25 2357
af449901 2358static void
713a2e21 2359__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
af449901 2360
af449901
PZ
2361static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
2362{
713a2e21
WL
2363 struct affinity_context ac = {
2364 .new_mask = cpumask_of(rq->cpu),
2365 .flags = SCA_MIGRATE_DISABLE,
2366 };
2367
af449901
PZ
2368 if (likely(!p->migration_disabled))
2369 return;
2370
2371 if (p->cpus_ptr != &p->cpus_mask)
2372 return;
2373
2374 /*
402de7fc 2375 * Violates locking rules! See comment in __do_set_cpus_allowed().
af449901 2376 */
713a2e21 2377 __do_set_cpus_allowed(p, &ac);
af449901
PZ
2378}
2379
2380void migrate_disable(void)
2381{
3015ef4b
TG
2382 struct task_struct *p = current;
2383
2384 if (p->migration_disabled) {
0ec8d5ae
PH
2385#ifdef CONFIG_DEBUG_PREEMPT
2386 /*
2387 * Warn about overflow half-way through the range.
2388 */
2389 WARN_ON_ONCE((s16)p->migration_disabled < 0);
2390#endif
3015ef4b 2391 p->migration_disabled++;
af449901 2392 return;
3015ef4b 2393 }
af449901 2394
0e34600a 2395 guard(preempt)();
3015ef4b
TG
2396 this_rq()->nr_pinned++;
2397 p->migration_disabled = 1;
af449901
PZ
2398}
2399EXPORT_SYMBOL_GPL(migrate_disable);
2400
2401void migrate_enable(void)
2402{
2403 struct task_struct *p = current;
713a2e21
WL
2404 struct affinity_context ac = {
2405 .new_mask = &p->cpus_mask,
2406 .flags = SCA_MIGRATE_ENABLE,
2407 };
af449901 2408
0ec8d5ae
PH
2409#ifdef CONFIG_DEBUG_PREEMPT
2410 /*
2411 * Check both overflow from migrate_disable() and superfluous
2412 * migrate_enable().
2413 */
2414 if (WARN_ON_ONCE((s16)p->migration_disabled <= 0))
2415 return;
2416#endif
2417
6d337eab
PZ
2418 if (p->migration_disabled > 1) {
2419 p->migration_disabled--;
af449901 2420 return;
6d337eab 2421 }
af449901 2422
6d337eab
PZ
2423 /*
2424 * Ensure stop_task runs either before or after this, and that
2425 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
2426 */
0e34600a 2427 guard(preempt)();
6d337eab 2428 if (p->cpus_ptr != &p->cpus_mask)
713a2e21 2429 __set_cpus_allowed_ptr(p, &ac);
6d337eab
PZ
2430 /*
2431 * Mustn't clear migration_disabled() until cpus_ptr points back at the
2432 * regular cpus_mask, otherwise things that race (eg.
2433 * select_fallback_rq) get confused.
2434 */
af449901 2435 barrier();
6d337eab 2436 p->migration_disabled = 0;
3015ef4b 2437 this_rq()->nr_pinned--;
af449901
PZ
2438}
2439EXPORT_SYMBOL_GPL(migrate_enable);
2440
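/*
 * Illustrative sketch only: hypothetical usage of the two exports above.
 * migrate_disable()/migrate_enable() nest, pin the task to its current CPU,
 * and (unlike preempt_disable()) the pinned section may still block. This
 * helper is not part of core.c; it only illustrates the calling pattern.
 */
static void toy_stay_on_this_cpu(void)
{
	int cpu;

	migrate_disable();			/* task can no longer migrate */
	cpu = smp_processor_id();		/* stable while pinned         */
	msleep(10);				/* sleeping is allowed here    */
	WARN_ON(cpu != smp_processor_id());	/* still on the very same CPU  */
	migrate_enable();
}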
3015ef4b
TG
2441static inline bool rq_has_pinned_tasks(struct rq *rq)
2442{
2443 return rq->nr_pinned;
2444}
2445
175f0e25 2446/*
bee98539 2447 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
175f0e25
PZ
2448 * __set_cpus_allowed_ptr() and select_fallback_rq().
2449 */
2450static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
2451{
5ba2ffba 2452 /* When not in the task's cpumask, no point in looking further. */
2c390dda 2453 if (!task_allowed_on_cpu(p, cpu))
175f0e25
PZ
2454 return false;
2455
5ba2ffba
PZ
2456 /* migrate_disabled() must be allowed to finish. */
2457 if (is_migration_disabled(p))
175f0e25
PZ
2458 return cpu_online(cpu);
2459
5ba2ffba
PZ
2460 /* Non-kernel threads are not allowed during either online or offline. */
2461 if (!(p->flags & PF_KTHREAD))
2c390dda 2462 return cpu_active(cpu);
5ba2ffba
PZ
2463
2464 /* KTHREAD_IS_PER_CPU is always allowed. */
2465 if (kthread_is_per_cpu(p))
2466 return cpu_online(cpu);
2467
2468 /* Regular kernel threads don't get to stay during offline. */
b5c44773 2469 if (cpu_dying(cpu))
5ba2ffba
PZ
2470 return false;
2471
2472 /* But are allowed during online. */
2473 return cpu_online(cpu);
175f0e25
PZ
2474}
2475
5cc389bc
PZ
2476/*
2477 * This is how migration works:
2478 *
2479 * 1) we invoke migration_cpu_stop() on the target CPU using
2480 * stop_one_cpu().
2481 * 2) stopper starts to run (implicitly forcing the migrated thread
2482 * off the CPU)
2483 * 3) it checks whether the migrated task is still in the wrong runqueue.
2484 * 4) if it's in the wrong runqueue then the migration thread removes
2485 * it and puts it into the right queue.
2486 * 5) stopper completes and stop_one_cpu() returns and the migration
2487 * is done.
2488 */
2489
2490/*
2491 * move_queued_task - move a queued task to new rq.
2492 *
2493 * Returns (locked) new rq. Old rq's lock is released.
2494 */
8a8c69c3
PZ
2495static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
2496 struct task_struct *p, int new_cpu)
5cc389bc 2497{
5cb9eaa3 2498 lockdep_assert_rq_held(rq);
5cc389bc 2499
58877d34 2500 deactivate_task(rq, p, DEQUEUE_NOCLOCK);
5cc389bc 2501 set_task_cpu(p, new_cpu);
8a8c69c3 2502 rq_unlock(rq, rf);
5cc389bc
PZ
2503
2504 rq = cpu_rq(new_cpu);
2505
8a8c69c3 2506 rq_lock(rq, rf);
09348d75 2507 WARN_ON_ONCE(task_cpu(p) != new_cpu);
58877d34 2508 activate_task(rq, p, 0);
e23edc86 2509 wakeup_preempt(rq, p, 0);
5cc389bc
PZ
2510
2511 return rq;
2512}
2513
2514struct migration_arg {
6d337eab
PZ
2515 struct task_struct *task;
2516 int dest_cpu;
2517 struct set_affinity_pending *pending;
2518};
2519
50caf9c1
PZ
2520/*
2521 * @refs: number of wait_for_completion()
2522 * @stop_pending: is @stop_work in use
2523 */
6d337eab
PZ
2524struct set_affinity_pending {
2525 refcount_t refs;
9e81889c 2526 unsigned int stop_pending;
6d337eab
PZ
2527 struct completion done;
2528 struct cpu_stop_work stop_work;
2529 struct migration_arg arg;
5cc389bc
PZ
2530};
2531
2532/*
d1ccc66d 2533 * Move (not current) task off this CPU, onto the destination CPU. We're doing
5cc389bc
PZ
2534 * this because either it can't run here any more (set_cpus_allowed()
2535 * away from this CPU, or CPU going down), or because we're
2536 * attempting to rebalance this task on exec (sched_exec).
2537 *
2538 * So we race with normal scheduler movements, but that's OK, as long
2539 * as the task is no longer on this CPU.
5cc389bc 2540 */
8a8c69c3
PZ
2541static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
2542 struct task_struct *p, int dest_cpu)
5cc389bc 2543{
5cc389bc 2544 /* Affinity changed (again). */
175f0e25 2545 if (!is_cpu_allowed(p, dest_cpu))
5e16bbc2 2546 return rq;
5cc389bc 2547
8a8c69c3 2548 rq = move_queued_task(rq, rf, p, dest_cpu);
5e16bbc2
PZ
2549
2550 return rq;
5cc389bc
PZ
2551}
2552
2553/*
402de7fc 2554 * migration_cpu_stop - this will be executed by a high-prio stopper thread
5cc389bc
PZ
2555 * and performs thread migration by bumping thread off CPU then
2556 * 'pushing' onto another runqueue.
2557 */
2558static int migration_cpu_stop(void *data)
2559{
2560 struct migration_arg *arg = data;
c20cf065 2561 struct set_affinity_pending *pending = arg->pending;
5e16bbc2
PZ
2562 struct task_struct *p = arg->task;
2563 struct rq *rq = this_rq();
6d337eab 2564 bool complete = false;
8a8c69c3 2565 struct rq_flags rf;
5cc389bc
PZ
2566
2567 /*
d1ccc66d
IM
2568 * The original target CPU might have gone down and we might
2569 * be on another CPU but it doesn't matter.
5cc389bc 2570 */
6d337eab 2571 local_irq_save(rf.flags);
5cc389bc
PZ
2572 /*
2573 * We need to explicitly wake pending tasks before running
3bd37062 2574 * __migrate_task() such that we will not miss enforcing cpus_ptr
5cc389bc
PZ
2575 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
2576 */
16bf5a5e 2577 flush_smp_call_function_queue();
5e16bbc2
PZ
2578
2579 raw_spin_lock(&p->pi_lock);
8a8c69c3 2580 rq_lock(rq, &rf);
6d337eab 2581
e140749c
VS
2582 /*
2583 * If we were passed a pending, then ->stop_pending was set, thus
2584 * p->migration_pending must have remained stable.
2585 */
2586 WARN_ON_ONCE(pending && pending != p->migration_pending);
2587
5e16bbc2
PZ
2588 /*
2589 * If task_rq(p) != rq, it cannot be migrated here, because we're
2590 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
2591 * we're holding p->pi_lock.
2592 */
bf89a304 2593 if (task_rq(p) == rq) {
6d337eab
PZ
2594 if (is_migration_disabled(p))
2595 goto out;
2596
2597 if (pending) {
e140749c 2598 p->migration_pending = NULL;
6d337eab 2599 complete = true;
6d337eab 2600
3f1bc119
PZ
2601 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
2602 goto out;
3f1bc119 2603 }
6d337eab 2604
96500560
HJ
2605 if (task_on_rq_queued(p)) {
2606 update_rq_clock(rq);
475ea6c6 2607 rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
96500560 2608 } else {
475ea6c6 2609 p->wake_cpu = arg->dest_cpu;
96500560 2610 }
6d337eab 2611
3f1bc119
PZ
2612 /*
2613 * XXX __migrate_task() can fail, at which point we might end
2614 * up running on a dodgy CPU, AFAICT this can only happen
2615 * during CPU hotplug, at which point we'll get pushed out
2616 * anyway, so it's probably not a big deal.
2617 */
2618
c20cf065 2619 } else if (pending) {
6d337eab
PZ
2620 /*
2621 * This happens when we get migrated between migrate_enable()'s
2622 * preempt_enable() and scheduling the stopper task. At that
2623 * point we're a regular task again and not current anymore.
2624 *
2625 * A !PREEMPT kernel has a giant hole here, which makes it far
2626 * more likely.
2627 */
2628
d707faa6
VS
2629 /*
2630 * The task moved before the stopper got to run. We're holding
2631 * ->pi_lock, so the allowed mask is stable - if it got
2632 * somewhere allowed, we're done.
2633 */
c20cf065 2634 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
e140749c 2635 p->migration_pending = NULL;
d707faa6
VS
2636 complete = true;
2637 goto out;
2638 }
2639
6d337eab
PZ
2640 /*
2641 * When migrate_enable() hits a rq mis-match we can't reliably
2642 * determine is_migration_disabled() and so have to chase after
2643 * it.
2644 */
9e81889c 2645 WARN_ON_ONCE(!pending->stop_pending);
f0498d2a 2646 preempt_disable();
6d337eab
PZ
2647 task_rq_unlock(rq, p, &rf);
2648 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
2649 &pending->arg, &pending->stop_work);
f0498d2a 2650 preempt_enable();
6d337eab 2651 return 0;
bf89a304 2652 }
6d337eab 2653out:
9e81889c
PZ
2654 if (pending)
2655 pending->stop_pending = false;
6d337eab
PZ
2656 task_rq_unlock(rq, p, &rf);
2657
2658 if (complete)
2659 complete_all(&pending->done);
2660
5cc389bc
PZ
2661 return 0;
2662}
2663
a7c81556
PZ
2664int push_cpu_stop(void *arg)
2665{
2666 struct rq *lowest_rq = NULL, *rq = this_rq();
2667 struct task_struct *p = arg;
2668
2669 raw_spin_lock_irq(&p->pi_lock);
5cb9eaa3 2670 raw_spin_rq_lock(rq);
a7c81556
PZ
2671
2672 if (task_rq(p) != rq)
2673 goto out_unlock;
2674
2675 if (is_migration_disabled(p)) {
2676 p->migration_flags |= MDF_PUSH;
2677 goto out_unlock;
2678 }
2679
2680 p->migration_flags &= ~MDF_PUSH;
2681
2682 if (p->sched_class->find_lock_rq)
2683 lowest_rq = p->sched_class->find_lock_rq(p, rq);
5e16bbc2 2684
a7c81556
PZ
2685 if (!lowest_rq)
2686 goto out_unlock;
2687
2688 // XXX validate p is still the highest prio task
2689 if (task_rq(p) == rq) {
2b05a0b4 2690 move_queued_task_locked(rq, lowest_rq, p);
a7c81556
PZ
2691 resched_curr(lowest_rq);
2692 }
2693
2694 double_unlock_balance(rq, lowest_rq);
2695
2696out_unlock:
2697 rq->push_busy = false;
5cb9eaa3 2698 raw_spin_rq_unlock(rq);
a7c81556
PZ
2699 raw_spin_unlock_irq(&p->pi_lock);
2700
2701 put_task_struct(p);
5cc389bc
PZ
2702 return 0;
2703}
2704
c5b28038
PZ
2705/*
2706 * sched_class::set_cpus_allowed must do the below, but is not required to
2707 * actually call this function.
2708 */
713a2e21 2709void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
5cc389bc 2710{
713a2e21
WL
2711 if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
2712 p->cpus_ptr = ctx->new_mask;
af449901
PZ
2713 return;
2714 }
2715
713a2e21
WL
2716 cpumask_copy(&p->cpus_mask, ctx->new_mask);
2717 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
8f9ea86f
WL
2718
2719 /*
2720 * Swap in a new user_cpus_ptr if SCA_USER flag set
2721 */
2722 if (ctx->flags & SCA_USER)
2723 swap(p->user_cpus_ptr, ctx->user_mask);
5cc389bc
PZ
2724}
2725
9cfc3e18 2726static void
713a2e21 2727__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
c5b28038 2728{
6c37067e
PZ
2729 struct rq *rq = task_rq(p);
2730 bool queued, running;
2731
af449901
PZ
2732 /*
2733 * This here violates the locking rules for affinity, since we're only
2734 * supposed to change these variables while holding both rq->lock and
2735 * p->pi_lock.
2736 *
2737 * HOWEVER, it magically works, because ttwu() is the only code that
2738 * accesses these variables under p->pi_lock and only does so after
2739 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
2740 * before finish_task().
2741 *
2742 * XXX do further audits, this smells like something putrid.
2743 */
713a2e21 2744 if (ctx->flags & SCA_MIGRATE_DISABLE)
f7d2728c 2745 WARN_ON_ONCE(!p->on_cpu);
af449901
PZ
2746 else
2747 lockdep_assert_held(&p->pi_lock);
6c37067e
PZ
2748
2749 queued = task_on_rq_queued(p);
af0c8b2b 2750 running = task_current_donor(rq, p);
6c37067e
PZ
2751
2752 if (queued) {
2753 /*
2754 * Because __kthread_bind() calls this on blocked tasks without
2755 * holding rq->lock.
2756 */
5cb9eaa3 2757 lockdep_assert_rq_held(rq);
7a57f32a 2758 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
6c37067e
PZ
2759 }
2760 if (running)
2761 put_prev_task(rq, p);
2762
713a2e21 2763 p->sched_class->set_cpus_allowed(p, ctx);
7e019dcc 2764 mm_set_cpus_allowed(p->mm, ctx->new_mask);
6c37067e 2765
6c37067e 2766 if (queued)
7134b3e9 2767 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
a399d233 2768 if (running)
03b7fad1 2769 set_next_task(rq, p);
c5b28038
PZ
2770}
2771
851a723e
WL
2772/*
2773 * Used for kthread_bind() and select_fallback_rq(); in both cases the user
2774 * affinity (if any) should be destroyed too.
2775 */
9cfc3e18
PZ
2776void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
2777{
713a2e21
WL
2778 struct affinity_context ac = {
2779 .new_mask = new_mask,
851a723e
WL
2780 .user_mask = NULL,
2781 .flags = SCA_USER, /* clear the user requested mask */
713a2e21 2782 };
9a5418bc
WL
2783 union cpumask_rcuhead {
2784 cpumask_t cpumask;
2785 struct rcu_head rcu;
2786 };
713a2e21
WL
2787
2788 __do_set_cpus_allowed(p, &ac);
9a5418bc
WL
2789
2790 /*
2791 * Because this is called with p->pi_lock held, it is not possible
2792 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
2793 * kfree_rcu().
2794 */
2795 kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
2796}
2797
b90ca8ba
WD
2798int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
2799 int node)
2800{
87ca4f9e 2801 cpumask_t *user_mask;
8f9ea86f
WL
2802 unsigned long flags;
2803
87ca4f9e
WL
2804 /*
2805 * Always clear dst->user_cpus_ptr first, as src's and dst's user_cpus_ptr
2806 * may differ by now due to racing.
2807 */
2808 dst->user_cpus_ptr = NULL;
2809
2810 /*
2811 * This check is racy and losing the race is a valid situation.
2812 * It is not worth the extra overhead of taking the pi_lock on
2813 * every fork/clone.
2814 */
2815 if (data_race(!src->user_cpus_ptr))
b90ca8ba
WD
2816 return 0;
2817
9a5418bc 2818 user_mask = alloc_user_cpus_ptr(node);
87ca4f9e 2819 if (!user_mask)
b90ca8ba
WD
2820 return -ENOMEM;
2821
87ca4f9e
WL
2822 /*
2823 * Use pi_lock to protect content of user_cpus_ptr
2824 *
2825 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
2826 * do_set_cpus_allowed().
2827 */
8f9ea86f 2828 raw_spin_lock_irqsave(&src->pi_lock, flags);
87ca4f9e
WL
2829 if (src->user_cpus_ptr) {
2830 swap(dst->user_cpus_ptr, user_mask);
2831 cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
2832 }
8f9ea86f 2833 raw_spin_unlock_irqrestore(&src->pi_lock, flags);
87ca4f9e
WL
2834
2835 if (unlikely(user_mask))
2836 kfree(user_mask);
2837
b90ca8ba
WD
2838 return 0;
2839}
2840
07ec77a1
WD
2841static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
2842{
2843 struct cpumask *user_mask = NULL;
2844
2845 swap(p->user_cpus_ptr, user_mask);
2846
2847 return user_mask;
2848}
2849
b90ca8ba
WD
2850void release_user_cpus_ptr(struct task_struct *p)
2851{
07ec77a1 2852 kfree(clear_user_cpus_ptr(p));
b90ca8ba
WD
2853}
2854
6d337eab 2855/*
c777d847
VS
2856 * This function is wildly self concurrent; here be dragons.
2857 *
2858 *
2859 * When given a valid mask, __set_cpus_allowed_ptr() must block until the
2860 * designated task is enqueued on an allowed CPU. If that task is currently
2861 * running, we have to kick it out using the CPU stopper.
2862 *
2863 * Migrate-Disable comes along and tramples all over our nice sandcastle.
2864 * Consider:
2865 *
2866 * Initial conditions: P0->cpus_mask = [0, 1]
2867 *
2868 * P0@CPU0 P1
2869 *
2870 * migrate_disable();
2871 * <preempted>
2872 * set_cpus_allowed_ptr(P0, [1]);
2873 *
2874 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
2875 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2876 * This means we need the following scheme:
2877 *
2878 * P0@CPU0 P1
2879 *
2880 * migrate_disable();
2881 * <preempted>
2882 * set_cpus_allowed_ptr(P0, [1]);
2883 * <blocks>
2884 * <resumes>
2885 * migrate_enable();
2886 * __set_cpus_allowed_ptr();
2887 * <wakes local stopper>
2888 * `--> <woken on migration completion>
2889 *
2890 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2891 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
2892 * task p are serialized by p->pi_lock, which we can leverage: the one that
2893 * should come into effect at the end of the Migrate-Disable region is the last
2894 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2895 * but we still need to properly signal those waiting tasks at the appropriate
2896 * moment.
2897 *
2898 * This is implemented using struct set_affinity_pending. The first
2899 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2900 * setup an instance of that struct and install it on the targeted task_struct.
2901 * Any and all further callers will reuse that instance. Those then wait for
2902 * a completion signaled at the tail of the CPU stopper callback (1), triggered
2903 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2904 *
2905 *
2906 * (1) In the cases covered above. There is one more where the completion is
2907 * signaled within affine_move_task() itself: when a subsequent affinity request
e140749c
VS
2908 * occurs after the stopper bailed out due to the targeted task still being
2909 * Migrate-Disable. Consider:
c777d847
VS
2910 *
2911 * Initial conditions: P0->cpus_mask = [0, 1]
2912 *
e140749c
VS
2913 * CPU0 P1 P2
2914 * <P0>
2915 * migrate_disable();
2916 * <preempted>
c777d847
VS
2917 * set_cpus_allowed_ptr(P0, [1]);
2918 * <blocks>
e140749c
VS
2919 * <migration/0>
2920 * migration_cpu_stop()
2921 * is_migration_disabled()
2922 * <bails>
c777d847
VS
2923 * set_cpus_allowed_ptr(P0, [0, 1]);
2924 * <signal completion>
2925 * <awakes>
2926 *
2927 * Note that the above is safe vs a concurrent migrate_enable(), as any
2928 * pending affinity completion is preceded by an uninstallation of
2929 * p->migration_pending done with p->pi_lock held.
6d337eab
PZ
2930 */
2931static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
2932 int dest_cpu, unsigned int flags)
5584e8ac
WL
2933 __releases(rq->lock)
2934 __releases(p->pi_lock)
6d337eab
PZ
2935{
2936 struct set_affinity_pending my_pending = { }, *pending = NULL;
9e81889c 2937 bool stop_pending, complete = false;
6d337eab
PZ
2938
2939 /* Can the task run on the task's current CPU? If so, we're done */
2940 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
a7c81556
PZ
2941 struct task_struct *push_task = NULL;
2942
2943 if ((flags & SCA_MIGRATE_ENABLE) &&
2944 (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
2945 rq->push_busy = true;
2946 push_task = get_task_struct(p);
2947 }
2948
50caf9c1
PZ
2949 /*
2950 * If there are pending waiters, but no pending stop_work,
2951 * then complete now.
2952 */
6d337eab 2953 pending = p->migration_pending;
50caf9c1 2954 if (pending && !pending->stop_pending) {
6d337eab
PZ
2955 p->migration_pending = NULL;
2956 complete = true;
2957 }
50caf9c1 2958
f0498d2a 2959 preempt_disable();
6d337eab 2960 task_rq_unlock(rq, p, rf);
a7c81556
PZ
2961 if (push_task) {
2962 stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2963 p, &rq->push_work);
2964 }
f0498d2a 2965 preempt_enable();
a7c81556 2966
6d337eab 2967 if (complete)
50caf9c1 2968 complete_all(&pending->done);
6d337eab
PZ
2969
2970 return 0;
2971 }
2972
2973 if (!(flags & SCA_MIGRATE_ENABLE)) {
2974 /* serialized by p->pi_lock */
2975 if (!p->migration_pending) {
c777d847 2976 /* Install the request */
6d337eab
PZ
2977 refcount_set(&my_pending.refs, 1);
2978 init_completion(&my_pending.done);
8a6edb52
PZ
2979 my_pending.arg = (struct migration_arg) {
2980 .task = p,
475ea6c6 2981 .dest_cpu = dest_cpu,
8a6edb52
PZ
2982 .pending = &my_pending,
2983 };
2984
6d337eab
PZ
2985 p->migration_pending = &my_pending;
2986 } else {
2987 pending = p->migration_pending;
2988 refcount_inc(&pending->refs);
475ea6c6
VS
2989 /*
2990 * Affinity has changed, but we've already installed a
2991 * pending. migration_cpu_stop() *must* see this, else
2992 * we risk a completion of the pending despite having a
2993 * task on a disallowed CPU.
2994 *
2995 * Serialized by p->pi_lock, so this is safe.
2996 */
2997 pending->arg.dest_cpu = dest_cpu;
6d337eab
PZ
2998 }
2999 }
3000 pending = p->migration_pending;
3001 /*
3002 * - !MIGRATE_ENABLE:
3003 * we'll have installed a pending if there wasn't one already.
3004 *
3005 * - MIGRATE_ENABLE:
3006 * we're here because the current CPU isn't matching anymore,
3007 * the only way that can happen is because of a concurrent
3008 * set_cpus_allowed_ptr() call, which should then still be
3009 * pending completion.
3010 *
3011 * Either way, we really should have a @pending here.
3012 */
3013 if (WARN_ON_ONCE(!pending)) {
3014 task_rq_unlock(rq, p, rf);
3015 return -EINVAL;
3016 }
3017
0b9d46fc 3018 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
c777d847 3019 /*
58b1a450
PZ
3020 * MIGRATE_ENABLE gets here because 'p == current', but for
3021 * anything else we cannot do is_migration_disabled(), punt
3022 * and have the stopper function handle it all race-free.
c777d847 3023 */
9e81889c
PZ
3024 stop_pending = pending->stop_pending;
3025 if (!stop_pending)
3026 pending->stop_pending = true;
58b1a450 3027
58b1a450
PZ
3028 if (flags & SCA_MIGRATE_ENABLE)
3029 p->migration_flags &= ~MDF_PUSH;
50caf9c1 3030
f0498d2a 3031 preempt_disable();
6d337eab 3032 task_rq_unlock(rq, p, rf);
9e81889c
PZ
3033 if (!stop_pending) {
3034 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
3035 &pending->arg, &pending->stop_work);
3036 }
f0498d2a 3037 preempt_enable();
6d337eab 3038
58b1a450
PZ
3039 if (flags & SCA_MIGRATE_ENABLE)
3040 return 0;
6d337eab
PZ
3041 } else {
3042
3043 if (!is_migration_disabled(p)) {
3044 if (task_on_rq_queued(p))
3045 rq = move_queued_task(rq, rf, p, dest_cpu);
3046
50caf9c1
PZ
3047 if (!pending->stop_pending) {
3048 p->migration_pending = NULL;
3049 complete = true;
3050 }
6d337eab
PZ
3051 }
3052 task_rq_unlock(rq, p, rf);
3053
6d337eab
PZ
3054 if (complete)
3055 complete_all(&pending->done);
3056 }
3057
3058 wait_for_completion(&pending->done);
3059
3060 if (refcount_dec_and_test(&pending->refs))
50caf9c1 3061 wake_up_var(&pending->refs); /* No UaF, just an address */
6d337eab 3062
c777d847
VS
3063 /*
3064 * Block the original owner of &pending until all subsequent callers
3065 * have seen the completion and decremented the refcount
3066 */
6d337eab
PZ
3067 wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
3068
50caf9c1
PZ
3069 /* ARGH */
3070 WARN_ON_ONCE(my_pending.stop_pending);
3071
6d337eab
PZ
3072 return 0;
3073}
3074
5cc389bc 3075/*
07ec77a1 3076 * Called with both p->pi_lock and rq->lock held; drops both before returning.
5cc389bc 3077 */
07ec77a1 3078static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
713a2e21 3079 struct affinity_context *ctx,
07ec77a1
WD
3080 struct rq *rq,
3081 struct rq_flags *rf)
3082 __releases(rq->lock)
3083 __releases(p->pi_lock)
5cc389bc 3084{
234a503e 3085 const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
e9d867a6 3086 const struct cpumask *cpu_valid_mask = cpu_active_mask;
234a503e 3087 bool kthread = p->flags & PF_KTHREAD;
5cc389bc
PZ
3088 unsigned int dest_cpu;
3089 int ret = 0;
3090
a499c3ea 3091 update_rq_clock(rq);
5cc389bc 3092
234a503e 3093 if (kthread || is_migration_disabled(p)) {
e9d867a6 3094 /*
741ba80f
PZ
3095 * Kernel threads are allowed on online && !active CPUs,
3096 * however, during cpu-hot-unplug, even these might get pushed
3097 * away if not KTHREAD_IS_PER_CPU.
af449901
PZ
3098 *
3099 * Specifically, migration_disabled() tasks must not fail the
3100 * cpumask_any_and_distribute() pick below, esp. so on
3101 * SCA_MIGRATE_ENABLE, otherwise we'll not call
3102 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
e9d867a6
PZI
3103 */
3104 cpu_valid_mask = cpu_online_mask;
3105 }
3106
713a2e21 3107 if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
234a503e
WD
3108 ret = -EINVAL;
3109 goto out;
3110 }
3111
25834c73
PZ
3112 /*
3113 * Must re-check here, to close a race against __kthread_bind(),
3114 * sched_setaffinity() is not guaranteed to observe the flag.
3115 */
713a2e21 3116 if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
25834c73
PZ
3117 ret = -EINVAL;
3118 goto out;
3119 }
3120
713a2e21 3121 if (!(ctx->flags & SCA_MIGRATE_ENABLE)) {
df14b7f9
WL
3122 if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) {
3123 if (ctx->flags & SCA_USER)
3124 swap(p->user_cpus_ptr, ctx->user_mask);
885b3ba4 3125 goto out;
df14b7f9 3126 }
885b3ba4
VS
3127
3128 if (WARN_ON_ONCE(p == current &&
3129 is_migration_disabled(p) &&
713a2e21 3130 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) {
885b3ba4
VS
3131 ret = -EBUSY;
3132 goto out;
3133 }
3134 }
5cc389bc 3135
46a87b38
PT
3136 /*
3137 * Picking a ~random cpu helps in cases where we are changing affinity
3138 * for groups of tasks (ie. cpuset), so that load balancing is not
3139 * immediately required to distribute the tasks within their new mask.
3140 */
713a2e21 3141 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask);
714e501e 3142 if (dest_cpu >= nr_cpu_ids) {
5cc389bc
PZ
3143 ret = -EINVAL;
3144 goto out;
3145 }
3146
713a2e21 3147 __do_set_cpus_allowed(p, ctx);
07ec77a1 3148
8f9ea86f 3149 return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
5cc389bc 3150
5cc389bc 3151out:
07ec77a1 3152 task_rq_unlock(rq, p, rf);
5cc389bc
PZ
3153
3154 return ret;
3155}
25834c73 3156
07ec77a1
WD
3157/*
3158 * Change a given task's CPU affinity. Migrate the thread to a
3159 * proper CPU and schedule it away if the CPU it's executing on
3160 * is removed from the allowed bitmask.
3161 *
3162 * NOTE: the caller must have a valid reference to the task, the
3163 * task must not exit() & deallocate itself prematurely. The
3164 * call is not atomic; no spinlocks may be held.
3165 */
04746ed8 3166int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx)
07ec77a1
WD
3167{
3168 struct rq_flags rf;
3169 struct rq *rq;
3170
3171 rq = task_rq_lock(p, &rf);
da019032
WL
3172 /*
3173 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
3174 * flags are set.
3175 */
3176 if (p->user_cpus_ptr &&
3177 !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
3178 cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
3179 ctx->new_mask = rq->scratch_mask;
3180
713a2e21 3181 return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
07ec77a1
WD
3182}
3183
25834c73
PZ
3184int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
3185{
713a2e21
WL
3186 struct affinity_context ac = {
3187 .new_mask = new_mask,
3188 .flags = 0,
3189 };
3190
3191 return __set_cpus_allowed_ptr(p, &ac);
25834c73 3192}
5cc389bc
PZ
3193EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
3194
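/*
 * Illustrative sketch only: a hypothetical caller of the export above,
 * pinning a kernel thread it owns to a single CPU. Per the rules spelled
 * out for __set_cpus_allowed_ptr(), the caller must hold a valid reference
 * to the task and must not hold any spinlocks across the call.
 */
static int toy_pin_worker(struct task_struct *worker, int cpu)
{
	int ret;

	if (!cpu_online(cpu))
		return -ENODEV;

	ret = set_cpus_allowed_ptr(worker, cpumask_of(cpu));
	if (ret)
		pr_warn("failed to pin worker to CPU%d: %d\n", cpu, ret);

	return ret;
}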
07ec77a1
WD
3195/*
3196 * Change a given task's CPU affinity to the intersection of its current
8f9ea86f
WL
3197 * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
3198 * If user_cpus_ptr is defined, use it as the basis for restricting CPU
3199 * affinity or use cpu_online_mask instead.
3200 *
07ec77a1
WD
3201 * If the resulting mask is empty, leave the affinity unchanged and return
3202 * -EINVAL.
3203 */
3204static int restrict_cpus_allowed_ptr(struct task_struct *p,
3205 struct cpumask *new_mask,
3206 const struct cpumask *subset_mask)
3207{
8f9ea86f
WL
3208 struct affinity_context ac = {
3209 .new_mask = new_mask,
3210 .flags = 0,
3211 };
07ec77a1
WD
3212 struct rq_flags rf;
3213 struct rq *rq;
3214 int err;
3215
07ec77a1
WD
3216 rq = task_rq_lock(p, &rf);
3217
3218 /*
3219 * Forcefully restricting the affinity of a deadline task is
3220 * likely to cause problems, so fail and noisily override the
3221 * mask entirely.
3222 */
3223 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
3224 err = -EPERM;
3225 goto err_unlock;
3226 }
3227
8f9ea86f 3228 if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
07ec77a1
WD
3229 err = -EINVAL;
3230 goto err_unlock;
3231 }
3232
713a2e21 3233 return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);
07ec77a1
WD
3234
3235err_unlock:
3236 task_rq_unlock(rq, p, &rf);
07ec77a1
WD
3237 return err;
3238}
3239
3240/*
3241 * Restrict the CPU affinity of task @p so that it is a subset of
5584e8ac 3242 * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
07ec77a1
WD
3243 * old affinity mask. If the resulting mask is empty, we warn and walk
3244 * up the cpuset hierarchy until we find a suitable mask.
3245 */
3246void force_compatible_cpus_allowed_ptr(struct task_struct *p)
3247{
3248 cpumask_var_t new_mask;
3249 const struct cpumask *override_mask = task_cpu_possible_mask(p);
3250
3251 alloc_cpumask_var(&new_mask, GFP_KERNEL);
3252
3253 /*
3254 * __migrate_task() can fail silently in the face of concurrent
3255 * offlining of the chosen destination CPU, so take the hotplug
3256 * lock to ensure that the migration succeeds.
3257 */
3258 cpus_read_lock();
3259 if (!cpumask_available(new_mask))
3260 goto out_set_mask;
3261
3262 if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
3263 goto out_free_mask;
3264
3265 /*
3266 * We failed to find a valid subset of the affinity mask for the
3267 * task, so override it based on its cpuset hierarchy.
3268 */
3269 cpuset_cpus_allowed(p, new_mask);
3270 override_mask = new_mask;
3271
3272out_set_mask:
3273 if (printk_ratelimit()) {
3274 printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
3275 task_pid_nr(p), p->comm,
3276 cpumask_pr_args(override_mask));
3277 }
3278
3279 WARN_ON(set_cpus_allowed_ptr(p, override_mask));
3280out_free_mask:
3281 cpus_read_unlock();
3282 free_cpumask_var(new_mask);
3283}
3284
07ec77a1
WD
3285/*
3286 * Restore the affinity of a task @p which was previously restricted by a
8f9ea86f 3287 * call to force_compatible_cpus_allowed_ptr().
07ec77a1
WD
3288 *
3289 * It is the caller's responsibility to serialise this with any calls to
3290 * force_compatible_cpus_allowed_ptr(@p).
3291 */
3292void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
3293{
713a2e21 3294 struct affinity_context ac = {
8f9ea86f
WL
3295 .new_mask = task_user_cpus(p),
3296 .flags = 0,
713a2e21 3297 };
8f9ea86f 3298 int ret;
07ec77a1
WD
3299
3300 /*
8f9ea86f
WL
3301 * Try to restore the old affinity mask with __sched_setaffinity().
3302 * Cpuset masking will be done there too.
07ec77a1 3303 */
8f9ea86f
WL
3304 ret = __sched_setaffinity(p, &ac);
3305 WARN_ON_ONCE(ret);
07ec77a1
WD
3306}
3307
dd41f596 3308void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
c65cc870 3309{
2f064a59
PZ
3310 unsigned int state = READ_ONCE(p->__state);
3311
e2912009
PZ
3312 /*
3313 * We should never call set_task_cpu() on a blocked task,
3314 * ttwu() will sort out the placement.
3315 */
2f064a59 3316 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
0122ec5b 3317
3ea94de1
JP
3318 /*
3319 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
3320 * because schedstat_wait_{start,end} rebase migrating task's wait_start
3321 * time relying on p->on_rq.
3322 */
2f064a59 3323 WARN_ON_ONCE(state == TASK_RUNNING &&
3ea94de1
JP
3324 p->sched_class == &fair_sched_class &&
3325 (p->on_rq && !task_on_rq_migrating(p)));
3326
0122ec5b 3327#ifdef CONFIG_LOCKDEP
6c6c54e1
PZ
3328 /*
3329 * The caller should hold either p->pi_lock or rq->lock, when changing
3330 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
3331 *
3332 * sched_move_task() holds both and thus holding either pins the cgroup,
8323f26c 3333 * see task_group().
6c6c54e1
PZ
3334 *
3335 * Furthermore, all task_rq users should acquire both locks, see
3336 * task_rq_lock().
3337 */
0122ec5b 3338 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
9ef7e7e3 3339 lockdep_is_held(__rq_lockp(task_rq(p)))));
0122ec5b 3340#endif
4ff9083b
PZ
3341 /*
3342 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
3343 */
3344 WARN_ON_ONCE(!cpu_online(new_cpu));
af449901
PZ
3345
3346 WARN_ON_ONCE(is_migration_disabled(p));
e2912009 3347
de1d7286 3348 trace_sched_migrate_task(p, new_cpu);
cbc34ed1 3349
0c69774e 3350 if (task_cpu(p) != new_cpu) {
0a74bef8 3351 if (p->sched_class->migrate_task_rq)
1327237a 3352 p->sched_class->migrate_task_rq(p, new_cpu);
0c69774e 3353 p->se.nr_migrations++;
d7822b1e 3354 rseq_migrate(p);
223baf9d 3355 sched_mm_cid_migrate_from(p);
ff303e66 3356 perf_event_task_migrate(p);
0c69774e 3357 }
dd41f596
IM
3358
3359 __set_task_cpu(p, new_cpu);
c65cc870
IM
3360}
3361
0ad4e3df 3362#ifdef CONFIG_NUMA_BALANCING
ac66f547
PZ
3363static void __migrate_swap_task(struct task_struct *p, int cpu)
3364{
da0c1e65 3365 if (task_on_rq_queued(p)) {
ac66f547 3366 struct rq *src_rq, *dst_rq;
8a8c69c3 3367 struct rq_flags srf, drf;
ac66f547
PZ
3368
3369 src_rq = task_rq(p);
3370 dst_rq = cpu_rq(cpu);
3371
8a8c69c3
PZ
3372 rq_pin_lock(src_rq, &srf);
3373 rq_pin_lock(dst_rq, &drf);
3374
2b05a0b4 3375 move_queued_task_locked(src_rq, dst_rq, p);
e23edc86 3376 wakeup_preempt(dst_rq, p, 0);
8a8c69c3
PZ
3377
3378 rq_unpin_lock(dst_rq, &drf);
3379 rq_unpin_lock(src_rq, &srf);
3380
ac66f547
PZ
3381 } else {
3382 /*
3383 * Task isn't running anymore; make it appear like we migrated
3384 * it before it went to sleep. This means on wakeup we make the
d1ccc66d 3385 * previous CPU our target instead of where it really is.
ac66f547
PZ
3386 */
3387 p->wake_cpu = cpu;
3388 }
3389}
3390
3391struct migration_swap_arg {
3392 struct task_struct *src_task, *dst_task;
3393 int src_cpu, dst_cpu;
3394};
3395
3396static int migrate_swap_stop(void *data)
3397{
3398 struct migration_swap_arg *arg = data;
3399 struct rq *src_rq, *dst_rq;
ac66f547 3400
62694cd5
PZ
3401 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
3402 return -EAGAIN;
3403
ac66f547
PZ
3404 src_rq = cpu_rq(arg->src_cpu);
3405 dst_rq = cpu_rq(arg->dst_cpu);
3406
5bb76f1d
PZ
3407 guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock);
3408 guard(double_rq_lock)(src_rq, dst_rq);
62694cd5 3409
ac66f547 3410 if (task_cpu(arg->dst_task) != arg->dst_cpu)
5bb76f1d 3411 return -EAGAIN;
ac66f547
PZ
3412
3413 if (task_cpu(arg->src_task) != arg->src_cpu)
5bb76f1d 3414 return -EAGAIN;
ac66f547 3415
3bd37062 3416 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
5bb76f1d 3417 return -EAGAIN;
ac66f547 3418
3bd37062 3419 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
5bb76f1d 3420 return -EAGAIN;
ac66f547
PZ
3421
3422 __migrate_swap_task(arg->src_task, arg->dst_cpu);
3423 __migrate_swap_task(arg->dst_task, arg->src_cpu);
3424
5bb76f1d 3425 return 0;
ac66f547
PZ
3426}
3427
3428/*
3429 * Cross migrate two tasks
3430 */
0ad4e3df
SD
3431int migrate_swap(struct task_struct *cur, struct task_struct *p,
3432 int target_cpu, int curr_cpu)
ac66f547
PZ
3433{
3434 struct migration_swap_arg arg;
3435 int ret = -EINVAL;
3436
ac66f547
PZ
3437 arg = (struct migration_swap_arg){
3438 .src_task = cur,
0ad4e3df 3439 .src_cpu = curr_cpu,
ac66f547 3440 .dst_task = p,
0ad4e3df 3441 .dst_cpu = target_cpu,
ac66f547
PZ
3442 };
3443
3444 if (arg.src_cpu == arg.dst_cpu)
3445 goto out;
3446
6acce3ef
PZ
3447 /*
3448 * These three tests are all lockless; this is OK since all of them
3449 * will be re-checked with proper locks held further down the line.
3450 */
ac66f547
PZ
3451 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
3452 goto out;
3453
3bd37062 3454 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
ac66f547
PZ
3455 goto out;
3456
3bd37062 3457 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
ac66f547
PZ
3458 goto out;
3459
286549dc 3460 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
ac66f547
PZ
3461 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
3462
3463out:
ac66f547
PZ
3464 return ret;
3465}
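/*
 * Illustrative sketch, not part of the kernel source: a hypothetical
 * NUMA-balancing style caller that trades places with a task running on
 * a preferred node ('other' and its CPU are assumptions):
 *
 *	struct task_struct *other = ...;	// task on the preferred node
 *	int other_cpu = task_cpu(other);
 *	int ret;
 *
 *	// Both tasks are re-validated under the proper locks inside
 *	// migrate_swap_stop(); -EAGAIN means the topology or affinity
 *	// changed under us and the caller should simply retry later.
 *	ret = migrate_swap(current, other, other_cpu, smp_processor_id());
 */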
0ad4e3df 3466#endif /* CONFIG_NUMA_BALANCING */
ac66f547 3467
1da177e4
LT
3468/***
3469 * kick_process - kick a running thread to enter/exit the kernel
3470 * @p: the to-be-kicked thread
3471 *
3472 * Cause a process which is running on another CPU to enter
3473 * kernel mode, without any delay (e.g. to get signals handled).
3474 *
25985edc 3475 * NOTE: this function doesn't have to take the runqueue lock,
1da177e4
LT
3476 * because all it wants to ensure is that the remote task enters
3477 * the kernel. If the IPI races and the task has been migrated
3478 * to another CPU then no harm is done and the purpose has been
3479 * achieved as well.
3480 */
36c8b586 3481void kick_process(struct task_struct *p)
1da177e4 3482{
0e34600a
PZ
3483 guard(preempt)();
3484 int cpu = task_cpu(p);
1da177e4 3485
1da177e4
LT
3486 if ((cpu != smp_processor_id()) && task_curr(p))
3487 smp_send_reschedule(cpu);
1da177e4 3488}
b43e3521 3489EXPORT_SYMBOL_GPL(kick_process);
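/*
 * Illustrative sketch, not part of the kernel source: the usual pattern
 * behind kick_process() is "set per-task state, then force the task
 * through a kernel entry so it notices". This loosely mirrors what
 * signal delivery does when the target is already running:
 *
 *	set_tsk_thread_flag(p, TIF_SIGPENDING);
 *	if (!wake_up_state(p, TASK_INTERRUPTIBLE))
 *		kick_process(p);	// running elsewhere: IPI it into the kernel
 */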
1da177e4 3490
30da688e 3491/*
3bd37062 3492 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
e9d867a6
PZI
3493 *
3494 * A few notes on cpu_active vs cpu_online:
3495 *
3496 * - cpu_active must be a subset of cpu_online
3497 *
97fb7a0a 3498 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
e9d867a6 3499 * see __set_cpus_allowed_ptr(). At this point the newly online
d1ccc66d 3500 * CPU isn't yet part of the sched domains, and balancing will not
e9d867a6
PZI
3501 * see it.
3502 *
d1ccc66d 3503 * - on CPU-down we clear cpu_active() to mask the sched domains and
e9d867a6 3504 * avoid the load balancer placing new tasks on the to-be-removed
d1ccc66d 3505 * CPU. Existing tasks will remain running there and will be taken
e9d867a6
PZI
3506 * off.
3507 *
3508 * This means that fallback selection must not select !active CPUs.
3509 * It can also assume that any active CPU must be online. Conversely,
3510 * select_task_rq() below may allow selection of !active CPUs in order
3511 * to satisfy the above rules.
30da688e 3512 */
5da9a0fb
PZ
3513static int select_fallback_rq(int cpu, struct task_struct *p)
3514{
aa00d89c
TC
3515 int nid = cpu_to_node(cpu);
3516 const struct cpumask *nodemask = NULL;
2baab4e9
PZ
3517 enum { cpuset, possible, fail } state = cpuset;
3518 int dest_cpu;
5da9a0fb 3519
aa00d89c 3520 /*
d1ccc66d
IM
3521 * If the node that the CPU is on has been offlined, cpu_to_node()
3522 * will return -1. There is no CPU on the node, and we should
3523 * select the CPU on the other node.
aa00d89c
TC
3524 */
3525 if (nid != -1) {
3526 nodemask = cpumask_of_node(nid);
3527
3528 /* Look for allowed, online CPU in same node. */
3529 for_each_cpu(dest_cpu, nodemask) {
9ae606bc 3530 if (is_cpu_allowed(p, dest_cpu))
aa00d89c
TC
3531 return dest_cpu;
3532 }
2baab4e9 3533 }
5da9a0fb 3534
2baab4e9
PZ
3535 for (;;) {
3536 /* Any allowed, online CPU? */
3bd37062 3537 for_each_cpu(dest_cpu, p->cpus_ptr) {
175f0e25 3538 if (!is_cpu_allowed(p, dest_cpu))
2baab4e9 3539 continue;
175f0e25 3540
2baab4e9
PZ
3541 goto out;
3542 }
5da9a0fb 3543
e73e85f0 3544 /* No more Mr. Nice Guy. */
2baab4e9
PZ
3545 switch (state) {
3546 case cpuset:
97c0054d 3547 if (cpuset_cpus_allowed_fallback(p)) {
e73e85f0
ON
3548 state = possible;
3549 break;
3550 }
df561f66 3551 fallthrough;
2baab4e9 3552 case possible:
af449901
PZ
3553 /*
3554 * XXX When called from select_task_rq() we only
3555 * hold p->pi_lock and again violate locking order.
3556 *
3557 * More yuck to audit.
3558 */
3a544661 3559 do_set_cpus_allowed(p, task_cpu_fallback_mask(p));
2baab4e9
PZ
3560 state = fail;
3561 break;
2baab4e9
PZ
3562 case fail:
3563 BUG();
3564 break;
3565 }
3566 }
3567
3568out:
3569 if (state != cpuset) {
3570 /*
3571 * Don't tell them about moving exiting tasks or
3572 * kernel threads (both mm NULL), since they never
3573 * leave the kernel.
3574 */
3575 if (p->mm && printk_ratelimit()) {
aac74dc4 3576 printk_deferred("process %d (%s) no longer affine to cpu%d\n",
2baab4e9
PZ
3577 task_pid_nr(p), p->comm, cpu);
3578 }
5da9a0fb
PZ
3579 }
3580
3581 return dest_cpu;
3582}
3583
e2912009 3584/*
3bd37062 3585 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
e2912009 3586 */
970b13ba 3587static inline
b62933ee 3588int select_task_rq(struct task_struct *p, int cpu, int *wake_flags)
970b13ba 3589{
cbce1a68
PZ
3590 lockdep_assert_held(&p->pi_lock);
3591
f207dc2d 3592 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) {
b62933ee 3593 cpu = p->sched_class->select_task_rq(p, cpu, *wake_flags);
f207dc2d
TH
3594 *wake_flags |= WF_RQ_SELECTED;
3595 } else {
3bd37062 3596 cpu = cpumask_any(p->cpus_ptr);
f207dc2d 3597 }
e2912009
PZ
3598
3599 /*
3600 * In order not to call set_task_cpu() on a blocking task we need
3bd37062 3601 * to rely on ttwu() to place the task on a valid ->cpus_ptr
d1ccc66d 3602 * CPU.
e2912009
PZ
3603 *
3604 * Since this is common to all placement strategies, this lives here.
3605 *
3606 * [ this allows ->select_task() to simply return task_cpu(p) and
3607 * not worry about this generic constraint ]
3608 */
7af443ee 3609 if (unlikely(!is_cpu_allowed(p, cpu)))
5da9a0fb 3610 cpu = select_fallback_rq(task_cpu(p), p);
e2912009
PZ
3611
3612 return cpu;
970b13ba 3613}
09a40af5 3614
f5832c19
NP
3615void sched_set_stop_task(int cpu, struct task_struct *stop)
3616{
ded467dc 3617 static struct lock_class_key stop_pi_lock;
f5832c19
NP
3618 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
3619 struct task_struct *old_stop = cpu_rq(cpu)->stop;
3620
3621 if (stop) {
3622 /*
3623 * Make it appear like a SCHED_FIFO task, it's something
3624 * userspace knows about and won't get confused about.
3625 *
3626 * Also, it will make PI more or less work without too
3627 * much confusion -- but then, stop work should not
3628 * rely on PI working anyway.
3629 */
3630 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
3631
3632 stop->sched_class = &stop_sched_class;
ded467dc
PZ
3633
3634 /*
3635 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
3636 * adjust the effective priority of a task. As a result,
3637 * rt_mutex_setprio() can trigger (RT) balancing operations,
3638 * which can then trigger wakeups of the stop thread to push
3639 * around the current task.
3640 *
3641 * The stop task itself will never be part of the PI-chain, it
3642 * never blocks, therefore that ->pi_lock recursion is safe.
3643 * Tell lockdep about this by placing the stop->pi_lock in its
3644 * own class.
3645 */
3646 lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
f5832c19
NP
3647 }
3648
3649 cpu_rq(cpu)->stop = stop;
3650
3651 if (old_stop) {
3652 /*
3653 * Reset it back to a normal scheduling class so that
3654 * it can die in pieces.
3655 */
3656 old_stop->sched_class = &rt_sched_class;
3657 }
3658}
3659
74d862b6 3660#else /* CONFIG_SMP */
25834c73 3661
af449901
PZ
3662static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
3663
3015ef4b
TG
3664static inline bool rq_has_pinned_tasks(struct rq *rq)
3665{
3666 return false;
3667}
3668
74d862b6 3669#endif /* !CONFIG_SMP */
970b13ba 3670
d7c01d27 3671static void
b84cb5df 3672ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
9ed3811a 3673{
4fa8d299 3674 struct rq *rq;
b84cb5df 3675
4fa8d299
JP
3676 if (!schedstat_enabled())
3677 return;
3678
3679 rq = this_rq();
d7c01d27 3680
4fa8d299
JP
3681#ifdef CONFIG_SMP
3682 if (cpu == rq->cpu) {
b85c8b71 3683 __schedstat_inc(rq->ttwu_local);
ceeadb83 3684 __schedstat_inc(p->stats.nr_wakeups_local);
d7c01d27
PZ
3685 } else {
3686 struct sched_domain *sd;
3687
ceeadb83 3688 __schedstat_inc(p->stats.nr_wakeups_remote);
857d315f
PZ
3689
3690 guard(rcu)();
4fa8d299 3691 for_each_domain(rq->cpu, sd) {
d7c01d27 3692 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
b85c8b71 3693 __schedstat_inc(sd->ttwu_wake_remote);
d7c01d27
PZ
3694 break;
3695 }
3696 }
3697 }
f339b9dc
PZ
3698
3699 if (wake_flags & WF_MIGRATED)
ceeadb83 3700 __schedstat_inc(p->stats.nr_wakeups_migrate);
d7c01d27
PZ
3701#endif /* CONFIG_SMP */
3702
b85c8b71 3703 __schedstat_inc(rq->ttwu_count);
ceeadb83 3704 __schedstat_inc(p->stats.nr_wakeups);
d7c01d27
PZ
3705
3706 if (wake_flags & WF_SYNC)
ceeadb83 3707 __schedstat_inc(p->stats.nr_wakeups_sync);
d7c01d27
PZ
3708}
3709
23f41eeb 3710/*
160fb0d8 3711 * Mark the task runnable.
23f41eeb 3712 */
160fb0d8 3713static inline void ttwu_do_wakeup(struct task_struct *p)
9ed3811a 3714{
2f064a59 3715 WRITE_ONCE(p->__state, TASK_RUNNING);
fbd705a0 3716 trace_sched_wakeup(p);
160fb0d8
CZ
3717}
3718
3719static void
3720ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
3721 struct rq_flags *rf)
3722{
3723 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
3724
3725 lockdep_assert_rq_held(rq);
3726
3727 if (p->sched_contributes_to_load)
3728 rq->nr_uninterruptible--;
3729
3730#ifdef CONFIG_SMP
f207dc2d
TH
3731 if (wake_flags & WF_RQ_SELECTED)
3732 en_flags |= ENQUEUE_RQ_SELECTED;
160fb0d8
CZ
3733 if (wake_flags & WF_MIGRATED)
3734 en_flags |= ENQUEUE_MIGRATED;
3735 else
3736#endif
3737 if (p->in_iowait) {
3738 delayacct_blkio_end(p);
3739 atomic_dec(&task_rq(p)->nr_iowait);
3740 }
3741
3742 activate_task(rq, p, en_flags);
e23edc86 3743 wakeup_preempt(rq, p, wake_flags);
160fb0d8
CZ
3744
3745 ttwu_do_wakeup(p);
fbd705a0 3746
9ed3811a 3747#ifdef CONFIG_SMP
4c9a4bc8
PZ
3748 if (p->sched_class->task_woken) {
3749 /*
b19a888c 3750 * Our task @p is fully woken up and running; so it's safe to
cbce1a68 3751 * drop the rq->lock, hereafter rq is only used for statistics.
4c9a4bc8 3752 */
d8ac8971 3753 rq_unpin_lock(rq, rf);
9ed3811a 3754 p->sched_class->task_woken(rq, p);
d8ac8971 3755 rq_repin_lock(rq, rf);
4c9a4bc8 3756 }
9ed3811a 3757
e69c6341 3758 if (rq->idle_stamp) {
78becc27 3759 u64 delta = rq_clock(rq) - rq->idle_stamp;
9bd721c5 3760 u64 max = 2*rq->max_idle_balance_cost;
9ed3811a 3761
abfafa54
JL
3762 update_avg(&rq->avg_idle, delta);
3763
3764 if (rq->avg_idle > max)
9ed3811a 3765 rq->avg_idle = max;
abfafa54 3766
9ed3811a
TH
3767 rq->idle_stamp = 0;
3768 }
3769#endif
3770}
3771
c05fbafb 3772/*
58877d34
PZ
3773 * Consider @p being inside a wait loop:
3774 *
3775 * for (;;) {
3776 * set_current_state(TASK_UNINTERRUPTIBLE);
3777 *
3778 * if (CONDITION)
3779 * break;
3780 *
3781 * schedule();
3782 * }
3783 * __set_current_state(TASK_RUNNING);
3784 *
3785 * between set_current_state() and schedule(). In this case @p is still
3786 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3787 * an atomic manner.
3788 *
3789 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3790 * then schedule() must still happen and p->state can be changed to
3791 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
3792 * need to do a full wakeup with enqueue.
3793 *
3794 * Returns: %true when the wakeup is done,
3795 * %false otherwise.
c05fbafb 3796 */
58877d34 3797static int ttwu_runnable(struct task_struct *p, int wake_flags)
c05fbafb 3798{
eb580751 3799 struct rq_flags rf;
c05fbafb
PZ
3800 struct rq *rq;
3801 int ret = 0;
3802
eb580751 3803 rq = __task_rq_lock(p, &rf);
da0c1e65 3804 if (task_on_rq_queued(p)) {
abc158c8
PZ
3805 update_rq_clock(rq);
3806 if (p->se.sched_delayed)
3807 enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
efe09385
CZ
3808 if (!task_on_cpu(rq, p)) {
3809 /*
3810 * When on_rq && !on_cpu the task is preempted, see if
3811 * it should preempt the task that is current now.
3812 */
e23edc86 3813 wakeup_preempt(rq, p, wake_flags);
efe09385 3814 }
160fb0d8 3815 ttwu_do_wakeup(p);
c05fbafb
PZ
3816 ret = 1;
3817 }
eb580751 3818 __task_rq_unlock(rq, &rf);
c05fbafb
PZ
3819
3820 return ret;
3821}
3822
317f3941 3823#ifdef CONFIG_SMP
a1488664 3824void sched_ttwu_pending(void *arg)
317f3941 3825{
a1488664 3826 struct llist_node *llist = arg;
317f3941 3827 struct rq *rq = this_rq();
73215849 3828 struct task_struct *p, *t;
d8ac8971 3829 struct rq_flags rf;
317f3941 3830
e3baac47
PZ
3831 if (!llist)
3832 return;
3833
8a8c69c3 3834 rq_lock_irqsave(rq, &rf);
77558e4d 3835 update_rq_clock(rq);
317f3941 3836
8c4890d1 3837 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
b6e13e85
PZ
3838 if (WARN_ON_ONCE(p->on_cpu))
3839 smp_cond_load_acquire(&p->on_cpu, !VAL);
3840
3841 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
3842 set_task_cpu(p, cpu_of(rq));
3843
73215849 3844 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
b6e13e85 3845 }
317f3941 3846
d6962c4f
TD
3847 /*
3848 * Must be after enqueueing at least one task such that
3849 * idle_cpu() does not observe a false-negative -- if it does,
3850 * it is possible for select_idle_siblings() to stack a number
3851 * of tasks on this CPU during that window.
3852 *
402de7fc
IM
3853 * It is OK to clear ttwu_pending while another task is pending.
3854 * We will receive an IPI after local IRQs are enabled and then enqueue it.
d6962c4f
TD
3855 * Since now nr_running > 0, idle_cpu() will always get correct result.
3856 */
3857 WRITE_ONCE(rq->ttwu_pending, 0);
8a8c69c3 3858 rq_unlock_irqrestore(rq, &rf);
317f3941
PZ
3859}
3860
68f4ff04
VS
3861/*
3862 * Prepare the scene for sending an IPI for a remote smp_call
3863 *
3864 * Returns true if the caller can proceed with sending the IPI.
3865 * Returns false otherwise.
3866 */
3867bool call_function_single_prep_ipi(int cpu)
317f3941 3868{
68f4ff04 3869 if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
b2a02fc4 3870 trace_sched_wake_idle_without_ipi(cpu);
68f4ff04 3871 return false;
cc9cb0a7 3872 }
68f4ff04
VS
3873
3874 return true;
317f3941
PZ
3875}
3876
2ebb1771
MG
3877/*
3878 * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
3879 * necessary. The wakee CPU on receipt of the IPI will queue the task
3880 * via sched_ttwu_wakeup() for activation so the wakee incurs the cost
3881 * of the wakeup instead of the waker.
3882 */
3883static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
317f3941 3884{
e3baac47
PZ
3885 struct rq *rq = cpu_rq(cpu);
3886
b7e7ade3
PZ
3887 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
3888
126c2092 3889 WRITE_ONCE(rq->ttwu_pending, 1);
8c4890d1 3890 __smp_call_single_queue(cpu, &p->wake_entry.llist);
317f3941 3891}
d6aa8f85 3892
f6be8af1
CL
3893void wake_up_if_idle(int cpu)
3894{
3895 struct rq *rq = cpu_rq(cpu);
fd7de1e8 3896
4eb054f9
PZ
3897 guard(rcu)();
3898 if (is_idle_task(rcu_dereference(rq->curr))) {
3899 guard(rq_lock_irqsave)(rq);
3900 if (is_idle_task(rq->curr))
3901 resched_curr(rq);
3902 }
f6be8af1
CL
3903}
3904
b361c902
QY
3905bool cpus_equal_capacity(int this_cpu, int that_cpu)
3906{
3907 if (!sched_asym_cpucap_active())
3908 return true;
3909
3910 if (this_cpu == that_cpu)
3911 return true;
3912
3913 return arch_scale_cpu_capacity(this_cpu) == arch_scale_cpu_capacity(that_cpu);
3914}
3915
39be3501 3916bool cpus_share_cache(int this_cpu, int that_cpu)
518cd623 3917{
42dc938a
VD
3918 if (this_cpu == that_cpu)
3919 return true;
3920
518cd623
PZ
3921 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
3922}
c6e7bd7a 3923
b95303e0
BS
3924/*
3925 * Whether CPUs share cache resources, which means LLC on non-cluster
3926 * machines and LLC tag or L2 on machines with clusters.
3927 */
3928bool cpus_share_resources(int this_cpu, int that_cpu)
3929{
3930 if (this_cpu == that_cpu)
3931 return true;
3932
3933 return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu);
3934}
3935
751d4cbc 3936static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
2ebb1771 3937{
3539c641
TH
3938 /* See SCX_OPS_ALLOW_QUEUED_WAKEUP. */
3939 if (!scx_allow_ttwu_queue(p))
f0e1a064
TH
3940 return false;
3941
009836b4
PZ
3942#ifdef CONFIG_SMP
3943 if (p->sched_class == &stop_sched_class)
3944 return false;
3945#endif
3946
5ba2ffba
PZ
3947 /*
3948 * Do not complicate things with the async wake_list while the CPU is
3949 * in hotplug state.
3950 */
3951 if (!cpu_active(cpu))
3952 return false;
3953
751d4cbc
MG
3954 /* Ensure the task will still be allowed to run on the CPU. */
3955 if (!cpumask_test_cpu(cpu, p->cpus_ptr))
3956 return false;
3957
2ebb1771
MG
3958 /*
3959 * If the CPU does not share cache, then queue the task on the
3960 * remote rq's wakelist to avoid accessing remote data.
3961 */
3962 if (!cpus_share_cache(smp_processor_id(), cpu))
3963 return true;
3964
f3dd3f67
TD
3965 if (cpu == smp_processor_id())
3966 return false;
3967
2ebb1771 3968 /*
f3dd3f67
TD
3969 * If the wakee CPU is idle, or the task is descheduling and is the
3970 * only running task on the CPU, then use the wakelist to offload
3971 * the task activation to the idle (or soon-to-be-idle) CPU as
3972 * the current CPU is likely busy. nr_running is checked to
3973 * avoid unnecessary task stacking.
28156108
TD
3974 *
3975 * Note that we can only get here with (wakee) p->on_rq=0,
3976 * p->on_cpu can be whatever, we've done the dequeue, so
3977 * the wakee has been accounted out of ->nr_running.
2ebb1771 3978 */
f3dd3f67 3979 if (!cpu_rq(cpu)->nr_running)
2ebb1771
MG
3980 return true;
3981
3982 return false;
3983}
3984
3985static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
c6e7bd7a 3986{
751d4cbc 3987 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
c6e7bd7a 3988 sched_clock_cpu(cpu); /* Sync clocks across CPUs */
2ebb1771 3989 __ttwu_queue_wakelist(p, cpu, wake_flags);
c6e7bd7a
PZ
3990 return true;
3991 }
3992
3993 return false;
3994}
58877d34
PZ
3995
3996#else /* !CONFIG_SMP */
3997
3998static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3999{
4000 return false;
4001}
4002
d6aa8f85 4003#endif /* CONFIG_SMP */
317f3941 4004
b5179ac7 4005static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
c05fbafb
PZ
4006{
4007 struct rq *rq = cpu_rq(cpu);
d8ac8971 4008 struct rq_flags rf;
c05fbafb 4009
2ebb1771 4010 if (ttwu_queue_wakelist(p, cpu, wake_flags))
317f3941 4011 return;
317f3941 4012
8a8c69c3 4013 rq_lock(rq, &rf);
77558e4d 4014 update_rq_clock(rq);
d8ac8971 4015 ttwu_do_activate(rq, p, wake_flags, &rf);
8a8c69c3 4016 rq_unlock(rq, &rf);
9ed3811a
TH
4017}
4018
43295d73
TG
4019/*
4020 * Invoked from try_to_wake_up() to check whether the task can be woken up.
4021 *
4022 * The caller holds p::pi_lock if p != current or has preemption
4023 * disabled when p == current.
5f220be2 4024 *
8f0eed4a 4025 * The rules of saved_state:
5f220be2
TG
4026 *
4027 * The related locking code always holds p::pi_lock when updating
4028 * p::saved_state, which means the code is fully serialized in both cases.
4029 *
8f0eed4a
EB
4030 * For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
4031 * No other bits are set. This allows distinguishing all wakeup scenarios.
4032 *
4033 * For FREEZER, the wakeup happens via TASK_FROZEN. No other bits are set. This
4034 * allows us to prevent early wakeup of tasks before they can be run on
4035 * asymmetric ISA architectures (e.g. ARMv9).
43295d73
TG
4036 */
4037static __always_inline
4038bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
4039{
1c069187
PZ
4040 int match;
4041
5f220be2
TG
4042 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
4043 WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
4044 state != TASK_RTLOCK_WAIT);
4045 }
4046
1c069187 4047 *success = !!(match = __task_state_match(p, state));
5f220be2 4048
5f220be2
TG
4049 /*
4050 * Saved state preserves the task state across blocking on
8f0eed4a
EB
4051 * an RT lock or TASK_FREEZABLE tasks. If the state matches,
4052 * set p::saved_state to TASK_RUNNING, but do not wake the task
4053 * because it waits for a lock wakeup or __thaw_task(). Also
4054 * indicate success because from the regular waker's point of
4055 * view this has succeeded.
5f220be2
TG
4056 *
4057 * After acquiring the lock the task will restore p::__state
4058 * from p::saved_state which ensures that the regular
4059 * wakeup is not lost. The restore will also set
4060 * p::saved_state to TASK_RUNNING so any further tests will
4061 * not result in false positives vs. @success
4062 */
1c069187 4063 if (match < 0)
5f220be2 4064 p->saved_state = TASK_RUNNING;
fbaa6a18 4065
1c069187 4066 return match > 0;
43295d73
TG
4067}
4068
8643cda5
PZ
4069/*
4070 * Notes on Program-Order guarantees on SMP systems.
4071 *
4072 * MIGRATION
4073 *
4074 * The basic program-order guarantee on SMP systems is that when a task [t]
d1ccc66d
IM
4075 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
4076 * execution on its new CPU [c1].
8643cda5
PZ
4077 *
4078 * For migration (of runnable tasks) this is provided by the following means:
4079 *
4080 * A) UNLOCK of the rq(c0)->lock scheduling out task t
4081 * B) migration for t is required to synchronize *both* rq(c0)->lock and
4082 * rq(c1)->lock (if not at the same time, then in that order).
4083 * C) LOCK of the rq(c1)->lock scheduling in task
4084 *
7696f991 4085 * Release/acquire chaining guarantees that B happens after A and C after B.
d1ccc66d 4086 * Note: the CPU doing B need not be c0 or c1
8643cda5
PZ
4087 *
4088 * Example:
4089 *
4090 * CPU0 CPU1 CPU2
4091 *
4092 * LOCK rq(0)->lock
4093 * sched-out X
4094 * sched-in Y
4095 * UNLOCK rq(0)->lock
4096 *
4097 * LOCK rq(0)->lock // orders against CPU0
4098 * dequeue X
4099 * UNLOCK rq(0)->lock
4100 *
4101 * LOCK rq(1)->lock
4102 * enqueue X
4103 * UNLOCK rq(1)->lock
4104 *
4105 * LOCK rq(1)->lock // orders against CPU2
4106 * sched-out Z
4107 * sched-in X
4108 * UNLOCK rq(1)->lock
4109 *
4110 *
4111 * BLOCKING -- aka. SLEEP + WAKEUP
4112 *
4113 * For blocking we (obviously) need to provide the same guarantee as for
4114 * migration. However the means are completely different as there is no lock
4115 * chain to provide order. Instead we do:
4116 *
58877d34
PZ
4117 * 1) smp_store_release(X->on_cpu, 0) -- finish_task()
4118 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
8643cda5
PZ
4119 *
4120 * Example:
4121 *
4122 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule)
4123 *
4124 * LOCK rq(0)->lock LOCK X->pi_lock
4125 * dequeue X
4126 * sched-out X
4127 * smp_store_release(X->on_cpu, 0);
4128 *
1f03e8d2 4129 * smp_cond_load_acquire(&X->on_cpu, !VAL);
8643cda5
PZ
4130 * X->state = WAKING
4131 * set_task_cpu(X,2)
4132 *
4133 * LOCK rq(2)->lock
4134 * enqueue X
4135 * X->state = RUNNING
4136 * UNLOCK rq(2)->lock
4137 *
4138 * LOCK rq(2)->lock // orders against CPU1
4139 * sched-out Z
4140 * sched-in X
4141 * UNLOCK rq(2)->lock
4142 *
4143 * UNLOCK X->pi_lock
4144 * UNLOCK rq(0)->lock
4145 *
4146 *
7696f991
AP
4147 * However, for wakeups there is a second guarantee we must provide, namely we
4148 * must ensure that CONDITION=1 done by the caller can not be reordered with
4149 * accesses to the task state; see try_to_wake_up() and set_current_state().
8643cda5
PZ
4150 */
4151
9ed3811a 4152/**
1da177e4 4153 * try_to_wake_up - wake up a thread
9ed3811a 4154 * @p: the thread to be awakened
1da177e4 4155 * @state: the mask of task states that can be woken
9ed3811a 4156 * @wake_flags: wake modifier flags (WF_*)
1da177e4 4157 *
58877d34
PZ
4158 * Conceptually does:
4159 *
4160 * If (@state & @p->state) @p->state = TASK_RUNNING.
1da177e4 4161 *
a2250238
PZ
4162 * If the task was not queued/runnable, also place it back on a runqueue.
4163 *
58877d34
PZ
4164 * This function is atomic against schedule() which would dequeue the task.
4165 *
4166 * It issues a full memory barrier before accessing @p->state, see the comment
4167 * with set_current_state().
a2250238 4168 *
58877d34 4169 * Uses p->pi_lock to serialize against concurrent wake-ups.
a2250238 4170 *
58877d34
PZ
4171 * Relies on p->pi_lock stabilizing:
4172 * - p->sched_class
4173 * - p->cpus_ptr
4174 * - p->sched_task_group
4175 * in order to do migration, see its use of select_task_rq()/set_task_cpu().
4176 *
4177 * Tries really hard to only take one task_rq(p)->lock for performance.
4178 * Takes rq->lock in:
4179 * - ttwu_runnable() -- old rq, unavoidable, see comment there;
4180 * - ttwu_queue() -- new rq, for enqueue of the task;
4181 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4182 *
4183 * As a consequence we race really badly with just about everything. See the
4184 * many memory barriers and their comments for details.
7696f991 4185 *
a2250238
PZ
4186 * Return: %true if @p->state changes (an actual wakeup was done),
4187 * %false otherwise.
1da177e4 4188 */
ab83f455 4189int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1da177e4 4190{
857d315f 4191 guard(preempt)();
c05fbafb 4192 int cpu, success = 0;
2398f2c6 4193
b62933ee
TH
4194 wake_flags |= WF_TTWU;
4195
aacedf26
PZ
4196 if (p == current) {
4197 /*
4198 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
4199 * == smp_processor_id()'. Together this means we can special
58877d34 4200 * case the whole 'p->on_rq && ttwu_runnable()' case below
aacedf26
PZ
4201 * without taking any locks.
4202 *
abc158c8
PZ
4203 * Specifically, given current runs ttwu() we must be before
4204 * schedule()'s block_task(), as such this must not observe
4205 * sched_delayed.
4206 *
aacedf26
PZ
4207 * In particular:
4208 * - we rely on Program-Order guarantees for all the ordering,
4209 * - we're serialized against set_special_state() by virtue of
4210 * it disabling IRQs (this allows not taking ->pi_lock).
4211 */
f7d2728c 4212 WARN_ON_ONCE(p->se.sched_delayed);
43295d73 4213 if (!ttwu_state_match(p, state, &success))
e3d85487 4214 goto out;
aacedf26 4215
aacedf26 4216 trace_sched_waking(p);
160fb0d8 4217 ttwu_do_wakeup(p);
aacedf26
PZ
4218 goto out;
4219 }
4220
e0acd0a6
ON
4221 /*
4222 * If we are going to wake up a thread waiting for CONDITION we
4223 * need to ensure that CONDITION=1 done by the caller can not be
58877d34
PZ
4224 * reordered with p->state check below. This pairs with smp_store_mb()
4225 * in set_current_state() that the waiting thread does.
e0acd0a6 4226 */
857d315f
PZ
4227 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
4228 smp_mb__after_spinlock();
4229 if (!ttwu_state_match(p, state, &success))
4230 break;
1da177e4 4231
857d315f 4232 trace_sched_waking(p);
fbd705a0 4233
857d315f
PZ
4234 /*
4235 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4236 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4237 * in smp_cond_load_acquire() below.
4238 *
4239 * sched_ttwu_pending() try_to_wake_up()
4240 * STORE p->on_rq = 1 LOAD p->state
4241 * UNLOCK rq->lock
4242 *
4243 * __schedule() (switch to task 'p')
4244 * LOCK rq->lock smp_rmb();
4245 * smp_mb__after_spinlock();
4246 * UNLOCK rq->lock
4247 *
4248 * [task p]
4249 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
4250 *
4251 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4252 * __schedule(). See the comment for smp_mb__after_spinlock().
4253 *
ea41bb51 4254 * A similar smp_rmb() lives in __task_needs_rq_lock().
857d315f
PZ
4255 */
4256 smp_rmb();
4257 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
4258 break;
1da177e4 4259
1da177e4 4260#ifdef CONFIG_SMP
857d315f
PZ
4261 /*
4262 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4263 * possible to, falsely, observe p->on_cpu == 0.
4264 *
4265 * One must be running (->on_cpu == 1) in order to remove oneself
4266 * from the runqueue.
4267 *
4268 * __schedule() (switch to task 'p') try_to_wake_up()
4269 * STORE p->on_cpu = 1 LOAD p->on_rq
4270 * UNLOCK rq->lock
4271 *
4272 * __schedule() (put 'p' to sleep)
4273 * LOCK rq->lock smp_rmb();
4274 * smp_mb__after_spinlock();
4275 * STORE p->on_rq = 0 LOAD p->on_cpu
4276 *
4277 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4278 * __schedule(). See the comment for smp_mb__after_spinlock().
4279 *
4280 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
4281 * schedule()'s deactivate_task() has 'happened' and p will no longer
4282 * care about its own p->state. See the comment in __schedule().
4283 */
4284 smp_acquire__after_ctrl_dep();
dbfb089d 4285
857d315f
PZ
4286 /*
4287 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4288 * == 0), which means we need to do an enqueue, change p->state to
4289 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4290 * enqueue, such as ttwu_queue_wakelist().
4291 */
4292 WRITE_ONCE(p->__state, TASK_WAKING);
ecf7d01c 4293
857d315f
PZ
4294 /*
4295 * If the owning (remote) CPU is still in the middle of schedule() with
4296 * this task as prev, consider queueing p on the remote CPU's wake_list,
4297 * which potentially sends an IPI instead of spinning on p->on_cpu to
4298 * let the waker make forward progress. This is safe because IRQs are
4299 * disabled and the IPI will deliver after on_cpu is cleared.
4300 *
4301 * Ensure we load task_cpu(p) after p->on_cpu:
4302 *
4303 * set_task_cpu(p, cpu);
4304 * STORE p->cpu = @cpu
4305 * __schedule() (switch to task 'p')
4306 * LOCK rq->lock
4307 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu)
4308 * STORE p->on_cpu = 1 LOAD p->cpu
4309 *
4310 * to ensure we observe the correct CPU on which the task is currently
4311 * scheduling.
4312 */
4313 if (smp_load_acquire(&p->on_cpu) &&
4314 ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
4315 break;
c6e7bd7a 4316
857d315f
PZ
4317 /*
4318 * If the owning (remote) CPU is still in the middle of schedule() with
4319 * this task as prev, wait until it's done referencing the task.
4320 *
4321 * Pairs with the smp_store_release() in finish_task().
4322 *
4323 * This ensures that tasks getting woken will be fully ordered against
4324 * their previous state and preserve Program Order.
4325 */
4326 smp_cond_load_acquire(&p->on_cpu, !VAL);
1da177e4 4327
b62933ee 4328 cpu = select_task_rq(p, p->wake_cpu, &wake_flags);
857d315f
PZ
4329 if (task_cpu(p) != cpu) {
4330 if (p->in_iowait) {
4331 delayacct_blkio_end(p);
4332 atomic_dec(&task_rq(p)->nr_iowait);
4333 }
ec618b84 4334
857d315f
PZ
4335 wake_flags |= WF_MIGRATED;
4336 psi_ttwu_dequeue(p);
4337 set_task_cpu(p, cpu);
4338 }
b6e13e85 4339#else
857d315f 4340 cpu = task_cpu(p);
1da177e4 4341#endif /* CONFIG_SMP */
1da177e4 4342
857d315f
PZ
4343 ttwu_queue(p, cpu, wake_flags);
4344 }
aacedf26
PZ
4345out:
4346 if (success)
b6e13e85 4347 ttwu_stat(p, task_cpu(p), wake_flags);
1da177e4
LT
4348
4349 return success;
4350}
4351
91dabf33
PZ
4352static bool __task_needs_rq_lock(struct task_struct *p)
4353{
4354 unsigned int state = READ_ONCE(p->__state);
4355
4356 /*
4357 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
4358 * the task is blocked. Make sure to check @state since ttwu() can drop
4359 * locks at the end, see ttwu_queue_wakelist().
4360 */
4361 if (state == TASK_RUNNING || state == TASK_WAKING)
4362 return true;
4363
4364 /*
4365 * Ensure we load p->on_rq after p->__state, otherwise it would be
4366 * possible to, falsely, observe p->on_rq == 0.
4367 *
4368 * See try_to_wake_up() for a longer comment.
4369 */
4370 smp_rmb();
4371 if (p->on_rq)
4372 return true;
4373
4374#ifdef CONFIG_SMP
4375 /*
4376 * Ensure the task has finished __schedule() and will not be referenced
4377 * anymore. Again, see try_to_wake_up() for a longer comment.
4378 */
4379 smp_rmb();
4380 smp_cond_load_acquire(&p->on_cpu, !VAL);
4381#endif
4382
4383 return false;
4384}
4385
2beaf328 4386/**
9b3c4ab3 4387 * task_call_func - Invoke a function on task in fixed state
1b7af295 4388 * @p: Process for which the function is to be invoked, can be @current.
2beaf328
PM
4389 * @func: Function to invoke.
4390 * @arg: Argument to function.
4391 *
f6ac18fa 4392 * Fix the task in its current state by avoiding wakeups and/or rq operations
cd9626e9
PZ
4393 * and call @func(@arg) on it. This function can use task_is_runnable() and
4394 * task_curr() to work out what the state is, if required. Given that @func
4395 * can be invoked with a runqueue lock held, it had better be quite
4396 * lightweight.
2beaf328
PM
4397 *
4398 * Returns:
f6ac18fa 4399 * Whatever @func returns
2beaf328 4400 */
9b3c4ab3 4401int task_call_func(struct task_struct *p, task_call_f func, void *arg)
2beaf328 4402{
f6ac18fa 4403 struct rq *rq = NULL;
2beaf328 4404 struct rq_flags rf;
9b3c4ab3 4405 int ret;
2beaf328 4406
1b7af295 4407 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
f6ac18fa 4408
91dabf33 4409 if (__task_needs_rq_lock(p))
2beaf328 4410 rq = __task_rq_lock(p, &rf);
f6ac18fa
PZ
4411
4412 /*
4413 * At this point the task is pinned; either:
4414 * - blocked and we're holding off wakeups (pi->lock)
4415 * - woken, and we're holding off enqueue (rq->lock)
4416 * - queued, and we're holding off schedule (rq->lock)
4417 * - running, and we're holding off de-schedule (rq->lock)
4418 *
4419 * The called function (@func) can use: task_curr(), p->on_rq and
4420 * p->__state to differentiate between these states.
4421 */
4422 ret = func(p, arg);
4423
4424 if (rq)
2beaf328 4425 rq_unlock(rq, &rf);
f6ac18fa 4426
1b7af295 4427 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2beaf328
PM
4428 return ret;
4429}
4430
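/*
 * Illustrative sketch, not part of the kernel source: a hypothetical
 * @func passed to task_call_func(). The callback runs with @p pinned as
 * described above, so it may safely sample scheduling state:
 *
 *	static int get_state_cb(struct task_struct *p, void *arg)
 *	{
 *		*(unsigned int *)arg = READ_ONCE(p->__state);
 *		return task_curr(p);
 *	}
 *
 *	unsigned int state;
 *	int was_running = task_call_func(p, get_state_cb, &state);
 */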
e386b672
PM
4431/**
4432 * cpu_curr_snapshot - Return a snapshot of the currently running task
4433 * @cpu: The CPU on which to snapshot the task.
4434 *
4435 * Returns the task_struct pointer of the task "currently" running on
399ced95 4436 * the specified CPU.
e386b672
PM
4437 *
4438 * If the specified CPU was offline, the return value is whatever it
4439 * is, perhaps a pointer to the task_struct structure of that CPU's idle
4440 * task, but there is no guarantee. Callers wishing a useful return
4441 * value must take some action to ensure that the specified CPU remains
4442 * online throughout.
4443 *
4444 * This function executes full memory barriers before and after fetching
4445 * the pointer, which permits the caller to confine this function's fetch
4446 * with respect to the caller's accesses to other shared variables.
4447 */
4448struct task_struct *cpu_curr_snapshot(int cpu)
4449{
399ced95 4450 struct rq *rq = cpu_rq(cpu);
e386b672 4451 struct task_struct *t;
399ced95 4452 struct rq_flags rf;
e386b672 4453
399ced95
FW
4454 rq_lock_irqsave(rq, &rf);
4455 smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */
e386b672 4456 t = rcu_dereference(cpu_curr(cpu));
399ced95 4457 rq_unlock_irqrestore(rq, &rf);
e386b672 4458 smp_mb(); /* Pairing determined by caller's synchronization design. */
399ced95 4459
e386b672
PM
4460 return t;
4461}
4462
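/*
 * Illustrative sketch, not part of the kernel source: snapshotting the
 * task running on each online CPU. Per the comment above, keeping the
 * CPUs online (e.g. via cpus_read_lock()) is the caller's job, and the
 * result is only a snapshot ('inspect' is a hypothetical helper):
 *
 *	int cpu;
 *
 *	cpus_read_lock();
 *	for_each_online_cpu(cpu)
 *		inspect(cpu_curr_snapshot(cpu));
 *	cpus_read_unlock();
 */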
50fa610a
DH
4463/**
4464 * wake_up_process - Wake up a specific process
4465 * @p: The process to be woken up.
4466 *
4467 * Attempt to wake up the nominated process and move it to the set of runnable
e69f6186
YB
4468 * processes.
4469 *
4470 * Return: 1 if the process was woken up, 0 if it was already running.
50fa610a 4471 *
7696f991 4472 * This function executes a full memory barrier before accessing the task state.
50fa610a 4473 */
7ad5b3a5 4474int wake_up_process(struct task_struct *p)
1da177e4 4475{
9067ac85 4476 return try_to_wake_up(p, TASK_NORMAL, 0);
1da177e4 4477}
1da177e4
LT
4478EXPORT_SYMBOL(wake_up_process);
4479
7ad5b3a5 4480int wake_up_state(struct task_struct *p, unsigned int state)
1da177e4
LT
4481{
4482 return try_to_wake_up(p, state, 0);
4483}
4484
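/*
 * Illustrative sketch, not part of the kernel source: the classic
 * sleeper/waker pairing these helpers are meant for (see the wait-loop
 * comment above ttwu_runnable()); 'done' and 'waiter' are assumptions:
 *
 *	// sleeper
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (READ_ONCE(done))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 *	// waker
 *	WRITE_ONCE(done, 1);		// the CONDITION
 *	wake_up_process(waiter);	// full barrier before reading ->state
 */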
1da177e4
LT
4485/*
4486 * Perform scheduler related setup for a newly forked process p.
4487 * p is forked by current.
dd41f596 4488 *
b23decf8
TG
4489 * __sched_fork() is basic setup which is also used by sched_init() to
4490 * initialize the boot CPU's idle task.
dd41f596 4491 */
5e1576ed 4492static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
dd41f596 4493{
fd2f4419
PZ
4494 p->on_rq = 0;
4495
4496 p->se.on_rq = 0;
dd41f596
IM
4497 p->se.exec_start = 0;
4498 p->se.sum_exec_runtime = 0;
f6cf891c 4499 p->se.prev_sum_exec_runtime = 0;
6c594c21 4500 p->se.nr_migrations = 0;
da7a735e 4501 p->se.vruntime = 0;
86bfbb7c 4502 p->se.vlag = 0;
fd2f4419 4503 INIT_LIST_HEAD(&p->se.group_node);
6cfb0d5d 4504
abc158c8 4505 /* A delayed task cannot be in clone(). */
f7d2728c 4506 WARN_ON_ONCE(p->se.sched_delayed);
abc158c8 4507
ad936d86
BP
4508#ifdef CONFIG_FAIR_GROUP_SCHED
4509 p->se.cfs_rq = NULL;
4510#endif
4511
6cfb0d5d 4512#ifdef CONFIG_SCHEDSTATS
cb251765 4513 /* Even if schedstat is disabled, there should not be garbage */
ceeadb83 4514 memset(&p->stats, 0, sizeof(p->stats));
6cfb0d5d 4515#endif
476d139c 4516
9e07d45c 4517 init_dl_entity(&p->dl);
aab03e05 4518
fa717060 4519 INIT_LIST_HEAD(&p->rt.run_list);
ff77e468
PZ
4520 p->rt.timeout = 0;
4521 p->rt.time_slice = sched_rr_timeslice;
4522 p->rt.on_rq = 0;
4523 p->rt.on_list = 0;
476d139c 4524
f0e1a064
TH
4525#ifdef CONFIG_SCHED_CLASS_EXT
4526 init_scx_entity(&p->scx);
4527#endif
4528
e107be36
AK
4529#ifdef CONFIG_PREEMPT_NOTIFIERS
4530 INIT_HLIST_HEAD(&p->preempt_notifiers);
4531#endif
cbee9f88 4532
5e1f0f09
MG
4533#ifdef CONFIG_COMPACTION
4534 p->capture_control = NULL;
4535#endif
13784475 4536 init_numa_balancing(clone_flags, p);
a1488664 4537#ifdef CONFIG_SMP
8c4890d1 4538 p->wake_entry.u_flags = CSD_TYPE_TTWU;
6d337eab 4539 p->migration_pending = NULL;
a1488664 4540#endif
223baf9d 4541 init_sched_mm_cid(p);
dd41f596
IM
4542}
4543
2a595721
SD
4544DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
4545
1a687c2e 4546#ifdef CONFIG_NUMA_BALANCING
c3b9bc5b 4547
c574bbe9
HY
4548int sysctl_numa_balancing_mode;
4549
4550static void __set_numabalancing_state(bool enabled)
1a687c2e
MG
4551{
4552 if (enabled)
2a595721 4553 static_branch_enable(&sched_numa_balancing);
1a687c2e 4554 else
2a595721 4555 static_branch_disable(&sched_numa_balancing);
1a687c2e 4556}
54a43d54 4557
c574bbe9
HY
4558void set_numabalancing_state(bool enabled)
4559{
4560 if (enabled)
4561 sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL;
4562 else
4563 sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED;
4564 __set_numabalancing_state(enabled);
4565}
4566
54a43d54 4567#ifdef CONFIG_PROC_SYSCTL
c959924b
HY
4568static void reset_memory_tiering(void)
4569{
4570 struct pglist_data *pgdat;
4571
4572 for_each_online_pgdat(pgdat) {
4573 pgdat->nbp_threshold = 0;
4574 pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
4575 pgdat->nbp_th_start = jiffies_to_msecs(jiffies);
4576 }
4577}
4578
78eb4ea2 4579static int sysctl_numa_balancing(const struct ctl_table *table, int write,
32927393 4580 void *buffer, size_t *lenp, loff_t *ppos)
54a43d54
AK
4581{
4582 struct ctl_table t;
4583 int err;
c574bbe9 4584 int state = sysctl_numa_balancing_mode;
54a43d54
AK
4585
4586 if (write && !capable(CAP_SYS_ADMIN))
4587 return -EPERM;
4588
4589 t = *table;
4590 t.data = &state;
4591 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4592 if (err < 0)
4593 return err;
c574bbe9 4594 if (write) {
c959924b
HY
4595 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
4596 (state & NUMA_BALANCING_MEMORY_TIERING))
4597 reset_memory_tiering();
c574bbe9
HY
4598 sysctl_numa_balancing_mode = state;
4599 __set_numabalancing_state(state);
4600 }
54a43d54
AK
4601 return err;
4602}
4603#endif
4604#endif
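/*
 * Usage note (illustrative, not part of the kernel source): the value is
 * a mode mask, assuming the NUMA_BALANCING_* flags from
 * <linux/sched/sysctl.h>:
 *
 *	# echo 0 > /proc/sys/kernel/numa_balancing	disable
 *	# echo 1 > /proc/sys/kernel/numa_balancing	NUMA_BALANCING_NORMAL
 *	# echo 2 > /proc/sys/kernel/numa_balancing	NUMA_BALANCING_MEMORY_TIERING
 */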
dd41f596 4605
4698f88c
JP
4606#ifdef CONFIG_SCHEDSTATS
4607
cb251765
MG
4608DEFINE_STATIC_KEY_FALSE(sched_schedstats);
4609
cb251765
MG
4610static void set_schedstats(bool enabled)
4611{
4612 if (enabled)
4613 static_branch_enable(&sched_schedstats);
4614 else
4615 static_branch_disable(&sched_schedstats);
4616}
4617
4618void force_schedstat_enabled(void)
4619{
4620 if (!schedstat_enabled()) {
4621 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
4622 static_branch_enable(&sched_schedstats);
4623 }
4624}
4625
4626static int __init setup_schedstats(char *str)
4627{
4628 int ret = 0;
4629 if (!str)
4630 goto out;
4631
4632 if (!strcmp(str, "enable")) {
1faa491a 4633 set_schedstats(true);
cb251765
MG
4634 ret = 1;
4635 } else if (!strcmp(str, "disable")) {
1faa491a 4636 set_schedstats(false);
cb251765
MG
4637 ret = 1;
4638 }
4639out:
4640 if (!ret)
4641 pr_warn("Unable to parse schedstats=\n");
4642
4643 return ret;
4644}
4645__setup("schedstats=", setup_schedstats);
4646
4647#ifdef CONFIG_PROC_SYSCTL
78eb4ea2 4648static int sysctl_schedstats(const struct ctl_table *table, int write, void *buffer,
32927393 4649 size_t *lenp, loff_t *ppos)
cb251765
MG
4650{
4651 struct ctl_table t;
4652 int err;
4653 int state = static_branch_likely(&sched_schedstats);
4654
4655 if (write && !capable(CAP_SYS_ADMIN))
4656 return -EPERM;
4657
4658 t = *table;
4659 t.data = &state;
4660 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4661 if (err < 0)
4662 return err;
4663 if (write)
4664 set_schedstats(state);
4665 return err;
4666}
4698f88c 4667#endif /* CONFIG_PROC_SYSCTL */
4698f88c 4668#endif /* CONFIG_SCHEDSTATS */
dd41f596 4669
3267e015 4670#ifdef CONFIG_SYSCTL
1751f872 4671static const struct ctl_table sched_core_sysctls[] = {
3267e015 4672#ifdef CONFIG_SCHEDSTATS
f5ef06d5
ZN
4673 {
4674 .procname = "sched_schedstats",
4675 .data = NULL,
4676 .maxlen = sizeof(unsigned int),
4677 .mode = 0644,
4678 .proc_handler = sysctl_schedstats,
4679 .extra1 = SYSCTL_ZERO,
4680 .extra2 = SYSCTL_ONE,
4681 },
3267e015
ZN
4682#endif /* CONFIG_SCHEDSTATS */
4683#ifdef CONFIG_UCLAMP_TASK
4684 {
4685 .procname = "sched_util_clamp_min",
4686 .data = &sysctl_sched_uclamp_util_min,
4687 .maxlen = sizeof(unsigned int),
4688 .mode = 0644,
4689 .proc_handler = sysctl_sched_uclamp_handler,
4690 },
4691 {
4692 .procname = "sched_util_clamp_max",
4693 .data = &sysctl_sched_uclamp_util_max,
4694 .maxlen = sizeof(unsigned int),
4695 .mode = 0644,
4696 .proc_handler = sysctl_sched_uclamp_handler,
4697 },
4698 {
4699 .procname = "sched_util_clamp_min_rt_default",
4700 .data = &sysctl_sched_uclamp_util_min_rt_default,
4701 .maxlen = sizeof(unsigned int),
4702 .mode = 0644,
4703 .proc_handler = sysctl_sched_uclamp_handler,
4704 },
4705#endif /* CONFIG_UCLAMP_TASK */
0dff89c4
KW
4706#ifdef CONFIG_NUMA_BALANCING
4707 {
4708 .procname = "numa_balancing",
4709 .data = NULL, /* filled in by handler */
4710 .maxlen = sizeof(unsigned int),
4711 .mode = 0644,
4712 .proc_handler = sysctl_numa_balancing,
4713 .extra1 = SYSCTL_ZERO,
4714 .extra2 = SYSCTL_FOUR,
4715 },
4716#endif /* CONFIG_NUMA_BALANCING */
f5ef06d5 4717};
3267e015 4718static int __init sched_core_sysctl_init(void)
f5ef06d5 4719{
3267e015 4720 register_sysctl_init("kernel", sched_core_sysctls);
f5ef06d5
ZN
4721 return 0;
4722}
3267e015
ZN
4723late_initcall(sched_core_sysctl_init);
4724#endif /* CONFIG_SYSCTL */
dd41f596
IM
4725
4726/*
4727 * fork()/clone()-time setup:
4728 */
aab03e05 4729int sched_fork(unsigned long clone_flags, struct task_struct *p)
dd41f596 4730{
5e1576ed 4731 __sched_fork(clone_flags, p);
06b83b5f 4732 /*
7dc603c9 4733 * We mark the process as NEW here. This guarantees that
06b83b5f
PZ
4734 * nobody will actually run it, and a signal or other external
4735 * event cannot wake it up and insert it on the runqueue either.
4736 */
2f064a59 4737 p->__state = TASK_NEW;
dd41f596 4738
c350a04e
MG
4739 /*
4740 * Make sure we do not leak PI boosting priority to the child.
4741 */
4742 p->prio = current->normal_prio;
4743
e8f14172
PB
4744 uclamp_fork(p);
4745
b9dc29e7
MG
4746 /*
4747 * Revert to default priority/policy on fork if requested.
4748 */
4749 if (unlikely(p->sched_reset_on_fork)) {
aab03e05 4750 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
b9dc29e7 4751 p->policy = SCHED_NORMAL;
6c697bdf 4752 p->static_prio = NICE_TO_PRIO(0);
c350a04e
MG
4753 p->rt_priority = 0;
4754 } else if (PRIO_TO_NICE(p->static_prio) < 0)
4755 p->static_prio = NICE_TO_PRIO(0);
4756
f558c2b8 4757 p->prio = p->normal_prio = p->static_prio;
b1e82065 4758 set_load_weight(p, false);
857b158d
PZ
4759 p->se.custom_slice = 0;
4760 p->se.slice = sysctl_sched_base_slice;
6c697bdf 4761
b9dc29e7
MG
4762 /*
4763 * We don't need the reset flag anymore after the fork. It has
4764 * fulfilled its duty:
4765 */
4766 p->sched_reset_on_fork = 0;
4767 }
ca94c442 4768
af0fffd9 4769 if (dl_prio(p->prio))
aab03e05 4770 return -EAGAIN;
60564acb 4771
a7a9fc54
TH
4772 scx_pre_fork(p);
4773
60564acb 4774 if (rt_prio(p->prio)) {
aab03e05 4775 p->sched_class = &rt_sched_class;
f0e1a064 4776#ifdef CONFIG_SCHED_CLASS_EXT
5db91545 4777 } else if (task_should_scx(p->policy)) {
f0e1a064
TH
4778 p->sched_class = &ext_sched_class;
4779#endif
a7a9fc54 4780 } else {
2ddbf952 4781 p->sched_class = &fair_sched_class;
a7a9fc54 4782 }
b29739f9 4783
7dc603c9 4784 init_entity_runnable_average(&p->se);
cd29fe6f 4785
b1e82065 4786
f6db8347 4787#ifdef CONFIG_SCHED_INFO
dd41f596 4788 if (likely(sched_info_on()))
52f17b6c 4789 memset(&p->sched_info, 0, sizeof(p->sched_info));
1da177e4 4790#endif
3ca7a440
PZ
4791#if defined(CONFIG_SMP)
4792 p->on_cpu = 0;
4866cde0 4793#endif
01028747 4794 init_task_preempt_count(p);
806c09a7 4795#ifdef CONFIG_SMP
917b627d 4796 plist_node_init(&p->pushable_tasks, MAX_PRIO);
1baca4ce 4797 RB_CLEAR_NODE(&p->pushable_dl_tasks);
806c09a7 4798#endif
aab03e05 4799 return 0;
1da177e4
LT
4800}
4801
304b3f2b 4802int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
13685c4a 4803{
4ef0c5c6 4804 unsigned long flags;
4ef0c5c6 4805
b1e82065
PZ
4806 /*
4807 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
4808 * required yet, but lockdep gets upset if rules are violated.
4809 */
4ef0c5c6
ZQ
4810 raw_spin_lock_irqsave(&p->pi_lock, flags);
4811#ifdef CONFIG_CGROUP_SCHED
b1e82065
PZ
4812 if (1) {
4813 struct task_group *tg;
4814 tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
4815 struct task_group, css);
4816 tg = autogroup_task_group(p, tg);
4817 p->sched_task_group = tg;
4818 }
4ef0c5c6
ZQ
4819#endif
4820 rseq_migrate(p);
4821 /*
4822 * We're setting the CPU for the first time, we don't migrate,
4823 * so use __set_task_cpu().
4824 */
4825 __set_task_cpu(p, smp_processor_id());
4826 if (p->sched_class->task_fork)
4827 p->sched_class->task_fork(p);
4828 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
304b3f2b 4829
a7a9fc54 4830 return scx_fork(p);
304b3f2b
TH
4831}
4832
4833void sched_cancel_fork(struct task_struct *p)
4834{
a7a9fc54 4835 scx_cancel_fork(p);
b1e82065 4836}
4ef0c5c6 4837
b1e82065
PZ
4838void sched_post_fork(struct task_struct *p)
4839{
13685c4a 4840 uclamp_post_fork(p);
a7a9fc54 4841 scx_post_fork(p);
13685c4a
QY
4842}
4843
332ac17e
DF
4844unsigned long to_ratio(u64 period, u64 runtime)
4845{
4846 if (runtime == RUNTIME_INF)
c52f14d3 4847 return BW_UNIT;
332ac17e
DF
4848
4849 /*
4850 * Doing this here saves a lot of checks in all
4851 * the calling paths, and returning zero seems
4852 * safe for them anyway.
4853 */
4854 if (period == 0)
4855 return 0;
4856
c52f14d3 4857 return div64_u64(runtime << BW_SHIFT, period);
332ac17e
DF
4858}
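/*
 * Worked example, assuming BW_SHIFT == 20 (BW_UNIT == 1 << 20) as defined
 * in sched.h: a reservation of 50ms every 100ms becomes
 *
 *	to_ratio(100 * NSEC_PER_MSEC, 50 * NSEC_PER_MSEC)
 *		= (50000000 << 20) / 100000000
 *		= 524288 = BW_UNIT / 2, i.e. half of one CPU.
 */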
4859
1da177e4
LT
4860/*
4861 * wake_up_new_task - wake up a newly created task for the first time.
4862 *
4863 * This function will do some initial scheduler statistics housekeeping
4864 * that must be done for every newly created context, then puts the task
4865 * on the runqueue and wakes it.
4866 */
3e51e3ed 4867void wake_up_new_task(struct task_struct *p)
1da177e4 4868{
eb580751 4869 struct rq_flags rf;
dd41f596 4870 struct rq *rq;
b62933ee 4871 int wake_flags = WF_FORK;
fabf318e 4872
eb580751 4873 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
2f064a59 4874 WRITE_ONCE(p->__state, TASK_RUNNING);
fabf318e
PZ
4875#ifdef CONFIG_SMP
4876 /*
4877 * Fork balancing, do it here and not earlier because:
3bd37062 4878 * - cpus_ptr can change in the fork path
d1ccc66d 4879 * - any previously selected CPU might disappear through hotplug
e210bffd
PZ
4880 *
4881 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
4882 * as we're not fully set-up yet.
fabf318e 4883 */
32e839dd 4884 p->recent_used_cpu = task_cpu(p);
ce3614da 4885 rseq_migrate(p);
b62933ee 4886 __set_task_cpu(p, select_task_rq(p, task_cpu(p), &wake_flags));
0017d735 4887#endif
b7fa30c9 4888 rq = __task_rq_lock(p, &rf);
4126bad6 4889 update_rq_clock(rq);
d0fe0b9c 4890 post_init_entity_util_avg(p);
0017d735 4891
c40dd90a 4892 activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL);
fbd705a0 4893 trace_sched_wakeup_new(p);
b62933ee 4894 wakeup_preempt(rq, p, wake_flags);
9a897c5a 4895#ifdef CONFIG_SMP
0aaafaab
PZ
4896 if (p->sched_class->task_woken) {
4897 /*
b19a888c 4898 * Nothing relies on rq->lock after this, so it's fine to
0aaafaab
PZ
4899 * drop it.
4900 */
d8ac8971 4901 rq_unpin_lock(rq, &rf);
efbbd05a 4902 p->sched_class->task_woken(rq, p);
d8ac8971 4903 rq_repin_lock(rq, &rf);
0aaafaab 4904 }
9a897c5a 4905#endif
eb580751 4906 task_rq_unlock(rq, p, &rf);
1da177e4
LT
4907}
4908
e107be36
AK
4909#ifdef CONFIG_PREEMPT_NOTIFIERS
4910
b7203428 4911static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
1cde2930 4912
2ecd9d29
PZ
4913void preempt_notifier_inc(void)
4914{
b7203428 4915 static_branch_inc(&preempt_notifier_key);
2ecd9d29
PZ
4916}
4917EXPORT_SYMBOL_GPL(preempt_notifier_inc);
4918
4919void preempt_notifier_dec(void)
4920{
b7203428 4921 static_branch_dec(&preempt_notifier_key);
2ecd9d29
PZ
4922}
4923EXPORT_SYMBOL_GPL(preempt_notifier_dec);
4924
e107be36 4925/**
80dd99b3 4926 * preempt_notifier_register - tell me when current is being preempted & rescheduled
421cee29 4927 * @notifier: notifier struct to register
e107be36
AK
4928 */
4929void preempt_notifier_register(struct preempt_notifier *notifier)
4930{
b7203428 4931 if (!static_branch_unlikely(&preempt_notifier_key))
2ecd9d29
PZ
4932 WARN(1, "registering preempt_notifier while notifiers disabled\n");
4933
e107be36
AK
4934 hlist_add_head(&notifier->link, &current->preempt_notifiers);
4935}
4936EXPORT_SYMBOL_GPL(preempt_notifier_register);
4937
4938/**
4939 * preempt_notifier_unregister - no longer interested in preemption notifications
421cee29 4940 * @notifier: notifier struct to unregister
e107be36 4941 *
d84525a8 4942 * This is *not* safe to call from within a preemption notifier.
e107be36
AK
4943 */
4944void preempt_notifier_unregister(struct preempt_notifier *notifier)
4945{
4946 hlist_del(&notifier->link);
4947}
4948EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
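/*
 * Illustrative sketch, not part of the kernel source: a hypothetical
 * (KVM-style) user of preempt notifiers, assuming preempt_notifier_init()
 * and the preempt_ops layout from <linux/preempt.h>; all my_* names are
 * assumptions:
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu) { ... }
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next) { ... }
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in	= my_sched_in,
 *		.sched_out	= my_sched_out,
 *	};
 *	static struct preempt_notifier my_notifier;
 *
 *	preempt_notifier_inc();			// enable the static key
 *	preempt_notifier_init(&my_notifier, &my_ops);
 *	preempt_notifier_register(&my_notifier);	// affects current only
 *	...
 *	preempt_notifier_unregister(&my_notifier);
 *	preempt_notifier_dec();
 */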
4949
1cde2930 4950static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
e107be36
AK
4951{
4952 struct preempt_notifier *notifier;
e107be36 4953
b67bfe0d 4954 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
e107be36
AK
4955 notifier->ops->sched_in(notifier, raw_smp_processor_id());
4956}
4957
1cde2930
PZ
4958static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4959{
b7203428 4960 if (static_branch_unlikely(&preempt_notifier_key))
1cde2930
PZ
4961 __fire_sched_in_preempt_notifiers(curr);
4962}
4963
e107be36 4964static void
1cde2930
PZ
4965__fire_sched_out_preempt_notifiers(struct task_struct *curr,
4966 struct task_struct *next)
e107be36
AK
4967{
4968 struct preempt_notifier *notifier;
e107be36 4969
b67bfe0d 4970 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
e107be36
AK
4971 notifier->ops->sched_out(notifier, next);
4972}
4973
1cde2930
PZ
4974static __always_inline void
4975fire_sched_out_preempt_notifiers(struct task_struct *curr,
4976 struct task_struct *next)
4977{
b7203428 4978 if (static_branch_unlikely(&preempt_notifier_key))
1cde2930
PZ
4979 __fire_sched_out_preempt_notifiers(curr, next);
4980}
4981
6d6bc0ad 4982#else /* !CONFIG_PREEMPT_NOTIFIERS */
e107be36 4983
1cde2930 4984static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
e107be36
AK
4985{
4986}
4987
1cde2930 4988static inline void
e107be36
AK
4989fire_sched_out_preempt_notifiers(struct task_struct *curr,
4990 struct task_struct *next)
4991{
4992}
4993
6d6bc0ad 4994#endif /* CONFIG_PREEMPT_NOTIFIERS */
e107be36 4995
31cb1bc0 4996static inline void prepare_task(struct task_struct *next)
4997{
4998#ifdef CONFIG_SMP
4999 /*
5000 * Claim the task as running, we do this before switching to it
5001 * such that any running task will have this set.
58877d34 5002 *
f3dd3f67
TD
5003 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
5004 * its ordering comment.
31cb1bc0 5005 */
58877d34 5006 WRITE_ONCE(next->on_cpu, 1);
31cb1bc0 5007#endif
5008}
5009
5010static inline void finish_task(struct task_struct *prev)
5011{
5012#ifdef CONFIG_SMP
5013 /*
58877d34
PZ
5014 * This must be the very last reference to @prev from this CPU. After
5015 * p->on_cpu is cleared, the task can be moved to a different CPU. We
5016 * must ensure this doesn't happen until the switch is completely
31cb1bc0 5017 * finished.
5018 *
5019 * In particular, the load of prev->state in finish_task_switch() must
5020 * happen before this.
5021 *
5022 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
5023 */
5024 smp_store_release(&prev->on_cpu, 0);
5025#endif
5026}
5027
565790d2
PZ
5028#ifdef CONFIG_SMP
5029
8e5bad7d 5030static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
565790d2
PZ
5031{
5032 void (*func)(struct rq *rq);
8e5bad7d 5033 struct balance_callback *next;
565790d2 5034
5cb9eaa3 5035 lockdep_assert_rq_held(rq);
565790d2
PZ
5036
5037 while (head) {
5038 func = (void (*)(struct rq *))head->func;
5039 next = head->next;
5040 head->next = NULL;
5041 head = next;
5042
5043 func(rq);
5044 }
5045}
5046
ae792702
PZ
5047static void balance_push(struct rq *rq);
5048
04193d59
PZ
5049/*
5050 * balance_push_callback is a right abuse of the callback interface and plays
5051 * by significantly different rules.
5052 *
5053 * Where the normal balance_callback's purpose is to be run in the same context
5054 * that queued it (only later, when it's safe to drop rq->lock again),
5055 * balance_push_callback is specifically targeted at __schedule().
5056 *
5057 * This abuse is tolerated because it places all the unlikely/odd cases behind
5058 * a single test, namely: rq->balance_callback == NULL.
5059 */
8e5bad7d 5060struct balance_callback balance_push_callback = {
ae792702 5061 .next = NULL,
8e5bad7d 5062 .func = balance_push,
ae792702
PZ
5063};
5064
8e5bad7d 5065static inline struct balance_callback *
04193d59 5066__splice_balance_callbacks(struct rq *rq, bool split)
565790d2 5067{
8e5bad7d 5068 struct balance_callback *head = rq->balance_callback;
565790d2 5069
04193d59
PZ
5070 if (likely(!head))
5071 return NULL;
5072
5cb9eaa3 5073 lockdep_assert_rq_held(rq);
04193d59
PZ
5074 /*
5075 * Must not take balance_push_callback off the list when
5076 * splice_balance_callbacks() and balance_callbacks() are not
5077 * in the same rq->lock section.
5078 *
5079 * In that case it would be possible for __schedule() to interleave
5080 * and observe the list empty.
5081 */
5082 if (split && head == &balance_push_callback)
5083 head = NULL;
5084 else
565790d2
PZ
5085 rq->balance_callback = NULL;
5086
5087 return head;
5088}
5089
04746ed8 5090struct balance_callback *splice_balance_callbacks(struct rq *rq)
04193d59
PZ
5091{
5092 return __splice_balance_callbacks(rq, true);
5093}
5094
565790d2
PZ
5095static void __balance_callbacks(struct rq *rq)
5096{
04193d59 5097 do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
565790d2
PZ
5098}
5099
04746ed8 5100void balance_callbacks(struct rq *rq, struct balance_callback *head)
565790d2
PZ
5101{
5102 unsigned long flags;
5103
5104 if (unlikely(head)) {
5cb9eaa3 5105 raw_spin_rq_lock_irqsave(rq, flags);
565790d2 5106 do_balance_callbacks(rq, head);
5cb9eaa3 5107 raw_spin_rq_unlock_irqrestore(rq, flags);
565790d2
PZ
5108 }
5109}
5110
5111#else
5112
5113static inline void __balance_callbacks(struct rq *rq)
5114{
5115}
5116
565790d2
PZ
5117#endif
5118
269d5992
PZ
5119static inline void
5120prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
31cb1bc0 5121{
269d5992
PZ
5122 /*
5123 * Since the runqueue lock will be released by the next
5124 * task (which is an invalid locking op but in the case
5125 * of the scheduler it's an obvious special-case), we
5126 * do an early lockdep release here:
5127 */
5128 rq_unpin_lock(rq, rf);
9ef7e7e3 5129 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
31cb1bc0 5130#ifdef CONFIG_DEBUG_SPINLOCK
5131 /* this is a valid case when another task releases the spinlock */
5cb9eaa3 5132 rq_lockp(rq)->owner = next;
31cb1bc0 5133#endif
269d5992
PZ
5134}
5135
5136static inline void finish_lock_switch(struct rq *rq)
5137{
31cb1bc0 5138 /*
5139 * If we are tracking spinlock dependencies then we have to
5140 * fix up the runqueue lock - which gets 'carried over' from
5141 * prev into current:
5142 */
9ef7e7e3 5143 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
ae792702 5144 __balance_callbacks(rq);
5cb9eaa3 5145 raw_spin_rq_unlock_irq(rq);
31cb1bc0 5146}
5147
325ea10c
IM
5148/*
5149 * NOP if the arch has not defined these:
5150 */
5151
5152#ifndef prepare_arch_switch
5153# define prepare_arch_switch(next) do { } while (0)
5154#endif
5155
5156#ifndef finish_arch_post_lock_switch
5157# define finish_arch_post_lock_switch() do { } while (0)
5158#endif
5159
5fbda3ec
TG
5160static inline void kmap_local_sched_out(void)
5161{
5162#ifdef CONFIG_KMAP_LOCAL
5163 if (unlikely(current->kmap_ctrl.idx))
5164 __kmap_local_sched_out();
5165#endif
5166}
5167
5168static inline void kmap_local_sched_in(void)
5169{
5170#ifdef CONFIG_KMAP_LOCAL
5171 if (unlikely(current->kmap_ctrl.idx))
5172 __kmap_local_sched_in();
5173#endif
5174}
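/*
 * The two helpers above save and restore the outgoing/incoming task's
 * kmap_local() stack across a context switch, so a task holding local kmaps
 * can be scheduled out without its mappings becoming invalid.
 */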
5175
4866cde0
NP
5176/**
5177 * prepare_task_switch - prepare to switch tasks
5178 * @rq: the runqueue preparing to switch
421cee29 5179 * @prev: the current task that is being switched out
4866cde0
NP
5180 * @next: the task we are going to switch to.
5181 *
5182 * This is called with the rq lock held and interrupts off. It must
5183 * be paired with a subsequent finish_task_switch after the context
5184 * switch.
5185 *
5186 * prepare_task_switch sets up locking and calls architecture specific
5187 * hooks.
5188 */
e107be36
AK
5189static inline void
5190prepare_task_switch(struct rq *rq, struct task_struct *prev,
5191 struct task_struct *next)
4866cde0 5192{
0ed557aa 5193 kcov_prepare_switch(prev);
43148951 5194 sched_info_switch(rq, prev, next);
fe4b04fa 5195 perf_event_task_sched_out(prev, next);
d7822b1e 5196 rseq_preempt(prev);
e107be36 5197 fire_sched_out_preempt_notifiers(prev, next);
5fbda3ec 5198 kmap_local_sched_out();
31cb1bc0 5199 prepare_task(next);
4866cde0
NP
5200 prepare_arch_switch(next);
5201}
5202
1da177e4
LT
5203/**
5204 * finish_task_switch - clean up after a task-switch
5205 * @prev: the thread we just switched away from.
5206 *
4866cde0
NP
5207 * finish_task_switch must be called after the context switch, paired
5208 * with a prepare_task_switch call before the context switch.
5209 * finish_task_switch will reconcile locking set up by prepare_task_switch,
5210 * and do any other architecture-specific cleanup actions.
1da177e4
LT
5211 *
5212 * Note that we may have delayed dropping an mm in context_switch(). If
41a2d6cf 5213 * so, we finish that here outside of the runqueue lock. (Doing it
1da177e4
LT
5214 * with the lock held can cause deadlocks; see schedule() for
5215 * details.)
dfa50b60
ON
5216 *
5217 * The context switch has flipped the stack from under us and restored the
5218 * local variables which were saved when this task called schedule() in the
402de7fc 5219 * past. 'prev == current' is still correct but we need to recalculate this_rq
dfa50b60 5220 * because prev may have moved to another CPU.
1da177e4 5221 */
dfa50b60 5222static struct rq *finish_task_switch(struct task_struct *prev)
1da177e4
LT
5223 __releases(rq->lock)
5224{
dfa50b60 5225 struct rq *rq = this_rq();
1da177e4 5226 struct mm_struct *mm = rq->prev_mm;
fa2c3254 5227 unsigned int prev_state;
1da177e4 5228
609ca066
PZ
5229 /*
5230 * The previous task will have left us with a preempt_count of 2
5231 * because it left us after:
5232 *
5233 * schedule()
5234 * preempt_disable(); // 1
5235 * __schedule()
5236 * raw_spin_lock_irq(&rq->lock) // 2
5237 *
5238 * Also, see FORK_PREEMPT_COUNT.
5239 */
e2bf1c4b
PZ
5240 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
5241 "corrupted preempt_count: %s/%d/0x%x\n",
5242 current->comm, current->pid, preempt_count()))
5243 preempt_count_set(FORK_PREEMPT_COUNT);
609ca066 5244
1da177e4
LT
5245 rq->prev_mm = NULL;
5246
5247 /*
5248 * A task struct has one reference for the use as "current".
c394cc9f 5249 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
55a101f8
ON
5250 * schedule one last time. The schedule call will never return, and
5251 * the scheduled task must drop that reference.
95913d97
PZ
5252 *
5253 * We must observe prev->state before clearing prev->on_cpu (in
31cb1bc0 5254 * finish_task), otherwise a concurrent wakeup can get prev
95913d97
PZ
5255 * running on another CPU and we could race with its RUNNING -> DEAD
5256 * transition, resulting in a double drop.
1da177e4 5257 */
2f064a59 5258 prev_state = READ_ONCE(prev->__state);
bf9fae9f 5259 vtime_task_switch(prev);
a8d757ef 5260 perf_event_task_sched_in(prev, current);
31cb1bc0 5261 finish_task(prev);
0fdcccfa 5262 tick_nohz_task_switch();
31cb1bc0 5263 finish_lock_switch(rq);
01f23e16 5264 finish_arch_post_lock_switch();
0ed557aa 5265 kcov_finish_switch(current);
5fbda3ec
TG
5266 /*
5267 * kmap_local_sched_out() is invoked with rq::lock held and
5268 * interrupts disabled. There is no requirement for that, but the
5269 * sched out code does not have an interrupt enabled section.
5270 * Restoring the maps on sched in does not require interrupts being
5271 * disabled either.
5272 */
5273 kmap_local_sched_in();
e8fa1362 5274
e107be36 5275 fire_sched_in_preempt_notifiers(current);
306e0604 5276 /*
70216e18
MD
5277 * When switching through a kernel thread, the loop in
5278 * membarrier_{private,global}_expedited() may have observed that
5279 * kernel thread and not issued an IPI. It is therefore possible to
5280 * schedule between user->kernel->user threads without passing through
5281 * switch_mm(). Membarrier requires a barrier after storing to
5282 * rq->curr, before returning to userspace, so provide them here:
5283 *
5284 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
aa464ba9 5285 * provided by mmdrop_lazy_tlb(),
70216e18 5286 * - a sync_core for SYNC_CORE.
306e0604 5287 */
70216e18
MD
5288 if (mm) {
5289 membarrier_mm_sync_core_before_usermode(mm);
aa464ba9 5290 mmdrop_lazy_tlb_sched(mm);
70216e18 5291 }
aa464ba9 5292
1cef1150
PZ
5293 if (unlikely(prev_state == TASK_DEAD)) {
5294 if (prev->sched_class->task_dead)
5295 prev->sched_class->task_dead(prev);
68f24b08 5296
1cef1150
PZ
5297 /* Task is done with its stack. */
5298 put_task_stack(prev);
5299
0ff7b2cf 5300 put_task_struct_rcu_user(prev);
c6fd91f0 5301 }
99e5ada9 5302
dfa50b60 5303 return rq;
1da177e4
LT
5304}
5305
5306/**
5307 * schedule_tail - first thing a freshly forked thread must call.
5308 * @prev: the thread we just switched away from.
5309 */
722a9f92 5310asmlinkage __visible void schedule_tail(struct task_struct *prev)
1da177e4
LT
5311 __releases(rq->lock)
5312{
609ca066
PZ
5313 /*
5314 * New tasks start with FORK_PREEMPT_COUNT, see there and
5315 * finish_task_switch() for details.
5316 *
5317 * finish_task_switch() will drop rq->lock() and lower preempt_count
5318 * and the preempt_enable() will end up enabling preemption (on
5319 * PREEMPT_COUNT kernels).
5320 */
5321
13c2235b 5322 finish_task_switch(prev);
26f80681
GM
5323 /*
5324 * This is a special case: the newly created task has just
5325 * switched the context for the first time. It is returning from
5326 * schedule for the first time in this path.
5327 */
5328 trace_sched_exit_tp(true, CALLER_ADDR0);
1a43a14a 5329 preempt_enable();
70b97a7f 5330
1da177e4 5331 if (current->set_child_tid)
b488893a 5332 put_user(task_pid_vnr(current), current->set_child_tid);
088fe47c
EB
5333
5334 calculate_sigpending();
1da177e4
LT
5335}
5336
5337/*
dfa50b60 5338 * context_switch - switch to the new MM and the new thread's register state.
1da177e4 5339 */
04936948 5340static __always_inline struct rq *
70b97a7f 5341context_switch(struct rq *rq, struct task_struct *prev,
d8ac8971 5342 struct task_struct *next, struct rq_flags *rf)
1da177e4 5343{
e107be36 5344 prepare_task_switch(rq, prev, next);
fe4b04fa 5345
9226d125
ZA
5346 /*
5347 * For paravirt, this is coupled with an exit in switch_to to
5348 * combine the page table reload and the switch backend into
5349 * one hypercall.
5350 */
224101ed 5351 arch_start_context_switch(prev);
9226d125 5352
306e0604 5353 /*
139d025c 5354 * kernel -> kernel lazy + transfer active
aa464ba9 5355 * user -> kernel lazy + mmgrab_lazy_tlb() active
139d025c 5356 *
aa464ba9 5357 * kernel -> user switch + mmdrop_lazy_tlb() active
139d025c 5358 * user -> user switch
223baf9d
MD
5359 *
5360 * switch_mm_cid() needs to be updated if the barriers provided
5361 * by context_switch() are modified.
306e0604 5362 */
139d025c
PZ
5363 if (!next->mm) { // to kernel
5364 enter_lazy_tlb(prev->active_mm, next);
5365
5366 next->active_mm = prev->active_mm;
5367 if (prev->mm) // from user
aa464ba9 5368 mmgrab_lazy_tlb(prev->active_mm);
139d025c
PZ
5369 else
5370 prev->active_mm = NULL;
5371 } else { // to user
227a4aad 5372 membarrier_switch_mm(rq, prev->active_mm, next->mm);
139d025c
PZ
5373 /*
5374 * sys_membarrier() requires an smp_mb() between setting
227a4aad 5375 * rq->curr / membarrier_switch_mm() and returning to userspace.
139d025c
PZ
5376 *
5377 * The below provides this either through switch_mm(), or in
5378 * case 'prev->active_mm == next->mm' through
5379 * finish_task_switch()'s mmdrop().
5380 */
139d025c 5381 switch_mm_irqs_off(prev->active_mm, next->mm, next);
bd74fdae 5382 lru_gen_use_mm(next->mm);
1da177e4 5383
139d025c 5384 if (!prev->mm) { // from kernel
aa464ba9 5385 /* will mmdrop_lazy_tlb() in finish_task_switch(). */
139d025c
PZ
5386 rq->prev_mm = prev->active_mm;
5387 prev->active_mm = NULL;
5388 }
1da177e4 5389 }
92509b73 5390
223baf9d
MD
5391 /* switch_mm_cid() requires the memory barriers above. */
5392 switch_mm_cid(rq, prev, next);
5393
269d5992 5394 prepare_lock_switch(rq, next, rf);
1da177e4
LT
5395
5396 /* Here we just switch the register state and the stack. */
5397 switch_to(prev, next, prev);
dd41f596 5398 barrier();
dfa50b60
ON
5399
5400 return finish_task_switch(prev);
1da177e4
LT
5401}
5402
5403/*
1c3e8264 5404 * nr_running and nr_context_switches:
1da177e4
LT
5405 *
5406 * externally visible scheduler statistics: current number of runnable
1c3e8264 5407 * threads, total number of context switches performed since bootup.
1da177e4 5408 */
01aee8fd 5409unsigned int nr_running(void)
1da177e4 5410{
01aee8fd 5411 unsigned int i, sum = 0;
1da177e4
LT
5412
5413 for_each_online_cpu(i)
5414 sum += cpu_rq(i)->nr_running;
5415
5416 return sum;
f711f609 5417}
1da177e4 5418
2ee507c4 5419/*
d1ccc66d 5420 * Check if only the current task is running on the CPU.
00cc1633
DD
5421 *
5422 * Caution: this function does not check that the caller has disabled
5423 * preemption, thus the result might have a time-of-check-to-time-of-use
5424 * race. The caller is responsible for using it correctly, for example:
5425 *
dfcb245e 5426 * - from a non-preemptible section (of course)
00cc1633
DD
5427 *
5428 * - from a thread that is bound to a single CPU
5429 *
5430 * - in a loop with very short iterations (e.g. a polling loop)
2ee507c4
TC
5431 */
5432bool single_task_running(void)
5433{
00cc1633 5434 return raw_rq()->nr_running == 1;
2ee507c4
TC
5435}
5436EXPORT_SYMBOL(single_task_running);
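/*
 * Illustrative (hypothetical) caller honouring the caveats above: treat the
 * result purely as a hint inside a short polling loop:
 *
 *	while (!done && single_task_running())
 *		done = poll_for_event();	// hypothetical fast-path poll
 *	if (!done)
 *		wait_for_event();		// hypothetical sleeping fallback
 */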
5437
7c182722
ZL
5438unsigned long long nr_context_switches_cpu(int cpu)
5439{
5440 return cpu_rq(cpu)->nr_switches;
5441}
5442
1da177e4 5443unsigned long long nr_context_switches(void)
46cb4b7c 5444{
cc94abfc
SR
5445 int i;
5446 unsigned long long sum = 0;
46cb4b7c 5447
0a945022 5448 for_each_possible_cpu(i)
1da177e4 5449 sum += cpu_rq(i)->nr_switches;
46cb4b7c 5450
1da177e4
LT
5451 return sum;
5452}
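/*
 * This total is what appears as the "ctxt" line in /proc/stat; the per-CPU
 * variant above is used by, for example, the RCU CPU stall diagnostics.
 */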
483b4ee6 5453
145d952a
DL
5454/*
5455 * Consumers of these two interfaces, like for example the cpuidle menu
5456 * governor, are using nonsensical data: they prefer a shallow idle state for
5457 * a CPU with IO-wait pending, even though that CPU might not end up running
5458 * the task when it does become runnable.
5459 */
5460
8fc2858e 5461unsigned int nr_iowait_cpu(int cpu)
145d952a
DL
5462{
5463 return atomic_read(&cpu_rq(cpu)->nr_iowait);
5464}
5465
e33a9bba 5466/*
b19a888c 5467 * IO-wait accounting, and how it's mostly bollocks (on SMP).
e33a9bba
TH
5468 *
5469 * The idea behind IO-wait accounting is to account the idle time that we could
5470 * have spent running if it were not for IO. That is, if we were to improve the
5471 * storage performance, we'd have a proportional reduction in IO-wait time.
5472 *
5473 * This all works nicely on UP, where, when a task blocks on IO, we account
5474 * idle time as IO-wait, because if the storage were faster, it could've been
5475 * running and we'd not be idle.
5476 *
5477 * This has been extended to SMP, by doing the same for each CPU. This however
5478 * is broken.
5479 *
5480 * Imagine for instance the case where two tasks block on one CPU, only the one
5481 * CPU will have IO-wait accounted, while the other has regular idle. Even
5482 * though, if the storage were faster, both could've ran at the same time,
5483 * utilising both CPUs.
5484 *
5485 * This means that, when looking globally, the current IO-wait accounting on
5486 * SMP is a lower bound, by reason of under-accounting.
5487 *
5488 * Worse, since the numbers are provided per CPU, they are sometimes
5489 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5490 * associated with any one particular CPU; it can wake up on a different CPU than
5491 * the one it blocked on. This means the per-CPU IO-wait number is meaningless.
5492 *
5493 * Task CPU affinities can make all that even more 'interesting'.
5494 */
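/*
 * Worked example (hypothetical numbers): two tasks block on IO while last
 * running on CPU0. CPU0 then reports nr_iowait_cpu() == 2 and CPU1 reports 0,
 * so the interval looks like 50% iowait / 50% idle, even though faster
 * storage could have kept both CPUs busy.
 */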
5495
97455168 5496unsigned int nr_iowait(void)
1da177e4 5497{
97455168 5498 unsigned int i, sum = 0;
483b4ee6 5499
0a945022 5500 for_each_possible_cpu(i)
145d952a 5501 sum += nr_iowait_cpu(i);
46cb4b7c 5502
1da177e4
LT
5503 return sum;
5504}
483b4ee6 5505
dd41f596 5506#ifdef CONFIG_SMP
8a0be9ef 5507
46cb4b7c 5508/*
38022906
PZ
5509 * sched_exec - execve() is a valuable balancing opportunity, because at
5510 * this point the task has the smallest effective memory and cache footprint.
46cb4b7c 5511 */
38022906 5512void sched_exec(void)
46cb4b7c 5513{
38022906 5514 struct task_struct *p = current;
4bdada79 5515 struct migration_arg arg;
0017d735 5516 int dest_cpu;
46cb4b7c 5517
4bdada79
PZ
5518 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
5519 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
5520 if (dest_cpu == smp_processor_id())
5521 return;
38022906 5522
4bdada79
PZ
5523 if (unlikely(!cpu_active(dest_cpu)))
5524 return;
46cb4b7c 5525
4bdada79 5526 arg = (struct migration_arg){ p, dest_cpu };
1da177e4 5527 }
4bdada79 5528 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
1da177e4 5529}
dd41f596 5530
1da177e4
LT
5531#endif
5532
1da177e4 5533DEFINE_PER_CPU(struct kernel_stat, kstat);
3292beb3 5534DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
1da177e4
LT
5535
5536EXPORT_PER_CPU_SYMBOL(kstat);
3292beb3 5537EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
1da177e4 5538
6075620b
GG
5539/*
5540 * The function fair_sched_class.update_curr accesses the struct curr
5541 * and its field curr->exec_start; when called from task_sched_runtime(),
5542 * we observe a high rate of cache misses in practice.
5543 * Prefetching this data results in improved performance.
5544 */
5545static inline void prefetch_curr_exec_start(struct task_struct *p)
5546{
5547#ifdef CONFIG_FAIR_GROUP_SCHED
85c9a8f4 5548 struct sched_entity *curr = p->se.cfs_rq->curr;
6075620b 5549#else
85c9a8f4 5550 struct sched_entity *curr = task_rq(p)->cfs.curr;
6075620b
GG
5551#endif
5552 prefetch(curr);
5553 prefetch(&curr->exec_start);
5554}
5555
c5f8d995
HS
5556/*
5557 * Return accounted runtime for the task.
5558 * In case the task is currently running, return the runtime plus current's
5559 * pending runtime that has not been accounted yet.
5560 */
5561unsigned long long task_sched_runtime(struct task_struct *p)
5562{
eb580751 5563 struct rq_flags rf;
c5f8d995 5564 struct rq *rq;
6e998916 5565 u64 ns;
c5f8d995 5566
911b2898
PZ
5567#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
5568 /*
97fb7a0a 5569 * 64-bit doesn't need locks to atomically read a 64-bit value.
911b2898 5570 * So we have an optimization chance when the task's delta_exec is 0.
402de7fc 5571 * Reading ->on_cpu is racy, but this is OK.
911b2898 5572 *
d1ccc66d
IM
5573 * If we race with it leaving CPU, we'll take a lock. So we're correct.
5574 * If we race with it entering CPU, unaccounted time is 0. This is
911b2898 5575 * indistinguishable from the read occurring a few cycles earlier.
4036ac15
MG
5576 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
5577 * been accounted, so we're correct here as well.
911b2898 5578 */
da0c1e65 5579 if (!p->on_cpu || !task_on_rq_queued(p))
911b2898
PZ
5580 return p->se.sum_exec_runtime;
5581#endif
5582
eb580751 5583 rq = task_rq_lock(p, &rf);
6e998916
SG
5584 /*
5585 * Must be ->curr _and_ ->on_rq. If dequeued, we would
5586 * project cycles that may never be accounted to this
5587 * thread, breaking clock_gettime().
5588 */
af0c8b2b 5589 if (task_current_donor(rq, p) && task_on_rq_queued(p)) {
6075620b 5590 prefetch_curr_exec_start(p);
6e998916
SG
5591 update_rq_clock(rq);
5592 p->sched_class->update_curr(rq);
5593 }
5594 ns = p->se.sum_exec_runtime;
eb580751 5595 task_rq_unlock(rq, p, &rf);
c5f8d995
HS
5596
5597 return ns;
5598}
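/*
 * task_sched_runtime() is the sampling function behind CPUCLOCK_SCHED, e.g.
 * the CLOCK_THREAD_CPUTIME_ID path of clock_gettime() in the POSIX CPU timer
 * code, which is why the update_curr() call above matters for the precision
 * of such queries.
 */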
48f24c4d 5599
c006fac5
PT
5600static u64 cpu_resched_latency(struct rq *rq)
5601{
5602 int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
5603 u64 resched_latency, now = rq_clock(rq);
5604 static bool warned_once;
5605
5606 if (sysctl_resched_latency_warn_once && warned_once)
5607 return 0;
5608
5609 if (!need_resched() || !latency_warn_ms)
5610 return 0;
5611
5612 if (system_state == SYSTEM_BOOTING)
5613 return 0;
5614
5615 if (!rq->last_seen_need_resched_ns) {
5616 rq->last_seen_need_resched_ns = now;
5617 rq->ticks_without_resched = 0;
5618 return 0;
5619 }
5620
5621 rq->ticks_without_resched++;
5622 resched_latency = now - rq->last_seen_need_resched_ns;
5623 if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
5624 return 0;
5625
5626 warned_once = true;
5627
5628 return resched_latency;
5629}
5630
5631static int __init setup_resched_latency_warn_ms(char *str)
5632{
5633 long val;
5634
5635 if ((kstrtol(str, 0, &val))) {
5636 pr_warn("Unable to set resched_latency_warn_ms\n");
5637 return 1;
5638 }
5639
5640 sysctl_resched_latency_warn_ms = val;
5641 return 1;
5642}
5643__setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
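/*
 * Example (illustrative values): booting with "resched_latency_warn_ms=100"
 * lowers the warning threshold to 100ms, while "resched_latency_warn_ms=0"
 * disables the check, since cpu_resched_latency() bails out early when
 * latency_warn_ms is zero.
 */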
c006fac5 5644
7835b98b
CL
5645/*
5646 * This function gets called by the timer code, with HZ frequency.
5647 * We call it with interrupts disabled.
7835b98b 5648 */
86dd6c04 5649void sched_tick(void)
7835b98b 5650{
7835b98b
CL
5651 int cpu = smp_processor_id();
5652 struct rq *rq = cpu_rq(cpu);
af0c8b2b
PZ
5653 /* accounting goes to the donor task */
5654 struct task_struct *donor;
8a8c69c3 5655 struct rq_flags rf;
d4dbc991 5656 unsigned long hw_pressure;
c006fac5 5657 u64 resched_latency;
3e51f33f 5658
c907cd44 5659 if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
7fb3ff22
YP
5660 arch_scale_freq_tick();
5661
3e51f33f 5662 sched_clock_tick();
dd41f596 5663
8a8c69c3 5664 rq_lock(rq, &rf);
af0c8b2b 5665 donor = rq->donor;
8a8c69c3 5666
af0c8b2b 5667 psi_account_irqtime(rq, donor, NULL);
ddae0ca2 5668
3e51f33f 5669 update_rq_clock(rq);
d4dbc991 5670 hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
97450eb9 5671 update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
7c70cb94
PZ
5672
5673 if (dynamic_preempt_lazy() && tif_test_bit(TIF_NEED_RESCHED_LAZY))
5674 resched_curr(rq);
5675
af0c8b2b 5676 donor->sched_class->task_tick(rq, donor, 0);
c006fac5
PT
5677 if (sched_feat(LATENCY_WARN))
5678 resched_latency = cpu_resched_latency(rq);
3289bdb4 5679 calc_global_load_tick(rq);
4feee7d1 5680 sched_core_tick(rq);
af0c8b2b 5681 task_tick_mm_cid(rq, donor);
8a010b81 5682 scx_tick(rq);
8a8c69c3
PZ
5683
5684 rq_unlock(rq, &rf);
7835b98b 5685
c006fac5
PT
5686 if (sched_feat(LATENCY_WARN) && resched_latency)
5687 resched_latency_warn(cpu, resched_latency);
5688
e9d2b064 5689 perf_event_task_tick();
e220d2dc 5690
af0c8b2b
PZ
5691 if (donor->flags & PF_WQ_WORKER)
5692 wq_worker_tick(donor);
616db877 5693
e418e1c2 5694#ifdef CONFIG_SMP
f0e1a064
TH
5695 if (!scx_switched_all()) {
5696 rq->idle_balance = idle_cpu(cpu);
5697 sched_balance_trigger(rq);
5698 }
e418e1c2 5699#endif
1da177e4
LT
5700}
5701
265f22a9 5702#ifdef CONFIG_NO_HZ_FULL
d84b3131
FW
5703
5704struct tick_work {
5705 int cpu;
b55bd585 5706 atomic_t state;
d84b3131
FW
5707 struct delayed_work work;
5708};
b55bd585
PM
5709/* Values for ->state, see diagram below. */
5710#define TICK_SCHED_REMOTE_OFFLINE 0
5711#define TICK_SCHED_REMOTE_OFFLINING 1
5712#define TICK_SCHED_REMOTE_RUNNING 2
5713
5714/*
5715 * State diagram for ->state:
5716 *
5717 *
5718 * TICK_SCHED_REMOTE_OFFLINE
5719 * | ^
5720 * | |
5721 * | | sched_tick_remote()
5722 * | |
5723 * | |
5724 * +--TICK_SCHED_REMOTE_OFFLINING
5725 * | ^
5726 * | |
5727 * sched_tick_start() | | sched_tick_stop()
5728 * | |
5729 * V |
5730 * TICK_SCHED_REMOTE_RUNNING
5731 *
5732 *
5733 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
5734 * and sched_tick_start() are happy to leave the state in RUNNING.
5735 */
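/*
 * In code terms: sched_tick_remote() steps the state back towards OFFLINE
 * with atomic_fetch_add_unless(&state, -1, RUNNING), so an OFFLINING state
 * decays to OFFLINE and the work is not re-queued, while a RUNNING state is
 * left untouched and the tick work re-arms itself.
 */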
d84b3131
FW
5736
5737static struct tick_work __percpu *tick_work_cpu;
5738
5739static void sched_tick_remote(struct work_struct *work)
5740{
5741 struct delayed_work *dwork = to_delayed_work(work);
5742 struct tick_work *twork = container_of(dwork, struct tick_work, work);
5743 int cpu = twork->cpu;
5744 struct rq *rq = cpu_rq(cpu);
b55bd585 5745 int os;
d84b3131
FW
5746
5747 /*
5748 * Handle the tick only if it appears the remote CPU is running in full
5749 * dynticks mode. The check is racy by nature, but missing a tick or
5750 * having one too many is no big deal because the scheduler tick updates
5751 * statistics and checks timeslices in a time-independent way, regardless
5752 * of when exactly it is running.
5753 */
6dafc713
PZ
5754 if (tick_nohz_tick_stopped_cpu(cpu)) {
5755 guard(rq_lock_irq)(rq);
5756 struct task_struct *curr = rq->curr;
d84b3131 5757
6dafc713 5758 if (cpu_online(cpu)) {
af0c8b2b
PZ
5759 /*
5760 * Since this is a remote tick for full dynticks mode,
5761 * we are always sure that there is no proxy (only a
5762 * single task is running).
5763 */
f7d2728c 5764 WARN_ON_ONCE(rq->curr != rq->donor);
6dafc713 5765 update_rq_clock(rq);
d84b3131 5766
6dafc713
PZ
5767 if (!is_idle_task(curr)) {
5768 /*
5769 * Make sure the next tick runs within a
5770 * reasonable amount of time.
5771 */
5772 u64 delta = rq_clock_task(rq) - curr->se.exec_start;
5773 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
5774 }
5775 curr->sched_class->task_tick(rq, curr, 0);
d9c0ffca 5776
6dafc713
PZ
5777 calc_load_nohz_remote(rq);
5778 }
488603b8 5779 }
ebc0f83c 5780
d84b3131
FW
5781 /*
5782 * Run the remote tick once per second (1Hz). This arbitrary
5783 * frequency is low enough to avoid overload but high enough
b55bd585
PM
5784 * to keep scheduler internal stats reasonably up to date. But
5785 * first update state to reflect hotplug activity if required.
d84b3131 5786 */
b55bd585
PM
5787 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
5788 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
5789 if (os == TICK_SCHED_REMOTE_RUNNING)
5790 queue_delayed_work(system_unbound_wq, dwork, HZ);
d84b3131
FW
5791}
5792
5793static void sched_tick_start(int cpu)
5794{
b55bd585 5795 int os;
d84b3131
FW
5796 struct tick_work *twork;
5797
c907cd44 5798 if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
d84b3131
FW
5799 return;
5800
5801 WARN_ON_ONCE(!tick_work_cpu);
5802
5803 twork = per_cpu_ptr(tick_work_cpu, cpu);
b55bd585
PM
5804 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
5805 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
5806 if (os == TICK_SCHED_REMOTE_OFFLINE) {
5807 twork->cpu = cpu;
5808 INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
5809 queue_delayed_work(system_unbound_wq, &twork->work, HZ);
5810 }
d84b3131
FW
5811}
5812
5813#ifdef CONFIG_HOTPLUG_CPU
5814static void sched_tick_stop(int cpu)
5815{
5816 struct tick_work *twork;
b55bd585 5817 int os;
d84b3131 5818
c907cd44 5819 if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
d84b3131
FW
5820 return;
5821
5822 WARN_ON_ONCE(!tick_work_cpu);
5823
5824 twork = per_cpu_ptr(tick_work_cpu, cpu);
b55bd585
PM
5825 /* There cannot be competing actions, but don't rely on stop-machine. */
5826 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
5827 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
5828 /* Don't cancel, as this would mess up the state machine. */
d84b3131
FW
5829}
5830#endif /* CONFIG_HOTPLUG_CPU */
5831
5832int __init sched_tick_offload_init(void)
5833{
5834 tick_work_cpu = alloc_percpu(struct tick_work);
5835 BUG_ON(!tick_work_cpu);
d84b3131
FW
5836 return 0;
5837}
5838
5839#else /* !CONFIG_NO_HZ_FULL */
5840static inline void sched_tick_start(int cpu) { }
5841static inline void sched_tick_stop(int cpu) { }
265f22a9 5842#endif
1da177e4 5843
c1a280b6 5844#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
c3bc8fd6 5845 defined(CONFIG_TRACE_PREEMPT_TOGGLE))
47252cfb
SR
5846/*
5847 * If the value passed in is equal to the current preempt count
5848 * then we just disabled preemption. Start timing the latency.
5849 */
5850static inline void preempt_latency_start(int val)
5851{
5852 if (preempt_count() == val) {
5853 unsigned long ip = get_lock_parent_ip();
5854#ifdef CONFIG_DEBUG_PREEMPT
5855 current->preempt_disable_ip = ip;
5856#endif
5857 trace_preempt_off(CALLER_ADDR0, ip);
5858 }
5859}
7e49fcce 5860
edafe3a5 5861void preempt_count_add(int val)
1da177e4 5862{
6cd8a4bb 5863#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
5864 /*
5865 * Underflow?
5866 */
9a11b49a
IM
5867 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
5868 return;
6cd8a4bb 5869#endif
bdb43806 5870 __preempt_count_add(val);
6cd8a4bb 5871#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
5872 /*
5873 * Spinlock count overflowing soon?
5874 */
33859f7f
MOS
5875 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
5876 PREEMPT_MASK - 10);
6cd8a4bb 5877#endif
47252cfb 5878 preempt_latency_start(val);
1da177e4 5879}
bdb43806 5880EXPORT_SYMBOL(preempt_count_add);
edafe3a5 5881NOKPROBE_SYMBOL(preempt_count_add);
1da177e4 5882
47252cfb
SR
5883/*
5884 * If the value passed in equals the current preempt count
5885 * then we just enabled preemption. Stop timing the latency.
5886 */
5887static inline void preempt_latency_stop(int val)
5888{
5889 if (preempt_count() == val)
5890 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
5891}
5892
edafe3a5 5893void preempt_count_sub(int val)
1da177e4 5894{
6cd8a4bb 5895#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
5896 /*
5897 * Underflow?
5898 */
01e3eb82 5899 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
9a11b49a 5900 return;
1da177e4
LT
5901 /*
5902 * Is the spinlock portion underflowing?
5903 */
9a11b49a
IM
5904 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
5905 !(preempt_count() & PREEMPT_MASK)))
5906 return;
6cd8a4bb 5907#endif
9a11b49a 5908
47252cfb 5909 preempt_latency_stop(val);
bdb43806 5910 __preempt_count_sub(val);
1da177e4 5911}
bdb43806 5912EXPORT_SYMBOL(preempt_count_sub);
edafe3a5 5913NOKPROBE_SYMBOL(preempt_count_sub);
1da177e4 5914
47252cfb
SR
5915#else
5916static inline void preempt_latency_start(int val) { }
5917static inline void preempt_latency_stop(int val) { }
1da177e4
LT
5918#endif
5919
59ddbcb2
IM
5920static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
5921{
5922#ifdef CONFIG_DEBUG_PREEMPT
5923 return p->preempt_disable_ip;
5924#else
5925 return 0;
5926#endif
5927}
5928
1da177e4 5929/*
dd41f596 5930 * Print scheduling while atomic bug:
1da177e4 5931 */
dd41f596 5932static noinline void __schedule_bug(struct task_struct *prev)
1da177e4 5933{
d1c6d149
VN
5934 /* Save this before calling printk(), since that will clobber it */
5935 unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
5936
664dfa65
DJ
5937 if (oops_in_progress)
5938 return;
5939
3df0fc5b
PZ
5940 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
5941 prev->comm, prev->pid, preempt_count());
838225b4 5942
dd41f596 5943 debug_show_held_locks(prev);
e21f5b15 5944 print_modules();
dd41f596
IM
5945 if (irqs_disabled())
5946 print_irqtrace_events(prev);
dc461c48 5947 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
8f47b187 5948 pr_err("Preemption disabled at:");
2062a4e8 5949 print_ip_sym(KERN_ERR, preempt_disable_ip);
8f47b187 5950 }
79cc1ba7 5951 check_panic_on_warn("scheduling while atomic");
748c7201 5952
6135fc1e 5953 dump_stack();
373d4d09 5954 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
dd41f596 5955}
1da177e4 5956
dd41f596
IM
5957/*
5958 * Various schedule()-time debugging checks and statistics:
5959 */
312364f3 5960static inline void schedule_debug(struct task_struct *prev, bool preempt)
dd41f596 5961{
0d9e2632 5962#ifdef CONFIG_SCHED_STACK_END_CHECK
29d64551
JH
5963 if (task_stack_end_corrupted(prev))
5964 panic("corrupted stack end detected inside scheduler\n");
88485be5
WD
5965
5966 if (task_scs_end_corrupted(prev))
5967 panic("corrupted shadow stack detected inside scheduler\n");
0d9e2632 5968#endif
b99def8b 5969
312364f3 5970#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
2f064a59 5971 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
312364f3
DV
5972 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
5973 prev->comm, prev->pid, prev->non_block_count);
5974 dump_stack();
5975 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5976 }
5977#endif
5978
1dc0fffc 5979 if (unlikely(in_atomic_preempt_off())) {
dd41f596 5980 __schedule_bug(prev);
1dc0fffc
PZ
5981 preempt_count_set(PREEMPT_DISABLED);
5982 }
b3fbab05 5983 rcu_sleep_check();
f7d2728c 5984 WARN_ON_ONCE(ct_state() == CT_STATE_USER);
dd41f596 5985
1da177e4
LT
5986 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
5987
ae92882e 5988 schedstat_inc(this_rq()->sched_count);
dd41f596
IM
5989}
5990
260598f1
PZ
5991static void prev_balance(struct rq *rq, struct task_struct *prev,
5992 struct rq_flags *rf)
457d1f46 5993{
744d8360 5994 const struct sched_class *start_class = prev->sched_class;
457d1f46 5995 const struct sched_class *class;
744d8360
TH
5996
5997#ifdef CONFIG_SCHED_CLASS_EXT
5998 /*
a6250aa2
TH
5999 * SCX requires a balance() call before every pick_task() including when
6000 * waking up from SCHED_IDLE. If @start_class is below SCX, start from
6001 * SCX instead. Also, set a flag to detect a missing balance() call.
744d8360 6002 */
a6250aa2
TH
6003 if (scx_enabled()) {
6004 rq->scx.flags |= SCX_RQ_BAL_PENDING;
6005 if (sched_class_above(&ext_sched_class, start_class))
6006 start_class = &ext_sched_class;
6007 }
744d8360
TH
6008#endif
6009
457d1f46
CY
6010 /*
6011 * We must do the balancing pass before put_prev_task(), such
6012 * that when we release the rq->lock the task is in the same
6013 * state as before we took rq->lock.
6014 *
6015 * We can terminate the balance pass as soon as we know there is
6016 * a runnable task of @class priority or higher.
6017 */
744d8360 6018 for_active_class_range(class, start_class, &idle_sched_class) {
a735d43c 6019 if (class->balance && class->balance(rq, prev, rf))
457d1f46
CY
6020 break;
6021 }
457d1f46
CY
6022}
6023
dd41f596
IM
6024/*
6025 * Pick up the highest-prio task:
6026 */
6027static inline struct task_struct *
539f6512 6028__pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
dd41f596 6029{
49ee5768 6030 const struct sched_class *class;
dd41f596 6031 struct task_struct *p;
1da177e4 6032
bd9bbc96
PZ
6033 rq->dl_server = NULL;
6034
a7a9fc54
TH
6035 if (scx_enabled())
6036 goto restart;
6037
1da177e4 6038 /*
0ba87bb2
PZ
6039 * Optimization: we know that if all tasks are in the fair class we can
6040 * call that function directly, but only if the @prev task wasn't of a
b19a888c 6041 * higher scheduling class, because otherwise those classes lose the
0ba87bb2 6042 * opportunity to pull in more work from other CPUs.
1da177e4 6043 */
546a3fee 6044 if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
7b8a702d 6045 rq->nr_running == rq->cfs.h_nr_queued)) {
0ba87bb2 6046
5d7d6056 6047 p = pick_next_task_fair(rq, prev, rf);
6ccdc84b 6048 if (unlikely(p == RETRY_TASK))
67692435 6049 goto restart;
6ccdc84b 6050
1699949d 6051 /* Assume the next prioritized class is idle_sched_class */
5d7d6056 6052 if (!p) {
fd03c5b8 6053 p = pick_task_idle(rq);
436f3eed 6054 put_prev_set_next_task(rq, prev, p);
f488e105 6055 }
6ccdc84b
PZ
6056
6057 return p;
1da177e4
LT
6058 }
6059
67692435 6060restart:
260598f1 6061 prev_balance(rq, prev, rf);
63ba8422 6062
a7a9fc54 6063 for_each_active_class(class) {
fd03c5b8
PZ
6064 if (class->pick_next_task) {
6065 p = class->pick_next_task(rq, prev);
6066 if (p)
6067 return p;
6068 } else {
6069 p = class->pick_task(rq);
6070 if (p) {
436f3eed 6071 put_prev_set_next_task(rq, prev, p);
fd03c5b8
PZ
6072 return p;
6073 }
6074 }
dd41f596 6075 }
34f971f6 6076
bc9ffef3 6077 BUG(); /* The idle class should always have a runnable task. */
dd41f596 6078}
1da177e4 6079
9edeaea1 6080#ifdef CONFIG_SCHED_CORE
539f6512
PZ
6081static inline bool is_task_rq_idle(struct task_struct *t)
6082{
6083 return (task_rq(t)->idle == t);
6084}
6085
6086static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
6087{
6088 return is_task_rq_idle(a) || (a->core_cookie == cookie);
6089}
6090
6091static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
6092{
6093 if (is_task_rq_idle(a) || is_task_rq_idle(b))
6094 return true;
6095
6096 return a->core_cookie == b->core_cookie;
6097}
6098
bc9ffef3 6099static inline struct task_struct *pick_task(struct rq *rq)
539f6512 6100{
bc9ffef3
PZ
6101 const struct sched_class *class;
6102 struct task_struct *p;
539f6512 6103
bd9bbc96
PZ
6104 rq->dl_server = NULL;
6105
a7a9fc54 6106 for_each_active_class(class) {
bc9ffef3
PZ
6107 p = class->pick_task(rq);
6108 if (p)
6109 return p;
539f6512
PZ
6110 }
6111
bc9ffef3 6112 BUG(); /* The idle class should always have a runnable task. */
539f6512
PZ
6113}
6114
c6047c2e
JFG
6115extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
6116
5b6547ed
PZ
6117static void queue_core_balance(struct rq *rq);
6118
539f6512
PZ
6119static struct task_struct *
6120pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6121{
bc9ffef3 6122 struct task_struct *next, *p, *max = NULL;
539f6512 6123 const struct cpumask *smt_mask;
c6047c2e 6124 bool fi_before = false;
4feee7d1 6125 bool core_clock_updated = (rq == rq->core);
bc9ffef3
PZ
6126 unsigned long cookie;
6127 int i, cpu, occ = 0;
6128 struct rq *rq_i;
539f6512 6129 bool need_sync;
539f6512
PZ
6130
6131 if (!sched_core_enabled(rq))
6132 return __pick_next_task(rq, prev, rf);
6133
6134 cpu = cpu_of(rq);
6135
6136 /* Stopper task is switching into idle, no need core-wide selection. */
6137 if (cpu_is_offline(cpu)) {
6138 /*
6139 * Reset core_pick so that we don't enter the fastpath when
6140 * coming online. core_pick would already be migrated to
6141 * another cpu during offline.
6142 */
6143 rq->core_pick = NULL;
bd9bbc96 6144 rq->core_dl_server = NULL;
539f6512
PZ
6145 return __pick_next_task(rq, prev, rf);
6146 }
6147
6148 /*
6149 * If there were no {en,de}queues since we picked (IOW, the task
6150 * pointers are all still valid), and we haven't scheduled the last
6151 * pick yet, do so now.
6152 *
6153 * rq->core_pick can be NULL if no selection was made for a CPU because
6154 * it was either offline or went offline during a sibling's core-wide
6155 * selection. In this case, do a core-wide selection.
6156 */
6157 if (rq->core->core_pick_seq == rq->core->core_task_seq &&
6158 rq->core->core_pick_seq != rq->core_sched_seq &&
6159 rq->core_pick) {
6160 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
6161
6162 next = rq->core_pick;
bd9bbc96 6163 rq->dl_server = rq->core_dl_server;
539f6512 6164 rq->core_pick = NULL;
bd9bbc96 6165 rq->core_dl_server = NULL;
436f3eed 6166 goto out_set_next;
539f6512
PZ
6167 }
6168
260598f1 6169 prev_balance(rq, prev, rf);
539f6512
PZ
6170
6171 smt_mask = cpu_smt_mask(cpu);
7afbba11
JFG
6172 need_sync = !!rq->core->core_cookie;
6173
6174 /* reset state */
6175 rq->core->core_cookie = 0UL;
4feee7d1
JD
6176 if (rq->core->core_forceidle_count) {
6177 if (!core_clock_updated) {
6178 update_rq_clock(rq->core);
6179 core_clock_updated = true;
6180 }
6181 sched_core_account_forceidle(rq);
6182 /* reset after accounting force idle */
6183 rq->core->core_forceidle_start = 0;
6184 rq->core->core_forceidle_count = 0;
6185 rq->core->core_forceidle_occupation = 0;
7afbba11
JFG
6186 need_sync = true;
6187 fi_before = true;
7afbba11 6188 }
539f6512
PZ
6189
6190 /*
6191 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
6192 *
6193 * @task_seq guards the task state ({en,de}queues)
6194 * @pick_seq is the @task_seq we did a selection on
6195 * @sched_seq is the @pick_seq we scheduled
6196 *
6197 * However, preemptions can cause multiple picks on the same task set.
6198 * 'Fix' this by also increasing @task_seq for every pick.
6199 */
6200 rq->core->core_task_seq++;
539f6512 6201
7afbba11
JFG
6202 /*
6203 * Optimize for common case where this CPU has no cookies
6204 * and there are no cookied tasks running on siblings.
6205 */
6206 if (!need_sync) {
bc9ffef3 6207 next = pick_task(rq);
7afbba11
JFG
6208 if (!next->core_cookie) {
6209 rq->core_pick = NULL;
bd9bbc96 6210 rq->core_dl_server = NULL;
c6047c2e
JFG
6211 /*
6212 * For robustness, update the min_vruntime_fi for
6213 * unconstrained picks as well.
6214 */
6215 WARN_ON_ONCE(fi_before);
6216 task_vruntime_update(rq, next, false);
5b6547ed 6217 goto out_set_next;
7afbba11 6218 }
8039e96f 6219 }
7afbba11 6220
bc9ffef3
PZ
6221 /*
6222 * For each thread: do the regular task pick and find the max prio task
6223 * amongst them.
6224 *
6225 * Tie-break prio towards the current CPU
6226 */
6227 for_each_cpu_wrap(i, smt_mask, cpu) {
6228 rq_i = cpu_rq(i);
539f6512 6229
4feee7d1
JD
6230 /*
6231 * Current cpu always has its clock updated on entrance to
6232 * pick_next_task(). If the current cpu is not the core,
6233 * the core may also have been updated above.
6234 */
6235 if (i != cpu && (rq_i != rq->core || !core_clock_updated))
539f6512 6236 update_rq_clock(rq_i);
bc9ffef3 6237
bd9bbc96
PZ
6238 rq_i->core_pick = p = pick_task(rq_i);
6239 rq_i->core_dl_server = rq_i->dl_server;
6240
bc9ffef3
PZ
6241 if (!max || prio_less(max, p, fi_before))
6242 max = p;
539f6512
PZ
6243 }
6244
bc9ffef3
PZ
6245 cookie = rq->core->core_cookie = max->core_cookie;
6246
539f6512 6247 /*
bc9ffef3
PZ
6248 * For each thread: try and find a runnable task that matches @max or
6249 * force idle.
539f6512 6250 */
bc9ffef3
PZ
6251 for_each_cpu(i, smt_mask) {
6252 rq_i = cpu_rq(i);
6253 p = rq_i->core_pick;
539f6512 6254
bc9ffef3
PZ
6255 if (!cookie_equals(p, cookie)) {
6256 p = NULL;
6257 if (cookie)
6258 p = sched_core_find(rq_i, cookie);
7afbba11 6259 if (!p)
bc9ffef3
PZ
6260 p = idle_sched_class.pick_task(rq_i);
6261 }
539f6512 6262
bc9ffef3 6263 rq_i->core_pick = p;
bd9bbc96 6264 rq_i->core_dl_server = NULL;
d2dfa17b 6265
bc9ffef3
PZ
6266 if (p == rq_i->idle) {
6267 if (rq_i->nr_running) {
4feee7d1 6268 rq->core->core_forceidle_count++;
c6047c2e
JFG
6269 if (!fi_before)
6270 rq->core->core_forceidle_seq++;
6271 }
bc9ffef3
PZ
6272 } else {
6273 occ++;
539f6512 6274 }
539f6512
PZ
6275 }
6276
4feee7d1 6277 if (schedstat_enabled() && rq->core->core_forceidle_count) {
b171501f 6278 rq->core->core_forceidle_start = rq_clock(rq->core);
4feee7d1
JD
6279 rq->core->core_forceidle_occupation = occ;
6280 }
6281
539f6512
PZ
6282 rq->core->core_pick_seq = rq->core->core_task_seq;
6283 next = rq->core_pick;
6284 rq->core_sched_seq = rq->core->core_pick_seq;
6285
6286 /* Something should have been selected for current CPU */
6287 WARN_ON_ONCE(!next);
6288
6289 /*
6290 * Reschedule siblings
6291 *
6292 * NOTE: L1TF -- at this point we're no longer running the old task and
6293 * sending an IPI (below) ensures the sibling will no longer be running
6294 * their task. This ensures there is no inter-sibling overlap between
6295 * non-matching user state.
6296 */
6297 for_each_cpu(i, smt_mask) {
bc9ffef3 6298 rq_i = cpu_rq(i);
539f6512
PZ
6299
6300 /*
6301 * An online sibling might have gone offline before a task
6302 * could be picked for it, or it might be offline but later
6303 * happen to come online, but it's too late and nothing was
6304 * picked for it. That's OK - it will pick tasks for itself,
6305 * so ignore it.
6306 */
6307 if (!rq_i->core_pick)
6308 continue;
6309
c6047c2e
JFG
6310 /*
6311 * Update for new !FI->FI transitions, or if continuing to be in !FI:
6312 * fi_before fi update?
6313 * 0 0 1
6314 * 0 1 1
6315 * 1 0 1
6316 * 1 1 0
6317 */
4feee7d1
JD
6318 if (!(fi_before && rq->core->core_forceidle_count))
6319 task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);
539f6512 6320
d2dfa17b
PZ
6321 rq_i->core_pick->core_occupation = occ;
6322
539f6512
PZ
6323 if (i == cpu) {
6324 rq_i->core_pick = NULL;
bd9bbc96 6325 rq_i->core_dl_server = NULL;
539f6512
PZ
6326 continue;
6327 }
6328
6329 /* Did we break L1TF mitigation requirements? */
6330 WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
6331
6332 if (rq_i->curr == rq_i->core_pick) {
6333 rq_i->core_pick = NULL;
bd9bbc96 6334 rq_i->core_dl_server = NULL;
539f6512
PZ
6335 continue;
6336 }
6337
6338 resched_curr(rq_i);
6339 }
6340
5b6547ed 6341out_set_next:
436f3eed 6342 put_prev_set_next_task(rq, prev, next);
5b6547ed
PZ
6343 if (rq->core->core_forceidle_count && next == rq->idle)
6344 queue_core_balance(rq);
6345
539f6512
PZ
6346 return next;
6347}
9edeaea1 6348
d2dfa17b
PZ
6349static bool try_steal_cookie(int this, int that)
6350{
6351 struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
6352 struct task_struct *p;
6353 unsigned long cookie;
6354 bool success = false;
6355
b4e1fa1e
PZ
6356 guard(irq)();
6357 guard(double_rq_lock)(dst, src);
d2dfa17b
PZ
6358
6359 cookie = dst->core->core_cookie;
6360 if (!cookie)
b4e1fa1e 6361 return false;
d2dfa17b
PZ
6362
6363 if (dst->curr != dst->idle)
b4e1fa1e 6364 return false;
d2dfa17b
PZ
6365
6366 p = sched_core_find(src, cookie);
530bfad1 6367 if (!p)
b4e1fa1e 6368 return false;
d2dfa17b
PZ
6369
6370 do {
6371 if (p == src->core_pick || p == src->curr)
6372 goto next;
6373
386ef214 6374 if (!is_cpu_allowed(p, this))
d2dfa17b
PZ
6375 goto next;
6376
6377 if (p->core_occupation > dst->idle->core_occupation)
6378 goto next;
530bfad1 6379 /*
b4e1fa1e
PZ
6380 * sched_core_find() and sched_core_next() will ensure
6381 * that task @p is not throttled now; we also need to
6382 * check whether the runqueue of the destination CPU is
6383 * being throttled.
530bfad1
HJ
6384 */
6385 if (sched_task_is_throttled(p, this))
6386 goto next;
d2dfa17b 6387
2b05a0b4 6388 move_queued_task_locked(src, dst, p);
d2dfa17b
PZ
6389 resched_curr(dst);
6390
6391 success = true;
6392 break;
6393
6394next:
6395 p = sched_core_next(p, cookie);
6396 } while (p);
6397
d2dfa17b
PZ
6398 return success;
6399}
6400
6401static bool steal_cookie_task(int cpu, struct sched_domain *sd)
6402{
6403 int i;
6404
8589018a 6405 for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) {
d2dfa17b
PZ
6406 if (i == cpu)
6407 continue;
6408
6409 if (need_resched())
6410 break;
6411
6412 if (try_steal_cookie(cpu, i))
6413 return true;
6414 }
6415
6416 return false;
6417}
6418
6419static void sched_core_balance(struct rq *rq)
6420{
6421 struct sched_domain *sd;
6422 int cpu = cpu_of(rq);
6423
0e34600a
PZ
6424 guard(preempt)();
6425 guard(rcu)();
6426
d2dfa17b
PZ
6427 raw_spin_rq_unlock_irq(rq);
6428 for_each_domain(cpu, sd) {
6429 if (need_resched())
6430 break;
6431
6432 if (steal_cookie_task(cpu, sd))
6433 break;
6434 }
6435 raw_spin_rq_lock_irq(rq);
d2dfa17b
PZ
6436}
6437
8e5bad7d 6438static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
d2dfa17b 6439
5b6547ed 6440static void queue_core_balance(struct rq *rq)
d2dfa17b
PZ
6441{
6442 if (!sched_core_enabled(rq))
6443 return;
6444
6445 if (!rq->core->core_cookie)
6446 return;
6447
6448 if (!rq->nr_running) /* not forced idle */
6449 return;
6450
6451 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
6452}
6453
7170509c
PZ
6454DEFINE_LOCK_GUARD_1(core_lock, int,
6455 sched_core_lock(*_T->lock, &_T->flags),
6456 sched_core_unlock(*_T->lock, &_T->flags),
6457 unsigned long flags)
6458
3c474b32 6459static void sched_core_cpu_starting(unsigned int cpu)
9edeaea1
PZ
6460{
6461 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
3c474b32 6462 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
3c474b32 6463 int t;
9edeaea1 6464
7170509c 6465 guard(core_lock)(&cpu);
9edeaea1 6466
3c474b32
PZ
6467 WARN_ON_ONCE(rq->core != rq);
6468
6469 /* if we're the first, we'll be our own leader */
6470 if (cpumask_weight(smt_mask) == 1)
7170509c 6471 return;
3c474b32
PZ
6472
6473 /* find the leader */
6474 for_each_cpu(t, smt_mask) {
6475 if (t == cpu)
6476 continue;
6477 rq = cpu_rq(t);
6478 if (rq->core == rq) {
6479 core_rq = rq;
6480 break;
9edeaea1 6481 }
3c474b32 6482 }
9edeaea1 6483
3c474b32 6484 if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
7170509c 6485 return;
9edeaea1 6486
3c474b32
PZ
6487 /* install and validate core_rq */
6488 for_each_cpu(t, smt_mask) {
6489 rq = cpu_rq(t);
9edeaea1 6490
3c474b32 6491 if (t == cpu)
9edeaea1 6492 rq->core = core_rq;
3c474b32
PZ
6493
6494 WARN_ON_ONCE(rq->core != core_rq);
9edeaea1
PZ
6495 }
6496}
3c474b32
PZ
6497
6498static void sched_core_cpu_deactivate(unsigned int cpu)
6499{
6500 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6501 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
3c474b32
PZ
6502 int t;
6503
7170509c 6504 guard(core_lock)(&cpu);
3c474b32
PZ
6505
6506 /* if we're the last man standing, nothing to do */
6507 if (cpumask_weight(smt_mask) == 1) {
6508 WARN_ON_ONCE(rq->core != rq);
7170509c 6509 return;
3c474b32
PZ
6510 }
6511
6512 /* if we're not the leader, nothing to do */
6513 if (rq->core != rq)
7170509c 6514 return;
3c474b32
PZ
6515
6516 /* find a new leader */
6517 for_each_cpu(t, smt_mask) {
6518 if (t == cpu)
6519 continue;
6520 core_rq = cpu_rq(t);
6521 break;
6522 }
6523
6524 if (WARN_ON_ONCE(!core_rq)) /* impossible */
7170509c 6525 return;
3c474b32
PZ
6526
6527 /* copy the shared state to the new leader */
4feee7d1
JD
6528 core_rq->core_task_seq = rq->core_task_seq;
6529 core_rq->core_pick_seq = rq->core_pick_seq;
6530 core_rq->core_cookie = rq->core_cookie;
6531 core_rq->core_forceidle_count = rq->core_forceidle_count;
6532 core_rq->core_forceidle_seq = rq->core_forceidle_seq;
6533 core_rq->core_forceidle_occupation = rq->core_forceidle_occupation;
6534
6535 /*
6536 * Accounting edge for forced idle is handled in pick_next_task().
6537 * Don't need another one here, since the hotplug thread shouldn't
6538 * have a cookie.
6539 */
6540 core_rq->core_forceidle_start = 0;
3c474b32
PZ
6541
6542 /* install new leader */
6543 for_each_cpu(t, smt_mask) {
6544 rq = cpu_rq(t);
6545 rq->core = core_rq;
6546 }
3c474b32
PZ
6547}
6548
6549static inline void sched_core_cpu_dying(unsigned int cpu)
6550{
6551 struct rq *rq = cpu_rq(cpu);
6552
6553 if (rq->core != rq)
6554 rq->core = rq;
6555}
6556
9edeaea1
PZ
6557#else /* !CONFIG_SCHED_CORE */
6558
6559static inline void sched_core_cpu_starting(unsigned int cpu) {}
3c474b32
PZ
6560static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
6561static inline void sched_core_cpu_dying(unsigned int cpu) {}
9edeaea1 6562
539f6512
PZ
6563static struct task_struct *
6564pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6565{
6566 return __pick_next_task(rq, prev, rf);
6567}
6568
9edeaea1
PZ
6569#endif /* CONFIG_SCHED_CORE */
6570
b4bfa3fc
TG
6571/*
6572 * Constants for the sched_mode argument of __schedule().
6573 *
6574 * The mode argument allows RT enabled kernels to differentiate a
3dcac251 6575 * preemption from blocking on an 'sleeping' spin/rwlock.
b4bfa3fc 6576 */
3dcac251
PZ
6577#define SM_IDLE (-1)
6578#define SM_NONE 0
6579#define SM_PREEMPT 1
6580#define SM_RTLOCK_WAIT 2
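/*
 * SM_NONE is used by plain schedule() and do_task_dead(), SM_PREEMPT by the
 * preemption entry points, SM_RTLOCK_WAIT by the PREEMPT_RT 'sleeping'
 * spin/rwlock slow paths, and SM_IDLE by schedule_idle(), where __schedule()
 * may short-circuit the pick when the runqueue is empty.
 */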
b4bfa3fc 6581
7b3d61f6
JS
6582/*
6583 * Helper function for __schedule()
6584 *
6585 * If a task does not have signals pending, deactivate it.
6586 * Otherwise, mark the task's __state as RUNNING.
6587 */
6588static bool try_to_block_task(struct rq *rq, struct task_struct *p,
8feb053d 6589 unsigned long *task_state_p)
7b3d61f6 6590{
8feb053d 6591 unsigned long task_state = *task_state_p;
7b3d61f6
JS
6592 int flags = DEQUEUE_NOCLOCK;
6593
6594 if (signal_pending_state(task_state, p)) {
6595 WRITE_ONCE(p->__state, TASK_RUNNING);
8feb053d 6596 *task_state_p = TASK_RUNNING;
7b3d61f6
JS
6597 return false;
6598 }
6599
6600 p->sched_contributes_to_load =
6601 (task_state & TASK_UNINTERRUPTIBLE) &&
6602 !(task_state & TASK_NOLOAD) &&
6603 !(task_state & TASK_FROZEN);
6604
6605 if (unlikely(is_special_task_state(task_state)))
6606 flags |= DEQUEUE_SPECIAL;
6607
6608 /*
6609 * __schedule() ttwu()
6610 * prev_state = prev->state; if (p->on_rq && ...)
6611 * if (prev_state) goto out;
6612 * p->on_rq = 0; smp_acquire__after_ctrl_dep();
6613 * p->state = TASK_WAKING
6614 *
6615 * Where __schedule() and ttwu() have matching control dependencies.
6616 *
6617 * After this, schedule() must not care about p->state any more.
6618 */
6619 block_task(rq, p, flags);
6620 return true;
6621}
6622
dd41f596 6623/*
c259e01a 6624 * __schedule() is the main scheduler function.
edde96ea
PE
6625 *
6626 * The main means of driving the scheduler and thus entering this function are:
6627 *
6628 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
6629 *
6630 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
6631 * paths. For example, see arch/x86/entry_64.S.
6632 *
6633 * To drive preemption between tasks, the scheduler sets the flag in timer
86dd6c04 6634 * interrupt handler sched_tick().
edde96ea
PE
6635 *
6636 * 3. Wakeups don't really cause entry into schedule(). They add a
6637 * task to the run-queue and that's it.
6638 *
6639 * Now, if the new task added to the run-queue preempts the current
6640 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
6641 * called on the nearest possible occasion:
6642 *
c1a280b6 6643 * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
edde96ea
PE
6644 *
6645 * - in syscall or exception context, at the next outermost
6646 * preempt_enable(). (this might be as soon as the wake_up()'s
6647 * spin_unlock()!)
6648 *
6649 * - in IRQ context, return from interrupt-handler to
6650 * preemptible context
6651 *
c1a280b6 6652 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
edde96ea
PE
6653 * then at the next:
6654 *
6655 * - cond_resched() call
6656 * - explicit schedule() call
6657 * - return from syscall or exception to user-space
6658 * - return from interrupt-handler to user-space
bfd9b2b5 6659 *
b30f0e3f 6660 * WARNING: must be called with preemption disabled!
dd41f596 6661 */
3dcac251 6662static void __sched notrace __schedule(int sched_mode)
dd41f596
IM
6663{
6664 struct task_struct *prev, *next;
3dcac251
PZ
6665 /*
6666 * On a PREEMPT_RT kernel, SM_RTLOCK_WAIT is noted
6667 * as a preemption by schedule_debug() and RCU.
6668 */
6669 bool preempt = sched_mode > SM_NONE;
26f80681 6670 bool is_switch = false;
67ca7bde 6671 unsigned long *switch_count;
dbfb089d 6672 unsigned long prev_state;
d8ac8971 6673 struct rq_flags rf;
dd41f596 6674 struct rq *rq;
31656519 6675 int cpu;
dd41f596 6676
26f80681
GM
6677 trace_sched_entry_tp(preempt, CALLER_ADDR0);
6678
dd41f596
IM
6679 cpu = smp_processor_id();
6680 rq = cpu_rq(cpu);
dd41f596 6681 prev = rq->curr;
dd41f596 6682
3dcac251 6683 schedule_debug(prev, preempt);
1da177e4 6684
e0ee463c 6685 if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
f333fdc9 6686 hrtick_clear(rq);
8f4d37ec 6687
676e8cf7
PZ
6688 klp_sched_try_switch(prev);
6689
46a5d164 6690 local_irq_disable();
3dcac251 6691 rcu_note_context_switch(preempt);
46a5d164 6692
e0acd0a6
ON
6693 /*
6694 * Make sure that signal_pending_state()->signal_pending() below
6695 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
dbfb089d
PZ
6696 * done by the caller to avoid the race with signal_wake_up():
6697 *
6698 * __set_current_state(@state) signal_wake_up()
6699 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING)
6700 * wake_up_state(p, state)
6701 * LOCK rq->lock LOCK p->pi_state
6702 * smp_mb__after_spinlock() smp_mb__after_spinlock()
6703 * if (signal_pending_state()) if (p->state & @state)
306e0604 6704 *
dbfb089d 6705 * Also, the membarrier system call requires a full memory barrier
a14d11a0
AP
6706 * after coming from user-space, before storing to rq->curr; this
6707 * barrier matches a full barrier in the proximity of the membarrier
6708 * system call exit.
e0acd0a6 6709 */
8a8c69c3 6710 rq_lock(rq, &rf);
d89e588c 6711 smp_mb__after_spinlock();
1da177e4 6712
d1ccc66d
IM
6713 /* Promote REQ to ACT */
6714 rq->clock_update_flags <<= 1;
bce4dc80 6715 update_rq_clock(rq);
5ebde09d 6716 rq->clock_update_flags = RQCF_UPDATED;
9edfbfed 6717
246d86b5 6718 switch_count = &prev->nivcsw;
d136122f 6719
3dcac251
PZ
6720 /* Task state changes only considers SM_PREEMPT as preemption */
6721 preempt = sched_mode == SM_PREEMPT;
6722
dbfb089d 6723 /*
d136122f 6724 * We must load prev->state once (task_struct::state is volatile), such
2500ad1c 6725 * that we form a control dependency vs deactivate_task() below.
dbfb089d 6726 */
2f064a59 6727 prev_state = READ_ONCE(prev->__state);
3dcac251 6728 if (sched_mode == SM_IDLE) {
edf1c586
PS
6729 /* SCX must consult the BPF scheduler to tell if rq is empty */
6730 if (!rq->nr_running && !scx_enabled()) {
3dcac251
PZ
6731 next = prev;
6732 goto picked;
6733 }
6734 } else if (!preempt && prev_state) {
8feb053d 6735 try_to_block_task(rq, prev, &prev_state);
dd41f596 6736 switch_count = &prev->nvcsw;
1da177e4
LT
6737 }
6738
d8ac8971 6739 next = pick_next_task(rq, prev, &rf);
af0c8b2b 6740 rq_set_donor(rq, next);
3dcac251 6741picked:
f26f9aff 6742 clear_tsk_need_resched(prev);
f27dde8d 6743 clear_preempt_need_resched();
c006fac5 6744 rq->last_seen_need_resched_ns = 0;
1da177e4 6745
26f80681
GM
6746 is_switch = prev != next;
6747 if (likely(is_switch)) {
1da177e4 6748 rq->nr_switches++;
5311a98f
EB
6749 /*
6750 * RCU users of rcu_dereference(rq->curr) may not see
6751 * changes to task_struct made by pick_next_task().
6752 */
6753 RCU_INIT_POINTER(rq->curr, next);
22e4ebb9
MD
6754 /*
6755 * The membarrier system call requires each architecture
6756 * to have a full memory barrier after updating
306e0604
MD
6757 * rq->curr, before returning to user-space.
6758 *
6759 * Here are the schemes providing that barrier on the
6760 * various architectures:
d6cfd177
AP
6761 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
6762 * RISC-V. switch_mm() relies on membarrier_arch_switch_mm()
6763 * on PowerPC and on RISC-V.
306e0604
MD
6764 * - finish_lock_switch() for weakly-ordered
6765 * architectures where spin_unlock is a full barrier,
6766 * - switch_to() for arm64 (weakly-ordered, spin_unlock
6767 * is a RELEASE barrier),
a14d11a0
AP
6768 *
6769 * The barrier matches a full barrier in the proximity of
6770 * the membarrier system call entry.
cd9b2901
AP
6771 *
6772 * On RISC-V, this barrier pairing is also needed for the
6773 * SYNC_CORE command when switching between processes, cf.
6774 * the inline comments in membarrier_arch_switch_mm().
22e4ebb9 6775 */
1da177e4
LT
6776 ++*switch_count;
6777
af449901 6778 migrate_disable_switch(rq, prev);
ddae0ca2 6779 psi_account_irqtime(rq, prev, next);
7d9da040
CZ
6780 psi_sched_switch(prev, next, !task_on_rq_queued(prev) ||
6781 prev->se.sched_delayed);
b05e75d6 6782
3dcac251 6783 trace_sched_switch(preempt, prev, next, prev_state);
d1ccc66d
IM
6784
6785 /* Also unlocks the rq: */
6786 rq = context_switch(rq, prev, next, &rf);
cbce1a68 6787 } else {
565790d2
PZ
6788 rq_unpin_lock(rq, &rf);
6789 __balance_callbacks(rq);
5cb9eaa3 6790 raw_spin_rq_unlock_irq(rq);
565790d2 6791 }
26f80681 6792 trace_sched_exit_tp(is_switch, CALLER_ADDR0);
1da177e4 6793}
c259e01a 6794
9af6528e
PZ
6795void __noreturn do_task_dead(void)
6796{
d1ccc66d 6797 /* Causes final put_task_struct in finish_task_switch(): */
b5bf9a90 6798 set_special_state(TASK_DEAD);
d1ccc66d
IM
6799
6800 /* Tell freezer to ignore us: */
6801 current->flags |= PF_NOFREEZE;
6802
b4bfa3fc 6803 __schedule(SM_NONE);
9af6528e 6804 BUG();
d1ccc66d
IM
6805
6806 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
9af6528e 6807 for (;;)
d1ccc66d 6808 cpu_relax();
9af6528e
PZ
6809}
6810
9c40cef2
TG
6811static inline void sched_submit_work(struct task_struct *tsk)
6812{
28bc55f6 6813 static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
c1cecf88
SAS
6814 unsigned int task_flags;
6815
28bc55f6
PZ
6816 /*
6817 * Establish LD_WAIT_CONFIG context to ensure none of the code called
6818 * will use a blocking primitive -- which would lead to recursion.
6819 */
6820 lock_map_acquire_try(&sched_map);
6821
c1cecf88 6822 task_flags = tsk->flags;
6d25be57 6823 /*
b945efcd
TG
6824 * If a worker goes to sleep, notify and ask workqueue whether it
6825 * wants to wake up a task to maintain concurrency.
6d25be57 6826 */
3eafe225
WJ
6827 if (task_flags & PF_WQ_WORKER)
6828 wq_worker_sleeping(tsk);
6829 else if (task_flags & PF_IO_WORKER)
6830 io_wq_worker_sleeping(tsk);
6d25be57 6831
401e4963
JK
6832 /*
6833 * spinlock and rwlock must not flush block requests. This will
6834 * deadlock if the callback attempts to acquire a lock which is
6835 * already acquired.
6836 */
f7d2728c 6837 WARN_ON_ONCE(current->__state & TASK_RTLOCK_WAIT);
b0fdc013 6838
9c40cef2
TG
6839 /*
6840 * If we are going to sleep and we have plugged IO queued,
6841 * make sure to submit it to avoid deadlocks.
6842 */
aa8dccca 6843 blk_flush_plug(tsk->plug, true);
28bc55f6
PZ
6844
6845 lock_map_release(&sched_map);
9c40cef2
TG
6846}
6847
6d25be57
TG
6848static void sched_update_worker(struct task_struct *tsk)
6849{
06b23f92
JA
6850 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) {
6851 if (tsk->flags & PF_BLOCK_TS)
6852 blk_plug_invalidate_ts(tsk);
771b53d0
JA
6853 if (tsk->flags & PF_WQ_WORKER)
6854 wq_worker_running(tsk);
06b23f92 6855 else if (tsk->flags & PF_IO_WORKER)
771b53d0
JA
6856 io_wq_worker_running(tsk);
6857 }
6d25be57
TG
6858}
6859
3dcac251 6860static __always_inline void __schedule_loop(int sched_mode)
c259e01a 6861{
bfd9b2b5 6862 do {
b30f0e3f 6863 preempt_disable();
de1474b4 6864 __schedule(sched_mode);
b30f0e3f 6865 sched_preempt_enable_no_resched();
bfd9b2b5 6866 } while (need_resched());
de1474b4
TG
6867}
6868
6869asmlinkage __visible void __sched schedule(void)
6870{
6871 struct task_struct *tsk = current;
6872
6b596e62
PZ
6873#ifdef CONFIG_RT_MUTEXES
6874 lockdep_assert(!tsk->sched_rt_mutex);
6875#endif
6876
6877 if (!task_is_running(tsk))
6878 sched_submit_work(tsk);
de1474b4 6879 __schedule_loop(SM_NONE);
6d25be57 6880 sched_update_worker(tsk);
c259e01a 6881}
1da177e4
LT
6882EXPORT_SYMBOL(schedule);
6883
8663effb
SRV
6884/*
6885 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
6886 * state (i.e. scheduled out non-voluntarily) by ensuring that all
6887 * tasks have either left the run queue or have gone into user space.
6888 * As idle tasks do not do either, they must not ever be preempted
6889 * (schedule out non-voluntarily).
6890 *
6891 * schedule_idle() is similar to schedule_preempt_disabled() except that it
6892 * never enables preemption because it does not call sched_submit_work().
6893 */
6894void __sched schedule_idle(void)
6895{
6896 /*
6897 * As this skips calling sched_submit_work(), which the idle task does
402de7fc 6898 * anyway because that function is a NOP when the task is in the
8663effb
SRV
6899 * TASK_RUNNING state, make sure this isn't used anywhere the current
6900 * task could be in any other state. Note that the idle task is always
6901 * in the TASK_RUNNING state.
6902 */
2f064a59 6903 WARN_ON_ONCE(current->__state);
8663effb 6904 do {
3dcac251 6905 __schedule(SM_IDLE);
8663effb
SRV
6906 } while (need_resched());
6907}
6908
24a9c541 6909#if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
722a9f92 6910asmlinkage __visible void __sched schedule_user(void)
20ab65e3
FW
6911{
6912 /*
6913 * If we come here after a random call to set_need_resched(),
6914 * or we have been woken up remotely but the IPI has not yet arrived,
6915 * we haven't yet exited the RCU idle mode. Do it here manually until
6916 * we find a better solution.
7cc78f8f
AL
6917 *
6918 * NB: There are buggy callers of this function. Ideally we
d65d411c 6919 * should warn if prev_state != CT_STATE_USER, but that will trigger
7cc78f8f 6920 * too frequently to make sense yet.
20ab65e3 6921 */
7cc78f8f 6922 enum ctx_state prev_state = exception_enter();
20ab65e3 6923 schedule();
7cc78f8f 6924 exception_exit(prev_state);
20ab65e3
FW
6925}
6926#endif
6927
c5491ea7
TG
6928/**
6929 * schedule_preempt_disabled - called with preemption disabled
6930 *
6931 * Returns with preemption disabled. Note: preempt_count must be 1
6932 */
6933void __sched schedule_preempt_disabled(void)
6934{
ba74c144 6935 sched_preempt_enable_no_resched();
c5491ea7
TG
6936 schedule();
6937 preempt_disable();
6938}
6939
6991436c
TG
6940#ifdef CONFIG_PREEMPT_RT
6941void __sched notrace schedule_rtlock(void)
6942{
de1474b4 6943 __schedule_loop(SM_RTLOCK_WAIT);
6991436c
TG
6944}
6945NOKPROBE_SYMBOL(schedule_rtlock);
6946#endif
6947
06b1f808 6948static void __sched notrace preempt_schedule_common(void)
a18b5d01
FW
6949{
6950 do {
47252cfb
SR
6951 /*
6952 * Because the function tracer can trace preempt_count_sub()
6953 * and it also uses preempt_enable/disable_notrace(), if
6954 * NEED_RESCHED is set, the preempt_enable_notrace() called
6955 * by the function tracer will call this function again and
6956 * cause infinite recursion.
6957 *
6958 * Preemption must be disabled here before the function
6959 * tracer can trace. Break up preempt_disable() into two
6960 * calls. One to disable preemption without fear of being
6961 * traced. The other to still record the preemption latency,
6962 * which can also be traced by the function tracer.
6963 */
499d7955 6964 preempt_disable_notrace();
47252cfb 6965 preempt_latency_start(1);
b4bfa3fc 6966 __schedule(SM_PREEMPT);
47252cfb 6967 preempt_latency_stop(1);
499d7955 6968 preempt_enable_no_resched_notrace();
a18b5d01
FW
6969
6970 /*
6971 * Check again in case we missed a preemption opportunity
6972 * between schedule and now.
6973 */
a18b5d01
FW
6974 } while (need_resched());
6975}
6976
c1a280b6 6977#ifdef CONFIG_PREEMPTION
1da177e4 6978/*
a49b4f40
VS
6979 * This is the entry point to schedule() from in-kernel preemption
6980 * off of preempt_enable.
1da177e4 6981 */
722a9f92 6982asmlinkage __visible void __sched notrace preempt_schedule(void)
1da177e4 6983{
1da177e4
LT
6984 /*
6985 * If there is a non-zero preempt_count or interrupts are disabled,
41a2d6cf 6986	 * we do not want to preempt the current task. Just return.
1da177e4 6987 */
fbb00b56 6988 if (likely(!preemptible()))
1da177e4 6989 return;
a18b5d01 6990 preempt_schedule_common();
1da177e4 6991}
376e2424 6992NOKPROBE_SYMBOL(preempt_schedule);
1da177e4 6993EXPORT_SYMBOL(preempt_schedule);
009f60e2 6994
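/*
 * Illustrative call path, not part of this file: preempt_schedule() is what
 * preempt_enable() resolves to when the preempt count drops back to zero
 * while a reschedule is pending, roughly:
 *
 *	preempt_disable();
 *	...				// a wakeup marks this task for preemption
 *	preempt_enable();		// -> __preempt_schedule() -> preempt_schedule()
 *
 * so a CONFIG_PREEMPTION kernel reschedules at the outermost preempt_enable()
 * instead of waiting for an explicit schedule() call.
 */
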
2c9a98d3 6995#ifdef CONFIG_PREEMPT_DYNAMIC
99cf983c 6996#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
8a69fe0b
MR
6997#ifndef preempt_schedule_dynamic_enabled
6998#define preempt_schedule_dynamic_enabled preempt_schedule
6999#define preempt_schedule_dynamic_disabled NULL
7000#endif
7001DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
ef72661e 7002EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
99cf983c
MR
7003#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7004static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
7005void __sched notrace dynamic_preempt_schedule(void)
7006{
7007 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
7008 return;
7009 preempt_schedule();
7010}
7011NOKPROBE_SYMBOL(dynamic_preempt_schedule);
7012EXPORT_SYMBOL(dynamic_preempt_schedule);
7013#endif
2c9a98d3 7014#endif
2c9a98d3 7015
009f60e2 7016/**
4eaca0a8 7017 * preempt_schedule_notrace - preempt_schedule called by tracing
009f60e2
ON
7018 *
7019 * The tracing infrastructure uses preempt_enable_notrace to prevent
7020 * recursion and tracing preempt enabling caused by the tracing
7021 * infrastructure itself. But as tracing can happen in areas coming
7022 * from userspace or just about to enter userspace, a preempt enable
7023 * can occur before user_exit() is called. This will cause the scheduler
7024 * to be called when the system is still in usermode.
7025 *
7026 * To prevent this, the preempt_enable_notrace will use this function
7027 * instead of preempt_schedule() to exit user context if needed before
7028 * calling the scheduler.
7029 */
4eaca0a8 7030asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
009f60e2
ON
7031{
7032 enum ctx_state prev_ctx;
7033
7034 if (likely(!preemptible()))
7035 return;
7036
7037 do {
47252cfb
SR
7038 /*
7039 * Because the function tracer can trace preempt_count_sub()
7040 * and it also uses preempt_enable/disable_notrace(), if
7041 * NEED_RESCHED is set, the preempt_enable_notrace() called
7042 * by the function tracer will call this function again and
7043 * cause infinite recursion.
7044 *
7045 * Preemption must be disabled here before the function
7046 * tracer can trace. Break up preempt_disable() into two
7047 * calls. One to disable preemption without fear of being
7048 * traced. The other to still record the preemption latency,
7049 * which can also be traced by the function tracer.
7050 */
3d8f74dd 7051 preempt_disable_notrace();
47252cfb 7052 preempt_latency_start(1);
009f60e2
ON
7053 /*
7054 * Needs preempt disabled in case user_exit() is traced
7055 * and the tracer calls preempt_enable_notrace() causing
7056 * an infinite recursion.
7057 */
7058 prev_ctx = exception_enter();
b4bfa3fc 7059 __schedule(SM_PREEMPT);
009f60e2
ON
7060 exception_exit(prev_ctx);
7061
47252cfb 7062 preempt_latency_stop(1);
3d8f74dd 7063 preempt_enable_no_resched_notrace();
009f60e2
ON
7064 } while (need_resched());
7065}
4eaca0a8 7066EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
009f60e2 7067
2c9a98d3 7068#ifdef CONFIG_PREEMPT_DYNAMIC
99cf983c 7069#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
8a69fe0b
MR
7070#ifndef preempt_schedule_notrace_dynamic_enabled
7071#define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace
7072#define preempt_schedule_notrace_dynamic_disabled NULL
2c9a98d3 7073#endif
8a69fe0b 7074DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
ef72661e 7075EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
99cf983c
MR
7076#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7077static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
7078void __sched notrace dynamic_preempt_schedule_notrace(void)
c597bfdd 7079{
99cf983c
MR
7080 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
7081 return;
7082 preempt_schedule_notrace();
c597bfdd 7083}
99cf983c
MR
7084NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
7085EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
7086#endif
2c9a98d3 7087#endif
c597bfdd 7088
c1a280b6 7089#endif /* CONFIG_PREEMPTION */
826bfeb3 7090
1da177e4 7091/*
a49b4f40 7092 * This is the entry point to schedule() from kernel preemption
402de7fc
IM
7093 * off of IRQ context.
7094 * Note that this is called and returns with IRQs disabled. This will
7095 * protect us against recursive calling from IRQ contexts.
1da177e4 7096 */
722a9f92 7097asmlinkage __visible void __sched preempt_schedule_irq(void)
1da177e4 7098{
b22366cd 7099 enum ctx_state prev_state;
6478d880 7100
2ed6e34f 7101 /* Catch callers which need to be fixed */
f27dde8d 7102 BUG_ON(preempt_count() || !irqs_disabled());
1da177e4 7103
b22366cd
FW
7104 prev_state = exception_enter();
7105
3a5c359a 7106 do {
3d8f74dd 7107 preempt_disable();
3a5c359a 7108 local_irq_enable();
b4bfa3fc 7109 __schedule(SM_PREEMPT);
3a5c359a 7110 local_irq_disable();
3d8f74dd 7111 sched_preempt_enable_no_resched();
5ed0cec0 7112 } while (need_resched());
b22366cd
FW
7113
7114 exception_exit(prev_state);
1da177e4
LT
7115}
7116
ac6424b9 7117int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
95cdf3b7 7118 void *key)
1da177e4 7119{
dd5bdaf2 7120 WARN_ON_ONCE(wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
63859d4f 7121 return try_to_wake_up(curr->private, mode, wake_flags);
1da177e4 7122}
1da177e4
LT
7123EXPORT_SYMBOL(default_wake_function);
7124
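/*
 * Illustrative usage, not part of this file: default_wake_function() is the
 * ->func that e.g. DECLARE_WAITQUEUE()/init_waitqueue_entry() install, so a
 * plain wait-queue wakeup boils down to try_to_wake_up() on the waiter:
 *
 *	DECLARE_WAITQUEUE(wait, current);
 *	add_wait_queue(&wq_head, &wait);	// wq_head: some wait_queue_head_t
 *	...					// set task state, schedule(), etc.
 *	wake_up(&wq_head);			// __wake_up() -> default_wake_function()
 *	remove_wait_queue(&wq_head, &wait);
 */
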
5db91545 7125const struct sched_class *__setscheduler_class(int policy, int prio)
f558c2b8
PZ
7126{
7127 if (dl_prio(prio))
98442f0c
PZ
7128 return &dl_sched_class;
7129
7130 if (rt_prio(prio))
7131 return &rt_sched_class;
7132
f0e1a064 7133#ifdef CONFIG_SCHED_CLASS_EXT
5db91545 7134 if (task_should_scx(policy))
98442f0c 7135 return &ext_sched_class;
f0e1a064 7136#endif
f558c2b8 7137
98442f0c 7138 return &fair_sched_class;
f558c2b8
PZ
7139}
7140
b29739f9
IM
7141#ifdef CONFIG_RT_MUTEXES
7142
6b596e62
PZ
7143/*
7144 * Would be more useful with typeof()/auto_type but they don't mix with
7145 * bit-fields. Since it's a local thing, use int. Keep the generic sounding
7146 * name such that if someone were to implement this function we get to compare
7147 * notes.
7148 */
7149#define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
7150
7151void rt_mutex_pre_schedule(void)
7152{
7153 lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
7154 sched_submit_work(current);
7155}
7156
7157void rt_mutex_schedule(void)
7158{
7159 lockdep_assert(current->sched_rt_mutex);
7160 __schedule_loop(SM_NONE);
7161}
7162
7163void rt_mutex_post_schedule(void)
7164{
7165 sched_update_worker(current);
7166 lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
7167}
7168
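/*
 * Illustrative sketch, not part of this file: the rt_mutex slow path is
 * expected to bracket its blocking section with the helpers above, roughly:
 *
 *	rt_mutex_pre_schedule();		// flush block requests once, up front
 *	...					// take wait_lock, enqueue the waiter
 *	rt_mutex_schedule();			// actually block
 *	...
 *	rt_mutex_post_schedule();
 *
 * The lockdep asserts on current->sched_rt_mutex enforce exactly this
 * pairing; calling rt_mutex_schedule() outside the bracket would trip them.
 */
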
b29739f9
IM
7169/*
7170 * rt_mutex_setprio - set the current priority of a task
acd58620
PZ
7171 * @p: task to boost
7172 * @pi_task: donor task
b29739f9
IM
7173 *
7174 * This function changes the 'effective' priority of a task. It does
7175 * not touch ->normal_prio like __setscheduler().
7176 *
c365c292
TG
7177 * Used by the rt_mutex code to implement priority inheritance
7178 * logic. Call site only calls if the priority of the task changed.
b29739f9 7179 */
acd58620 7180void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
b29739f9 7181{
acd58620 7182 int prio, oldprio, queued, running, queue_flag =
7a57f32a 7183 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
98442f0c 7184 const struct sched_class *prev_class, *next_class;
eb580751
PZ
7185 struct rq_flags rf;
7186 struct rq *rq;
b29739f9 7187
acd58620
PZ
7188 /* XXX used to be waiter->prio, not waiter->task->prio */
7189 prio = __rt_effective_prio(pi_task, p->normal_prio);
7190
7191 /*
7192 * If nothing changed; bail early.
7193 */
7194 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
7195 return;
b29739f9 7196
eb580751 7197 rq = __task_rq_lock(p, &rf);
80f5c1b8 7198 update_rq_clock(rq);
acd58620
PZ
7199 /*
7200 * Set under pi_lock && rq->lock, such that the value can be used under
7201 * either lock.
7202 *
7203 * Note that there is a lot of trickiness in making this pointer cache work
7204 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
7205 * ensure a task is de-boosted (pi_task is set to NULL) before the
7206 * task is allowed to run again (and can exit). This ensures the pointer
b19a888c 7207 * points to a blocked task -- which guarantees the task is present.
acd58620
PZ
7208 */
7209 p->pi_top_task = pi_task;
7210
7211 /*
7212 * For FIFO/RR we only need to set prio, if that matches we're done.
7213 */
7214 if (prio == p->prio && !dl_prio(prio))
7215 goto out_unlock;
b29739f9 7216
1c4dd99b 7217 /*
402de7fc 7218 * Idle task boosting is a no-no in general. There is one
1c4dd99b
TG
7219 * exception, when PREEMPT_RT and NOHZ are active:
7220 *
7221 * The idle task calls get_next_timer_interrupt() and holds
7222 * the timer wheel base->lock on the CPU and another CPU wants
7223 * to access the timer (probably to cancel it). We can safely
7224 * ignore the boosting request, as the idle CPU runs this code
7225 * with interrupts disabled and will complete the lock
7226 * protected section without being interrupted. So there is no
7227 * real need to boost.
7228 */
7229 if (unlikely(p == rq->idle)) {
7230 WARN_ON(p != rq->curr);
7231 WARN_ON(p->pi_blocked_on);
7232 goto out_unlock;
7233 }
7234
b91473ff 7235 trace_sched_pi_setprio(p, pi_task);
d5f9f942 7236 oldprio = p->prio;
ff77e468
PZ
7237
7238 if (oldprio == prio)
7239 queue_flag &= ~DEQUEUE_MOVE;
7240
83ab0aa0 7241 prev_class = p->sched_class;
5db91545 7242 next_class = __setscheduler_class(p->policy, prio);
98442f0c
PZ
7243
7244 if (prev_class != next_class && p->se.sched_delayed)
7245 dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);
7246
da0c1e65 7247 queued = task_on_rq_queued(p);
af0c8b2b 7248 running = task_current_donor(rq, p);
da0c1e65 7249 if (queued)
ff77e468 7250 dequeue_task(rq, p, queue_flag);
0e1f3483 7251 if (running)
f3cd1c4e 7252 put_prev_task(rq, p);
dd41f596 7253
2d3d891d
DF
7254 /*
7255 * Boosting conditions are:
7256 * 1. -rt task is running and holds mutex A
7257 * --> -dl task blocks on mutex A
7258 *
7259 * 2. -dl task is running and holds mutex A
7260 * --> -dl task blocks on mutex A and could preempt the
7261 * running task
7262 */
7263 if (dl_prio(prio)) {
466af29b 7264 if (!dl_prio(p->normal_prio) ||
740797ce
JL
7265 (pi_task && dl_prio(pi_task->prio) &&
7266 dl_entity_preempt(&pi_task->dl, &p->dl))) {
2279f540 7267 p->dl.pi_se = pi_task->dl.pi_se;
ff77e468 7268 queue_flag |= ENQUEUE_REPLENISH;
2279f540
JL
7269 } else {
7270 p->dl.pi_se = &p->dl;
7271 }
2d3d891d
DF
7272 } else if (rt_prio(prio)) {
7273 if (dl_prio(oldprio))
2279f540 7274 p->dl.pi_se = &p->dl;
2d3d891d 7275 if (oldprio < prio)
ff77e468 7276 queue_flag |= ENQUEUE_HEAD;
2d3d891d
DF
7277 } else {
7278 if (dl_prio(oldprio))
2279f540 7279 p->dl.pi_se = &p->dl;
746db944
BS
7280 if (rt_prio(oldprio))
7281 p->rt.timeout = 0;
2d3d891d 7282 }
dd41f596 7283
98442f0c
PZ
7284 p->sched_class = next_class;
7285 p->prio = prio;
7286
d8c7bc2e 7287 check_class_changing(rq, p, prev_class);
b29739f9 7288
da0c1e65 7289 if (queued)
ff77e468 7290 enqueue_task(rq, p, queue_flag);
a399d233 7291 if (running)
03b7fad1 7292 set_next_task(rq, p);
cb469845 7293
da7a735e 7294 check_class_changed(rq, p, prev_class, oldprio);
1c4dd99b 7295out_unlock:
d1ccc66d
IM
7296 /* Avoid rq from going away on us: */
7297 preempt_disable();
4c9a4bc8 7298
565790d2
PZ
7299 rq_unpin_lock(rq, &rf);
7300 __balance_callbacks(rq);
5cb9eaa3 7301 raw_spin_rq_unlock(rq);
565790d2 7302
4c9a4bc8 7303 preempt_enable();
b29739f9 7304}
b29739f9 7305#endif
d50dde5a 7306
04746ed8
IM
7307#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
7308int __sched __cond_resched(void)
1da177e4 7309{
82c387ef 7310 if (should_resched(0) && !irqs_disabled()) {
04746ed8
IM
7311 preempt_schedule_common();
7312 return 1;
1da177e4 7313 }
5443a0be 7314 /*
2c00e119 7315 * In PREEMPT_RCU kernels, ->rcu_read_lock_nesting tells the tick
04746ed8
IM
7316 * whether the current CPU is in an RCU read-side critical section,
7317 * so the tick can report quiescent states even for CPUs looping
7318 * in kernel context. In contrast, in non-preemptible kernels,
7319 * RCU readers leave no in-memory hints, which means that CPU-bound
7320 * processes executing in kernel context might never report an
7321 * RCU quiescent state. Therefore, the following code causes
7322 * cond_resched() to report a quiescent state, but only when RCU
7323 * is in urgent need of one.
2c00e119
AA
7324 * A third case, preemptible, but non-PREEMPT_RCU provides for
7325 * urgently needed quiescent states via rcu_flavor_sched_clock_irq().
5443a0be 7326 */
04746ed8
IM
7327#ifndef CONFIG_PREEMPT_RCU
7328 rcu_all_qs();
7329#endif
7330 return 0;
1da177e4 7331}
04746ed8
IM
7332EXPORT_SYMBOL(__cond_resched);
7333#endif
1da177e4 7334
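/*
 * Illustrative usage, not part of this file: long-running kernel loops insert
 * cond_resched() (which resolves to __cond_resched() on non-preemptible and
 * dynamic-preempt builds) as a voluntary reschedule point, e.g.:
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(&items[i]);	// hypothetical per-item work
 *		cond_resched();			// may schedule; also reports an RCU QS
 *	}
 */
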
04746ed8
IM
7335#ifdef CONFIG_PREEMPT_DYNAMIC
7336#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7337#define cond_resched_dynamic_enabled __cond_resched
7338#define cond_resched_dynamic_disabled ((void *)&__static_call_return0)
7339DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
7340EXPORT_STATIC_CALL_TRAMP(cond_resched);
48f24c4d 7341
04746ed8
IM
7342#define might_resched_dynamic_enabled __cond_resched
7343#define might_resched_dynamic_disabled ((void *)&__static_call_return0)
7344DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
7345EXPORT_STATIC_CALL_TRAMP(might_resched);
7346#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7347static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
7348int __sched dynamic_cond_resched(void)
7349{
04746ed8
IM
7350 if (!static_branch_unlikely(&sk_dynamic_cond_resched))
7351 return 0;
7352 return __cond_resched();
700a7833 7353}
04746ed8 7354EXPORT_SYMBOL(dynamic_cond_resched);
700a7833 7355
04746ed8
IM
7356static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
7357int __sched dynamic_might_resched(void)
700a7833 7358{
04746ed8
IM
7359 if (!static_branch_unlikely(&sk_dynamic_might_resched))
7360 return 0;
7361 return __cond_resched();
e43379f1 7362}
04746ed8
IM
7363EXPORT_SYMBOL(dynamic_might_resched);
7364#endif
7365#endif
1da177e4
LT
7366
7367/*
04746ed8
IM
7368 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
7369 * call schedule, and on return reacquire the lock.
1da177e4 7370 *
04746ed8
IM
7371 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
7372 * operations here to prevent schedule() from being called twice (once via
7373 * spin_unlock(), once by hand).
1da177e4 7374 */
04746ed8 7375int __cond_resched_lock(spinlock_t *lock)
1da177e4 7376{
04746ed8
IM
7377 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7378 int ret = 0;
1da177e4 7379
04746ed8 7380 lockdep_assert_held(lock);
1da177e4 7381
04746ed8
IM
7382 if (spin_needbreak(lock) || resched) {
7383 spin_unlock(lock);
7384 if (!_cond_resched())
7385 cpu_relax();
7386 ret = 1;
7387 spin_lock(lock);
7388 }
7389 return ret;
1da177e4 7390}
04746ed8 7391EXPORT_SYMBOL(__cond_resched_lock);
e43379f1 7392
04746ed8 7393int __cond_resched_rwlock_read(rwlock_t *lock)
1da177e4 7394{
04746ed8
IM
7395 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7396 int ret = 0;
1da177e4 7397
04746ed8 7398 lockdep_assert_held_read(lock);
1da177e4 7399
04746ed8
IM
7400 if (rwlock_needbreak(lock) || resched) {
7401 read_unlock(lock);
7402 if (!_cond_resched())
7403 cpu_relax();
7404 ret = 1;
7405 read_lock(lock);
7406 }
7407 return ret;
7408}
7409EXPORT_SYMBOL(__cond_resched_rwlock_read);
1da177e4 7410
04746ed8 7411int __cond_resched_rwlock_write(rwlock_t *lock)
1da177e4 7412{
04746ed8
IM
7413 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7414 int ret = 0;
f3d4b4b1
BG
7415
7416 lockdep_assert_held_write(lock);
7417
7418 if (rwlock_needbreak(lock) || resched) {
7419 write_unlock(lock);
7e406d1f 7420 if (!_cond_resched())
f3d4b4b1
BG
7421 cpu_relax();
7422 ret = 1;
7423 write_lock(lock);
7424 }
7425 return ret;
7426}
7427EXPORT_SYMBOL(__cond_resched_rwlock_write);
7428
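/*
 * Illustrative usage, not part of this file: the lock-dropping variants above
 * back cond_resched_lock() and friends, which let a long loop keep a lock
 * "mostly" held while still bounding latency, e.g.:
 *
 *	spin_lock(&lock);
 *	while (!list_empty(&head)) {
 *		process_one(&head);		// hypothetical per-entry work
 *		cond_resched_lock(&lock);	// may unlock, schedule, then relock
 *	}
 *	spin_unlock(&lock);
 */
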
4c748558
MR
7429#ifdef CONFIG_PREEMPT_DYNAMIC
7430
33c64734 7431#ifdef CONFIG_GENERIC_ENTRY
4c748558 7432#include <linux/entry-common.h>
33c64734 7433#endif
4c748558
MR
7434
7435/*
7436 * SC:cond_resched
7437 * SC:might_resched
7438 * SC:preempt_schedule
7439 * SC:preempt_schedule_notrace
7440 * SC:irqentry_exit_cond_resched
7441 *
7442 *
7443 * NONE:
7444 * cond_resched <- __cond_resched
7445 * might_resched <- RET0
7446 * preempt_schedule <- NOP
7447 * preempt_schedule_notrace <- NOP
7448 * irqentry_exit_cond_resched <- NOP
7c70cb94 7449 * dynamic_preempt_lazy <- false
4c748558
MR
7450 *
7451 * VOLUNTARY:
7452 * cond_resched <- __cond_resched
7453 * might_resched <- __cond_resched
7454 * preempt_schedule <- NOP
7455 * preempt_schedule_notrace <- NOP
7456 * irqentry_exit_cond_resched <- NOP
7c70cb94 7457 * dynamic_preempt_lazy <- false
4c748558
MR
7458 *
7459 * FULL:
7460 * cond_resched <- RET0
7461 * might_resched <- RET0
7462 * preempt_schedule <- preempt_schedule
7463 * preempt_schedule_notrace <- preempt_schedule_notrace
7464 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7c70cb94
PZ
7465 * dynamic_preempt_lazy <- false
7466 *
7467 * LAZY:
7468 * cond_resched <- RET0
7469 * might_resched <- RET0
7470 * preempt_schedule <- preempt_schedule
7471 * preempt_schedule_notrace <- preempt_schedule_notrace
7472 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7473 * dynamic_preempt_lazy <- true
4c748558
MR
7474 */
7475
7476enum {
7477 preempt_dynamic_undefined = -1,
7478 preempt_dynamic_none,
7479 preempt_dynamic_voluntary,
7480 preempt_dynamic_full,
7c70cb94 7481 preempt_dynamic_lazy,
4c748558
MR
7482};
7483
7484int preempt_dynamic_mode = preempt_dynamic_undefined;
7485
7486int sched_dynamic_mode(const char *str)
7487{
35772d62 7488#ifndef CONFIG_PREEMPT_RT
4c748558
MR
7489 if (!strcmp(str, "none"))
7490 return preempt_dynamic_none;
7491
7492 if (!strcmp(str, "voluntary"))
7493 return preempt_dynamic_voluntary;
35772d62 7494#endif
4c748558
MR
7495
7496 if (!strcmp(str, "full"))
7497 return preempt_dynamic_full;
7498
7c70cb94
PZ
7499#ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
7500 if (!strcmp(str, "lazy"))
7501 return preempt_dynamic_lazy;
7502#endif
7503
4c748558
MR
7504 return -EINVAL;
7505}
7506
7c70cb94
PZ
7507#define preempt_dynamic_key_enable(f) static_key_enable(&sk_dynamic_##f.key)
7508#define preempt_dynamic_key_disable(f) static_key_disable(&sk_dynamic_##f.key)
7509
99cf983c 7510#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
8a69fe0b
MR
7511#define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled)
7512#define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled)
99cf983c 7513#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7c70cb94
PZ
7514#define preempt_dynamic_enable(f) preempt_dynamic_key_enable(f)
7515#define preempt_dynamic_disable(f) preempt_dynamic_key_disable(f)
99cf983c
MR
7516#else
7517#error "Unsupported PREEMPT_DYNAMIC mechanism"
7518#endif
8a69fe0b 7519
9b8e1781 7520static DEFINE_MUTEX(sched_dynamic_mutex);
e3ff7c60
JP
7521
7522static void __sched_dynamic_update(int mode)
4c748558
MR
7523{
7524 /*
7525 * Prevent {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
7526 * the ZERO state, which is invalid.
7527 */
676e8cf7 7528 preempt_dynamic_enable(cond_resched);
8a69fe0b
MR
7529 preempt_dynamic_enable(might_resched);
7530 preempt_dynamic_enable(preempt_schedule);
7531 preempt_dynamic_enable(preempt_schedule_notrace);
7532 preempt_dynamic_enable(irqentry_exit_cond_resched);
7c70cb94 7533 preempt_dynamic_key_disable(preempt_lazy);
4c748558
MR
7534
7535 switch (mode) {
7536 case preempt_dynamic_none:
676e8cf7 7537 preempt_dynamic_enable(cond_resched);
8a69fe0b
MR
7538 preempt_dynamic_disable(might_resched);
7539 preempt_dynamic_disable(preempt_schedule);
7540 preempt_dynamic_disable(preempt_schedule_notrace);
7541 preempt_dynamic_disable(irqentry_exit_cond_resched);
7c70cb94 7542 preempt_dynamic_key_disable(preempt_lazy);
e3ff7c60
JP
7543 if (mode != preempt_dynamic_mode)
7544 pr_info("Dynamic Preempt: none\n");
4c748558
MR
7545 break;
7546
7547 case preempt_dynamic_voluntary:
676e8cf7 7548 preempt_dynamic_enable(cond_resched);
8a69fe0b
MR
7549 preempt_dynamic_enable(might_resched);
7550 preempt_dynamic_disable(preempt_schedule);
7551 preempt_dynamic_disable(preempt_schedule_notrace);
7552 preempt_dynamic_disable(irqentry_exit_cond_resched);
7c70cb94 7553 preempt_dynamic_key_disable(preempt_lazy);
e3ff7c60
JP
7554 if (mode != preempt_dynamic_mode)
7555 pr_info("Dynamic Preempt: voluntary\n");
4c748558
MR
7556 break;
7557
7558 case preempt_dynamic_full:
676e8cf7 7559 preempt_dynamic_disable(cond_resched);
8a69fe0b
MR
7560 preempt_dynamic_disable(might_resched);
7561 preempt_dynamic_enable(preempt_schedule);
7562 preempt_dynamic_enable(preempt_schedule_notrace);
7563 preempt_dynamic_enable(irqentry_exit_cond_resched);
7c70cb94 7564 preempt_dynamic_key_disable(preempt_lazy);
e3ff7c60
JP
7565 if (mode != preempt_dynamic_mode)
7566 pr_info("Dynamic Preempt: full\n");
4c748558 7567 break;
7c70cb94
PZ
7568
7569 case preempt_dynamic_lazy:
676e8cf7 7570 preempt_dynamic_disable(cond_resched);
7c70cb94
PZ
7571 preempt_dynamic_disable(might_resched);
7572 preempt_dynamic_enable(preempt_schedule);
7573 preempt_dynamic_enable(preempt_schedule_notrace);
7574 preempt_dynamic_enable(irqentry_exit_cond_resched);
7575 preempt_dynamic_key_enable(preempt_lazy);
7576 if (mode != preempt_dynamic_mode)
7577 pr_info("Dynamic Preempt: lazy\n");
7578 break;
4c748558
MR
7579 }
7580
7581 preempt_dynamic_mode = mode;
7582}
7583
e3ff7c60
JP
7584void sched_dynamic_update(int mode)
7585{
7586 mutex_lock(&sched_dynamic_mutex);
7587 __sched_dynamic_update(mode);
7588 mutex_unlock(&sched_dynamic_mutex);
7589}
7590
4c748558
MR
7591static int __init setup_preempt_mode(char *str)
7592{
7593 int mode = sched_dynamic_mode(str);
7594 if (mode < 0) {
7595 pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
7596 return 0;
7597 }
7598
7599 sched_dynamic_update(mode);
7600 return 1;
7601}
7602__setup("preempt=", setup_preempt_mode);
7603
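/*
 * Illustrative usage, not part of this file: with CONFIG_PREEMPT_DYNAMIC the
 * model is selected by the "preempt=" parameter parsed above, e.g. on the
 * kernel command line:
 *
 *	preempt=none		(rejected on PREEMPT_RT)
 *	preempt=voluntary	(rejected on PREEMPT_RT)
 *	preempt=full
 *	preempt=lazy		(only with CONFIG_ARCH_HAS_PREEMPT_LAZY)
 *
 * Each string is mapped by sched_dynamic_mode() and applied through
 * sched_dynamic_update(); debugfs builds typically expose the same switch at
 * runtime (assumed path: /sys/kernel/debug/sched/preempt).
 */
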
7604static void __init preempt_dynamic_init(void)
7605{
7606 if (preempt_dynamic_mode == preempt_dynamic_undefined) {
7607 if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
7608 sched_dynamic_update(preempt_dynamic_none);
7609 } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
7610 sched_dynamic_update(preempt_dynamic_voluntary);
7c70cb94
PZ
7611 } else if (IS_ENABLED(CONFIG_PREEMPT_LAZY)) {
7612 sched_dynamic_update(preempt_dynamic_lazy);
4c748558
MR
7613 } else {
7614 /* Default static call setting, nothing to do */
7615 WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
7616 preempt_dynamic_mode = preempt_dynamic_full;
7617 pr_info("Dynamic Preempt: full\n");
7618 }
7619 }
7620}
7621
cfe43f47
VS
7622#define PREEMPT_MODEL_ACCESSOR(mode) \
7623 bool preempt_model_##mode(void) \
7624 { \
7625 WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
7626 return preempt_dynamic_mode == preempt_dynamic_##mode; \
7627 } \
7628 EXPORT_SYMBOL_GPL(preempt_model_##mode)
7629
7630PREEMPT_MODEL_ACCESSOR(none);
7631PREEMPT_MODEL_ACCESSOR(voluntary);
7632PREEMPT_MODEL_ACCESSOR(full);
7c70cb94 7633PREEMPT_MODEL_ACCESSOR(lazy);
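
/*
 * Illustrative expansion, not part of this file: each accessor above becomes
 * a simple predicate on preempt_dynamic_mode, e.g.:
 *
 *	bool preempt_model_full(void)
 *	{
 *		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined);
 *		return preempt_dynamic_mode == preempt_dynamic_full;
 *	}
 *
 * so callers can ask which model is in effect right now without caring
 * whether it was fixed at build time or chosen via preempt= / debugfs.
 */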
cfe43f47 7634
402de7fc 7635#else /* !CONFIG_PREEMPT_DYNAMIC: */
4c748558 7636
8bdc5daa
SAS
7637#define preempt_dynamic_mode -1
7638
4c748558
MR
7639static inline void preempt_dynamic_init(void) { }
7640
402de7fc 7641#endif /* CONFIG_PREEMPT_DYNAMIC */
d95f4122 7642
8bdc5daa
SAS
7643const char *preempt_modes[] = {
7644 "none", "voluntary", "full", "lazy", NULL,
7645};
7646
7647const char *preempt_model_str(void)
7648{
7649 bool brace = IS_ENABLED(CONFIG_PREEMPT_RT) &&
7650 (IS_ENABLED(CONFIG_PREEMPT_DYNAMIC) ||
7651 IS_ENABLED(CONFIG_PREEMPT_LAZY));
7652 static char buf[128];
7653
7654 if (IS_ENABLED(CONFIG_PREEMPT_BUILD)) {
7655 struct seq_buf s;
7656
7657 seq_buf_init(&s, buf, sizeof(buf));
7658 seq_buf_puts(&s, "PREEMPT");
7659
7660 if (IS_ENABLED(CONFIG_PREEMPT_RT))
7661 seq_buf_printf(&s, "%sRT%s",
7662 brace ? "_{" : "_",
7663 brace ? "," : "");
7664
7665 if (IS_ENABLED(CONFIG_PREEMPT_DYNAMIC)) {
7666 seq_buf_printf(&s, "(%s)%s",
3ebb1b65 7667 preempt_dynamic_mode >= 0 ?
8bdc5daa
SAS
7668 preempt_modes[preempt_dynamic_mode] : "undef",
7669 brace ? "}" : "");
7670 return seq_buf_str(&s);
7671 }
7672
7673 if (IS_ENABLED(CONFIG_PREEMPT_LAZY)) {
7674 seq_buf_printf(&s, "LAZY%s",
7675 brace ? "}" : "");
7676 return seq_buf_str(&s);
7677 }
7678
7679 return seq_buf_str(&s);
7680 }
7681
7682 if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY_BUILD))
7683 return "VOLUNTARY";
7684
7685 return "NONE";
7686}
7687
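/*
 * Illustrative outputs, not part of this file, derived from the branches in
 * preempt_model_str() above:
 *
 *	CONFIG_PREEMPT build:				"PREEMPT"
 *	CONFIG_PREEMPT_DYNAMIC, preempt=full:		"PREEMPT(full)"
 *	CONFIG_PREEMPT_RT:				"PREEMPT_RT"
 *	CONFIG_PREEMPT_RT + CONFIG_PREEMPT_LAZY:	"PREEMPT_{RT,LAZY}"
 *	CONFIG_PREEMPT_VOLUNTARY build:			"VOLUNTARY"
 *	otherwise:					"NONE"
 *
 * Callers such as the stack-dump/oops header are expected to print this
 * string to identify the preemption model of the running kernel.
 */
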
10ab5643
TH
7688int io_schedule_prepare(void)
7689{
7690 int old_iowait = current->in_iowait;
7691
7692 current->in_iowait = 1;
aa8dccca 7693 blk_flush_plug(current->plug, true);
10ab5643
TH
7694 return old_iowait;
7695}
7696
7697void io_schedule_finish(int token)
7698{
7699 current->in_iowait = token;
7700}
7701
1da177e4 7702/*
41a2d6cf 7703 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
1da177e4 7704 * that process accounting knows that this is a task in IO wait state.
1da177e4 7705 */
1da177e4
LT
7706long __sched io_schedule_timeout(long timeout)
7707{
10ab5643 7708 int token;
1da177e4
LT
7709 long ret;
7710
10ab5643 7711 token = io_schedule_prepare();
1da177e4 7712 ret = schedule_timeout(timeout);
10ab5643 7713 io_schedule_finish(token);
9cff8ade 7714
1da177e4
LT
7715 return ret;
7716}
9cff8ade 7717EXPORT_SYMBOL(io_schedule_timeout);
1da177e4 7718
e3b929b0 7719void __sched io_schedule(void)
10ab5643
TH
7720{
7721 int token;
7722
7723 token = io_schedule_prepare();
7724 schedule();
7725 io_schedule_finish(token);
7726}
7727EXPORT_SYMBOL(io_schedule);
7728
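/*
 * Illustrative sketch, not part of this file: callers that block on I/O via a
 * primitive other than plain schedule() can mirror the helpers above, e.g.:
 *
 *	int token = io_schedule_prepare();	// mark iowait, flush plugged I/O
 *	wait_for_completion(&done);		// "done" is a hypothetical completion
 *	io_schedule_finish(token);		// restore ->in_iowait
 *
 * which is exactly the shape used by io_schedule() and io_schedule_timeout().
 */
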
82a1fcb9 7729void sched_show_task(struct task_struct *p)
1da177e4 7730{
fbe76a65 7731 unsigned long free;
4e79752c 7732 int ppid;
c930b2c0 7733
38200502
TH
7734 if (!try_get_task_stack(p))
7735 return;
20435d84 7736
cc172ff3 7737 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
20435d84 7738
b03fbd4f 7739 if (task_is_running(p))
cc172ff3 7740 pr_cont(" running task ");
7c9f8861 7741 free = stack_not_used(p);
a90e984c 7742 ppid = 0;
4e79752c 7743 rcu_read_lock();
a90e984c
ON
7744 if (pid_alive(p))
7745 ppid = task_pid_nr(rcu_dereference(p->real_parent));
4e79752c 7746 rcu_read_unlock();
65ef17aa 7747 pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d task_flags:0x%04x flags:0x%08lx\n",
bc87127a 7748 free, task_pid_nr(p), task_tgid_nr(p),
65ef17aa 7749 ppid, p->flags, read_task_thread_flags(p));
1da177e4 7750
3d1cb205 7751 print_worker_info(KERN_INFO, p);
a8b62fd0 7752 print_stop_info(KERN_INFO, p);
1538e339 7753 print_scx_info(KERN_INFO, p);
9cb8f069 7754 show_stack(p, NULL, KERN_INFO);
38200502 7755 put_task_stack(p);
1da177e4 7756}
0032f4e8 7757EXPORT_SYMBOL_GPL(sched_show_task);
1da177e4 7758
5d68cc95
PZ
7759static inline bool
7760state_filter_match(unsigned long state_filter, struct task_struct *p)
7761{
2f064a59
PZ
7762 unsigned int state = READ_ONCE(p->__state);
7763
5d68cc95
PZ
7764 /* no filter, everything matches */
7765 if (!state_filter)
7766 return true;
7767
7768 /* filter, but doesn't match */
2f064a59 7769 if (!(state & state_filter))
5d68cc95
PZ
7770 return false;
7771
7772 /*
7773 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
7774 * TASK_KILLABLE).
7775 */
5aec788a 7776 if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
5d68cc95
PZ
7777 return false;
7778
7779 return true;
7780}
7781
7782
2f064a59 7783void show_state_filter(unsigned int state_filter)
1da177e4 7784{
36c8b586 7785 struct task_struct *g, *p;
1da177e4 7786
510f5acc 7787 rcu_read_lock();
5d07f420 7788 for_each_process_thread(g, p) {
1da177e4
LT
7789 /*
7790		 * reset the NMI-timeout; listing all tasks on a slow
25985edc 7791 * console might take a lot of time:
57675cb9
AR
7792 * Also, reset softlockup watchdogs on all CPUs, because
7793 * another CPU might be blocked waiting for us to process
7794 * an IPI.
1da177e4
LT
7795 */
7796 touch_nmi_watchdog();
57675cb9 7797 touch_all_softlockup_watchdogs();
5d68cc95 7798 if (state_filter_match(state_filter, p))
82a1fcb9 7799 sched_show_task(p);
5d07f420 7800 }
1da177e4 7801
fb90a6e9
RV
7802 if (!state_filter)
7803 sysrq_sched_debug_show();
dd5bdaf2 7804
510f5acc 7805 rcu_read_unlock();
e59e2ae2
IM
7806 /*
7807 * Only show locks if all tasks are dumped:
7808 */
93335a21 7809 if (!state_filter)
e59e2ae2 7810 debug_show_all_locks();
1da177e4
LT
7811}
7812
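/*
 * Illustrative triggers, not part of this file: show_state_filter(0) is what
 * the SysRq 't' handler ends up invoking (via show_state()), while SysRq 'w'
 * passes TASK_UNINTERRUPTIBLE to dump only blocked tasks; which tasks get
 * printed is decided by state_filter_match() above.
 */
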
f340c0d1
IM
7813/**
7814 * init_idle - set up an idle thread for a given CPU
7815 * @idle: task in question
d1ccc66d 7816 * @cpu: CPU the idle task belongs to
f340c0d1
IM
7817 *
7818 * NOTE: this function does not set the idle thread's NEED_RESCHED
7819 * flag, to make booting more robust.
7820 */
f1a0a376 7821void __init init_idle(struct task_struct *idle, int cpu)
1da177e4 7822{
713a2e21
WL
7823#ifdef CONFIG_SMP
7824 struct affinity_context ac = (struct affinity_context) {
7825 .new_mask = cpumask_of(cpu),
7826 .flags = 0,
7827 };
7828#endif
70b97a7f 7829 struct rq *rq = cpu_rq(cpu);
1da177e4
LT
7830 unsigned long flags;
7831
25834c73 7832 raw_spin_lock_irqsave(&idle->pi_lock, flags);
5cb9eaa3 7833 raw_spin_rq_lock(rq);
5cbd54ef 7834
2f064a59 7835 idle->__state = TASK_RUNNING;
dd41f596 7836 idle->se.exec_start = sched_clock();
00b89fe0
VS
7837 /*
7838 * PF_KTHREAD should already be set at this point; regardless, make it
7839 * look like a proper per-CPU kthread.
7840 */
cff9b233 7841 idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
00b89fe0 7842 kthread_set_per_cpu(idle, cpu);
dd41f596 7843
de9b8f5d
PZ
7844#ifdef CONFIG_SMP
7845 /*
b23decf8
TG
7846 * No validation and serialization required at boot time and for
7847 * setting up the idle tasks of not yet online CPUs.
de9b8f5d 7848 */
713a2e21 7849 set_cpus_allowed_common(idle, &ac);
de9b8f5d 7850#endif
6506cf6c
PZ
7851 /*
7852 * We're having a chicken and egg problem, even though we are
d1ccc66d 7853 * holding rq->lock, the CPU isn't yet set to this CPU so the
6506cf6c
PZ
7854 * lockdep check in task_group() will fail.
7855 *
7856 * Similar case to sched_fork(). / Alternatively we could
7857 * use task_rq_lock() here and obtain the other rq->lock.
7858 *
7859 * Silence PROVE_RCU
7860 */
7861 rcu_read_lock();
dd41f596 7862 __set_task_cpu(idle, cpu);
6506cf6c 7863 rcu_read_unlock();
1da177e4 7864
5311a98f 7865 rq->idle = idle;
af0c8b2b 7866 rq_set_donor(rq, idle);
5311a98f 7867 rcu_assign_pointer(rq->curr, idle);
da0c1e65 7868 idle->on_rq = TASK_ON_RQ_QUEUED;
de9b8f5d 7869#ifdef CONFIG_SMP
3ca7a440 7870 idle->on_cpu = 1;
4866cde0 7871#endif
5cb9eaa3 7872 raw_spin_rq_unlock(rq);
25834c73 7873 raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
1da177e4
LT
7874
7875 /* Set the preempt count _outside_ the spinlocks! */
01028747 7876 init_idle_preempt_count(idle, cpu);
55cd5340 7877
dd41f596
IM
7878 /*
7879 * The idle tasks have their own, simple scheduling class:
7880 */
7881 idle->sched_class = &idle_sched_class;
868baf07 7882 ftrace_graph_init_idle_task(idle, cpu);
45eacc69 7883 vtime_init_idle(idle, cpu);
de9b8f5d 7884#ifdef CONFIG_SMP
f1c6f1a7
CE
7885 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
7886#endif
19978ca6
IM
7887}
7888
e1d4eeec
NP
7889#ifdef CONFIG_SMP
7890
f82f8042
JL
7891int cpuset_cpumask_can_shrink(const struct cpumask *cur,
7892 const struct cpumask *trial)
7893{
06a76fe0 7894 int ret = 1;
f82f8042 7895
1087ad4e 7896 if (cpumask_empty(cur))
bb2bc55a
MG
7897 return ret;
7898
06a76fe0 7899 ret = dl_cpuset_cpumask_can_shrink(cur, trial);
f82f8042
JL
7900
7901 return ret;
7902}
7903
2ef269ef 7904int task_can_attach(struct task_struct *p)
7f51412a
JL
7905{
7906 int ret = 0;
7907
7908 /*
7909 * Kthreads which disallow setaffinity shouldn't be moved
d1ccc66d 7910 * to a new cpuset; we don't want to change their CPU
7f51412a
JL
7911 * affinity and isolating such threads by their set of
7912 * allowed nodes is unnecessary. Thus, cpusets are not
7913 * applicable for such threads. This prevents checking for
7914 * success of set_cpus_allowed_ptr() on all attached tasks
3bd37062 7915 * before cpus_mask may be changed.
7f51412a 7916 */
2ef269ef 7917 if (p->flags & PF_NO_SETAFFINITY)
7f51412a 7918 ret = -EINVAL;
7f51412a 7919
7f51412a
JL
7920 return ret;
7921}
7922
f2cb1360 7923bool sched_smp_initialized __read_mostly;
e26fbffd 7924
e6628d5b
MG
7925#ifdef CONFIG_NUMA_BALANCING
7926/* Migrate current task p to target_cpu */
7927int migrate_task_to(struct task_struct *p, int target_cpu)
7928{
7929 struct migration_arg arg = { p, target_cpu };
7930 int curr_cpu = task_cpu(p);
7931
7932 if (curr_cpu == target_cpu)
7933 return 0;
7934
3bd37062 7935 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
e6628d5b
MG
7936 return -EINVAL;
7937
db6cc3f4
CY
7938 /* TODO: This is not properly updating schedstats */
7939
286549dc 7940 trace_sched_move_numa(p, curr_cpu, target_cpu);
e6628d5b
MG
7941 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
7942}
0ec8aa00
PZ
7943
7944/*
7945 * Requeue a task on a given node and accurately track the number of NUMA
7946 * tasks on the runqueues
7947 */
7948void sched_setnuma(struct task_struct *p, int nid)
7949{
da0c1e65 7950 bool queued, running;
eb580751
PZ
7951 struct rq_flags rf;
7952 struct rq *rq;
0ec8aa00 7953
eb580751 7954 rq = task_rq_lock(p, &rf);
da0c1e65 7955 queued = task_on_rq_queued(p);
af0c8b2b 7956 running = task_current_donor(rq, p);
0ec8aa00 7957
da0c1e65 7958 if (queued)
1de64443 7959 dequeue_task(rq, p, DEQUEUE_SAVE);
0ec8aa00 7960 if (running)
f3cd1c4e 7961 put_prev_task(rq, p);
0ec8aa00
PZ
7962
7963 p->numa_preferred_nid = nid;
0ec8aa00 7964
da0c1e65 7965 if (queued)
7134b3e9 7966 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
a399d233 7967 if (running)
03b7fad1 7968 set_next_task(rq, p);
eb580751 7969 task_rq_unlock(rq, p, &rf);
0ec8aa00 7970}
5cc389bc 7971#endif /* CONFIG_NUMA_BALANCING */
f7b4cddc 7972
1da177e4 7973#ifdef CONFIG_HOTPLUG_CPU
054b9108 7974/*
21641bd9
NP
7975 * Invoked on the outgoing CPU in context of the CPU hotplug thread
7976 * after ensuring that there are no user space tasks left on the CPU.
7977 *
7978 * If there is a lazy mm in use on the hotplug thread, drop it and
7979 * switch to init_mm.
7980 *
7981 * The reference count on init_mm is dropped in finish_cpu().
054b9108 7982 */
21641bd9 7983static void sched_force_init_mm(void)
1da177e4 7984{
48c5ccae 7985 struct mm_struct *mm = current->active_mm;
e76bd8d9 7986
a53efe5f 7987 if (mm != &init_mm) {
21641bd9
NP
7988 mmgrab_lazy_tlb(&init_mm);
7989 local_irq_disable();
7990 current->active_mm = &init_mm;
7991 switch_mm_irqs_off(mm, &init_mm, current);
7992 local_irq_enable();
a53efe5f 7993 finish_arch_post_lock_switch();
21641bd9 7994 mmdrop_lazy_tlb(mm);
a53efe5f 7995 }
bf2c59fc
PZ
7996
7997 /* finish_cpu(), as ran on the BP, will clean up the active_mm state */
1da177e4
LT
7998}
7999
2558aacf 8000static int __balance_push_cpu_stop(void *arg)
1da177e4 8001{
2558aacf
PZ
8002 struct task_struct *p = arg;
8003 struct rq *rq = this_rq();
8004 struct rq_flags rf;
8005 int cpu;
1da177e4 8006
2558aacf
PZ
8007 raw_spin_lock_irq(&p->pi_lock);
8008 rq_lock(rq, &rf);
3f1d2a31 8009
2558aacf
PZ
8010 update_rq_clock(rq);
8011
8012 if (task_rq(p) == rq && task_on_rq_queued(p)) {
8013 cpu = select_fallback_rq(rq->cpu, p);
8014 rq = __migrate_task(rq, &rf, p, cpu);
10e7071b 8015 }
3f1d2a31 8016
2558aacf
PZ
8017 rq_unlock(rq, &rf);
8018 raw_spin_unlock_irq(&p->pi_lock);
8019
8020 put_task_struct(p);
8021
8022 return 0;
10e7071b 8023}
3f1d2a31 8024
2558aacf
PZ
8025static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
8026
48f24c4d 8027/*
2558aacf 8028 * Ensure we only run per-cpu kthreads once the CPU goes !active.
b5c44773
PZ
8029 *
8030 * This is enabled below SCHED_AP_ACTIVE, i.e. when !cpu_active(), but it
8031 * only takes effect while the hotplug direction is down.
1da177e4 8032 */
2558aacf 8033static void balance_push(struct rq *rq)
1da177e4 8034{
2558aacf
PZ
8035 struct task_struct *push_task = rq->curr;
8036
5cb9eaa3 8037 lockdep_assert_rq_held(rq);
b5c44773 8038
ae792702
PZ
8039 /*
8040	 * Ensure the callback stays installed until balance_push_set(.on = false);
8041 */
8042 rq->balance_callback = &balance_push_callback;
1da177e4 8043
b5c44773 8044 /*
868ad33b
TG
8045 * Only active while going offline and when invoked on the outgoing
8046 * CPU.
b5c44773 8047 */
868ad33b 8048 if (!cpu_dying(rq->cpu) || rq != this_rq())
b5c44773
PZ
8049 return;
8050
1da177e4 8051 /*
2558aacf
PZ
8052 * Both the cpu-hotplug and stop task are in this case and are
8053 * required to complete the hotplug process.
1da177e4 8054 */
00b89fe0 8055 if (kthread_is_per_cpu(push_task) ||
5ba2ffba
PZ
8056 is_migration_disabled(push_task)) {
8057
f2469a1f
TG
8058 /*
8059 * If this is the idle task on the outgoing CPU try to wake
8060 * up the hotplug control thread which might wait for the
8061 * last task to vanish. The rcuwait_active() check is
8062 * accurate here because the waiter is pinned on this CPU
8063 * and can't obviously be running in parallel.
3015ef4b
TG
8064 *
8065 * On RT kernels this also has to check whether there are
8066 * pinned and scheduled out tasks on the runqueue. They
8067 * need to leave the migrate disabled section first.
f2469a1f 8068 */
3015ef4b
TG
8069 if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
8070 rcuwait_active(&rq->hotplug_wait)) {
5cb9eaa3 8071 raw_spin_rq_unlock(rq);
f2469a1f 8072 rcuwait_wake_up(&rq->hotplug_wait);
5cb9eaa3 8073 raw_spin_rq_lock(rq);
f2469a1f 8074 }
2558aacf 8075 return;
f2469a1f 8076 }
48f24c4d 8077
2558aacf 8078 get_task_struct(push_task);
77bd3970 8079 /*
2558aacf
PZ
8080 * Temporarily drop rq->lock such that we can wake-up the stop task.
8081 * Both preemption and IRQs are still disabled.
77bd3970 8082 */
f0498d2a 8083 preempt_disable();
5cb9eaa3 8084 raw_spin_rq_unlock(rq);
2558aacf
PZ
8085 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
8086 this_cpu_ptr(&push_work));
f0498d2a 8087 preempt_enable();
2558aacf
PZ
8088 /*
8089 * At this point need_resched() is true and we'll take the loop in
8090 * schedule(). The next pick is obviously going to be the stop task
5ba2ffba 8091 * which kthread_is_per_cpu() and will push this task away.
2558aacf 8092 */
5cb9eaa3 8093 raw_spin_rq_lock(rq);
2558aacf 8094}
77bd3970 8095
2558aacf
PZ
8096static void balance_push_set(int cpu, bool on)
8097{
8098 struct rq *rq = cpu_rq(cpu);
8099 struct rq_flags rf;
48c5ccae 8100
2558aacf 8101 rq_lock_irqsave(rq, &rf);
22f667c9
PZ
8102 if (on) {
8103 WARN_ON_ONCE(rq->balance_callback);
ae792702 8104 rq->balance_callback = &balance_push_callback;
22f667c9 8105 } else if (rq->balance_callback == &balance_push_callback) {
ae792702 8106 rq->balance_callback = NULL;
22f667c9 8107 }
2558aacf
PZ
8108 rq_unlock_irqrestore(rq, &rf);
8109}
e692ab53 8110
f2469a1f
TG
8111/*
8112 * Invoked from a CPUs hotplug control thread after the CPU has been marked
8113 * inactive. All tasks which are not per CPU kernel threads are either
8114 * pushed off this CPU now via balance_push() or placed on a different CPU
8115 * during wakeup. Wait until the CPU is quiescent.
8116 */
8117static void balance_hotplug_wait(void)
8118{
8119 struct rq *rq = this_rq();
5473e0cc 8120
3015ef4b
TG
8121 rcuwait_wait_event(&rq->hotplug_wait,
8122 rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
f2469a1f
TG
8123 TASK_UNINTERRUPTIBLE);
8124}
5473e0cc 8125
2558aacf 8126#else
dce48a84 8127
2558aacf
PZ
8128static inline void balance_push(struct rq *rq)
8129{
dce48a84 8130}
dce48a84 8131
2558aacf
PZ
8132static inline void balance_push_set(int cpu, bool on)
8133{
8134}
8135
f2469a1f
TG
8136static inline void balance_hotplug_wait(void)
8137{
dce48a84 8138}
f2469a1f 8139
1da177e4
LT
8140#endif /* CONFIG_HOTPLUG_CPU */
8141
f2cb1360 8142void set_rq_online(struct rq *rq)
1f11eb6a
GH
8143{
8144 if (!rq->online) {
8145 const struct sched_class *class;
8146
c6c4927b 8147 cpumask_set_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
8148 rq->online = 1;
8149
8150 for_each_class(class) {
8151 if (class->rq_online)
8152 class->rq_online(rq);
8153 }
8154 }
8155}
8156
f2cb1360 8157void set_rq_offline(struct rq *rq)
1f11eb6a
GH
8158{
8159 if (rq->online) {
8160 const struct sched_class *class;
8161
cab3ecae 8162 update_rq_clock(rq);
1f11eb6a
GH
8163 for_each_class(class) {
8164 if (class->rq_offline)
8165 class->rq_offline(rq);
8166 }
8167
c6c4927b 8168 cpumask_clear_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
8169 rq->online = 0;
8170 }
8171}
8172
2f027354
YY
8173static inline void sched_set_rq_online(struct rq *rq, int cpu)
8174{
8175 struct rq_flags rf;
8176
8177 rq_lock_irqsave(rq, &rf);
8178 if (rq->rd) {
8179 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8180 set_rq_online(rq);
8181 }
8182 rq_unlock_irqrestore(rq, &rf);
8183}
8184
8185static inline void sched_set_rq_offline(struct rq *rq, int cpu)
8186{
8187 struct rq_flags rf;
8188
8189 rq_lock_irqsave(rq, &rf);
8190 if (rq->rd) {
8191 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8192 set_rq_offline(rq);
8193 }
8194 rq_unlock_irqrestore(rq, &rf);
8195}
8196
d1ccc66d
IM
8197/*
8198 * used to mark begin/end of suspend/resume:
8199 */
8200static int num_cpus_frozen;
d35be8ba 8201
1da177e4 8202/*
3a101d05
TH
8203 * Update cpusets according to cpu_active mask. If cpusets are
8204 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
8205 * around partition_sched_domains().
d35be8ba
SB
8206 *
8207 * If we come here as part of a suspend/resume, don't touch cpusets because we
8208 * want to restore it back to its original state upon resume anyway.
1da177e4 8209 */
40190a78 8210static void cpuset_cpu_active(void)
e761b772 8211{
40190a78 8212 if (cpuhp_tasks_frozen) {
d35be8ba
SB
8213 /*
8214 * num_cpus_frozen tracks how many CPUs are involved in suspend
8215 * resume sequence. As long as this is not the last online
8216 * operation in the resume sequence, just build a single sched
8217 * domain, ignoring cpusets.
8218 */
2ff899e3 8219 cpuset_reset_sched_domains();
50e76632 8220 if (--num_cpus_frozen)
135fb3e1 8221 return;
d35be8ba
SB
8222 /*
8223 * This is the last CPU online operation. So fall through and
8224 * restore the original sched domains by considering the
8225 * cpuset configurations.
8226 */
50e76632 8227 cpuset_force_rebuild();
3a101d05 8228 }
30e03acd 8229 cpuset_update_active_cpus();
3a101d05 8230}
e761b772 8231
53916d5f 8232static void cpuset_cpu_inactive(unsigned int cpu)
3a101d05 8233{
40190a78 8234 if (!cpuhp_tasks_frozen) {
30e03acd 8235 cpuset_update_active_cpus();
135fb3e1 8236 } else {
d35be8ba 8237 num_cpus_frozen++;
2ff899e3 8238 cpuset_reset_sched_domains();
e761b772
MK
8239 }
8240}
e761b772 8241
31b164e2
YY
8242static inline void sched_smt_present_inc(int cpu)
8243{
8244#ifdef CONFIG_SCHED_SMT
8245 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8246 static_branch_inc_cpuslocked(&sched_smt_present);
8247#endif
8248}
8249
8250static inline void sched_smt_present_dec(int cpu)
8251{
8252#ifdef CONFIG_SCHED_SMT
8253 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8254 static_branch_dec_cpuslocked(&sched_smt_present);
8255#endif
8256}
8257
40190a78 8258int sched_cpu_activate(unsigned int cpu)
135fb3e1 8259{
7d976699 8260 struct rq *rq = cpu_rq(cpu);
7d976699 8261
22f667c9 8262 /*
b5c44773
PZ
8263 * Clear the balance_push callback and prepare to schedule
8264 * regular tasks.
22f667c9 8265 */
2558aacf
PZ
8266 balance_push_set(cpu, false);
8267
ba2591a5 8268 /*
c5511d03 8269 * When going up, increment the number of cores with SMT present.
ba2591a5 8270 */
31b164e2 8271 sched_smt_present_inc(cpu);
40190a78 8272 set_cpu_active(cpu, true);
135fb3e1 8273
40190a78 8274 if (sched_smp_initialized) {
0fb3978b 8275 sched_update_numa(cpu, true);
135fb3e1 8276 sched_domains_numa_masks_set(cpu);
40190a78 8277 cpuset_cpu_active();
e761b772 8278 }
7d976699 8279
60c27fb5
TH
8280 scx_rq_activate(rq);
8281
7d976699
TG
8282 /*
8283 * Put the rq online, if not already. This happens:
8284 *
8285 * 1) In the early boot process, because we build the real domains
d1ccc66d 8286 * after all CPUs have been brought up.
7d976699
TG
8287 *
8288 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
8289 * domains.
8290 */
2f027354 8291 sched_set_rq_online(rq, cpu);
7d976699 8292
40190a78 8293 return 0;
135fb3e1
TG
8294}
8295
40190a78 8296int sched_cpu_deactivate(unsigned int cpu)
135fb3e1 8297{
120455c5 8298 struct rq *rq = cpu_rq(cpu);
135fb3e1
TG
8299 int ret;
8300
53916d5f
JL
8301 ret = dl_bw_deactivate(cpu);
8302
8303 if (ret)
8304 return ret;
8305
e0b257c3
AMB
8306 /*
8307 * Remove CPU from nohz.idle_cpus_mask to prevent participating in
8308 * load balancing when not active
8309 */
8310 nohz_balance_exit_idle(rq);
8311
40190a78 8312 set_cpu_active(cpu, false);
741ba80f
PZ
8313
8314 /*
8315 * From this point forward, this CPU will refuse to run any task that
8316 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
8317 * push those tasks away until this gets cleared, see
8318 * sched_cpu_dying().
8319 */
975707f2
PZ
8320 balance_push_set(cpu, true);
8321
b2454caa 8322 /*
975707f2
PZ
8323 * We've cleared cpu_active_mask / set balance_push, wait for all
8324 * preempt-disabled and RCU users of this state to go away such that
8325 * all new such users will observe it.
b2454caa 8326 *
5ba2ffba
PZ
8327 * Specifically, we rely on ttwu to no longer target this CPU, see
8328 * ttwu_queue_cond() and is_cpu_allowed().
8329 *
402de7fc 8330	 * Do the sync before parking smpboot threads to take care of the RCU boost case.
b2454caa 8331 */
309ba859 8332 synchronize_rcu();
40190a78 8333
2f027354 8334 sched_set_rq_offline(rq, cpu);
120455c5 8335
60c27fb5
TH
8336 scx_rq_deactivate(rq);
8337
c5511d03
PZI
8338 /*
8339 * When going down, decrement the number of cores with SMT present.
8340 */
31b164e2 8341 sched_smt_present_dec(cpu);
3c474b32 8342
31b164e2 8343#ifdef CONFIG_SCHED_SMT
3c474b32 8344 sched_core_cpu_deactivate(cpu);
c5511d03
PZI
8345#endif
8346
40190a78
TG
8347 if (!sched_smp_initialized)
8348 return 0;
8349
0fb3978b 8350 sched_update_numa(cpu, false);
53916d5f 8351 cpuset_cpu_inactive(cpu);
40190a78
TG
8352 sched_domains_numa_masks_clear(cpu);
8353 return 0;
135fb3e1
TG
8354}
8355
94baf7a5
TG
8356static void sched_rq_cpu_starting(unsigned int cpu)
8357{
8358 struct rq *rq = cpu_rq(cpu);
8359
8360 rq->calc_load_update = calc_load_update;
94baf7a5
TG
8361 update_max_interval();
8362}
8363
135fb3e1
TG
8364int sched_cpu_starting(unsigned int cpu)
8365{
9edeaea1 8366 sched_core_cpu_starting(cpu);
94baf7a5 8367 sched_rq_cpu_starting(cpu);
d84b3131 8368 sched_tick_start(cpu);
135fb3e1 8369 return 0;
e761b772 8370}
e761b772 8371
f2785ddb 8372#ifdef CONFIG_HOTPLUG_CPU
1cf12e08
TG
8373
8374/*
8375 * Invoked immediately before the stopper thread is invoked to bring the
8376 * CPU down completely. At this point all per CPU kthreads except the
8377 * hotplug thread (current) and the stopper thread (inactive) have been
8378 * either parked or have been unbound from the outgoing CPU. Ensure that
8379 * any of those which might be on the way out are gone.
8380 *
8381 * If after this point a bound task is being woken on this CPU then the
8382 * responsible hotplug callback has failed to do its job.
8383 * sched_cpu_dying() will catch it with the appropriate fireworks.
8384 */
8385int sched_cpu_wait_empty(unsigned int cpu)
8386{
8387 balance_hotplug_wait();
21641bd9 8388 sched_force_init_mm();
1cf12e08
TG
8389 return 0;
8390}
8391
8392/*
8393 * Since this CPU is going 'away' for a while, fold any nr_active delta we
8394 * might have. Called from the CPU stopper task after ensuring that the
8395 * stopper is the last running task on the CPU, so nr_active count is
402de7fc 8396 * stable. We need to take the tear-down thread which is calling this into
1cf12e08
TG
8397 * account, so we hand in adjust = 1 to the load calculation.
8398 *
8399 * Also see the comment "Global load-average calculations".
8400 */
8401static void calc_load_migrate(struct rq *rq)
8402{
8403 long delta = calc_load_fold_active(rq, 1);
8404
8405 if (delta)
8406 atomic_long_add(delta, &calc_load_tasks);
8407}
8408
36c6e17b
VS
8409static void dump_rq_tasks(struct rq *rq, const char *loglvl)
8410{
8411 struct task_struct *g, *p;
8412 int cpu = cpu_of(rq);
8413
5cb9eaa3 8414 lockdep_assert_rq_held(rq);
36c6e17b
VS
8415
8416 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
8417 for_each_process_thread(g, p) {
8418 if (task_cpu(p) != cpu)
8419 continue;
8420
8421 if (!task_on_rq_queued(p))
8422 continue;
8423
8424 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
8425 }
8426}
8427
f2785ddb
TG
8428int sched_cpu_dying(unsigned int cpu)
8429{
8430 struct rq *rq = cpu_rq(cpu);
8a8c69c3 8431 struct rq_flags rf;
f2785ddb
TG
8432
8433 /* Handle pending wakeups and then migrate everything off */
d84b3131 8434 sched_tick_stop(cpu);
8a8c69c3
PZ
8435
8436 rq_lock_irqsave(rq, &rf);
36c6e17b
VS
8437 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
8438 WARN(true, "Dying CPU not properly vacated!");
8439 dump_rq_tasks(rq, KERN_WARNING);
8440 }
8a8c69c3
PZ
8441 rq_unlock_irqrestore(rq, &rf);
8442
f2785ddb
TG
8443 calc_load_migrate(rq);
8444 update_max_interval();
e5ef27d0 8445 hrtick_clear(rq);
3c474b32 8446 sched_core_cpu_dying(cpu);
f2785ddb
TG
8447 return 0;
8448}
8449#endif
8450
1da177e4
LT
8451void __init sched_init_smp(void)
8452{
0fb3978b 8453 sched_init_numa(NUMA_NO_NODE);
cb83b629 8454
6acce3ef
PZ
8455 /*
8456 * There's no userspace yet to cause hotplug operations; hence all the
d1ccc66d 8457 * CPU masks are stable and all blatant races in the below code cannot
b5a4e2bb 8458 * happen.
6acce3ef 8459 */
56209334 8460 sched_domains_mutex_lock();
8d5dc512 8461 sched_init_domains(cpu_active_mask);
56209334 8462 sched_domains_mutex_unlock();
e761b772 8463
5c1e1767 8464 /* Move init over to a non-isolated CPU */
04d4e665 8465 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
5c1e1767 8466 BUG();
15faafc6 8467 current->flags &= ~PF_NO_SETAFFINITY;
19978ca6 8468 sched_init_granularity();
4212823f 8469
0e3900e6 8470 init_sched_rt_class();
1baca4ce 8471 init_sched_dl_class();
1b568f0a 8472
e26fbffd 8473 sched_smp_initialized = true;
1da177e4 8474}
e26fbffd
TG
8475
8476static int __init migration_init(void)
8477{
77a5352b 8478 sched_cpu_starting(smp_processor_id());
e26fbffd 8479 return 0;
1da177e4 8480}
e26fbffd
TG
8481early_initcall(migration_init);
8482
1da177e4
LT
8483#else
8484void __init sched_init_smp(void)
8485{
19978ca6 8486 sched_init_granularity();
1da177e4
LT
8487}
8488#endif /* CONFIG_SMP */
8489
8490int in_sched_functions(unsigned long addr)
8491{
1da177e4
LT
8492 return in_lock_functions(addr) ||
8493 (addr >= (unsigned long)__sched_text_start
8494 && addr < (unsigned long)__sched_text_end);
8495}
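/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * in_sched_functions() is typically consulted by stack walkers, e.g. the
 * arch get_wchan() implementations, to skip over scheduler frames when
 * reporting where a blocked task is waiting. A simplified caller could
 * look like this:
 */
static inline unsigned long example_first_non_sched_frame(const unsigned long *frames,
							   unsigned int nr_frames)
{
	unsigned int i;

	for (i = 0; i < nr_frames; i++) {
		/* Return the first return address outside the scheduler. */
		if (!in_sched_functions(frames[i]))
			return frames[i];
	}
	return 0;
}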
8496
029632fb 8497#ifdef CONFIG_CGROUP_SCHED
27b4b931
LZ
8498/*
8499 * Default task group.
8500 * Every task in the system belongs to this group at bootup.
8501 */
029632fb 8502struct task_group root_task_group;
35cf4e50 8503LIST_HEAD(task_groups);
b0367629
WL
8504
8505/* Cacheline aligned slab cache for task_group */
68279f9c 8506static struct kmem_cache *task_group_cache __ro_after_init;
052f1dc7 8507#endif
6f505b16 8508
1da177e4
LT
8509void __init sched_init(void)
8510{
a1dc0446 8511 unsigned long ptr = 0;
55627e3c 8512 int i;
434d53b0 8513
c3a340f7 8514 /* Make sure the linker didn't screw up */
c3a340f7 8515#ifdef CONFIG_SMP
df268382 8516 BUG_ON(!sched_class_above(&stop_sched_class, &dl_sched_class));
c3a340f7 8517#endif
df268382
TH
8518 BUG_ON(!sched_class_above(&dl_sched_class, &rt_sched_class));
8519 BUG_ON(!sched_class_above(&rt_sched_class, &fair_sched_class));
8520 BUG_ON(!sched_class_above(&fair_sched_class, &idle_sched_class));
f0e1a064
TH
8521#ifdef CONFIG_SCHED_CLASS_EXT
8522 BUG_ON(!sched_class_above(&fair_sched_class, &ext_sched_class));
8523 BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class));
c3a340f7
SRV
8524#endif
8525
5822a454 8526 wait_bit_init();
9dcb8b68 8527
434d53b0 8528#ifdef CONFIG_FAIR_GROUP_SCHED
a1dc0446 8529 ptr += 2 * nr_cpu_ids * sizeof(void **);
434d53b0
MT
8530#endif
8531#ifdef CONFIG_RT_GROUP_SCHED
a1dc0446 8532 ptr += 2 * nr_cpu_ids * sizeof(void **);
434d53b0 8533#endif
a1dc0446
QC
8534 if (ptr) {
8535 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
434d53b0
MT
8536
8537#ifdef CONFIG_FAIR_GROUP_SCHED
07e06b01 8538 root_task_group.se = (struct sched_entity **)ptr;
434d53b0
MT
8539 ptr += nr_cpu_ids * sizeof(void **);
8540
07e06b01 8541 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
434d53b0 8542 ptr += nr_cpu_ids * sizeof(void **);
eff766a6 8543
b1d1779e 8544 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
c98c1827 8545 init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
6d6bc0ad 8546#endif /* CONFIG_FAIR_GROUP_SCHED */
81951366 8547#ifdef CONFIG_EXT_GROUP_SCHED
33796b91 8548 scx_tg_init(&root_task_group);
81951366 8549#endif /* CONFIG_EXT_GROUP_SCHED */
434d53b0 8550#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 8551 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
434d53b0
MT
8552 ptr += nr_cpu_ids * sizeof(void **);
8553
07e06b01 8554 root_task_group.rt_rq = (struct rt_rq **)ptr;
eff766a6
PZ
8555 ptr += nr_cpu_ids * sizeof(void **);
8556
6d6bc0ad 8557#endif /* CONFIG_RT_GROUP_SCHED */
b74e6278 8558 }
dd41f596 8559
57d885fe
GH
8560#ifdef CONFIG_SMP
8561 init_defrootdomain();
8562#endif
8563
d0b27fa7 8564#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 8565 init_rt_bandwidth(&root_task_group.rt_bandwidth,
d0b27fa7 8566 global_rt_period(), global_rt_runtime());
6d6bc0ad 8567#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 8568
7c941438 8569#ifdef CONFIG_CGROUP_SCHED
b0367629
WL
8570 task_group_cache = KMEM_CACHE(task_group, 0);
8571
07e06b01
YZ
8572 list_add(&root_task_group.list, &task_groups);
8573 INIT_LIST_HEAD(&root_task_group.children);
f4d6f6c2 8574 INIT_LIST_HEAD(&root_task_group.siblings);
5091faa4 8575 autogroup_init(&init_task);
7c941438 8576#endif /* CONFIG_CGROUP_SCHED */
6f505b16 8577
0a945022 8578 for_each_possible_cpu(i) {
70b97a7f 8579 struct rq *rq;
1da177e4
LT
8580
8581 rq = cpu_rq(i);
5cb9eaa3 8582 raw_spin_lock_init(&rq->__lock);
7897986b 8583 rq->nr_running = 0;
dce48a84
TG
8584 rq->calc_load_active = 0;
8585 rq->calc_load_update = jiffies + LOAD_FREQ;
acb5a9ba 8586 init_cfs_rq(&rq->cfs);
07c54f7a
AV
8587 init_rt_rq(&rq->rt);
8588 init_dl_rq(&rq->dl);
dd41f596 8589#ifdef CONFIG_FAIR_GROUP_SCHED
6f505b16 8590 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
9c2791f9 8591 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
354d60c2 8592 /*
d1ccc66d 8593 * How much CPU bandwidth does root_task_group get?
354d60c2 8594 *
402de7fc 8595 * In case of task-groups formed through the cgroup filesystem, it
d1ccc66d
IM
8596 * gets 100% of the CPU resources in the system. This overall
8597 * system CPU resource is divided among the tasks of
07e06b01 8598 * root_task_group and its child task-groups in a fair manner,
354d60c2
DG
8599 * based on each entity's (task or task-group's) weight
8600 * (se->load.weight).
8601 *
07e06b01 8602 * In other words, if root_task_group has 10 tasks of weight
354d60c2 8603 * 1024 and two child groups A0 and A1 (of weight 1024 each),
d1ccc66d 8604 * then A0's share of the CPU resource is:
354d60c2 8605 *
0d905bca 8606 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
354d60c2 8607 *
07e06b01
YZ
8608 * We achieve this by letting root_task_group's tasks sit
8609 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
354d60c2 8610 */
07e06b01 8611 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
354d60c2
DG
8612#endif /* CONFIG_FAIR_GROUP_SCHED */
8613
052f1dc7 8614#ifdef CONFIG_RT_GROUP_SCHED
5f6bd380
PZ
8615 /*
8616 * This is required for init cpu because rt.c:__enable_runtime()
8617 * starts working after scheduler_running, which is not the case
8618 * yet.
8619 */
8620 rq->rt.rt_runtime = global_rt_runtime();
07e06b01 8621 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
dd41f596 8622#endif
1da177e4 8623#ifdef CONFIG_SMP
41c7ce9a 8624 rq->sd = NULL;
57d885fe 8625 rq->rd = NULL;
7bc26384 8626 rq->cpu_capacity = SCHED_CAPACITY_SCALE;
b5c44773 8627 rq->balance_callback = &balance_push_callback;
1da177e4 8628 rq->active_balance = 0;
dd41f596 8629 rq->next_balance = jiffies;
1da177e4 8630 rq->push_cpu = 0;
0a2966b4 8631 rq->cpu = i;
1f11eb6a 8632 rq->online = 0;
eae0c9df
MG
8633 rq->idle_stamp = 0;
8634 rq->avg_idle = 2*sysctl_sched_migration_cost;
9bd721c5 8635 rq->max_idle_balance_cost = sysctl_sched_migration_cost;
367456c7
PZ
8636
8637 INIT_LIST_HEAD(&rq->cfs_tasks);
8638
dc938520 8639 rq_attach_root(rq, &def_root_domain);
3451d024 8640#ifdef CONFIG_NO_HZ_COMMON
e022e0d3 8641 rq->last_blocked_load_update_tick = jiffies;
a22e47a4 8642 atomic_set(&rq->nohz_flags, 0);
90b5363a 8643
545b8c8d 8644 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
83cd4fe2 8645#endif
f2469a1f
TG
8646#ifdef CONFIG_HOTPLUG_CPU
8647 rcuwait_init(&rq->hotplug_wait);
83cd4fe2 8648#endif
9fd81dd5 8649#endif /* CONFIG_SMP */
77a021be 8650 hrtick_rq_init(rq);
1da177e4 8651 atomic_set(&rq->nr_iowait, 0);
557a6bfc 8652 fair_server_init(rq);
9edeaea1
PZ
8653
8654#ifdef CONFIG_SCHED_CORE
3c474b32 8655 rq->core = rq;
539f6512 8656 rq->core_pick = NULL;
bd9bbc96 8657 rq->core_dl_server = NULL;
9edeaea1 8658 rq->core_enabled = 0;
539f6512 8659 rq->core_tree = RB_ROOT;
4feee7d1
JD
8660 rq->core_forceidle_count = 0;
8661 rq->core_forceidle_occupation = 0;
8662 rq->core_forceidle_start = 0;
539f6512
PZ
8663
8664 rq->core_cookie = 0UL;
9edeaea1 8665#endif
da019032 8666 zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
1da177e4
LT
8667 }
8668
b1e82065 8669 set_load_weight(&init_task, false);
857b158d 8670 init_task.se.slice = sysctl_sched_base_slice;
b50f60ce 8671
1da177e4
LT
8672 /*
8673 * The boot idle thread does lazy MMU switching as well:
8674 */
aa464ba9 8675 mmgrab_lazy_tlb(&init_mm);
1da177e4
LT
8676 enter_lazy_tlb(&init_mm, current);
8677
40966e31
EB
8678 /*
8679 * The idle task doesn't need the kthread struct to function, but it
8680 * is dressed up as a per-CPU kthread and thus needs to play the part
8681 * if we want to avoid special-casing it in code that deals with per-CPU
8682 * kthreads.
8683 */
dd621ee0 8684 WARN_ON(!set_kthread_struct(current));
40966e31 8685
1da177e4
LT
8686 /*
8687 * Make us the idle thread. Technically, schedule() should not be
8688 * called from this thread; however, somewhere below it might be,
8689 * but because we are the idle thread, we just pick up running again
8690 * when this runqueue becomes "idle".
8691 */
b23decf8 8692 __sched_fork(0, current);
1da177e4 8693 init_idle(current, smp_processor_id());
dce48a84
TG
8694
8695 calc_load_update = jiffies + LOAD_FREQ;
8696
bf4d83f6 8697#ifdef CONFIG_SMP
29d5e047 8698 idle_thread_set_boot_cpu();
b5c44773 8699 balance_push_set(smp_processor_id(), false);
029632fb
PZ
8700#endif
8701 init_sched_fair_class();
a7a9fc54 8702 init_sched_ext_class();
6a7b3dc3 8703
eb414681
JW
8704 psi_init();
8705
69842cba
PB
8706 init_uclamp();
8707
c597bfdd
FW
8708 preempt_dynamic_init();
8709
6892b75e 8710 scheduler_running = 1;
1da177e4
LT
8711}
8712
d902db1e 8713#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
e4aafea2 8714
42a38756 8715void __might_sleep(const char *file, int line)
1da177e4 8716{
d6c23bb3 8717 unsigned int state = get_current_state();
8eb23b9f
PZ
8718 /*
8719 * Blocking primitives will set (and therefore destroy) current->state,
8720 * and since we will exit with TASK_RUNNING, make sure we enter with it;
8721 * otherwise we will destroy that state.
8722 */
d6c23bb3 8723 WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
8eb23b9f 8724 "do not call blocking ops when !TASK_RUNNING; "
d6c23bb3 8725 "state=%x set at [<%p>] %pS\n", state,
8eb23b9f 8726 (void *)current->task_state_change,
00845eb9 8727 (void *)current->task_state_change);
8eb23b9f 8728
42a38756 8729 __might_resched(file, line, 0);
3427445a
PZ
8730}
8731EXPORT_SYMBOL(__might_sleep);
8732
8d713b69
TG
8733static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
8734{
8735 if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
8736 return;
8737
8738 if (preempt_count() == preempt_offset)
8739 return;
8740
8741 pr_err("Preemption disabled at:");
8742 print_ip_sym(KERN_ERR, ip);
8743}
8744
50e081b9
TG
8745static inline bool resched_offsets_ok(unsigned int offsets)
8746{
8747 unsigned int nested = preempt_count();
8748
8749 nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
8750
8751 return nested == offsets;
8752}
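/*
 * Illustrative sketch (an assumption about caller-side usage, not a kernel
 * API): the 'offsets' argument packs the expected preempt count in the low
 * bits (MIGHT_RESCHED_PREEMPT_MASK) and the expected RCU nesting depth above
 * MIGHT_RESCHED_RCU_SHIFT, which is how resched_offsets_ok() above and the
 * error reporting below decode it. A caller could build such a value like so:
 */
static inline unsigned int example_resched_offsets(unsigned int preempt_offset,
						   unsigned int rcu_depth)
{
	return (preempt_offset & MIGHT_RESCHED_PREEMPT_MASK) |
	       (rcu_depth << MIGHT_RESCHED_RCU_SHIFT);
}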
8753
8754void __might_resched(const char *file, int line, unsigned int offsets)
1da177e4 8755{
d1ccc66d
IM
8756 /* Ratelimiting timestamp: */
8757 static unsigned long prev_jiffy;
8758
d1c6d149 8759 unsigned long preempt_disable_ip;
1da177e4 8760
d1ccc66d
IM
8761 /* WARN_ON_ONCE() by default, no rate limit required: */
8762 rcu_sleep_check();
8763
50e081b9 8764 if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
312364f3 8765 !is_idle_task(current) && !current->non_block_count) ||
1c3c5eab
TG
8766 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
8767 oops_in_progress)
aef745fc 8768 return;
1c3c5eab 8769
aef745fc
IM
8770 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8771 return;
8772 prev_jiffy = jiffies;
8773
d1ccc66d 8774 /* Save this before calling printk(), since that will clobber it: */
d1c6d149
VN
8775 preempt_disable_ip = get_preempt_disable_ip(current);
8776
a45ed302
TG
8777 pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
8778 file, line);
8779 pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
8780 in_atomic(), irqs_disabled(), current->non_block_count,
8781 current->pid, current->comm);
8d713b69 8782 pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
50e081b9 8783 offsets & MIGHT_RESCHED_PREEMPT_MASK);
8d713b69
TG
8784
8785 if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
50e081b9
TG
8786 pr_err("RCU nest depth: %d, expected: %u\n",
8787 rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
8d713b69 8788 }
aef745fc 8789
a8b686b3 8790 if (task_stack_end_corrupted(current))
a45ed302 8791 pr_emerg("Thread overran stack, or stack corrupted\n");
a8b686b3 8792
aef745fc
IM
8793 debug_show_held_locks(current);
8794 if (irqs_disabled())
8795 print_irqtrace_events(current);
8d713b69 8796
50e081b9
TG
8797 print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
8798 preempt_disable_ip);
8d713b69 8799
aef745fc 8800 dump_stack();
f0b22e39 8801 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
1da177e4 8802}
874f670e 8803EXPORT_SYMBOL(__might_resched);
568f1967
PZ
8804
8805void __cant_sleep(const char *file, int line, int preempt_offset)
8806{
8807 static unsigned long prev_jiffy;
8808
8809 if (irqs_disabled())
8810 return;
8811
8812 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8813 return;
8814
8815 if (preempt_count() > preempt_offset)
8816 return;
8817
8818 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8819 return;
8820 prev_jiffy = jiffies;
8821
8822 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
8823 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8824 in_atomic(), irqs_disabled(),
8825 current->pid, current->comm);
8826
8827 debug_show_held_locks(current);
8828 dump_stack();
8829 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8830}
8831EXPORT_SYMBOL_GPL(__cant_sleep);
74d862b6
TG
8832
8833#ifdef CONFIG_SMP
8834void __cant_migrate(const char *file, int line)
8835{
8836 static unsigned long prev_jiffy;
8837
8838 if (irqs_disabled())
8839 return;
8840
8841 if (is_migration_disabled(current))
8842 return;
8843
8844 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8845 return;
8846
8847 if (preempt_count() > 0)
8848 return;
8849
8850 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8851 return;
8852 prev_jiffy = jiffies;
8853
8854 pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
8855 pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
8856 in_atomic(), irqs_disabled(), is_migration_disabled(current),
8857 current->pid, current->comm);
8858
8859 debug_show_held_locks(current);
8860 dump_stack();
8861 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8862}
8863EXPORT_SYMBOL_GPL(__cant_migrate);
8864#endif
1da177e4
LT
8865#endif
8866
8867#ifdef CONFIG_MAGIC_SYSRQ
dbc7f069 8868void normalize_rt_tasks(void)
3a5e4dc1 8869{
dbc7f069 8870 struct task_struct *g, *p;
d50dde5a
DF
8871 struct sched_attr attr = {
8872 .sched_policy = SCHED_NORMAL,
8873 };
1da177e4 8874
3472eaa1 8875 read_lock(&tasklist_lock);
5d07f420 8876 for_each_process_thread(g, p) {
178be793
IM
8877 /*
8878 * Only normalize user tasks:
8879 */
3472eaa1 8880 if (p->flags & PF_KTHREAD)
178be793
IM
8881 continue;
8882
4fa8d299 8883 p->se.exec_start = 0;
ceeadb83
YS
8884 schedstat_set(p->stats.wait_start, 0);
8885 schedstat_set(p->stats.sleep_start, 0);
8886 schedstat_set(p->stats.block_start, 0);
dd41f596 8887
ae04f69d 8888 if (!rt_or_dl_task(p)) {
dd41f596
IM
8889 /*
8890 * Renice negative nice level userspace
8891 * tasks back to 0:
8892 */
3472eaa1 8893 if (task_nice(p) < 0)
dd41f596 8894 set_user_nice(p, 0);
1da177e4 8895 continue;
dd41f596 8896 }
1da177e4 8897
dbc7f069 8898 __sched_setscheduler(p, &attr, false, false);
5d07f420 8899 }
3472eaa1 8900 read_unlock(&tasklist_lock);
1da177e4
LT
8901}
8902
8903#endif /* CONFIG_MAGIC_SYSRQ */
1df5c10a 8904
cf8e8658 8905#if defined(CONFIG_KGDB_KDB)
1df5c10a 8906/*
402de7fc 8907 * These functions are only useful for KDB.
1df5c10a
LT
8908 *
8909 * They can only be called when the whole system has been
8910 * stopped - every CPU needs to be quiescent, and no scheduling
8911 * activity can take place. Using them for anything else would
8912 * be a serious bug, and as a result, they aren't even visible
8913 * under any other configuration.
8914 */
8915
8916/**
d1ccc66d 8917 * curr_task - return the current task for a given CPU.
1df5c10a
LT
8918 * @cpu: the processor in question.
8919 *
8920 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
e69f6186
YB
8921 *
8922 * Return: The current task for @cpu.
1df5c10a 8923 */
36c8b586 8924struct task_struct *curr_task(int cpu)
1df5c10a
LT
8925{
8926 return cpu_curr(cpu);
8927}
8928
cf8e8658 8929#endif /* defined(CONFIG_KGDB_KDB) */
29f59db3 8930
7c941438 8931#ifdef CONFIG_CGROUP_SCHED
029632fb
PZ
8932/* task_group_lock serializes the addition/removal of task groups */
8933static DEFINE_SPINLOCK(task_group_lock);
8934
2480c093
PB
8935static inline void alloc_uclamp_sched_group(struct task_group *tg,
8936 struct task_group *parent)
8937{
8938#ifdef CONFIG_UCLAMP_TASK_GROUP
0413d7f3 8939 enum uclamp_id clamp_id;
2480c093
PB
8940
8941 for_each_clamp_id(clamp_id) {
8942 uclamp_se_set(&tg->uclamp_req[clamp_id],
8943 uclamp_none(clamp_id), false);
0b60ba2d 8944 tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
2480c093
PB
8945 }
8946#endif
8947}
8948
2f5177f0 8949static void sched_free_group(struct task_group *tg)
bccbe08a
PZ
8950{
8951 free_fair_sched_group(tg);
8952 free_rt_sched_group(tg);
e9aa1dd1 8953 autogroup_free(tg);
b0367629 8954 kmem_cache_free(task_group_cache, tg);
bccbe08a
PZ
8955}
8956
b027789e
MK
8957static void sched_free_group_rcu(struct rcu_head *rcu)
8958{
8959 sched_free_group(container_of(rcu, struct task_group, rcu));
8960}
8961
8962static void sched_unregister_group(struct task_group *tg)
8963{
8964 unregister_fair_sched_group(tg);
8965 unregister_rt_sched_group(tg);
8966 /*
8967 * We have to wait for yet another RCU grace period to expire, as
8968 * print_cfs_stats() might run concurrently.
8969 */
8970 call_rcu(&tg->rcu, sched_free_group_rcu);
8971}
8972
bccbe08a 8973/* allocate runqueue etc for a new task group */
ec7dc8ac 8974struct task_group *sched_create_group(struct task_group *parent)
bccbe08a
PZ
8975{
8976 struct task_group *tg;
bccbe08a 8977
b0367629 8978 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
bccbe08a
PZ
8979 if (!tg)
8980 return ERR_PTR(-ENOMEM);
8981
ec7dc8ac 8982 if (!alloc_fair_sched_group(tg, parent))
bccbe08a
PZ
8983 goto err;
8984
ec7dc8ac 8985 if (!alloc_rt_sched_group(tg, parent))
bccbe08a
PZ
8986 goto err;
8987
33796b91 8988 scx_tg_init(tg);
2480c093
PB
8989 alloc_uclamp_sched_group(tg, parent);
8990
ace783b9
LZ
8991 return tg;
8992
8993err:
2f5177f0 8994 sched_free_group(tg);
ace783b9
LZ
8995 return ERR_PTR(-ENOMEM);
8996}
8997
8998void sched_online_group(struct task_group *tg, struct task_group *parent)
8999{
9000 unsigned long flags;
9001
8ed36996 9002 spin_lock_irqsave(&task_group_lock, flags);
61d3164f 9003 list_add_tail_rcu(&tg->list, &task_groups);
f473aa5e 9004
d1ccc66d
IM
9005 /* Root should already exist: */
9006 WARN_ON(!parent);
f473aa5e
PZ
9007
9008 tg->parent = parent;
f473aa5e 9009 INIT_LIST_HEAD(&tg->children);
09f2724a 9010 list_add_rcu(&tg->siblings, &parent->children);
8ed36996 9011 spin_unlock_irqrestore(&task_group_lock, flags);
8663e24d
PZ
9012
9013 online_fair_sched_group(tg);
29f59db3
SV
9014}
9015
402de7fc 9016/* RCU callback to free various structures associated with a task group */
b027789e 9017static void sched_unregister_group_rcu(struct rcu_head *rhp)
29f59db3 9018{
d1ccc66d 9019 /* Now it should be safe to free those cfs_rqs: */
b027789e 9020 sched_unregister_group(container_of(rhp, struct task_group, rcu));
29f59db3
SV
9021}
9022
4cf86d77 9023void sched_destroy_group(struct task_group *tg)
ace783b9 9024{
d1ccc66d 9025 /* Wait for possible concurrent references to cfs_rqs complete: */
b027789e 9026 call_rcu(&tg->rcu, sched_unregister_group_rcu);
ace783b9
LZ
9027}
9028
b027789e 9029void sched_release_group(struct task_group *tg)
29f59db3 9030{
8ed36996 9031 unsigned long flags;
29f59db3 9032
b027789e
MK
9033 /*
9034 * Unlink first, to avoid walk_tg_tree_from() finding us (via
9035 * sched_cfs_period_timer()).
9036 *
9037 * For this to be effective, we have to wait for all pending users of
9038 * this task group to leave their RCU critical section to ensure no new
9039 * user will see our dying task group any more. Specifically ensure
9040 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
9041 *
9042 * We therefore defer calling unregister_fair_sched_group() to
9043 * sched_unregister_group(), which is guaranteed to get called only after the
9044 * current RCU grace period has expired.
9045 */
3d4b47b4 9046 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 9047 list_del_rcu(&tg->list);
f473aa5e 9048 list_del_rcu(&tg->siblings);
8ed36996 9049 spin_unlock_irqrestore(&task_group_lock, flags);
29f59db3
SV
9050}
9051
76f970ce 9052static void sched_change_group(struct task_struct *tsk)
29f59db3 9053{
8323f26c 9054 struct task_group *tg;
29f59db3 9055
f7b8a47d
KT
9056 /*
9057 * All callers are synchronized by task_rq_lock(); we do not use RCU
9058 * which is pointless here. Thus, we pass "true" to task_css_check()
9059 * to prevent lockdep warnings.
9060 */
9061 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
8323f26c
PZ
9062 struct task_group, css);
9063 tg = autogroup_task_group(tsk, tg);
76f970ce 9064 tsk->sched_task_group = tg;
8323f26c 9065
810b3817 9066#ifdef CONFIG_FAIR_GROUP_SCHED
ea86cb4b 9067 if (tsk->sched_class->task_change_group)
39c42611 9068 tsk->sched_class->task_change_group(tsk);
b2b5ce02 9069 else
810b3817 9070#endif
b2b5ce02 9071 set_task_rq(tsk, task_cpu(tsk));
ea86cb4b
VG
9072}
9073
9074/*
9075 * Change task's runqueue when it moves between groups.
9076 *
9077 * The caller of this function should have put the task in its new group by
9078 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
9079 * its new group.
9080 */
d6f3e7d5 9081void sched_move_task(struct task_struct *tsk, bool for_autogroup)
ea86cb4b 9082{
7a57f32a
PZ
9083 int queued, running, queue_flags =
9084 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
ea86cb4b
VG
9085 struct rq *rq;
9086
fa614b4f
PZ
9087 CLASS(task_rq_lock, rq_guard)(tsk);
9088 rq = rq_guard.rq;
9089
1b1d6225 9090 update_rq_clock(rq);
ea86cb4b 9091
af0c8b2b 9092 running = task_current_donor(rq, tsk);
ea86cb4b
VG
9093 queued = task_on_rq_queued(tsk);
9094
9095 if (queued)
7a57f32a 9096 dequeue_task(rq, tsk, queue_flags);
bb3bac2c 9097 if (running)
ea86cb4b
VG
9098 put_prev_task(rq, tsk);
9099
76f970ce 9100 sched_change_group(tsk);
d6f3e7d5
TH
9101 if (!for_autogroup)
9102 scx_cgroup_move_task(tsk);
810b3817 9103
da0c1e65 9104 if (queued)
7a57f32a 9105 enqueue_task(rq, tsk, queue_flags);
2a4b03ff 9106 if (running) {
03b7fad1 9107 set_next_task(rq, tsk);
2a4b03ff
VG
9108 /*
9109 * After changing group, the running task may have joined a
9110 * throttled one but it's still the running task. Trigger a
9111 * resched to make sure that task can still run.
9112 */
9113 resched_curr(rq);
9114 }
29f59db3 9115}
68318b8e 9116
eb95419b
TH
9117static struct cgroup_subsys_state *
9118cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
68318b8e 9119{
eb95419b
TH
9120 struct task_group *parent = css_tg(parent_css);
9121 struct task_group *tg;
68318b8e 9122
eb95419b 9123 if (!parent) {
68318b8e 9124 /* This is early initialization for the top cgroup */
07e06b01 9125 return &root_task_group.css;
68318b8e
SV
9126 }
9127
ec7dc8ac 9128 tg = sched_create_group(parent);
68318b8e
SV
9129 if (IS_ERR(tg))
9130 return ERR_PTR(-ENOMEM);
9131
68318b8e
SV
9132 return &tg->css;
9133}
9134
96b77745
KK
9135/* Expose task group only after completing cgroup initialization */
9136static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
9137{
9138 struct task_group *tg = css_tg(css);
9139 struct task_group *parent = css_tg(css->parent);
81951366
TH
9140 int ret;
9141
9142 ret = scx_tg_online(tg);
9143 if (ret)
9144 return ret;
96b77745
KK
9145
9146 if (parent)
9147 sched_online_group(tg, parent);
7226017a
QY
9148
9149#ifdef CONFIG_UCLAMP_TASK_GROUP
9150 /* Propagate the effective uclamp value for the new group */
0e34600a
PZ
9151 guard(mutex)(&uclamp_mutex);
9152 guard(rcu)();
7226017a
QY
9153 cpu_util_update_eff(css);
9154#endif
9155
96b77745
KK
9156 return 0;
9157}
9158
81951366
TH
9159static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
9160{
9161 struct task_group *tg = css_tg(css);
9162
9163 scx_tg_offline(tg);
9164}
9165
2f5177f0 9166static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
ace783b9 9167{
eb95419b 9168 struct task_group *tg = css_tg(css);
ace783b9 9169
b027789e 9170 sched_release_group(tg);
ace783b9
LZ
9171}
9172
eb95419b 9173static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
68318b8e 9174{
eb95419b 9175 struct task_group *tg = css_tg(css);
68318b8e 9176
2f5177f0
PZ
9177 /*
9178 * Relies on the RCU grace period between css_released() and this.
9179 */
b027789e 9180 sched_unregister_group(tg);
ace783b9
LZ
9181}
9182
1f7dd3e5 9183static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
68318b8e 9184{
81951366 9185#ifdef CONFIG_RT_GROUP_SCHED
bb9d97b6 9186 struct task_struct *task;
1f7dd3e5 9187 struct cgroup_subsys_state *css;
bb9d97b6 9188
277e0909
MK
9189 if (!rt_group_sched_enabled())
9190 goto scx_check;
9191
1f7dd3e5 9192 cgroup_taskset_for_each(task, css, tset) {
eb95419b 9193 if (!sched_rt_can_attach(css_tg(css), task))
bb9d97b6 9194 return -EINVAL;
bb9d97b6 9195 }
277e0909
MK
9196scx_check:
9197#endif /* CONFIG_RT_GROUP_SCHED */
81951366
TH
9198 return scx_cgroup_can_attach(tset);
9199}
68318b8e 9200
1f7dd3e5 9201static void cpu_cgroup_attach(struct cgroup_taskset *tset)
68318b8e 9202{
bb9d97b6 9203 struct task_struct *task;
1f7dd3e5 9204 struct cgroup_subsys_state *css;
bb9d97b6 9205
1f7dd3e5 9206 cgroup_taskset_for_each(task, css, tset)
d6f3e7d5 9207 sched_move_task(task, false);
81951366
TH
9208
9209 scx_cgroup_finish_attach();
9210}
9211
9212static void cpu_cgroup_cancel_attach(struct cgroup_taskset *tset)
9213{
9214 scx_cgroup_cancel_attach(tset);
68318b8e
SV
9215}
9216
2480c093 9217#ifdef CONFIG_UCLAMP_TASK_GROUP
0b60ba2d
PB
9218static void cpu_util_update_eff(struct cgroup_subsys_state *css)
9219{
9220 struct cgroup_subsys_state *top_css = css;
9221 struct uclamp_se *uc_parent = NULL;
9222 struct uclamp_se *uc_se = NULL;
9223 unsigned int eff[UCLAMP_CNT];
0413d7f3 9224 enum uclamp_id clamp_id;
0b60ba2d
PB
9225 unsigned int clamps;
9226
93b73858 9227 lockdep_assert_held(&uclamp_mutex);
f7d2728c 9228 WARN_ON_ONCE(!rcu_read_lock_held());
93b73858 9229
0b60ba2d
PB
9230 css_for_each_descendant_pre(css, top_css) {
9231 uc_parent = css_tg(css)->parent
9232 ? css_tg(css)->parent->uclamp : NULL;
9233
9234 for_each_clamp_id(clamp_id) {
9235 /* Assume effective clamps match requested clamps */
9236 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
9237 /* Cap effective clamps with parent's effective clamps */
9238 if (uc_parent &&
9239 eff[clamp_id] > uc_parent[clamp_id].value) {
9240 eff[clamp_id] = uc_parent[clamp_id].value;
9241 }
9242 }
9243 /* Ensure protection is always capped by limit */
9244 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
9245
9246 /* Propagate most restrictive effective clamps */
9247 clamps = 0x0;
9248 uc_se = css_tg(css)->uclamp;
9249 for_each_clamp_id(clamp_id) {
9250 if (eff[clamp_id] == uc_se[clamp_id].value)
9251 continue;
9252 uc_se[clamp_id].value = eff[clamp_id];
9253 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
9254 clamps |= (0x1 << clamp_id);
9255 }
babbe170 9256 if (!clamps) {
0b60ba2d 9257 css = css_rightmost_descendant(css);
babbe170
PB
9258 continue;
9259 }
9260
9261 /* Immediately update descendants RUNNABLE tasks */
0213b708 9262 uclamp_update_active_tasks(css);
0b60ba2d
PB
9263 }
9264}
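/*
 * Worked example (illustrative only): a child group requesting
 * uclamp.max = 80% under a parent whose effective uclamp.max is 50% ends up
 * with an effective max of 50%, since each effective clamp is the requested
 * value capped by the parent's effective value:
 *
 *	eff[clamp_id] = min(requested[clamp_id], parent_eff[clamp_id])
 *
 * and UCLAMP_MIN is further capped by the resulting UCLAMP_MAX.
 */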
2480c093
PB
9265
9266/*
9267 * Build the integer 10^N for a given exponent N by casting to integer the literal "1eN"
9268 * C expression. Since there is no way to convert a macro argument (N) into a
9269 * character constant, use two levels of macros.
9270 */
9271#define _POW10(exp) ((unsigned int)1e##exp)
9272#define POW10(exp) _POW10(exp)
9273
9274struct uclamp_request {
9275#define UCLAMP_PERCENT_SHIFT 2
9276#define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT))
9277 s64 percent;
9278 u64 util;
9279 int ret;
9280};
9281
9282static inline struct uclamp_request
9283capacity_from_percent(char *buf)
9284{
9285 struct uclamp_request req = {
9286 .percent = UCLAMP_PERCENT_SCALE,
9287 .util = SCHED_CAPACITY_SCALE,
9288 .ret = 0,
9289 };
9290
9291 buf = strim(buf);
9292 if (strcmp(buf, "max")) {
9293 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
9294 &req.percent);
9295 if (req.ret)
9296 return req;
b562d140 9297 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
2480c093
PB
9298 req.ret = -ERANGE;
9299 return req;
9300 }
9301
9302 req.util = req.percent << SCHED_CAPACITY_SHIFT;
9303 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
9304 }
9305
9306 return req;
9307}
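/*
 * Worked example (illustrative only): writing "50" is parsed with
 * UCLAMP_PERCENT_SHIFT (= 2) fractional digits, so req.percent = 5000, and
 * the resulting utilization clamp is
 *
 *	DIV_ROUND_CLOSEST_ULL(5000 << SCHED_CAPACITY_SHIFT, UCLAMP_PERCENT_SCALE)
 *	 = DIV_ROUND_CLOSEST_ULL(5120000, 10000) = 512
 *
 * i.e. half of SCHED_CAPACITY_SCALE, as expected for 50%. Writing "max"
 * keeps the default of SCHED_CAPACITY_SCALE.
 */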
9308
9309static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
9310 size_t nbytes, loff_t off,
9311 enum uclamp_id clamp_id)
9312{
9313 struct uclamp_request req;
9314 struct task_group *tg;
9315
9316 req = capacity_from_percent(buf);
9317 if (req.ret)
9318 return req.ret;
9319
4bc45824 9320 sched_uclamp_enable();
46609ce2 9321
0e34600a
PZ
9322 guard(mutex)(&uclamp_mutex);
9323 guard(rcu)();
2480c093
PB
9324
9325 tg = css_tg(of_css(of));
9326 if (tg->uclamp_req[clamp_id].value != req.util)
9327 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
9328
9329 /*
9330 * Because the conversion rounding is not recoverable, we keep track of
9331 * the exact requested value.
9332 */
9333 tg->uclamp_pct[clamp_id] = req.percent;
9334
0b60ba2d
PB
9335 /* Update effective clamps to track the most restrictive value */
9336 cpu_util_update_eff(of_css(of));
9337
2480c093
PB
9338 return nbytes;
9339}
9340
9341static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
9342 char *buf, size_t nbytes,
9343 loff_t off)
9344{
9345 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
9346}
9347
9348static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
9349 char *buf, size_t nbytes,
9350 loff_t off)
9351{
9352 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
9353}
9354
9355static inline void cpu_uclamp_print(struct seq_file *sf,
9356 enum uclamp_id clamp_id)
9357{
9358 struct task_group *tg;
9359 u64 util_clamp;
9360 u64 percent;
9361 u32 rem;
9362
0e34600a
PZ
9363 scoped_guard (rcu) {
9364 tg = css_tg(seq_css(sf));
9365 util_clamp = tg->uclamp_req[clamp_id].value;
9366 }
2480c093
PB
9367
9368 if (util_clamp == SCHED_CAPACITY_SCALE) {
9369 seq_puts(sf, "max\n");
9370 return;
9371 }
9372
9373 percent = tg->uclamp_pct[clamp_id];
9374 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
9375 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
9376}
9377
9378static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
9379{
9380 cpu_uclamp_print(sf, UCLAMP_MIN);
9381 return 0;
9382}
9383
9384static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
9385{
9386 cpu_uclamp_print(sf, UCLAMP_MAX);
9387 return 0;
9388}
9389#endif /* CONFIG_UCLAMP_TASK_GROUP */
9390
e179e80c 9391#ifdef CONFIG_GROUP_SCHED_WEIGHT
41082c1d
TH
9392static unsigned long tg_weight(struct task_group *tg)
9393{
052f1dc7 9394#ifdef CONFIG_FAIR_GROUP_SCHED
41082c1d 9395 return scale_load_down(tg->shares);
81951366
TH
9396#else
9397 return sched_weight_from_cgroup(tg->scx_weight);
9398#endif
41082c1d
TH
9399}
9400
182446d0
TH
9401static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
9402 struct cftype *cftype, u64 shareval)
68318b8e 9403{
81951366
TH
9404 int ret;
9405
5b61d50a
KK
9406 if (shareval > scale_load_down(ULONG_MAX))
9407 shareval = MAX_SHARES;
81951366
TH
9408 ret = sched_group_set_shares(css_tg(css), scale_load(shareval));
9409 if (!ret)
9410 scx_group_set_weight(css_tg(css),
9411 sched_weight_to_cgroup(shareval));
9412 return ret;
68318b8e
SV
9413}
9414
182446d0
TH
9415static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
9416 struct cftype *cft)
68318b8e 9417{
41082c1d 9418 return tg_weight(css_tg(css));
68318b8e 9419}
e179e80c 9420#endif /* CONFIG_GROUP_SCHED_WEIGHT */
ab84d31e
PT
9421
9422#ifdef CONFIG_CFS_BANDWIDTH
a790de99
PT
9423static DEFINE_MUTEX(cfs_constraints_mutex);
9424
ab84d31e 9425const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
b1546edc 9426static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
d505b8af
HC
9427/* More than 203 days if BW_SHIFT equals 20. */
9428static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;
ab84d31e 9429
a790de99
PT
9430static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
9431
f4183717
HC
9432static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
9433 u64 burst)
ab84d31e 9434{
56f570e5 9435 int i, ret = 0, runtime_enabled, runtime_was_enabled;
029632fb 9436 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
ab84d31e
PT
9437
9438 if (tg == &root_task_group)
9439 return -EINVAL;
9440
9441 /*
9442 * Ensure we have some amount of bandwidth every period. This is
9443 * to prevent reaching a state of large arrears when throttled via
9444 * entity_tick() resulting in prolonged exit starvation.
9445 */
9446 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
9447 return -EINVAL;
9448
9449 /*
3b03706f 9450 * Likewise, bound things on the other side by preventing insane quota
ab84d31e
PT
9451 * periods. This also allows us to normalize in computing quota
9452 * feasibility.
9453 */
9454 if (period > max_cfs_quota_period)
9455 return -EINVAL;
9456
d505b8af
HC
9457 /*
9458 * Bound quota to defend it against overflow during the bandwidth shift.
9459 */
9460 if (quota != RUNTIME_INF && quota > max_cfs_runtime)
9461 return -EINVAL;
9462
f4183717
HC
9463 if (quota != RUNTIME_INF && (burst > quota ||
9464 burst + quota > max_cfs_runtime))
9465 return -EINVAL;
9466
0e59bdae
KT
9467 /*
9468 * Prevent race between setting of cfs_rq->runtime_enabled and
9469 * unthrottle_offline_cfs_rqs().
9470 */
6fb45460
PZ
9471 guard(cpus_read_lock)();
9472 guard(mutex)(&cfs_constraints_mutex);
9473
a790de99
PT
9474 ret = __cfs_schedulable(tg, period, quota);
9475 if (ret)
6fb45460 9476 return ret;
a790de99 9477
58088ad0 9478 runtime_enabled = quota != RUNTIME_INF;
56f570e5 9479 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
1ee14e6c
BS
9480 /*
9481 * If we need to toggle cfs_bandwidth_used, off->on must occur
9482 * before making related changes, and on->off must occur afterwards
9483 */
9484 if (runtime_enabled && !runtime_was_enabled)
9485 cfs_bandwidth_usage_inc();
58088ad0 9486
6fb45460
PZ
9487 scoped_guard (raw_spinlock_irq, &cfs_b->lock) {
9488 cfs_b->period = ns_to_ktime(period);
9489 cfs_b->quota = quota;
9490 cfs_b->burst = burst;
d1ccc66d 9491
6fb45460 9492 __refill_cfs_bandwidth_runtime(cfs_b);
d1ccc66d 9493
6fb45460
PZ
9494 /*
9495 * Restart the period timer (if active) to handle new
9496 * period expiry:
9497 */
9498 if (runtime_enabled)
9499 start_cfs_bandwidth(cfs_b);
9500 }
ab84d31e 9501
0e59bdae 9502 for_each_online_cpu(i) {
ab84d31e 9503 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
029632fb 9504 struct rq *rq = cfs_rq->rq;
ab84d31e 9505
6fb45460 9506 guard(rq_lock_irq)(rq);
58088ad0 9507 cfs_rq->runtime_enabled = runtime_enabled;
ab84d31e 9508 cfs_rq->runtime_remaining = 0;
671fd9da 9509
029632fb 9510 if (cfs_rq->throttled)
671fd9da 9511 unthrottle_cfs_rq(cfs_rq);
ab84d31e 9512 }
6fb45460 9513
1ee14e6c
BS
9514 if (runtime_was_enabled && !runtime_enabled)
9515 cfs_bandwidth_usage_dec();
ab84d31e 9516
6fb45460 9517 return 0;
ab84d31e
PT
9518}
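/*
 * Illustrative summary of the constraints enforced above (values follow from
 * min_cfs_quota_period, max_cfs_quota_period and max_cfs_runtime):
 *
 *	quota >= 1ms (unless RUNTIME_INF), period in [1ms, 1s]
 *	quota <= max_cfs_runtime
 *	burst <= quota and burst + quota <= max_cfs_runtime
 *
 * e.g. period = 100ms with quota = 50ms and burst = 0 is accepted, while
 * period = 100ms with quota = 0.5ms is rejected with -EINVAL.
 */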
9519
b1546edc 9520static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
ab84d31e 9521{
f4183717 9522 u64 quota, period, burst;
ab84d31e 9523
029632fb 9524 period = ktime_to_ns(tg->cfs_bandwidth.period);
f4183717 9525 burst = tg->cfs_bandwidth.burst;
ab84d31e
PT
9526 if (cfs_quota_us < 0)
9527 quota = RUNTIME_INF;
1a8b4540 9528 else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
ab84d31e 9529 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
1a8b4540
KK
9530 else
9531 return -EINVAL;
ab84d31e 9532
f4183717 9533 return tg_set_cfs_bandwidth(tg, period, quota, burst);
ab84d31e
PT
9534}
9535
b1546edc 9536static long tg_get_cfs_quota(struct task_group *tg)
ab84d31e
PT
9537{
9538 u64 quota_us;
9539
029632fb 9540 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
ab84d31e
PT
9541 return -1;
9542
029632fb 9543 quota_us = tg->cfs_bandwidth.quota;
ab84d31e
PT
9544 do_div(quota_us, NSEC_PER_USEC);
9545
9546 return quota_us;
9547}
9548
b1546edc 9549static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
ab84d31e 9550{
f4183717 9551 u64 quota, period, burst;
ab84d31e 9552
1a8b4540
KK
9553 if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
9554 return -EINVAL;
9555
ab84d31e 9556 period = (u64)cfs_period_us * NSEC_PER_USEC;
029632fb 9557 quota = tg->cfs_bandwidth.quota;
f4183717 9558 burst = tg->cfs_bandwidth.burst;
ab84d31e 9559
f4183717 9560 return tg_set_cfs_bandwidth(tg, period, quota, burst);
ab84d31e
PT
9561}
9562
b1546edc 9563static long tg_get_cfs_period(struct task_group *tg)
ab84d31e
PT
9564{
9565 u64 cfs_period_us;
9566
029632fb 9567 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
ab84d31e
PT
9568 do_div(cfs_period_us, NSEC_PER_USEC);
9569
9570 return cfs_period_us;
9571}
9572
f4183717
HC
9573static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us)
9574{
9575 u64 quota, period, burst;
9576
9577 if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC)
9578 return -EINVAL;
9579
9580 burst = (u64)cfs_burst_us * NSEC_PER_USEC;
9581 period = ktime_to_ns(tg->cfs_bandwidth.period);
9582 quota = tg->cfs_bandwidth.quota;
9583
9584 return tg_set_cfs_bandwidth(tg, period, quota, burst);
9585}
9586
9587static long tg_get_cfs_burst(struct task_group *tg)
9588{
9589 u64 burst_us;
9590
9591 burst_us = tg->cfs_bandwidth.burst;
9592 do_div(burst_us, NSEC_PER_USEC);
9593
9594 return burst_us;
9595}
9596
182446d0
TH
9597static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
9598 struct cftype *cft)
ab84d31e 9599{
182446d0 9600 return tg_get_cfs_quota(css_tg(css));
ab84d31e
PT
9601}
9602
182446d0
TH
9603static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
9604 struct cftype *cftype, s64 cfs_quota_us)
ab84d31e 9605{
182446d0 9606 return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
ab84d31e
PT
9607}
9608
182446d0
TH
9609static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
9610 struct cftype *cft)
ab84d31e 9611{
182446d0 9612 return tg_get_cfs_period(css_tg(css));
ab84d31e
PT
9613}
9614
182446d0
TH
9615static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
9616 struct cftype *cftype, u64 cfs_period_us)
ab84d31e 9617{
182446d0 9618 return tg_set_cfs_period(css_tg(css), cfs_period_us);
ab84d31e
PT
9619}
9620
f4183717
HC
9621static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css,
9622 struct cftype *cft)
9623{
9624 return tg_get_cfs_burst(css_tg(css));
9625}
9626
9627static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css,
9628 struct cftype *cftype, u64 cfs_burst_us)
9629{
9630 return tg_set_cfs_burst(css_tg(css), cfs_burst_us);
9631}
9632
a790de99
PT
9633struct cfs_schedulable_data {
9634 struct task_group *tg;
9635 u64 period, quota;
9636};
9637
9638/*
9639 * normalize group quota/period to be quota/max_period
9640 * note: units are usecs
9641 */
9642static u64 normalize_cfs_quota(struct task_group *tg,
9643 struct cfs_schedulable_data *d)
9644{
9645 u64 quota, period;
9646
9647 if (tg == d->tg) {
9648 period = d->period;
9649 quota = d->quota;
9650 } else {
9651 period = tg_get_cfs_period(tg);
9652 quota = tg_get_cfs_quota(tg);
9653 }
9654
9655 /* note: these should typically be equivalent */
9656 if (quota == RUNTIME_INF || quota == -1)
9657 return RUNTIME_INF;
9658
9659 return to_ratio(period, quota);
9660}
9661
9662static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
9663{
9664 struct cfs_schedulable_data *d = data;
029632fb 9665 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
a790de99
PT
9666 s64 quota = 0, parent_quota = -1;
9667
9668 if (!tg->parent) {
9669 quota = RUNTIME_INF;
9670 } else {
029632fb 9671 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
a790de99
PT
9672
9673 quota = normalize_cfs_quota(tg, d);
9c58c79a 9674 parent_quota = parent_b->hierarchical_quota;
a790de99
PT
9675
9676 /*
c53593e5 9677 * Ensure max(child_quota) <= parent_quota. On cgroup2,
c98c1827
PA
9678 * always take the non-RUNTIME_INF min. On cgroup1, only
9679 * inherit when no limit is set. In both cases this is used
9680 * by the scheduler to determine if a given CFS task has a
9681 * bandwidth constraint at some higher level.
a790de99 9682 */
c53593e5 9683 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
c98c1827
PA
9684 if (quota == RUNTIME_INF)
9685 quota = parent_quota;
9686 else if (parent_quota != RUNTIME_INF)
9687 quota = min(quota, parent_quota);
c53593e5
TH
9688 } else {
9689 if (quota == RUNTIME_INF)
9690 quota = parent_quota;
9691 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
9692 return -EINVAL;
9693 }
a790de99 9694 }
9c58c79a 9695 cfs_b->hierarchical_quota = quota;
a790de99
PT
9696
9697 return 0;
9698}
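/*
 * Worked example (illustrative only): with a parent group limited to
 * quota = 50ms per 100ms period, a cgroup2 child left at "max" inherits the
 * parent's hierarchical quota, and a child asking for 80ms over the same
 * 100ms period is clamped to the non-RUNTIME_INF minimum (50ms). On cgroup1
 * the same 80ms request is instead rejected with -EINVAL, since a child may
 * not exceed its parent's quota there.
 */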
9699
9700static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
9701{
9702 struct cfs_schedulable_data data = {
9703 .tg = tg,
9704 .period = period,
9705 .quota = quota,
9706 };
9707
9708 if (quota != RUNTIME_INF) {
9709 do_div(data.period, NSEC_PER_USEC);
9710 do_div(data.quota, NSEC_PER_USEC);
9711 }
9712
0e34600a
PZ
9713 guard(rcu)();
9714 return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
a790de99 9715}
e8da1b18 9716
a1f7164c 9717static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
e8da1b18 9718{
2da8ca82 9719 struct task_group *tg = css_tg(seq_css(sf));
029632fb 9720 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
e8da1b18 9721
44ffc75b
TH
9722 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
9723 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
9724 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
e8da1b18 9725
3d6c50c2 9726 if (schedstat_enabled() && tg != &root_task_group) {
ceeadb83 9727 struct sched_statistics *stats;
3d6c50c2
YW
9728 u64 ws = 0;
9729 int i;
9730
ceeadb83
YS
9731 for_each_possible_cpu(i) {
9732 stats = __schedstats_from_se(tg->se[i]);
9733 ws += schedstat_val(stats->wait_sum);
9734 }
3d6c50c2
YW
9735
9736 seq_printf(sf, "wait_sum %llu\n", ws);
9737 }
9738
bcb1704a
HC
9739 seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
9740 seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);
9741
e8da1b18
NR
9742 return 0;
9743}
677ea015
JD
9744
9745static u64 throttled_time_self(struct task_group *tg)
9746{
9747 int i;
9748 u64 total = 0;
9749
9750 for_each_possible_cpu(i) {
9751 total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);
9752 }
9753
9754 return total;
9755}
9756
9757static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
9758{
9759 struct task_group *tg = css_tg(seq_css(sf));
9760
9761 seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg));
9762
9763 return 0;
9764}
ab84d31e 9765#endif /* CONFIG_CFS_BANDWIDTH */
68318b8e 9766
052f1dc7 9767#ifdef CONFIG_RT_GROUP_SCHED
182446d0
TH
9768static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
9769 struct cftype *cft, s64 val)
6f505b16 9770{
182446d0 9771 return sched_group_set_rt_runtime(css_tg(css), val);
6f505b16
PZ
9772}
9773
182446d0
TH
9774static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
9775 struct cftype *cft)
6f505b16 9776{
182446d0 9777 return sched_group_rt_runtime(css_tg(css));
6f505b16 9778}
d0b27fa7 9779
182446d0
TH
9780static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
9781 struct cftype *cftype, u64 rt_period_us)
d0b27fa7 9782{
182446d0 9783 return sched_group_set_rt_period(css_tg(css), rt_period_us);
d0b27fa7
PZ
9784}
9785
182446d0
TH
9786static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
9787 struct cftype *cft)
d0b27fa7 9788{
182446d0 9789 return sched_group_rt_period(css_tg(css));
d0b27fa7 9790}
6d6bc0ad 9791#endif /* CONFIG_RT_GROUP_SCHED */
6f505b16 9792
e179e80c 9793#ifdef CONFIG_GROUP_SCHED_WEIGHT
30400039
JD
9794static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
9795 struct cftype *cft)
9796{
9797 return css_tg(css)->idle;
9798}
9799
9800static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
9801 struct cftype *cft, s64 idle)
9802{
81951366
TH
9803 int ret;
9804
9805 ret = sched_group_set_idle(css_tg(css), idle);
9806 if (!ret)
9807 scx_group_set_idle(css_tg(css), idle);
9808 return ret;
30400039
JD
9809}
9810#endif
9811
a1f7164c 9812static struct cftype cpu_legacy_files[] = {
e179e80c 9813#ifdef CONFIG_GROUP_SCHED_WEIGHT
fe5c7cc2
PM
9814 {
9815 .name = "shares",
f4c753b7
PM
9816 .read_u64 = cpu_shares_read_u64,
9817 .write_u64 = cpu_shares_write_u64,
fe5c7cc2 9818 },
30400039
JD
9819 {
9820 .name = "idle",
9821 .read_s64 = cpu_idle_read_s64,
9822 .write_s64 = cpu_idle_write_s64,
9823 },
052f1dc7 9824#endif
ab84d31e
PT
9825#ifdef CONFIG_CFS_BANDWIDTH
9826 {
9827 .name = "cfs_quota_us",
9828 .read_s64 = cpu_cfs_quota_read_s64,
9829 .write_s64 = cpu_cfs_quota_write_s64,
9830 },
9831 {
9832 .name = "cfs_period_us",
9833 .read_u64 = cpu_cfs_period_read_u64,
9834 .write_u64 = cpu_cfs_period_write_u64,
9835 },
f4183717
HC
9836 {
9837 .name = "cfs_burst_us",
9838 .read_u64 = cpu_cfs_burst_read_u64,
9839 .write_u64 = cpu_cfs_burst_write_u64,
9840 },
e8da1b18
NR
9841 {
9842 .name = "stat",
a1f7164c 9843 .seq_show = cpu_cfs_stat_show,
e8da1b18 9844 },
677ea015
JD
9845 {
9846 .name = "stat.local",
9847 .seq_show = cpu_cfs_local_stat_show,
9848 },
ab84d31e 9849#endif
2480c093
PB
9850#ifdef CONFIG_UCLAMP_TASK_GROUP
9851 {
9852 .name = "uclamp.min",
9853 .flags = CFTYPE_NOT_ON_ROOT,
9854 .seq_show = cpu_uclamp_min_show,
9855 .write = cpu_uclamp_min_write,
9856 },
9857 {
9858 .name = "uclamp.max",
9859 .flags = CFTYPE_NOT_ON_ROOT,
9860 .seq_show = cpu_uclamp_max_show,
9861 .write = cpu_uclamp_max_write,
9862 },
052f1dc7 9863#endif
d1ccc66d 9864 { } /* Terminate */
68318b8e
SV
9865};
9866
e34e0131 9867#ifdef CONFIG_RT_GROUP_SCHED
d6809c2f
MK
9868static struct cftype rt_group_files[] = {
9869 {
9870 .name = "rt_runtime_us",
9871 .read_s64 = cpu_rt_runtime_read,
9872 .write_s64 = cpu_rt_runtime_write,
9873 },
9874 {
9875 .name = "rt_period_us",
9876 .read_u64 = cpu_rt_period_read_uint,
9877 .write_u64 = cpu_rt_period_write_uint,
9878 },
9879 { } /* Terminate */
9880};
9881
e34e0131
MK
9882# ifdef CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED
9883DEFINE_STATIC_KEY_FALSE(rt_group_sched);
9884# else
9885DEFINE_STATIC_KEY_TRUE(rt_group_sched);
9886# endif
9887
9888static int __init setup_rt_group_sched(char *str)
9889{
9890 long val;
9891
9892 if (kstrtol(str, 0, &val) || val < 0 || val > 1) {
9893 pr_warn("Unable to set rt_group_sched\n");
9894 return 1;
9895 }
9896 if (val)
9897 static_branch_enable(&rt_group_sched);
9898 else
9899 static_branch_disable(&rt_group_sched);
9900
9901 return 1;
9902}
9903__setup("rt_group_sched=", setup_rt_group_sched);
d6809c2f
MK
9904
9905static int __init cpu_rt_group_init(void)
9906{
9907 if (!rt_group_sched_enabled())
9908 return 0;
9909
9910 WARN_ON(cgroup_add_legacy_cftypes(&cpu_cgrp_subsys, rt_group_files));
9911 return 0;
9912}
9913subsys_initcall(cpu_rt_group_init);
e34e0131
MK
9914#endif /* CONFIG_RT_GROUP_SCHED */
9915
d41bf8c9
TH
9916static int cpu_extra_stat_show(struct seq_file *sf,
9917 struct cgroup_subsys_state *css)
0d593634 9918{
0d593634
TH
9919#ifdef CONFIG_CFS_BANDWIDTH
9920 {
d41bf8c9 9921 struct task_group *tg = css_tg(css);
0d593634 9922 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
bcb1704a 9923 u64 throttled_usec, burst_usec;
0d593634
TH
9924
9925 throttled_usec = cfs_b->throttled_time;
9926 do_div(throttled_usec, NSEC_PER_USEC);
bcb1704a
HC
9927 burst_usec = cfs_b->burst_time;
9928 do_div(burst_usec, NSEC_PER_USEC);
0d593634
TH
9929
9930 seq_printf(sf, "nr_periods %d\n"
9931 "nr_throttled %d\n"
bcb1704a
HC
9932 "throttled_usec %llu\n"
9933 "nr_bursts %d\n"
9934 "burst_usec %llu\n",
0d593634 9935 cfs_b->nr_periods, cfs_b->nr_throttled,
bcb1704a 9936 throttled_usec, cfs_b->nr_burst, burst_usec);
0d593634
TH
9937 }
9938#endif
9939 return 0;
9940}
9941
677ea015
JD
9942static int cpu_local_stat_show(struct seq_file *sf,
9943 struct cgroup_subsys_state *css)
9944{
9945#ifdef CONFIG_CFS_BANDWIDTH
9946 {
9947 struct task_group *tg = css_tg(css);
9948 u64 throttled_self_usec;
9949
9950 throttled_self_usec = throttled_time_self(tg);
9951 do_div(throttled_self_usec, NSEC_PER_USEC);
9952
9953 seq_printf(sf, "throttled_usec %llu\n",
9954 throttled_self_usec);
9955 }
9956#endif
9957 return 0;
9958}
9959
e179e80c 9960#ifdef CONFIG_GROUP_SCHED_WEIGHT
4f9c7ca8 9961
0d593634
TH
9962static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
9963 struct cftype *cft)
9964{
4f9c7ca8 9965 return sched_weight_to_cgroup(tg_weight(css_tg(css)));
0d593634
TH
9966}
9967
9968static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
4f9c7ca8 9969 struct cftype *cft, u64 cgrp_weight)
0d593634 9970{
4f9c7ca8 9971 unsigned long weight;
81951366 9972 int ret;
4f9c7ca8
TH
9973
9974 if (cgrp_weight < CGROUP_WEIGHT_MIN || cgrp_weight > CGROUP_WEIGHT_MAX)
0d593634
TH
9975 return -ERANGE;
9976
4f9c7ca8 9977 weight = sched_weight_from_cgroup(cgrp_weight);
0d593634 9978
81951366
TH
9979 ret = sched_group_set_shares(css_tg(css), scale_load(weight));
9980 if (!ret)
9981 scx_group_set_weight(css_tg(css), cgrp_weight);
9982 return ret;
0d593634
TH
9983}
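/*
 * Worked example (illustrative, assuming the usual mapping in which the
 * default cgroup weight of 100 corresponds to a task_group share of 1024):
 * writing cpu.weight = 100 yields shares of about 1024, cpu.weight = 50
 * about 512, and the maximum cpu.weight = 10000 about 102400. Values outside
 * [CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX] are rejected with -ERANGE by
 * cpu_weight_write_u64() above.
 */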
9984
9985static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
9986 struct cftype *cft)
9987{
4f9c7ca8 9988 unsigned long weight = tg_weight(css_tg(css));
0d593634
TH
9989 int last_delta = INT_MAX;
9990 int prio, delta;
9991
9992 /* find the closest nice value to the current weight */
9993 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
9994 delta = abs(sched_prio_to_weight[prio] - weight);
9995 if (delta >= last_delta)
9996 break;
9997 last_delta = delta;
9998 }
9999
10000 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
10001}
10002
10003static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
10004 struct cftype *cft, s64 nice)
10005{
10006 unsigned long weight;
81951366 10007 int idx, ret;
0d593634
TH
10008
10009 if (nice < MIN_NICE || nice > MAX_NICE)
10010 return -ERANGE;
10011
7281c8de
PZ
10012 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
10013 idx = array_index_nospec(idx, 40);
10014 weight = sched_prio_to_weight[idx];
10015
81951366
TH
10016 ret = sched_group_set_shares(css_tg(css), scale_load(weight));
10017 if (!ret)
10018 scx_group_set_weight(css_tg(css),
10019 sched_weight_to_cgroup(weight));
10020 return ret;
0d593634 10021}
e179e80c 10022#endif /* CONFIG_GROUP_SCHED_WEIGHT */
0d593634
TH
10023
10024static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
10025 long period, long quota)
10026{
10027 if (quota < 0)
10028 seq_puts(sf, "max");
10029 else
10030 seq_printf(sf, "%ld", quota);
10031
10032 seq_printf(sf, " %ld\n", period);
10033}
10034
10035/* caller should put the current value in *@periodp before calling */
10036static int __maybe_unused cpu_period_quota_parse(char *buf,
10037 u64 *periodp, u64 *quotap)
10038{
10039 char tok[21]; /* U64_MAX */
10040
4c47acd8 10041 if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
0d593634
TH
10042 return -EINVAL;
10043
10044 *periodp *= NSEC_PER_USEC;
10045
10046 if (sscanf(tok, "%llu", quotap))
10047 *quotap *= NSEC_PER_USEC;
10048 else if (!strcmp(tok, "max"))
10049 *quotap = RUNTIME_INF;
10050 else
10051 return -EINVAL;
10052
10053 return 0;
10054}
10055
10056#ifdef CONFIG_CFS_BANDWIDTH
10057static int cpu_max_show(struct seq_file *sf, void *v)
10058{
10059 struct task_group *tg = css_tg(seq_css(sf));
10060
10061 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
10062 return 0;
10063}
10064
10065static ssize_t cpu_max_write(struct kernfs_open_file *of,
10066 char *buf, size_t nbytes, loff_t off)
10067{
10068 struct task_group *tg = css_tg(of_css(of));
10069 u64 period = tg_get_cfs_period(tg);
49217ea1 10070 u64 burst = tg->cfs_bandwidth.burst;
0d593634
TH
10071 u64 quota;
10072 int ret;
10073
10074 ret = cpu_period_quota_parse(buf, &period, &quota);
10075 if (!ret)
f4183717 10076 ret = tg_set_cfs_bandwidth(tg, period, quota, burst);
0d593634
TH
10077 return ret ?: nbytes;
10078}
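/*
 * Usage example (illustrative only) for the cgroup2 "cpu.max" interface
 * implemented above: writing "50000 100000" allows 50ms of runtime per 100ms
 * period (both values are in microseconds and scaled by NSEC_PER_USEC),
 * while writing "max 100000" removes the quota again;
 * cpu_period_quota_parse() accepts either a numeric quota or the literal
 * "max".
 */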
10079#endif
10080
10081static struct cftype cpu_files[] = {
e179e80c 10082#ifdef CONFIG_GROUP_SCHED_WEIGHT
0d593634
TH
10083 {
10084 .name = "weight",
10085 .flags = CFTYPE_NOT_ON_ROOT,
10086 .read_u64 = cpu_weight_read_u64,
10087 .write_u64 = cpu_weight_write_u64,
10088 },
10089 {
10090 .name = "weight.nice",
10091 .flags = CFTYPE_NOT_ON_ROOT,
10092 .read_s64 = cpu_weight_nice_read_s64,
10093 .write_s64 = cpu_weight_nice_write_s64,
10094 },
30400039
JD
10095 {
10096 .name = "idle",
10097 .flags = CFTYPE_NOT_ON_ROOT,
10098 .read_s64 = cpu_idle_read_s64,
10099 .write_s64 = cpu_idle_write_s64,
10100 },
0d593634
TH
10101#endif
10102#ifdef CONFIG_CFS_BANDWIDTH
10103 {
10104 .name = "max",
10105 .flags = CFTYPE_NOT_ON_ROOT,
10106 .seq_show = cpu_max_show,
10107 .write = cpu_max_write,
10108 },
f4183717
HC
10109 {
10110 .name = "max.burst",
10111 .flags = CFTYPE_NOT_ON_ROOT,
10112 .read_u64 = cpu_cfs_burst_read_u64,
10113 .write_u64 = cpu_cfs_burst_write_u64,
10114 },
2480c093
PB
10115#endif
10116#ifdef CONFIG_UCLAMP_TASK_GROUP
10117 {
10118 .name = "uclamp.min",
10119 .flags = CFTYPE_NOT_ON_ROOT,
10120 .seq_show = cpu_uclamp_min_show,
10121 .write = cpu_uclamp_min_write,
10122 },
10123 {
10124 .name = "uclamp.max",
10125 .flags = CFTYPE_NOT_ON_ROOT,
10126 .seq_show = cpu_uclamp_max_show,
10127 .write = cpu_uclamp_max_write,
10128 },
0d593634
TH
10129#endif
10130 { } /* terminate */
10131};
10132
073219e9 10133struct cgroup_subsys cpu_cgrp_subsys = {
92fb9748 10134 .css_alloc = cpu_cgroup_css_alloc,
96b77745 10135 .css_online = cpu_cgroup_css_online,
81951366 10136 .css_offline = cpu_cgroup_css_offline,
2f5177f0 10137 .css_released = cpu_cgroup_css_released,
92fb9748 10138 .css_free = cpu_cgroup_css_free,
d41bf8c9 10139 .css_extra_stat_show = cpu_extra_stat_show,
677ea015 10140 .css_local_stat_show = cpu_local_stat_show,
bb9d97b6
TH
10141 .can_attach = cpu_cgroup_can_attach,
10142 .attach = cpu_cgroup_attach,
81951366 10143 .cancel_attach = cpu_cgroup_cancel_attach,
a1f7164c 10144 .legacy_cftypes = cpu_legacy_files,
0d593634 10145 .dfl_cftypes = cpu_files,
b38e42e9 10146 .early_init = true,
0d593634 10147 .threaded = true,
68318b8e
SV
10148};
10149
052f1dc7 10150#endif /* CONFIG_CGROUP_SCHED */
d842de87 10151
b637a328
PM
10152void dump_cpu_task(int cpu)
10153{
51b73999 10154 if (in_hardirq() && cpu == smp_processor_id()) {
bc1cca97
ZL
10155 struct pt_regs *regs;
10156
10157 regs = get_irq_regs();
10158 if (regs) {
10159 show_regs(regs);
10160 return;
10161 }
10162 }
10163
e73dfe30
ZL
10164 if (trigger_single_cpu_backtrace(cpu))
10165 return;
10166
b637a328
PM
10167 pr_info("Task dump for CPU %d:\n", cpu);
10168 sched_show_task(cpu_curr(cpu));
10169}
ed82b8a1
AK
10170
10171/*
10172 * Nice levels are multiplicative, with a gentle 10% change for every
10173 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
10174 * nice 1, it will get ~10% less CPU time than another CPU-bound task
10175 * that remained on nice 0.
10176 *
10177 * The "10% effect" is relative and cumulative: from _any_ nice level,
10178 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
10179 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
10180 * If a task goes up by ~10% and another task goes down by ~10% then
10181 * the relative distance between them is ~25%.)
10182 */
10183const int sched_prio_to_weight[40] = {
10184 /* -20 */ 88761, 71755, 56483, 46273, 36291,
10185 /* -15 */ 29154, 23254, 18705, 14949, 11916,
10186 /* -10 */ 9548, 7620, 6100, 4904, 3906,
10187 /* -5 */ 3121, 2501, 1991, 1586, 1277,
10188 /* 0 */ 1024, 820, 655, 526, 423,
10189 /* 5 */ 335, 272, 215, 172, 137,
10190 /* 10 */ 110, 87, 70, 56, 45,
10191 /* 15 */ 36, 29, 23, 18, 15,
10192};
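/*
 * Worked example (illustrative only): adjacent entries differ by roughly a
 * factor of 1.25, e.g. nice -1 -> 1277 ~= 1024 * 1.25 and
 * nice +1 -> 820 ~= 1024 / 1.25. Two CPU-bound tasks at nice 0 and nice 1
 * thus split the CPU as 1024 / (1024 + 820) ~= 55.5% versus ~44.5%: about
 * ten percentage points apart, with a relative distance of ~25%, matching
 * the description above.
 */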
10193
10194/*
402de7fc 10195 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
ed82b8a1
AK
10196 *
10197 * In cases where the weight does not change often, we can use the
402de7fc 10198 * pre-calculated inverse to speed up arithmetics by turning divisions
ed82b8a1
AK
10199 * into multiplications:
10200 */
10201const u32 sched_prio_to_wmult[40] = {
10202 /* -20 */ 48388, 59856, 76040, 92818, 118348,
10203 /* -15 */ 147320, 184698, 229616, 287308, 360437,
10204 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
10205 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
10206 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
10207 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
10208 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
10209 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
10210};
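/*
 * A minimal sketch of how such a pre-computed inverse is used
 * (illustration only; weight_div() is a hypothetical helper -- the
 * scheduler's real fixed-point code copies these values into
 * struct load_weight and additionally guards against overflow of the
 * 64x32-bit product):
 *
 *	x / weight  ~=  (x * (2^32 / weight)) >> 32
 *
 * e.g. for nice 0: sched_prio_to_wmult[20] == 4194304 == 2^32 / 1024.
 */
static u64 __maybe_unused weight_div(u64 x, int nice)
{
	u32 inv = sched_prio_to_wmult[nice + 20];

	/* valid while x fits in 32 bits, so the product cannot overflow */
	return (x * inv) >> 32;
}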
14a7405b 10211
10212void call_trace_sched_update_nr_running(struct rq *rq, int count)
10213{
10214 trace_sched_update_nr_running_tp(rq, count);
10215}
10216
10217#ifdef CONFIG_SCHED_MM_CID
223baf9d 10218
0019a2d4 10219/*
10220 * @cid_lock: Guarantee forward-progress of cid allocation.
10221 *
10222 * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
10223 * is only used when contention is detected by the lock-free allocation so
10224 * forward progress can be guaranteed.
10225 */
10226DEFINE_RAW_SPINLOCK(cid_lock);
10227
0019a2d4 10228/*
10229 * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
10230 *
10231 * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
10232 * detected, it is set to 1 to ensure that all newly coming allocations are
10233 * serialized by @cid_lock until the allocation which detected contention
10234 * completes and sets @use_cid_lock back to 0. This guarantees forward progress
10235 * of a cid allocation.
10236 */
10237int use_cid_lock;
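/*
 * A simplified sketch of the allocation pattern that @cid_lock and
 * @use_cid_lock implement (illustration only: example_cid_alloc() and
 * its raw bitmap parameters are hypothetical; the real allocator works
 * on mm_cidmask(mm) and additionally handles the LAZY/UNSET states
 * described below):
 */
static int __maybe_unused example_cid_alloc(unsigned long *cidmask,
					    unsigned int nr_cids)
{
	unsigned long cid;

	/* Common case: a lock-free scan-and-claim of the lowest free bit. */
	if (!READ_ONCE(use_cid_lock)) {
		cid = find_first_zero_bit(cidmask, nr_cids);
		if (cid < nr_cids && !test_and_set_bit(cid, cidmask))
			return cid;
		/* Lost the race: fall through to the serialized slow path. */
	}

	/*
	 * Contention was detected (here or by another allocator):
	 * serialize behind cid_lock so this allocation cannot be starved,
	 * and advertise the slow path via use_cid_lock.
	 */
	guard(raw_spinlock)(&cid_lock);
	WRITE_ONCE(use_cid_lock, 1);
	do {
		cid = find_first_zero_bit(cidmask, nr_cids);
	} while (cid < nr_cids && test_and_set_bit(cid, cidmask));
	WRITE_ONCE(use_cid_lock, 0);

	if (cid >= nr_cids)
		return -1;
	return cid;
}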
10238
10239/*
10240 * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
10241 * concurrently with respect to the execution of the source runqueue context
10242 * switch.
10243 *
10244 * There is one basic property we want to guarantee here:
10245 *
10246 * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
10247 * used by a task. That would lead to concurrent allocation of the cid and
10248 * userspace corruption.
10249 *
10250 * Provide this guarantee by introducing a Dekker memory ordering to guarantee
10251 * that a pair of loads observe at least one of a pair of stores, which can be
10252 * shown as:
10253 *
10254 * X = Y = 0
10255 *
10256 * w[X]=1 w[Y]=1
10257 * MB MB
10258 * r[Y]=y r[X]=x
10259 *
10260 * Which guarantees that x==0 && y==0 is impossible. But rather than using
10261 * values 0 and 1, this algorithm cares about specific state transitions of the
10262 * runqueue current task (as updated by the scheduler context switch), and the
10263 * per-mm/cpu cid value.
10264 *
10265 * Let's introduce task (Y) which has task->mm == mm and task (N) which has
10266 * task->mm != mm for the rest of the discussion. There are two scheduler state
10267 * transitions on context switch we care about:
10268 *
10269 * (TSA) Store to rq->curr with transition from (N) to (Y)
10270 *
10271 * (TSB) Store to rq->curr with transition from (Y) to (N)
10272 *
10273 * On the remote-clear side, there is one transition we care about:
10274 *
10275 * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag
10276 *
10277 * There is also a transition to UNSET state which can be performed from all
10278 * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
10279 * guarantees that only a single thread will succeed:
10280 *
10281 * (TMB) cmpxchg to *pcpu_cid to mark UNSET
10282 *
10283 * Just to be clear, what we do _not_ want to happen is a transition to UNSET
10284 * when a thread is actively using the cid (property (1)).
10285 *
10286 * Let's look at the relevant combinations of TSA/TSB and TMA transitions.
10287 *
10288 * Scenario A) (TSA)+(TMA) (from next task perspective)
10289 *
10290 * CPU0 CPU1
10291 *
10292 * Context switch CS-1 Remote-clear
10293 * - store to rq->curr: (N)->(Y) (TSA) - cmpxchg to *pcpu_id to LAZY (TMA)
10294 * (implied barrier after cmpxchg)
10295 * - switch_mm_cid()
10296 * - memory barrier (see switch_mm_cid()
10297 * comment explaining how this barrier
10298 * is combined with other scheduler
10299 * barriers)
10300 * - mm_cid_get (next)
10301 * - READ_ONCE(*pcpu_cid) - rcu_dereference(src_rq->curr)
10302 *
10303 * This Dekker ensures that either task (Y) is observed by the
10304 * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are
10305 * observed.
10306 *
10307 * If task (Y) store is observed by rcu_dereference(), it means that there is
10308 * still an active task on the cpu. Remote-clear will therefore not transition
10309 * to UNSET, which fulfills property (1).
10310 *
10311 * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(),
10312 * it will move its state to UNSET, which clears the percpu cid perhaps
10313 * uselessly (which is not an issue for correctness). Because task (Y) is not
10314 * observed, CPU1 can move ahead to set the state to UNSET. Because moving
10315 * state to UNSET is done with a cmpxchg expecting that the old state has the
10316 * LAZY flag set, only one thread will successfully UNSET.
10317 *
10318 * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0
10319 * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and
10320 * CPU1 will observe task (Y) and do nothing more, which is fine.
10321 *
10322 * What we are effectively preventing with this Dekker is a scenario where
10323 * neither LAZY flag nor store (Y) are observed, which would fail property (1)
10324 * because this would UNSET a cid which is actively used.
10325 */
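/*
 * A minimal, self-contained sketch of the Dekker pattern relied upon
 * above (illustration only: dekker_x/dekker_y and the two functions are
 * hypothetical stand-ins; in the scheduler the two stores are the
 * rq->curr update and the pcpu_cid cmpxchg, and the barriers are the
 * ones documented in switch_mm_cid() and implied by cmpxchg):
 */
static int dekker_x, dekker_y;

static void __maybe_unused dekker_switch_side(int *saw_remote)
{
	WRITE_ONCE(dekker_x, 1);	/* stands in for the rq->curr store */
	smp_mb();
	*saw_remote = READ_ONCE(dekker_y);
}

static void __maybe_unused dekker_remote_side(int *saw_switch)
{
	WRITE_ONCE(dekker_y, 1);	/* stands in for the pcpu_cid cmpxchg */
	smp_mb();
	*saw_switch = READ_ONCE(dekker_x);
}

/*
 * When the two run concurrently, *saw_remote == 0 && *saw_switch == 0 is
 * forbidden: at least one side observes the other's store, which is what
 * allows the remote clear to back off instead of UNSETting a live cid.
 */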
10326
10327void sched_mm_cid_migrate_from(struct task_struct *t)
10328{
10329 t->migrate_from_cpu = task_cpu(t);
10330}
10331
10332static
10333int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq,
10334 struct task_struct *t,
10335 struct mm_cid *src_pcpu_cid)
10336{
10337 struct mm_struct *mm = t->mm;
10338 struct task_struct *src_task;
10339 int src_cid, last_mm_cid;
10340
10341 if (!mm)
10342 return -1;
10343
10344 last_mm_cid = t->last_mm_cid;
10345 /*
10346 * If the migrated task has no last cid, or if the current
10347 * task on src rq uses the cid, it means the source cid does not need
10348 * to be moved to the destination cpu.
10349 */
10350 if (last_mm_cid == -1)
10351 return -1;
10352 src_cid = READ_ONCE(src_pcpu_cid->cid);
10353 if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid)
10354 return -1;
10355
10356 /*
10357 * If we observe an active task using the mm on this rq, it means we
10358 * are not the last task to be migrated from this cpu for this mm, so
10359 * there is no need to move src_cid to the destination cpu.
10360 */
0e34600a 10361 guard(rcu)();
10362 src_task = rcu_dereference(src_rq->curr);
10363 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
10364 t->last_mm_cid = -1;
10365 return -1;
10366 }
10367
10368 return src_cid;
10369}
10370
10371static
10372int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq,
10373 struct task_struct *t,
10374 struct mm_cid *src_pcpu_cid,
10375 int src_cid)
10376{
10377 struct task_struct *src_task;
10378 struct mm_struct *mm = t->mm;
10379 int lazy_cid;
10380
10381 if (src_cid == -1)
10382 return -1;
10383
10384 /*
10385 * Attempt to clear the source cpu cid to move it to the destination
10386 * cpu.
10387 */
10388 lazy_cid = mm_cid_set_lazy_put(src_cid);
10389 if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid))
10390 return -1;
10391
10392 /*
10393 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10394 * rq->curr->mm matches the scheduler barrier in context_switch()
10395 * between store to rq->curr and load of prev and next task's
10396 * per-mm/cpu cid.
10397 *
10398 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10399 * rq->curr->mm_cid_active matches the barrier in
10400 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
10401 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
10402 * load of per-mm/cpu cid.
10403 */
10404
10405 /*
10406 * If we observe an active task using the mm on this rq after setting
10407 * the lazy-put flag, this task will be responsible for transitioning
10408 * from lazy-put flag set to MM_CID_UNSET.
10409 */
10410 scoped_guard (rcu) {
10411 src_task = rcu_dereference(src_rq->curr);
10412 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
10413 /*
10414 * We observed an active task for this mm, there is therefore
10415 * no point in moving this cid to the destination cpu.
10416 */
10417 t->last_mm_cid = -1;
10418 return -1;
10419 }
223baf9d 10420 }
10421
10422 /*
10423 * The src_cid is unused, so it can be unset.
10424 */
10425 if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
10426 return -1;
7e019dcc 10427 WRITE_ONCE(src_pcpu_cid->recent_cid, MM_CID_UNSET);
10428 return src_cid;
10429}
10430
10431/*
10432 * Migration to dst cpu. Called with dst_rq lock held.
10433 * Interrupts are disabled, which keeps the window of cid ownership without the
10434 * source rq lock held small.
10435 */
10436void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
10437{
10438 struct mm_cid *src_pcpu_cid, *dst_pcpu_cid;
10439 struct mm_struct *mm = t->mm;
10440 int src_cid, src_cpu;
10441 bool dst_cid_is_set;
10442 struct rq *src_rq;
10443
10444 lockdep_assert_rq_held(dst_rq);
10445
10446 if (!mm)
10447 return;
10448 src_cpu = t->migrate_from_cpu;
10449 if (src_cpu == -1) {
10450 t->last_mm_cid = -1;
10451 return;
10452 }
10453 /*
10454 * Move the src cid if the dst cid is unset. This keeps id
10455 * allocation closest to 0 in cases where few threads migrate around
402de7fc 10456 * many CPUs.
223baf9d 10457 *
10458 * If the destination cid or recent cid is already set, we may have
10459 * to just clear the src cid to ensure compactness in frequent
10460 * migration scenarios.
10461 *
10462 * It is not useful to clear the src cid when the number of threads is
402de7fc 10463 * greater than or equal to the number of allowed CPUs, because user-space
223baf9d 10464 * can expect that the number of allowed cids can reach the number of
402de7fc 10465 * allowed CPUs.
10466 */
10467 dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
10468 dst_cid_is_set = !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid->cid)) ||
10469 !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid->recent_cid));
10470 if (dst_cid_is_set && atomic_read(&mm->mm_users) >= READ_ONCE(mm->nr_cpus_allowed))
10471 return;
10472 src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu);
10473 src_rq = cpu_rq(src_cpu);
10474 src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid);
10475 if (src_cid == -1)
10476 return;
10477 src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid,
10478 src_cid);
10479 if (src_cid == -1)
10480 return;
7e019dcc 10481 if (dst_cid_is_set) {
10482 __mm_cid_put(mm, src_cid);
10483 return;
10484 }
10485 /* Move src_cid to dst cpu. */
10486 mm_cid_snapshot_time(dst_rq, mm);
10487 WRITE_ONCE(dst_pcpu_cid->cid, src_cid);
7e019dcc 10488 WRITE_ONCE(dst_pcpu_cid->recent_cid, src_cid);
10489}
10490
10491static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid,
10492 int cpu)
10493{
10494 struct rq *rq = cpu_rq(cpu);
10495 struct task_struct *t;
10496 int cid, lazy_cid;
10497
10498 cid = READ_ONCE(pcpu_cid->cid);
10499 if (!mm_cid_is_valid(cid))
af7f588d 10500 return;
10501
10502 /*
10503 * Clear the cpu cid if it is set to keep cid allocation compact. If
10504 * there happens to be other tasks left on the source cpu using this
10505 * mm, the next task using this mm will reallocate its cid on context
10506 * switch.
10507 */
10508 lazy_cid = mm_cid_set_lazy_put(cid);
10509 if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid))
10510 return;
10511
10512 /*
10513 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10514 * rq->curr->mm matches the scheduler barrier in context_switch()
10515 * between store to rq->curr and load of prev and next task's
10516 * per-mm/cpu cid.
10517 *
10518 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10519 * rq->curr->mm_cid_active matches the barrier in
10520 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
10521 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
10522 * load of per-mm/cpu cid.
10523 */
10524
10525 /*
10526 * If we observe an active task using the mm on this rq after setting
10527 * the lazy-put flag, that task will be responsible for transitioning
10528 * from lazy-put flag set to MM_CID_UNSET.
10529 */
10530 scoped_guard (rcu) {
10531 t = rcu_dereference(rq->curr);
10532 if (READ_ONCE(t->mm_cid_active) && t->mm == mm)
10533 return;
223baf9d 10534 }
10535
10536 /*
10537 * The cid is unused, so it can be unset.
10538 * Disable interrupts to keep the window of cid ownership without rq
10539 * lock small.
10540 */
10541 scoped_guard (irqsave) {
10542 if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
10543 __mm_cid_put(mm, cid);
10544 }
10545}
10546
10547static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
10548{
10549 struct rq *rq = cpu_rq(cpu);
10550 struct mm_cid *pcpu_cid;
10551 struct task_struct *curr;
10552 u64 rq_clock;
10553
10554 /*
10555 * rq->clock load is racy on 32-bit but one spurious clear once in a
10556 * while is irrelevant.
10557 */
10558 rq_clock = READ_ONCE(rq->clock);
10559 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
10560
10561 /*
10562 * In order to take care of infrequently scheduled tasks, bump the time
10563 * snapshot associated with this cid if an active task using the mm is
10564 * observed on this rq.
10565 */
10566 scoped_guard (rcu) {
10567 curr = rcu_dereference(rq->curr);
10568 if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
10569 WRITE_ONCE(pcpu_cid->time, rq_clock);
10570 return;
10571 }
223baf9d 10572 }
10573
10574 if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
10575 return;
10576 sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
10577}
10578
10579static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
10580 int weight)
10581{
10582 struct mm_cid *pcpu_cid;
10583 int cid;
10584
10585 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
10586 cid = READ_ONCE(pcpu_cid->cid);
10587 if (!mm_cid_is_valid(cid) || cid < weight)
10588 return;
10589 sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
10590}
10591
10592static void task_mm_cid_work(struct callback_head *work)
10593{
10594 unsigned long now = jiffies, old_scan, next_scan;
10595 struct task_struct *t = current;
10596 struct cpumask *cidmask;
10597 struct mm_struct *mm;
10598 int weight, cpu;
10599
f7d2728c 10600 WARN_ON_ONCE(t != container_of(work, struct task_struct, cid_work));
10601
10602 work->next = work; /* Prevent double-add */
10603 if (t->flags & PF_EXITING)
10604 return;
10605 mm = t->mm;
10606 if (!mm)
10607 return;
10608 old_scan = READ_ONCE(mm->mm_cid_next_scan);
10609 next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
10610 if (!old_scan) {
10611 unsigned long res;
10612
10613 res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
10614 if (res != old_scan)
10615 old_scan = res;
10616 else
10617 old_scan = next_scan;
10618 }
10619 if (time_before(now, old_scan))
10620 return;
10621 if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
10622 return;
10623 cidmask = mm_cidmask(mm);
10624 /* Clear cids that were not recently used. */
10625 for_each_possible_cpu(cpu)
10626 sched_mm_cid_remote_clear_old(mm, cpu);
10627 weight = cpumask_weight(cidmask);
10628 /*
10629 * Clear cids that are greater than or equal to the cidmask weight to
10630 * recompact it.
10631 */
10632 for_each_possible_cpu(cpu)
10633 sched_mm_cid_remote_clear_weight(mm, cpu, weight);
10634}
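/*
 * Worked example of the scan above (hypothetical numbers): if an mm once
 * ran enough threads to hand out cids 0..7 but is now down to three that
 * recently ran, the first pass ages out the stale cids, leaving e.g.
 * {0, 5, 6} set and cpumask_weight(cidmask) == 3; the second pass then
 * clears the per-cpu cids >= 3, so on their next context switch the
 * remaining threads re-allocate from the lowest free bits and converge
 * onto cids 0..2.
 */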
10635
10636void init_sched_mm_cid(struct task_struct *t)
10637{
10638 struct mm_struct *mm = t->mm;
10639 int mm_users = 0;
10640
10641 if (mm) {
10642 mm_users = atomic_read(&mm->mm_users);
10643 if (mm_users == 1)
10644 mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
10645 }
10646 t->cid_work.next = &t->cid_work; /* Protect against double add */
10647 init_task_work(&t->cid_work, task_mm_cid_work);
10648}
10649
10650void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
10651{
10652 struct callback_head *work = &curr->cid_work;
10653 unsigned long now = jiffies;
10654
10655 if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
10656 work->next != work)
10657 return;
10658 if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
10659 return;
10660
10661 /* No page allocation under rq lock */
d40797d6 10662 task_work_add(curr, work, TWA_RESUME);
10663}
10664
10665void sched_mm_cid_exit_signals(struct task_struct *t)
10666{
10667 struct mm_struct *mm = t->mm;
10668 struct rq *rq;
10669
10670 if (!mm)
10671 return;
10672
10673 preempt_disable();
10674 rq = this_rq();
0e34600a 10675 guard(rq_lock_irqsave)(rq);
10676 preempt_enable_no_resched(); /* holding spinlock */
10677 WRITE_ONCE(t->mm_cid_active, 0);
10678 /*
10679 * Store t->mm_cid_active before loading per-mm/cpu cid.
10680 * Matches barrier in sched_mm_cid_remote_clear_old().
10681 */
10682 smp_mb();
10683 mm_cid_put(mm);
10684 t->last_mm_cid = t->mm_cid = -1;
10685}
10686
10687void sched_mm_cid_before_execve(struct task_struct *t)
10688{
10689 struct mm_struct *mm = t->mm;
223baf9d 10690 struct rq *rq;
10691
10692 if (!mm)
10693 return;
10694
10695 preempt_disable();
10696 rq = this_rq();
0e34600a 10697 guard(rq_lock_irqsave)(rq);
10698 preempt_enable_no_resched(); /* holding spinlock */
10699 WRITE_ONCE(t->mm_cid_active, 0);
10700 /*
10701 * Store t->mm_cid_active before loading per-mm/cpu cid.
10702 * Matches barrier in sched_mm_cid_remote_clear_old().
10703 */
10704 smp_mb();
10705 mm_cid_put(mm);
10706 t->last_mm_cid = t->mm_cid = -1;
10707}
10708
10709void sched_mm_cid_after_execve(struct task_struct *t)
10710{
10711 struct mm_struct *mm = t->mm;
223baf9d 10712 struct rq *rq;
af7f588d 10713
10714 if (!mm)
10715 return;
10716
10717 preempt_disable();
10718 rq = this_rq();
10719 scoped_guard (rq_lock_irqsave, rq) {
10720 preempt_enable_no_resched(); /* holding spinlock */
10721 WRITE_ONCE(t->mm_cid_active, 1);
10722 /*
10723 * Store t->mm_cid_active before loading per-mm/cpu cid.
10724 * Matches barrier in sched_mm_cid_remote_clear_old().
10725 */
10726 smp_mb();
7e019dcc 10727 t->last_mm_cid = t->mm_cid = mm_cid_get(rq, t, mm);
0e34600a 10728 }
10729}
10730
10731void sched_mm_cid_fork(struct task_struct *t)
10732{
bbd0b031 10733 WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
10734 t->mm_cid_active = 1;
10735}
10736#endif
10737
10738#ifdef CONFIG_SCHED_CLASS_EXT
10739void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
10740 struct sched_enq_and_set_ctx *ctx)
10741{
10742 struct rq *rq = task_rq(p);
10743
10744 lockdep_assert_rq_held(rq);
10745
10746 *ctx = (struct sched_enq_and_set_ctx){
10747 .p = p,
10748 .queue_flags = queue_flags,
10749 .queued = task_on_rq_queued(p),
10750 .running = task_current(rq, p),
10751 };
10752
10753 update_rq_clock(rq);
10754 if (ctx->queued)
10755 dequeue_task(rq, p, queue_flags | DEQUEUE_NOCLOCK);
10756 if (ctx->running)
10757 put_prev_task(rq, p);
10758}
10759
10760void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx)
10761{
10762 struct rq *rq = task_rq(ctx->p);
10763
10764 lockdep_assert_rq_held(rq);
10765
10766 if (ctx->queued)
10767 enqueue_task(rq, ctx->p, ctx->queue_flags | ENQUEUE_NOCLOCK);
10768 if (ctx->running)
10769 set_next_task(rq, ctx->p);
10770}
10771#endif /* CONFIG_SCHED_CLASS_EXT */