sched/deadline: Fix switched_from_dl() warning
kernel/sched/sched.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Scheduler internal types and methods:
 */
#include <linux/sched.h>

#include <linux/sched/autogroup.h>
#include <linux/sched/clock.h>
#include <linux/sched/coredump.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/cputime.h>
#include <linux/sched/deadline.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/idle.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/prio.h>
#include <linux/sched/rt.h>
#include <linux/sched/signal.h>
#include <linux/sched/stat.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/topology.h>
#include <linux/sched/user.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/xacct.h>

#include <uapi/linux/sched/types.h>

#include <linux/binfmts.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/context_tracking.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/cpuset.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delayacct.h>
#include <linux/init_task.h>
#include <linux/kprobes.h>
#include <linux/kthread.h>
#include <linux/membarrier.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/nmi.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/profile.h>
#include <linux/rcupdate_wait.h>
#include <linux/security.h>
#include <linux/stackprotector.h>
#include <linux/stop_machine.h>
#include <linux/suspend.h>
#include <linux/swait.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/tsacct_kern.h>

#include <asm/tlb.h>

#ifdef CONFIG_PARAVIRT
# include <asm/paravirt.h>
#endif

#include "cpupri.h"
#include "cpudeadline.h"

#ifdef CONFIG_SCHED_DEBUG
# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
#else
# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
#endif

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

#ifdef CONFIG_SMP
extern void cpu_load_update_active(struct rq *this_rq);
#else
static inline void cpu_load_update_active(struct rq *this_rq) { }
#endif

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

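/*
 * Worked example (illustrative only): with HZ == 250, NSEC_PER_SEC / HZ
 * is 4,000,000, so NS_TO_JIFFIES(20000000) == 5, i.e. 20ms expressed in
 * nanoseconds rounds down to 5 jiffies.
 */
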
/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
 * are pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
 * increase coverage and consistency always enable it on 64-bit platforms.
 */
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
#else
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load(w) and scale_load_down(w) to convert between them. The
 * following must be true:
 *
 *	scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
 *
 */
#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)

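/*
 * Illustrative sketch (not kernel code): on a 64-bit build with
 * SCHED_FIXEDPOINT_SHIFT == 10, the nice-0 weight of 1024 scales up to
 * 1024 << 10 == 1048576 == NICE_0_LOAD, and scale_load_down() undoes it:
 *
 *	unsigned long w = sched_prio_to_weight[20];	// nice 0 -> 1024
 *	unsigned long l = scale_load(w);		// == NICE_0_LOAD
 *	WARN_ON(scale_load_down(l) != w);		// round-trips exactly
 */
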
/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE		10

/*
 * Single value that denotes runtime == period, ie unlimited time.
 */
#define RUNTIME_INF		((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}
static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}
static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)

/*
 * !! For sched_setattr_nocheck() (kernel) only !!
 *
 * This is actually gross. :(
 *
 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
 * tasks, but still be able to sleep. We need this on platforms that cannot
 * atomically change clock frequency. Remove once fast switching will be
 * available on such platforms.
 *
 * SUGOV stands for SchedUtil GOVernor.
 */
#define SCHED_FLAG_SUGOV	0x10000000

static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
{
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
	return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
#else
	return false;
#endif
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_entity_is_special(a) ||
	       dl_time_before(a->deadline, b->deadline);
}

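/*
 * Illustrative sketch (hypothetical values): for two ordinary -deadline
 * entities this reduces to plain EDF - the earlier absolute deadline wins:
 *
 *	a.deadline = 1000000;		// absolute deadline, in ns
 *	b.deadline = 2000000;
 *	dl_entity_preempt(&a, &b);	// true: a's deadline is earlier
 *
 * A special (SUGOV) entity preempts regardless of its deadline.
 */
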
/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where:
 *  - store the maximum -deadline bandwidth of the system (the group);
 *  - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 *  - dl_total_bw array contains, in the i-th element, the currently
 *    allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls are
 * read. It in turn can be changed by writing on its own control.
 */
struct dl_bandwidth {
	raw_spinlock_t		dl_runtime_lock;
	u64			dl_runtime;
	u64			dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

struct dl_bw {
	raw_spinlock_t		lock;
	u64			bw;
	u64			total_bw;
};

static inline void __dl_update(struct dl_bw *dl_b, s64 bw);

static inline
void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw -= tsk_bw;
	__dl_update(dl_b, (s32)tsk_bw / cpus);
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw += tsk_bw;
	__dl_update(dl_b, -((s32)tsk_bw / cpus));
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}

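/*
 * Worked example (hypothetical numbers): with the default rt limits
 * (sched_rt_runtime_us = 950000, sched_rt_period_us = 1000000) the per-CPU
 * cap dl_b->bw encodes 95%, so on a 4-CPU root domain admission overflows
 * once
 *
 *	dl_b->bw * 4 < dl_b->total_bw - old_bw + new_bw
 *
 * i.e. once more than 3.8 CPUs worth of deadline bandwidth would be
 * allocated. Scaling the per-CPU cap by the CPU count (rather than
 * dividing total_bw) keeps the check to integer multiplication.
 */
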
extern void dl_change_utilization(struct task_struct *p, u64 new_bw);
extern void init_dl_bw(struct dl_bw *dl_b);
extern int  sched_dl_global_validate(void);
extern void sched_dl_do_global(void);
extern int  sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
extern bool __checkparam_dl(const struct sched_attr *attr);
extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
extern int  dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
extern int  dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern bool dl_cpu_busy(unsigned int cpu);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t		lock;
	ktime_t			period;
	u64			quota;
	u64			runtime;
	s64			hierarchical_quota;
	u64			runtime_expires;
	int			expires_seq;

	short			idle;
	short			period_active;
	struct hrtimer		period_timer;
	struct hrtimer		slack_timer;
	struct list_head	throttled_cfs_rq;

	/* Statistics: */
	int			nr_periods;
	int			nr_throttled;
	u64			throttled_time;
#endif
};

/* Task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each CPU */
	struct sched_entity	**se;
	/* runqueue "owned" by this group on each CPU */
	struct cfs_rq		**cfs_rq;
	unsigned long		shares;

#ifdef	CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t		load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	**rt_se;
	struct rt_rq		**rt_rq;

	struct rt_bandwidth	rt_bandwidth;
#endif

	struct rcu_head		rcu;
	struct list_head	list;

	struct task_group	*parent;
	struct list_head	siblings;
	struct list_head	children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup	*autogroup;
#endif

	struct cfs_bandwidth	cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large;
 * the same goes for the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 * limitation from this.)
 */
#define MIN_SHARES		(1UL << 1)
#define MAX_SHARES		(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

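/*
 * Usage sketch (illustrative only): a visitor that counts groups. Passing
 * tg_nop for the direction you don't care about is the usual idiom:
 *
 *	static int count_tg(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;		// non-zero would abort the walk
 *	}
 *
 *	int n = 0;
 *	rcu_read_lock();
 *	walk_tg_tree(count_tg, tg_nop, &n);
 *	rcu_read_unlock();
 */
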
extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);
extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
				    struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight	load;
	unsigned long		runnable_weight;
	unsigned int		nr_running;
	unsigned int		h_nr_running;

	u64			exec_clock;
	u64			min_vruntime;
#ifndef CONFIG_64BIT
	u64			min_vruntime_copy;
#endif

	struct rb_root_cached	tasks_timeline;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e when none are currently running).
	 */
	struct sched_entity	*curr;
	struct sched_entity	*next;
	struct sched_entity	*last;
	struct sched_entity	*skip;

#ifdef	CONFIG_SCHED_DEBUG
	unsigned int		nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg	avg;
#ifndef CONFIG_64BIT
	u64			load_last_update_time_copy;
#endif
	struct {
		raw_spinlock_t	lock ____cacheline_aligned;
		int		nr;
		unsigned long	load_avg;
		unsigned long	util_avg;
		unsigned long	runnable_sum;
	} removed;

#ifdef CONFIG_FAIR_GROUP_SCHED
	unsigned long		tg_load_avg_contrib;
	long			propagate;
	long			prop_runnable_sum;

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long		h_load;
	u64			last_h_load_update;
	struct sched_entity	*h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq		*rq;	/* CPU runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU.
	 * This list is used during load balance.
	 */
	int			on_list;
	struct list_head	leaf_cfs_rq_list;
	struct task_group	*tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int			runtime_enabled;
	int			expires_seq;
	u64			runtime_expires;
	s64			runtime_remaining;

	u64			throttled_clock;
	u64			throttled_clock_task;
	u64			throttled_clock_task_time;
	int			throttled;
	int			throttle_count;
	struct list_head	throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array	active;
	unsigned int		rt_nr_running;
	unsigned int		rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int		curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int		next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long		rt_nr_migratory;
	unsigned long		rt_nr_total;
	int			overloaded;
	struct plist_head	pushable_tasks;
#endif /* CONFIG_SMP */
	int			rt_queued;

	int			rt_throttled;
	u64			rt_time;
	u64			rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long		rt_nr_boosted;

	struct rq		*rq;
	struct task_group	*tg;
#endif
};

static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
{
	return rt_rq->rt_queued && rt_rq->rt_nr_running;
}

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root_cached	root;

	unsigned long		dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64		curr;
		u64		next;
	} earliest_dl;

	unsigned long		dl_nr_migratory;
	int			overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root_cached	pushable_dl_tasks_root;
#else
	struct dl_bw		dl_bw;
#endif
	/*
	 * "Active utilization" for this runqueue: increased when a
	 * task wakes up (becomes TASK_RUNNING) and decreased when a
	 * task blocks
	 */
	u64			running_bw;

	/*
	 * Utilization of the tasks "assigned" to this runqueue (including
	 * the tasks that are in runqueue and the tasks that executed on this
	 * CPU and blocked). Increased when a task moves to this runqueue, and
	 * decreased when the task moves away (migrates, changes scheduling
	 * policy, or terminates).
	 * This is needed to compute the "inactive utilization" for the
	 * runqueue (inactive utilization = this_bw - running_bw).
	 */
	u64			this_bw;
	u64			extra_bw;

	/*
	 * Inverse of the fraction of CPU utilization that can be reclaimed
	 * by the GRUB algorithm.
	 */
	u64			bw_ratio;
};

#ifdef CONFIG_SMP

static inline bool sched_asym_prefer(int a, int b)
{
	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
}

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member CPUs from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 *
 */
struct root_domain {
	atomic_t		refcount;
	atomic_t		rto_count;
	struct rcu_head		rcu;
	cpumask_var_t		span;
	cpumask_var_t		online;

	/* Indicate more than one runnable task for any CPU */
	bool			overload;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t		dlo_mask;
	atomic_t		dlo_count;
	struct dl_bw		dl_bw;
	struct cpudl		cpudl;

#ifdef HAVE_RT_PUSH_IPI
	/*
	 * For IPI pull requests, loop across the rto_mask.
	 */
	struct irq_work		rto_push_work;
	raw_spinlock_t		rto_lock;
	/* These are only updated and read within rto_lock */
	int			rto_loop;
	int			rto_cpu;
	/* These atomics are updated outside of a lock */
	atomic_t		rto_loop_next;
	atomic_t		rto_loop_start;
#endif
	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t		rto_mask;
	struct cpupri		cpupri;

	unsigned long		max_cpu_capacity;
};

extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;

extern void init_defrootdomain(void);
extern int sched_init_domains(const struct cpumask *cpu_map);
extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
extern void sched_get_rd(struct root_domain *rd);
extern void sched_put_rd(struct root_domain *rd);

#ifdef HAVE_RT_PUSH_IPI
extern void rto_push_irq_work_func(struct irq_work *work);
#endif
#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code), lock
 * acquire operations must be ordered by ascending &runqueue.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t		lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int		nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int		nr_numa_running;
	unsigned int		nr_preferred_running;
#endif
	#define CPU_LOAD_IDX_MAX 5
	unsigned long		cpu_load[CPU_LOAD_IDX_MAX];
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
	unsigned long		last_load_update_tick;
	unsigned long		last_blocked_load_update_tick;
	unsigned int		has_blocked_load;
#endif /* CONFIG_SMP */
	unsigned int		nohz_tick_stopped;
	atomic_t		nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */

	/* capture load from *all* tasks on this CPU: */
	struct load_weight	load;
	unsigned long		nr_load_updates;
	u64			nr_switches;

	struct cfs_rq		cfs;
	struct rt_rq		rt;
	struct dl_rq		dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this CPU: */
	struct list_head	leaf_cfs_rq_list;
	struct list_head	*tmp_alone_branch;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long		nr_uninterruptible;

	struct task_struct	*curr;
	struct task_struct	*idle;
	struct task_struct	*stop;
	unsigned long		next_balance;
	struct mm_struct	*prev_mm;

	unsigned int		clock_update_flags;
	u64			clock;
	u64			clock_task;

	atomic_t		nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain	*rd;
	struct sched_domain	*sd;

	unsigned long		cpu_capacity;
	unsigned long		cpu_capacity_orig;

	struct callback_head	*balance_callback;

	unsigned char		idle_balance;

	/* For active balancing */
	int			active_balance;
	int			push_cpu;
	struct cpu_stop_work	active_balance_work;

	/* CPU of this runqueue: */
	int			cpu;
	int			online;

	struct list_head cfs_tasks;

	u64			rt_avg;
	u64			age_stamp;
	u64			idle_stamp;
	u64			avg_idle;

	/* This is used to determine avg_idle's max value */
	u64			max_idle_balance_cost;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64			prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64			prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64			prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long		calc_load_update;
	long			calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int			hrtick_csd_pending;
	call_single_data_t	hrtick_csd;
#endif
	struct hrtimer		hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info	rq_sched_info;
	unsigned long long	rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int		yld_count;

	/* schedule() stats */
	unsigned int		sched_count;
	unsigned int		sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int		ttwu_count;
	unsigned int		ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head	wake_list;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within a rcu lock section */
	struct cpuidle_state	*idle_state;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

#ifdef CONFIG_SCHED_SMT

extern struct static_key_false sched_smt_present;

extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);
}

#else
static inline void update_idle_core(struct rq *rq) { }
#endif

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return READ_ONCE(rq->clock);
}

/*
 * rq::clock_update_flags bits
 *
 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
 *   call to __schedule(). This is an optimisation to avoid
 *   neighbouring rq clock updates.
 *
 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
 *   in effect and calls to update_rq_clock() are being ignored.
 *
 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
 *   made to update_rq_clock() since the last time rq::lock was pinned.
 *
 * If inside of __schedule(), clock_update_flags will have been
 * shifted left (a left shift is a cheap operation for the fast path
 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
 *
 *	if (rq->clock_update_flags >= RQCF_UPDATED)
 *
 * to check if %RQCF_UPDATED is set. It'll never be shifted more than
 * one position though, because the next rq_unpin_lock() will shift it
 * back.
 */
#define RQCF_REQ_SKIP		0x01
#define RQCF_ACT_SKIP		0x02
#define RQCF_UPDATED		0x04

static inline void assert_clock_updated(struct rq *rq)
{
	/*
	 * The only reason for not seeing a clock update since the
	 * last rq_pin_lock() is if we're currently skipping updates.
	 */
	SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	assert_clock_updated(rq);

	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	assert_clock_updated(rq);

	return rq->clock_task;
}

static inline void rq_clock_skip_update(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	rq->clock_update_flags |= RQCF_REQ_SKIP;
}

/*
 * See rt task throttling, which is the only time a skip
 * request is cancelled.
 */
static inline void rq_clock_cancel_skipupdate(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}

struct rq_flags {
	unsigned long flags;
	struct pin_cookie cookie;
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
	 * current pin context is stashed here in case it needs to be
	 * restored in rq_repin_lock().
	 */
	unsigned int clock_update_flags;
#endif
};

static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
{
	rf->cookie = lockdep_pin_lock(&rq->lock);

#ifdef CONFIG_SCHED_DEBUG
	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
	rf->clock_update_flags = 0;
#endif
}

static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
{
#ifdef CONFIG_SCHED_DEBUG
	if (rq->clock_update_flags > RQCF_ACT_SKIP)
		rf->clock_update_flags = RQCF_UPDATED;
#endif

	lockdep_unpin_lock(&rq->lock, rf->cookie);
}

static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
	lockdep_repin_lock(&rq->lock, rf->cookie);

#ifdef CONFIG_SCHED_DEBUG
	/*
	 * Restore the value we stashed in @rf for this pin context.
	 */
	rq->clock_update_flags |= rf->clock_update_flags;
#endif
}

#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
#endif

#ifdef CONFIG_NUMA
extern void sched_init_numa(void);
extern void sched_domains_numa_masks_set(unsigned int cpu);
extern void sched_domains_numa_masks_clear(unsigned int cpu);
#else
static inline void sched_init_numa(void) { }
static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
#else
static inline void
init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
{
}
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	if (unlikely(head->next))
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}

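/*
 * Usage sketch (illustrative only, mirroring how the RT/DL classes queue
 * their push/pull work): the callback is invoked later, at a point where
 * it can safely drop and retake rq->lock, which is why balance work that
 * juggles multiple rq locks is deferred through here rather than called
 * directly:
 *
 *	static DEFINE_PER_CPU(struct callback_head, my_balance_head);
 *
 *	static void my_balance_cb(struct rq *rq)
 *	{
 *		// runs late, may drop/retake rq->lock
 *	}
 *
 *	// with rq->lock held:
 *	queue_balance_callback(rq, &per_cpu(my_balance_head, rq->cpu),
 *			       my_balance_cb);
 */
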
extern void sched_ttwu_pending(void);

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The CPU whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given CPU.
 *
 * Returns the highest sched_domain of a CPU which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}

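/*
 * Usage sketch (illustrative only): this is how the per-CPU sd_llc pointer
 * below is derived in topology.c - the last-level cache is the highest
 * domain that still shares cache resources:
 *
 *	struct sched_domain *sd;
 *
 *	rcu_read_lock();
 *	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 *	rcu_read_unlock();
 */
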
DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);

struct sched_group_capacity {
	atomic_t		ref;
	/*
	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
	 * for a single CPU.
	 */
	unsigned long		capacity;
	unsigned long		min_capacity;		/* Min per-CPU capacity in group */
	unsigned long		next_update;
	int			imbalance;		/* XXX unrelated to capacity but shared group state */

#ifdef CONFIG_SCHED_DEBUG
	int			id;
#endif

	unsigned long		cpumask[0];		/* Balance mask */
};

struct sched_group {
	struct sched_group	*next;			/* Must be a circular list */
	atomic_t		ref;

	unsigned int		group_weight;
	struct sched_group_capacity *sgc;
	int			asym_prefer_cpu;	/* CPU of highest priority in group */

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long		cpumask[0];
};

static inline struct cpumask *sched_group_span(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * See build_balance_mask().
 */
static inline struct cpumask *group_balance_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first CPU in the cpumask of a sched_group.
 * @group: The group whose first CPU is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_span(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
void register_sched_domain_sysctl(void);
void dirty_sched_domain_sysctl(int cpu);
void unregister_sched_domain_sysctl(void);
#else
static inline void register_sched_domain_sysctl(void)
{
}
static inline void dirty_sched_domain_sysctl(int cpu)
{
}
static inline void unregister_sched_domain_sysctl(void)
{
}
#endif

#else

static inline void sched_ttwu_pending(void) { }

#endif /* CONFIG_SMP */

#include "stats.h"
#include "autogroup.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
#ifdef CONFIG_THREAD_INFO_IN_TASK
	p->cpu = cpu;
#else
	task_thread_info(p)->cpu = cpu;
#endif
	p->wake_cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)

/*
 * To support run-time toggling of sched features, all the translation units
 * (but core.c) reference the sysctl_sched_features defined in core.c.
 */
extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_key_##enabled(key);				\
}

#include "features.h"
#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))

#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */

/*
 * Each translation unit has its own copy of sysctl_sched_features to allow
 * constants propagation at compile time and compiler optimization based on
 * features default.
 */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
static const_debug __maybe_unused unsigned int sysctl_sched_features =
#include "features.h"
	0;
#undef SCHED_FEAT

#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */

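/*
 * Usage sketch (illustrative only): features declared in features.h, e.g.
 * SCHED_FEAT(HRTICK, false), are tested with sched_feat() and compile down
 * to either a jump label or a constant mask test depending on the config:
 *
 *	if (sched_feat(HRTICK))
 *		hrtick_start(rq, delay);
 */
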
extern struct static_key_false sched_numa_balancing;
extern struct static_key_false sched_schedstats;

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

static inline int task_on_rq_migrating(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_MIGRATING;
}

/*
 * wake flags
 */
#define WF_SYNC			0x01		/* Waker goes to sleep after wakeup */
#define WF_FORK			0x02		/* Child wakeup after fork */
#define WF_MIGRATED		0x4		/* Internal use, task got migrated */

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO		3
#define WMULT_IDLEPRIO		1431655765

extern const int		sched_prio_to_weight[40];
extern const u32		sched_prio_to_wmult[40];

/*
 * {de,en}queue flags:
 *
 * DEQUEUE_SLEEP  - task is no longer runnable
 * ENQUEUE_WAKEUP - task just became runnable
 *
 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
 *                are in a known state which allows modification. Such pairs
 *                should preserve as much state as possible.
 *
 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
 *        in the runqueue.
 *
 * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
 * ENQUEUE_MIGRATED  - the task was migrated during wakeup
 *
 */

#define DEQUEUE_SLEEP		0x01
#define DEQUEUE_SAVE		0x02 /* Matches ENQUEUE_RESTORE */
#define DEQUEUE_MOVE		0x04 /* Matches ENQUEUE_MOVE */
#define DEQUEUE_NOCLOCK		0x08 /* Matches ENQUEUE_NOCLOCK */

#define ENQUEUE_WAKEUP		0x01
#define ENQUEUE_RESTORE		0x02
#define ENQUEUE_MOVE		0x04
#define ENQUEUE_NOCLOCK		0x08

#define ENQUEUE_HEAD		0x10
#define ENQUEUE_REPLENISH	0x20
#ifdef CONFIG_SMP
#define ENQUEUE_MIGRATED	0x40
#else
#define ENQUEUE_MIGRATED	0x00
#endif

#define RETRY_TASK		((void *)-1UL)

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task)   (struct rq *rq);
	bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);

	/*
	 * It is the responsibility of the pick_next_task() method that will
	 * return the next task to call put_prev_task() on the @prev task or
	 * something equivalent.
	 *
	 * May return RETRY_TASK when it finds a higher prio class has runnable
	 * tasks.
	 */
	struct task_struct * (*pick_next_task)(struct rq *rq,
					       struct task_struct *prev,
					       struct rq_flags *rf);
	void (*put_prev_task)(struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
	void (*migrate_task_rq)(struct task_struct *p);

	void (*task_woken)(struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task)(struct rq *rq);
	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork)(struct task_struct *p);
	void (*task_dead)(struct task_struct *p);

	/*
	 * The switched_from() call is allowed to drop rq->lock, therefore we
	 * cannot assume the switched_from/switched_to pair is serialized by
	 * rq->lock. They are however serialized by p->pi_lock.
	 */
	void (*switched_from)(struct rq *this_rq, struct task_struct *task);
	void (*switched_to)  (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio);

	unsigned int (*get_rr_interval)(struct rq *rq,
					struct task_struct *task);

	void (*update_curr)(struct rq *rq);

#define TASK_SET_GROUP		0
#define TASK_MOVE_GROUP		1

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_change_group)(struct task_struct *p, int type);
#endif
};

static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	prev->sched_class->put_prev_task(rq, prev);
}

static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
{
	curr->sched_class->set_curr_task(rq);
}

#ifdef CONFIG_SMP
#define sched_class_highest (&stop_sched_class)
#else
#define sched_class_highest (&dl_sched_class)
#endif
#define for_each_class(class) \
   for (class = sched_class_highest; class; class = class->next)

extern const struct sched_class stop_sched_class;
extern const struct sched_class dl_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;

#ifdef CONFIG_SMP

extern void update_group_capacity(struct sched_domain *sd, int cpu);

extern void trigger_load_balance(struct rq *rq);

extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);

#endif

#ifdef CONFIG_CPU_IDLE
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
	rq->idle_state = idle_state;
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	SCHED_WARN_ON(!rcu_read_lock_held());

	return rq->idle_state;
}
#else
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	return NULL;
}
#endif

extern void schedule_idle(void);

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);

extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void reweight_task(struct task_struct *p, int prio);

extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern struct dl_bandwidth def_dl_bandwidth;
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);

#define BW_SHIFT		20
#define BW_UNIT			(1 << BW_SHIFT)
#define RATIO_SHIFT		8
unsigned long to_ratio(u64 period, u64 runtime);

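/*
 * Worked example (illustrative only): to_ratio() returns the utilization
 * runtime/period in BW_SHIFT fixed point, so a task with runtime = 10ms
 * and period = 100ms yields
 *
 *	(10000000ULL << BW_SHIFT) / 100000000ULL ~= 104857 ~= 0.1 * BW_UNIT
 */
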
extern void init_entity_runnable_average(struct sched_entity *se);
extern void post_init_entity_util_avg(struct sched_entity *se);

#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(struct rq *rq);
extern int __init sched_tick_offload_init(void);

/*
 * Tick may be needed by tasks in the runqueue depending on their policy and
 * requirements. If tick is needed, let's send the target an IPI to kick it
 * out of nohz mode if necessary.
 */
static inline void sched_update_tick_dependency(struct rq *rq)
{
	int cpu;

	if (!tick_nohz_full_enabled())
		return;

	cpu = cpu_of(rq);

	if (!tick_nohz_full_cpu(cpu))
		return;

	if (sched_can_stop_tick(rq))
		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
	else
		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
}
#else
static inline int sched_tick_offload_init(void) { return 0; }
static inline void sched_update_tick_dependency(struct rq *rq) { }
#endif

static inline void add_nr_running(struct rq *rq, unsigned count)
{
	unsigned prev_nr = rq->nr_running;

	rq->nr_running = prev_nr + count;

	if (prev_nr < 2 && rq->nr_running >= 2) {
#ifdef CONFIG_SMP
		if (!rq->rd->overload)
			rq->rd->overload = true;
#endif
	}

	sched_update_tick_dependency(rq);
}

static inline void sub_nr_running(struct rq *rq, unsigned count)
{
	rq->nr_running -= count;
	/* Check if we still need preemption */
	sched_update_tick_dependency(rq);
}

1686extern void update_rq_clock(struct rq *rq);
1687
1688extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1689extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1690
1691extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
1692
1693extern const_debug unsigned int sysctl_sched_time_avg;
1694extern const_debug unsigned int sysctl_sched_nr_migrate;
1695extern const_debug unsigned int sysctl_sched_migration_cost;
1696
1697static inline u64 sched_avg_period(void)
1698{
1699 return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1700}
1701
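/*
 * Illustrative arithmetic (assuming the default sysctl_sched_time_avg of
 * 1000ms): sched_avg_period() = 1000 * NSEC_PER_MSEC / 2 = 500ms worth of
 * nanoseconds, i.e. sched_avg_update() halves rq->rt_avg every 500ms.
 */
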
#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

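/*
 * Illustrative sketch (not part of this header; the helper name is
 * hypothetical): a scheduling class arms the hrtick to get a preemption
 * point more precise than the regular tick, e.g. at the end of the
 * current task's remaining slice.
 */
static inline void example_hrtick_arm(struct rq *rq, u64 slice_ns)
{
	if (hrtick_enabled(rq))
		hrtick_start(rq, slice_ns);
}
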
#else

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifndef arch_scale_freq_capacity
static __always_inline
unsigned long arch_scale_freq_capacity(int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif
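
/*
 * Illustrative use (a sketch of how load tracking applies the above):
 * time deltas are scaled by the current frequency capacity, e.g.
 *
 *	delta *= arch_scale_freq_capacity(cpu);
 *	delta >>= SCHED_CAPACITY_SHIFT;
 *
 * so a CPU running at half of its maximum frequency accrues utilization
 * at half the rate.
 */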

#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);

#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
		return sd->smt_gain / sd->span_weight;

	return SCHED_CAPACITY_SCALE;
}
#endif

static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta * arch_scale_freq_capacity(cpu_of(rq));
	sched_avg_update(rq);
}
#else
#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif

struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock);

struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock);

static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}

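/*
 * Illustrative sketch (not part of this header; the helper name is
 * hypothetical): the canonical pattern for operating on a task's
 * runqueue. Holding both p->pi_lock and rq->lock keeps the task from
 * being moved to another runqueue underneath us.
 */
static inline void example_task_rq_op(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);
	update_rq_clock(rq);
	/* ... p is pinned to this rq here ... */
	task_rq_unlock(rq, p, &rf);
}
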
static inline void
rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_lock_irqsave(&rq->lock, rf->flags);
	rq_pin_lock(rq, rf);
}

static inline void
rq_lock_irq(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_lock_irq(&rq->lock);
	rq_pin_lock(rq, rf);
}

static inline void
rq_lock(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_lock(&rq->lock);
	rq_pin_lock(rq, rf);
}

static inline void
rq_relock(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_lock(&rq->lock);
	rq_repin_lock(rq, rf);
}

static inline void
rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
}

static inline void
rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock_irq(&rq->lock);
}

static inline void
rq_unlock(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock(&rq->lock);
}

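/*
 * Illustrative sketch (not part of this header; the helper name is
 * hypothetical): the rq_lock*() wrappers pair the raw spinlock with
 * rq_pin_lock(), so unbalanced use of the pinned rq state can be caught.
 */
static inline void example_rq_op(struct rq *rq)
{
	struct rq_flags rf;

	rq_lock_irqsave(rq, &rf);
	update_rq_clock(rq);
	/* ... modify rq state ... */
	rq_unlock_irqrestore(rq, &rf);
}
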
#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations. This ensures that the double lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below. However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry. This favors lower CPU ids: under
 * contention, the double lock is granted to the lower-id CPU regardless
 * of the order in which the CPUs entered the function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					     SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			raw_spin_lock_nested(&busiest->lock,
					     SINGLE_DEPTH_NESTING);
	}
	return ret;
}

#endif /* CONFIG_PREEMPT */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	raw_spin_unlock(&busiest->lock);
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}

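/*
 * Illustrative sketch (not part of this header; the helper name is
 * hypothetical): double_lock_balance() can drop this_rq->lock before
 * re-acquiring both locks (it returns 1 in that case), so any state read
 * under this_rq->lock must be revalidated afterwards.
 */
static inline void example_pull_from(struct rq *this_rq, struct rq *busiest)
{
	if (double_lock_balance(this_rq, busiest)) {
		/* this_rq->lock was dropped: re-check cached state */
	}
	/* ... migrate tasks from busiest to this_rq ... */
	double_unlock_balance(this_rq, busiest);
}
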
static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock_irq(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	raw_spin_lock(l1);
	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}

extern void set_rq_online(struct rq *rq);
extern void set_rq_offline(struct rq *rq);
extern bool sched_smp_initialized;

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif

extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);

#ifdef CONFIG_SCHED_DEBUG
extern bool sched_debug_enabled;

extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);
extern void print_dl_stats(struct seq_file *m, int cpu);
extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
#ifdef CONFIG_NUMA_BALANCING
extern void
show_numa_stats(struct task_struct *p, struct seq_file *m);
extern void
print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		 unsigned long tpf, unsigned long gsf, unsigned long gpf);
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq);
extern void init_dl_rq(struct dl_rq *dl_rq);

extern void cfs_bandwidth_usage_inc(void);
extern void cfs_bandwidth_usage_dec(void);

#ifdef CONFIG_NO_HZ_COMMON
#define NOHZ_BALANCE_KICK_BIT	0
#define NOHZ_STATS_KICK_BIT	1

#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)

#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)

extern void nohz_balance_exit_idle(struct rq *rq);
#else
static inline void nohz_balance_exit_idle(struct rq *rq) { }
#endif

#ifdef CONFIG_SMP
static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
	struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
	int i;

	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	for_each_cpu_and(i, rd->span, cpu_active_mask) {
		struct rq *rq = cpu_rq(i);

		rq->dl.extra_bw += bw;
	}
}
#else
static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
	struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);

	dl->extra_bw += bw;
}
#endif
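
/*
 * Illustrative sketch (assumption: simplified from the __dl_add()/__dl_sub()
 * helpers earlier in this file; the helper name is hypothetical): when a
 * deadline task's bandwidth is admitted to a root domain, the spare bandwidth
 * available to GRUB reclaiming shrinks by an equal share on every member CPU.
 */
static inline void example_dl_admit(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw += tsk_bw;
	__dl_update(dl_b, -((s32)tsk_bw / cpus));
}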

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
struct irqtime {
	u64			total;
	u64			tick_delta;
	u64			irq_start_time;
	struct u64_stats_sync	sync;
};

DECLARE_PER_CPU(struct irqtime, cpu_irqtime);

/*
 * Returns the irqtime minus the softirq time computed by ksoftirqd.
 * Otherwise ksoftirqd's own runtime would be subtracted from its
 * sum_exec_runtime, and that sum would never move forward.
 */
static inline u64 irq_time_read(int cpu)
{
	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
	unsigned int seq;
	u64 total;

	do {
		seq = __u64_stats_fetch_begin(&irqtime->sync);
		total = irqtime->total;
	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));

	return total;
}
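
/*
 * Illustrative sketch of the writer side (assumption: simplified from
 * irqtime_account_irq() in cputime.c; the helper name is hypothetical):
 * updates are published inside a u64_stats write section so that
 * irq_time_read() retries instead of observing a torn 64-bit value on
 * 32-bit kernels.
 */
static inline void example_irqtime_add(u64 delta)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);

	u64_stats_update_begin(&irqtime->sync);
	irqtime->total += delta;
	irqtime->tick_delta += delta;
	u64_stats_update_end(&irqtime->sync);
}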
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

#ifdef CONFIG_CPU_FREQ
DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);

/**
 * cpufreq_update_util - Take a note about CPU utilization changes.
 * @rq: Runqueue to carry out the update for.
 * @flags: Update reason flags.
 *
 * This function is called by the scheduler on the CPU whose utilization is
 * being updated.
 *
 * It can only be called from RCU-sched read-side critical sections.
 *
 * The way cpufreq is currently arranged requires it to evaluate the CPU
 * performance state (frequency/voltage) on a regular basis to prevent it from
 * being stuck in a completely inadequate performance level for too long.
 * That is not guaranteed to happen if the updates are only triggered from CFS
 * and DL, though, because those updates may stop coming in if RT tasks are
 * the only ones active.
 *
 * As a workaround for that issue, this function is called periodically by the
 * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
 * but that really is a band-aid. Going forward it should be replaced with
 * solutions targeted more specifically at RT tasks.
 */
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
	struct update_util_data *data;

	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
						  cpu_of(rq)));
	if (data)
		data->func(data, rq_clock(rq), flags);
}
#else
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
#endif /* CONFIG_CPU_FREQ */
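
/*
 * Illustrative sketch (not part of this header; the helper name is
 * hypothetical): call sites invoke cpufreq_update_util() on the CPU being
 * updated, with the rq lock held, whenever utilization may have changed.
 */
static inline void example_cpufreq_kick(struct rq *rq)
{
	cpufreq_update_util(rq, 0);
}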

#ifdef arch_scale_freq_capacity
# ifndef arch_scale_freq_invariant
#  define arch_scale_freq_invariant()	true
# endif
#else
# define arch_scale_freq_invariant()	false
#endif

#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
static inline unsigned long cpu_util_dl(struct rq *rq)
{
	return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
}

static inline unsigned long cpu_util_cfs(struct rq *rq)
{
	unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);

	if (sched_feat(UTIL_EST)) {
		util = max_t(unsigned long, util,
			     READ_ONCE(rq->cfs.avg.util_est.enqueued));
	}

	return util;
}
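
/*
 * Illustrative sketch (assumption: simplified from the schedutil governor's
 * aggregation; the helper name is hypothetical): the CFS and DL contributions
 * are summed and clamped to the CPU's capacity when picking a frequency.
 */
static inline unsigned long example_cpu_util_total(struct rq *rq)
{
	unsigned long util = cpu_util_cfs(rq) + cpu_util_dl(rq);

	return min(util, arch_scale_cpu_capacity(NULL, cpu_of(rq)));
}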
#endif /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */