sched/numa: Show numa_group ID in /proc/sched_debug task listings
[linux-2.6-block.git] kernel/sched/sched.h

#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/slab.h>

#include "cpupri.h"
#include "cpudeadline.h"
#include "cpuacct.h"

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq);

#ifdef CONFIG_SMP
extern void update_cpu_load_active(struct rq *this_rq);
#else
static inline void update_cpu_load_active(struct rq *this_rq) { }
#endif

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
 * increased costs.
 */
#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
# define SCHED_LOAD_RESOLUTION	10
# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
#else
# define SCHED_LOAD_RESOLUTION	0
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

#define NICE_0_LOAD		SCHED_LOAD_SCALE
#define NICE_0_SHIFT		SCHED_LOAD_SHIFT
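
/*
 * Illustrative sketch (not part of the kernel API): with the increased
 * resolution disabled above, scale_load()/scale_load_down() are no-ops and a
 * nice-0 task contributes exactly NICE_0_LOAD (1024) to rq load.  Had
 * SCHED_LOAD_RESOLUTION been 10, the same weight would be carried internally
 * as 1024 << 10 and converted back on the way out, round-tripping exactly:
 *
 *	unsigned long w  = 1024;		// user-visible weight
 *	unsigned long lw = scale_load(w);	// internal representation
 *	WARN_ON(scale_load_down(lw) != w);	// always holds
 */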

/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE (10)

/*
 * These are the 'tuning knobs' of the scheduler:
 */

/*
 * single value that denotes runtime == period, ie unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

static inline bool dl_time_before(u64 a, u64 b)
{
	return (s64)(a - b) < 0;
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_time_before(a->deadline, b->deadline);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where we can:
 *  - store the maximum -deadline bandwidth of the system (the group);
 *  - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 *  - dl_total_bw array contains, in the i-th element, the currently
 *    allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls are
 * read. In turn, it can be changed by writing to its own control.
 */
struct dl_bandwidth {
	raw_spinlock_t dl_runtime_lock;
	u64 dl_runtime;
	u64 dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

extern struct dl_bw *dl_bw_of(int i);

struct dl_bw {
	raw_spinlock_t lock;
	u64 bw, total_bw;
};

static inline
void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw -= tsk_bw;
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw += tsk_bw;
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}
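
/*
 * Illustrative sketch (assumptions noted, not a definitive description of the
 * admission-control path): bandwidths here are fixed-point ratios of the form
 * to_ratio(period, runtime), which is assumed to be roughly
 * (runtime << 20) / period.  Admitting a task wanting 10ms every 100ms on a
 * 4-CPU root domain would then boil down to a check like:
 *
 *	u64 new_bw = (10ULL << 20) / 100;		// ~10% of one CPU
 *	bool reject = __dl_overflow(dl_b, 4, 0, new_bw);
 *
 * i.e. reject when dl_b->bw * cpus < total_bw - old_bw + new_bw.
 */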

extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchical_quota;
	u64 runtime_expires;

	int idle, period_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

#ifdef CONFIG_SMP
	atomic_long_t load_avg;
	atomic_t runnable_avg;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large,
 * and neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}
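
/*
 * Illustrative sketch (hypothetical visitor, not kernel API): a tg_visitor is
 * called once per task_group and returning non-zero stops the walk.  Counting
 * all groups below the root could look like:
 *
 *	static int count_tg(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *	rcu_read_lock();
 *	walk_tg_tree(count_tg, tg_nop, &nr);
 *	rcu_read_unlock();
 */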
extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
#endif

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS Load tracking
	 * Under CFS, load is tracked on a per-entity basis and aggregated up.
	 * This allows for the description of both thread and group usage (in
	 * the FAIR_GROUP_SCHED case).
	 * runnable_load_avg is the sum of the load_avg_contrib of the
	 * sched_entities on the rq.
	 * blocked_load_avg is similar to runnable_load_avg except that it is
	 * the sum for the blocked sched_entities on the rq.
	 * utilization_load_avg is the sum of the average running time of the
	 * sched_entities on the rq.
	 */
	unsigned long runnable_load_avg, blocked_load_avg, utilization_load_avg;
	atomic64_t decay_counter;
	u64 last_decay;
	atomic_long_t removed_load;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* Required to track per-cpu representation of a task_group */
	u32 tg_runnable_contrib;
	unsigned long tg_load_contrib;

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;
	u64 last_h_load_update;
	struct sched_entity *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
	 * list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_clock, throttled_clock_task;
	u64 throttled_clock_task_time;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#ifdef CONFIG_IRQ_WORK
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#ifdef HAVE_RT_PUSH_IPI
	int push_flags;
	int push_cpu;
	struct irq_work push_work;
	raw_spinlock_t push_lock;
#endif
#endif /* CONFIG_SMP */
	int rt_queued;

	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct task_group *tg;
#endif
};

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;

	unsigned long dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64 curr;
		u64 next;
	} earliest_dl;

	unsigned long dl_nr_migratory;
	int overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root pushable_dl_tasks_root;
	struct rb_node *pushable_dl_tasks_leftmost;
#else
	struct dl_bw dl_bw;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/* Indicate more than one runnable task for any CPU */
	bool overload;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t dlo_mask;
	atomic_t dlo_count;
	struct dl_bw dl_bw;
	struct cpudl cpudl;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;
};

extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code), lock
 * acquire operations must be ordered by ascending &runqueue.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int nr_numa_running;
	unsigned int nr_preferred_running;
#endif
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ_COMMON
	u64 nohz_stamp;
	unsigned long nohz_flags;
#endif
#ifdef CONFIG_NO_HZ_FULL
	unsigned long last_sched_tick;
#endif
	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;
	struct dl_rq dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;

	struct sched_avg avg;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	unsigned int clock_skip_update;
	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_capacity;
	unsigned long cpu_capacity_orig;

	unsigned char idle_balance;
	/* For active balancing */
	int post_schedule;
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;

	/* This is used to determine avg_idle's max value */
	u64 max_idle_balance_cost;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within a rcu lock section */
	struct cpuidle_state *idle_state;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return READ_ONCE(rq->clock);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock_task;
}

#define RQCF_REQ_SKIP	0x01
#define RQCF_ACT_SKIP	0x02

static inline void rq_clock_skip_update(struct rq *rq, bool skip)
{
	lockdep_assert_held(&rq->lock);
	if (skip)
		rq->clock_skip_update |= RQCF_REQ_SKIP;
	else
		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
}

#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

extern void sched_ttwu_pending(void);

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}
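
/*
 * Illustrative sketch (not kernel API): the helpers above walk rq->sd
 * upwards under RCU.  The cached per-cpu pointers declared below are derived
 * from them; for instance, the last-level-cache domain is roughly:
 *
 *	rcu_read_lock();
 *	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 *	rcu_read_unlock();
 *
 * which is approximately what the core code caches into sd_llc.
 */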
DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_busy);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);

struct sched_group_capacity {
	atomic_t ref;
	/*
	 * CPU capacity of this group, SCHED_LOAD_SCALE being max capacity
	 * for a single CPU.
	 */
	unsigned int capacity;
	unsigned long next_update;
	int imbalance; /* XXX unrelated to capacity but shared group state */
	/*
	 * Number of busy cpus in this group.
	 */
	atomic_t nr_busy_cpus;

	unsigned long cpumask[0]; /* iteration mask */
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_capacity *sgc;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * cpumask masking which cpus in the group are allowed to iterate up the domain
 * tree.
 */
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#else

static inline void sched_ttwu_pending(void) { }

#endif /* CONFIG_SMP */

#include "stats.h"
#include "auto_group.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
	task_thread_info(p)->cpu = cpu;
	p->wake_cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_key_##enabled(key);				\
}

#include "features.h"

#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
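
/*
 * Illustrative sketch (not kernel API): sched_feat() is meant to be a cheap
 * boolean test in hot paths.  With SCHED_DEBUG and jump labels it compiles
 * down to a static branch, otherwise to a bit test on sysctl_sched_features:
 *
 *	if (sched_feat(HRTICK) && hrtimer_is_hres_active(&rq->hrtick_timer))
 *		;	// take the high-resolution tick path
 *
 * HRTICK is one of the features defined in "features.h"; the same pattern is
 * used by hrtick_enabled() further down in this file.
 */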

#ifdef CONFIG_NUMA_BALANCING
#define sched_feat_numa(x) sched_feat(x)
#ifdef CONFIG_SCHED_DEBUG
#define numabalancing_enabled sched_feat_numa(NUMA)
#else
extern bool numabalancing_enabled;
#endif /* CONFIG_SCHED_DEBUG */
#else
#define sched_feat_numa(x) (0)
#define numabalancing_enabled (0)
#endif /* CONFIG_NUMA_BALANCING */

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

static inline int task_on_rq_migrating(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_MIGRATING;
}

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)	do { } while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch()	do { } while (0)
#endif

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

/*
 * wake flags
 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
#define WF_FORK		0x02		/* child wakeup after fork */
#define WF_MIGRATED	0x4		/* internal use, task got migrated */

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO		3
#define WMULT_IDLEPRIO		1431655765

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
static const int prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};

/*
 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetics by turning divisions
 * into multiplications:
 */
static const u32 prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
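
/*
 * Illustrative sketch of the arithmetic above (not kernel code): two
 * CPU-bound tasks at nice 0 and nice 5 share a CPU in proportion to their
 * weights, 1024 vs 335, i.e. roughly 75%/25%.  The inverse table turns the
 * per-tick division into a multiply and shift; for a nice-5 task
 * (array index 25) the weighted delta is approximately:
 *
 *	u64 delta = (u64)delta_exec * NICE_0_LOAD;	// weight-scaled
 *	delta = (delta * 12820798) >> 32;		// ~= delta_exec * 1024 / 335
 *
 * which is roughly what calc_delta_fair() in fair.c computes (the real code
 * additionally guards against intermediate overflow).
 */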

#define ENQUEUE_WAKEUP		1
#define ENQUEUE_HEAD		2
#ifdef CONFIG_SMP
#define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
#else
#define ENQUEUE_WAKING		0
#endif
#define ENQUEUE_REPLENISH	8

#define DEQUEUE_SLEEP		1

#define RETRY_TASK		((void *)-1UL)

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task) (struct rq *rq);
	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

	/*
	 * It is the responsibility of the pick_next_task() method that will
	 * return the next task to call put_prev_task() on the @prev task or
	 * something equivalent.
	 *
	 * May return RETRY_TASK when it finds a higher prio class has runnable
	 * tasks.
	 */
	struct task_struct * (*pick_next_task) (struct rq *rq,
						struct task_struct *prev);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);

	void (*post_schedule) (struct rq *this_rq);
	void (*task_waking) (struct task_struct *task);
	void (*task_woken) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork) (struct task_struct *p);
	void (*task_dead) (struct task_struct *p);

	/*
	 * The switched_from() call is allowed to drop rq->lock, therefore we
	 * cannot assume the switched_from/switched_to pair is serialized by
	 * rq->lock. They are however serialized by p->pi_lock.
	 */
	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio);

	unsigned int (*get_rr_interval) (struct rq *rq,
					 struct task_struct *task);

	void (*update_curr) (struct rq *rq);

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_move_group) (struct task_struct *p, int on_rq);
#endif
};

static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	prev->sched_class->put_prev_task(rq, prev);
}

#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
	for (class = sched_class_highest; class; class = class->next)

extern const struct sched_class stop_sched_class;
extern const struct sched_class dl_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;


#ifdef CONFIG_SMP

extern void update_group_capacity(struct sched_domain *sd, int cpu);

extern void trigger_load_balance(struct rq *rq);

extern void idle_enter_fair(struct rq *this_rq);
extern void idle_exit_fair(struct rq *this_rq);

#else

static inline void idle_enter_fair(struct rq *rq) { }
static inline void idle_exit_fair(struct rq *rq) { }

#endif

#ifdef CONFIG_CPU_IDLE
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
	rq->idle_state = idle_state;
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	WARN_ON(!rcu_read_lock_held());
	return rq->idle_state;
}
#else
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	return NULL;
}
#endif

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);

extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern struct dl_bandwidth def_dl_bandwidth;
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
extern void init_dl_task_timer(struct sched_dl_entity *dl_se);

unsigned long to_ratio(u64 period, u64 runtime);

extern void init_task_runnable_average(struct task_struct *p);

static inline void add_nr_running(struct rq *rq, unsigned count)
{
	unsigned prev_nr = rq->nr_running;

	rq->nr_running = prev_nr + count;

	if (prev_nr < 2 && rq->nr_running >= 2) {
#ifdef CONFIG_SMP
		if (!rq->rd->overload)
			rq->rd->overload = true;
#endif

#ifdef CONFIG_NO_HZ_FULL
		if (tick_nohz_full_cpu(rq->cpu)) {
			/*
			 * Tick is needed if more than one task runs on a CPU.
			 * Send the target an IPI to kick it out of nohz mode.
			 *
			 * We assume that IPI implies full memory barrier and the
			 * new value of rq->nr_running is visible on reception
			 * from the target.
			 */
			tick_nohz_full_kick_cpu(rq->cpu);
		}
#endif
	}
}

static inline void sub_nr_running(struct rq *rq, unsigned count)
{
	rq->nr_running -= count;
}

static inline void rq_last_tick_reset(struct rq *rq)
{
#ifdef CONFIG_NO_HZ_FULL
	rq->last_sched_tick = jiffies;
#endif
}

extern void update_rq_clock(struct rq *rq);

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

extern const_debug unsigned int sysctl_sched_time_avg;
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

static inline u64 sched_avg_period(void)
{
	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}

#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);

#ifndef arch_scale_freq_capacity
static __always_inline
unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif

static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
	sched_avg_update(rq);
}
#else
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
			return rq;
		raw_spin_unlock(&rq->lock);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, *flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old cpu in task_rq_lock, the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new cpu in task_rq_lock, the acquire will
		 * pair with the WMB to ensure we must then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
			return rq;
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}
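
/*
 * Illustrative sketch (not kernel API): the lock/unlock helpers below pair
 * with the lock functions above; typical use when poking at a remote task is:
 *
 *	unsigned long flags;
 *	struct rq *rq = task_rq_lock(p, &flags);
 *
 *	// p cannot change rq, nor be queued or dequeued, in here
 *
 *	task_rq_unlock(rq, p, &flags);
 */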
static inline void __task_rq_unlock(struct rq *rq)
	__releases(rq->lock)
{
	raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations. This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below. However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry. This favors lower cpu-ids and will
 * grant the double lock to lower cpus over higher ids under contention,
 * regardless of entry order into the function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					     SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			raw_spin_lock_nested(&busiest->lock,
					     SINGLE_DEPTH_NESTING);
	}
	return ret;
}

#endif /* CONFIG_PREEMPT */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	raw_spin_unlock(&busiest->lock);
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}

static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock_irq(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	raw_spin_lock(l1);
	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}
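
/*
 * Illustrative sketch (not kernel API): all of the double-lock helpers above
 * impose a global order (lower address/runqueue first), so two CPUs pulling
 * tasks towards each other cannot deadlock.  A caller is expected to do
 * something along the lines of:
 *
 *	local_irq_disable();
 *	double_rq_lock(this_rq, busiest);	// always locks the lower rq first
 *	// ... move a task from busiest to this_rq ...
 *	double_rq_unlock(this_rq, busiest);
 *	local_irq_enable();
 */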

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif

extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);

#ifdef CONFIG_SCHED_DEBUG
extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);
extern void print_dl_stats(struct seq_file *m, int cpu);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#endif

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq);
extern void init_dl_rq(struct dl_rq *dl_rq);

extern void cfs_bandwidth_usage_inc(void);
extern void cfs_bandwidth_usage_dec(void);

#ifdef CONFIG_NO_HZ_COMMON
enum rq_nohz_flag_bits {
	NOHZ_TICK_STOPPED,
	NOHZ_BALANCE_KICK,
};

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

DECLARE_PER_CPU(u64, cpu_hardirq_time);
DECLARE_PER_CPU(u64, cpu_softirq_time);

#ifndef CONFIG_64BIT
DECLARE_PER_CPU(seqcount_t, irq_time_seq);

static inline void irq_time_write_begin(void)
{
	__this_cpu_inc(irq_time_seq.sequence);
	smp_wmb();
}

static inline void irq_time_write_end(void)
{
	smp_wmb();
	__this_cpu_inc(irq_time_seq.sequence);
}

static inline u64 irq_time_read(int cpu)
{
	u64 irq_time;
	unsigned seq;

	do {
		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
		irq_time = per_cpu(cpu_softirq_time, cpu) +
			   per_cpu(cpu_hardirq_time, cpu);
	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));

	return irq_time;
}
#else /* CONFIG_64BIT */
static inline void irq_time_write_begin(void)
{
}

static inline void irq_time_write_end(void)
{
}

static inline u64 irq_time_read(int cpu)
{
	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
}
#endif /* CONFIG_64BIT */
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */