sched/fair: Reduce local_group logic
kernel/sched/fair.c
/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/mempolicy.h>
#include <linux/migrate.h>
#include <linux/task_work.h>

#include <trace/events/sched.h>

#include "sched.h"

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
        = SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/*
 * The exponential sliding window over which load is averaged for shares
 * distribution.
 * (default: 10msec)
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * default: 5 msec, units: microseconds
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
        lw->weight += inc;
        lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
        lw->weight -= dec;
        lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
        lw->weight = w;
        lw->inv_weight = 0;
}

/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static int get_update_sysctl_factor(void)
{
        unsigned int cpus = min_t(int, num_online_cpus(), 8);
        unsigned int factor;

        switch (sysctl_sched_tunable_scaling) {
        case SCHED_TUNABLESCALING_NONE:
                factor = 1;
                break;
        case SCHED_TUNABLESCALING_LINEAR:
                factor = cpus;
                break;
        case SCHED_TUNABLESCALING_LOG:
        default:
                factor = 1 + ilog2(cpus);
                break;
        }

        return factor;
}

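/*
 * Illustrative worked example (not part of the original source): with the
 * default SCHED_TUNABLESCALING_LOG policy on a 4-CPU machine,
 * factor = 1 + ilog2(4) = 3, so update_sysctl() below scales the default
 * 6ms normalized latency to 18ms and the 0.75ms minimum granularity to
 * 2.25ms. Since cpus is clamped to 8 above, the factor never exceeds
 * 1 + ilog2(8) = 4, i.e. 24ms latency on any larger machine.
 */
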
static void update_sysctl(void)
{
        unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
        (sysctl_##name = (factor) * normalized_sysctl_##name)
        SET_SYSCTL(sched_min_granularity);
        SET_SYSCTL(sched_latency);
        SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void sched_init_granularity(void)
{
        update_sysctl();
}

#if BITS_PER_LONG == 32
# define WMULT_CONST    (~0UL)
#else
# define WMULT_CONST    (1UL << 32)
#endif

#define WMULT_SHIFT     32

/*
 * Shift right and round:
 */
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))

/*
 * delta *= weight / lw
 */
static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
                struct load_weight *lw)
{
        u64 tmp;

        /*
         * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
         * entities since MIN_SHARES = 2. Treat weight as 1 if less than
         * 2^SCHED_LOAD_RESOLUTION.
         */
        if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
                tmp = (u64)delta_exec * scale_load_down(weight);
        else
                tmp = (u64)delta_exec;

        if (!lw->inv_weight) {
                unsigned long w = scale_load_down(lw->weight);

                if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
                        lw->inv_weight = 1;
                else if (unlikely(!w))
                        lw->inv_weight = WMULT_CONST;
                else
                        lw->inv_weight = WMULT_CONST / w;
        }

        /*
         * Check whether we'd overflow the 64-bit multiplication:
         */
        if (unlikely(tmp > WMULT_CONST))
                tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
                        WMULT_SHIFT/2);
        else
                tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);

        return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}

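/*
 * Illustrative worked example (not part of the original source): the
 * division by lw is done in fixed point via the cached inverse,
 * inv_weight = 2^32 / lw->weight, so delta * weight / lw becomes
 * (delta * weight * inv_weight) >> 32 with rounding from SRR(). E.g.
 * for delta_exec = 1000000 (1ms), weight = 1024 (a nice-0 task) and
 * lw->weight = 2048 (two nice-0 tasks), inv_weight = 2^32 / 2048 and
 * the result is ~1000000 * 1024 / 2048 = 500000, i.e. half the time.
 */
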

const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)      (!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
        WARN_ON_ONCE(!entity_is_task(se));
#endif
        return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
        for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return grp->my_q;
}

static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
                                       int force_update);

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
        if (!cfs_rq->on_list) {
                /*
                 * Ensure we either appear before our parent (if already
                 * enqueued) or force our parent to appear after us when it is
                 * enqueued. The fact that we always enqueue bottom-up
                 * reduces this to two cases.
                 */
                if (cfs_rq->tg->parent &&
                    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
                        list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
                                &rq_of(cfs_rq)->leaf_cfs_rq_list);
                } else {
                        list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
                                &rq_of(cfs_rq)->leaf_cfs_rq_list);
                }

                cfs_rq->on_list = 1;
                /* We should have no load, but we need to update last_decay. */
                update_cfs_rq_blocked_load(cfs_rq, 0);
        }
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
        if (cfs_rq->on_list) {
                list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
                cfs_rq->on_list = 0;
        }
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
        list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
        if (se->cfs_rq == pse->cfs_rq)
                return 1;

        return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
        int depth = 0;

        for_each_sched_entity(se)
                depth++;

        return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
        int se_depth, pse_depth;

        /*
         * preemption test can be made between sibling entities who are in the
         * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
         * both tasks until we find their ancestors who are siblings of common
         * parent.
         */

        /* First walk up until both entities are at same depth */
        se_depth = depth_se(*se);
        pse_depth = depth_se(*pse);

        while (se_depth > pse_depth) {
                se_depth--;
                *se = parent_entity(*se);
        }

        while (pse_depth > se_depth) {
                pse_depth--;
                *pse = parent_entity(*pse);
        }

        while (!is_same_group(*se, *pse)) {
                *se = parent_entity(*se);
                *pse = parent_entity(*pse);
        }
}

#else   /* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
        return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)      1

#define for_each_sched_entity(se) \
        for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        struct task_struct *p = task_of(se);
        struct rq *rq = task_rq(p);

        return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return NULL;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
        for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
        return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif  /* CONFIG_FAIR_GROUP_SCHED */

static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - max_vruntime);
        if (delta > 0)
                max_vruntime = vruntime;

        return max_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - min_vruntime);
        if (delta < 0)
                min_vruntime = vruntime;

        return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
                                struct sched_entity *b)
{
        return (s64)(a->vruntime - b->vruntime) < 0;
}

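/*
 * Illustrative note (not part of the original source): comparing via a
 * signed difference makes these helpers safe across u64 wraparound, as
 * long as the two vruntimes are within 2^63 of each other. E.g. with
 * max_vruntime = ULLONG_MAX - 10 and vruntime = 5, the subtraction
 * wraps to 16, (s64)16 > 0, and 5 is correctly treated as the later
 * (larger) vruntime.
 */
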
static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
        u64 vruntime = cfs_rq->min_vruntime;

        if (cfs_rq->curr)
                vruntime = cfs_rq->curr->vruntime;

        if (cfs_rq->rb_leftmost) {
                struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
                                                   struct sched_entity,
                                                   run_node);

                if (!cfs_rq->curr)
                        vruntime = se->vruntime;
                else
                        vruntime = min_vruntime(vruntime, se->vruntime);
        }

        /* ensure we never gain time by being placed backwards. */
        cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
        smp_wmb();
        cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
        struct rb_node *parent = NULL;
        struct sched_entity *entry;
        int leftmost = 1;

        /*
         * Find the right place in the rbtree:
         */
        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct sched_entity, run_node);
                /*
                 * We don't care about collisions. Nodes with
                 * the same key stay together.
                 */
                if (entity_before(se, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        /*
         * Maintain a cache of leftmost tree entries (it is frequently
         * used):
         */
        if (leftmost)
                cfs_rq->rb_leftmost = &se->run_node;

        rb_link_node(&se->run_node, parent, link);
        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        if (cfs_rq->rb_leftmost == &se->run_node) {
                struct rb_node *next_node;

                next_node = rb_next(&se->run_node);
                cfs_rq->rb_leftmost = next_node;
        }

        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
        struct rb_node *left = cfs_rq->rb_leftmost;

        if (!left)
                return NULL;

        return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
        struct rb_node *next = rb_next(&se->run_node);

        if (!next)
                return NULL;

        return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
        struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

        if (!last)
                return NULL;

        return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_proc_update_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
{
        int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        int factor = get_update_sysctl_factor();

        if (ret || !write)
                return ret;

        sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
                                        sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
        (normalized_sysctl_##name = sysctl_##name / (factor))
        WRT_SYSCTL(sched_min_granularity);
        WRT_SYSCTL(sched_latency);
        WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

        return 0;
}
#endif

/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
        if (unlikely(se->load.weight != NICE_0_LOAD))
                delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

        return delta;
}

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
        u64 period = sysctl_sched_latency;
        unsigned long nr_latency = sched_nr_latency;

        if (unlikely(nr_running > nr_latency)) {
                period = sysctl_sched_min_granularity;
                period *= nr_running;
        }

        return period;
}

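/*
 * Illustrative worked example (not part of the original source): with the
 * unscaled defaults (latency l = 6ms, min granularity 0.75ms, so
 * nl = sched_nr_latency = 8), up to 8 runnable tasks share one 6ms
 * period. With 16 runnable tasks the period is stretched to
 * 0.75ms * 16 = 12ms, keeping each slice at the minimum granularity
 * instead of shrinking below it.
 */
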
/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

        for_each_sched_entity(se) {
                struct load_weight *load;
                struct load_weight lw;

                cfs_rq = cfs_rq_of(se);
                load = &cfs_rq->load;

                if (unlikely(!se->on_rq)) {
                        lw = cfs_rq->load;

                        update_load_add(&lw, se->load.weight);
                        load = &lw;
                }
                slice = calc_delta_mine(slice, se->load.weight, load);
        }
        return slice;
}

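/*
 * Illustrative worked example (not part of the original source): two
 * runnable nice-0 tasks each have weight 1024 against a runqueue weight
 * of 2048, so each gets s = 6ms * 1024/2048 = 3ms of wall time per
 * period. A nice-0 task competing with a nice -5 task (weight 3121)
 * would instead get roughly 6ms * 1024/4145 ~= 1.5ms.
 */
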
/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

#ifdef CONFIG_SMP
static inline void __update_task_entity_contrib(struct sched_entity *se);

/* Give a new task initial runnable-average values so its load looks heavy in its infancy */
void init_task_runnable_average(struct task_struct *p)
{
        u32 slice;

        p->se.avg.decay_count = 0;
        slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
        p->se.avg.runnable_avg_sum = slice;
        p->se.avg.runnable_avg_period = slice;
        __update_task_entity_contrib(&p->se);
}
#else
void init_task_runnable_average(struct task_struct *p)
{
}
#endif

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
              unsigned long delta_exec)
{
        unsigned long delta_exec_weighted;

        schedstat_set(curr->statistics.exec_max,
                      max((u64)delta_exec, curr->statistics.exec_max));

        curr->sum_exec_runtime += delta_exec;
        schedstat_add(cfs_rq, exec_clock, delta_exec);
        delta_exec_weighted = calc_delta_fair(delta_exec, curr);

        curr->vruntime += delta_exec_weighted;
        update_min_vruntime(cfs_rq);
}

static void update_curr(struct cfs_rq *cfs_rq)
{
        struct sched_entity *curr = cfs_rq->curr;
        u64 now = rq_clock_task(rq_of(cfs_rq));
        unsigned long delta_exec;

        if (unlikely(!curr))
                return;

        /*
         * Get the amount of time the current task was running
         * since the last time we changed load (this cannot
         * overflow on 32 bits):
         */
        delta_exec = (unsigned long)(now - curr->exec_start);
        if (!delta_exec)
                return;

        __update_curr(cfs_rq, curr, delta_exec);
        curr->exec_start = now;

        if (entity_is_task(curr)) {
                struct task_struct *curtask = task_of(curr);

                trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
                cpuacct_charge(curtask, delta_exec);
                account_group_exec_runtime(curtask, delta_exec);
        }

        account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Are we enqueueing a waiting task? (for current tasks
         * a dequeue/enqueue event is a NOP)
         */
        if (se != cfs_rq->curr)
                update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
                        rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
        schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
        schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
                        rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
#ifdef CONFIG_SCHEDSTATS
        if (entity_is_task(se)) {
                trace_sched_stat_wait(task_of(se),
                        rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
        }
#endif
        schedstat_set(se->statistics.wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Mark the end of the wait period if dequeueing a
         * waiting task:
         */
        if (se != cfs_rq->curr)
                update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * We are starting a new run period:
         */
        se->exec_start = rq_clock_task(rq_of(cfs_rq));
}

/**************************************************
 * Scheduling class queueing methods:
 */

#ifdef CONFIG_NUMA_BALANCING
/*
 * numa task sample period in ms
 */
unsigned int sysctl_numa_balancing_scan_period_min = 100;
unsigned int sysctl_numa_balancing_scan_period_max = 100*50;
unsigned int sysctl_numa_balancing_scan_period_reset = 100*600;

/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;

/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;

static void task_numa_placement(struct task_struct *p)
{
        int seq;

        if (!p->mm)     /* for example, ksmd faulting in a user's mm */
                return;
        seq = ACCESS_ONCE(p->mm->numa_scan_seq);
        if (p->numa_scan_seq == seq)
                return;
        p->numa_scan_seq = seq;

        /* FIXME: Scheduling placement policy hints go here */
}

/*
 * Got a PROT_NONE fault for a page on @node.
 */
void task_numa_fault(int node, int pages, bool migrated)
{
        struct task_struct *p = current;

        if (!numabalancing_enabled)
                return;

        /* FIXME: Allocate task-specific structure for placement policy here */

        /*
         * If pages are properly placed (did not migrate) then scan slower.
         * This is reset periodically in case of phase changes
         */
        if (!migrated)
                p->numa_scan_period = min(sysctl_numa_balancing_scan_period_max,
                        p->numa_scan_period + jiffies_to_msecs(10));

        task_numa_placement(p);
}

static void reset_ptenuma_scan(struct task_struct *p)
{
        ACCESS_ONCE(p->mm->numa_scan_seq)++;
        p->mm->numa_scan_offset = 0;
}

/*
 * The expensive part of numa migration is done from task_work context.
 * Triggered from task_tick_numa().
 */
void task_numa_work(struct callback_head *work)
{
        unsigned long migrate, next_scan, now = jiffies;
        struct task_struct *p = current;
        struct mm_struct *mm = p->mm;
        struct vm_area_struct *vma;
        unsigned long start, end;
        long pages;

        WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));

        work->next = work; /* protect against double add */
        /*
         * Who cares about NUMA placement when they're dying.
         *
         * NOTE: make sure not to dereference p->mm before this check,
         * exit_task_work() happens _after_ exit_mm() so we could be called
         * without p->mm even though we still had it when we enqueued this
         * work.
         */
        if (p->flags & PF_EXITING)
                return;

        /*
         * We do not care about task placement until a task runs on a node
         * other than the first one used by the address space. This is
         * largely because migrations are driven by what CPU the task
         * is running on. If it's never scheduled on another node, it'll
         * not migrate so why bother trapping the fault.
         */
        if (mm->first_nid == NUMA_PTE_SCAN_INIT)
                mm->first_nid = numa_node_id();
        if (mm->first_nid != NUMA_PTE_SCAN_ACTIVE) {
                /* Are we running on a new node yet? */
                if (numa_node_id() == mm->first_nid &&
                    !sched_feat_numa(NUMA_FORCE))
                        return;

                mm->first_nid = NUMA_PTE_SCAN_ACTIVE;
        }

        /*
         * Reset the scan period if enough time has gone by. Objective is that
         * scanning will be reduced if pages are properly placed. As tasks
         * can enter different phases this needs to be re-examined. Lacking
         * proper tracking of reference behaviour, this blunt hammer is used.
         */
        migrate = mm->numa_next_reset;
        if (time_after(now, migrate)) {
                p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
                next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
                xchg(&mm->numa_next_reset, next_scan);
        }

        /*
         * Enforce maximal scan/migration frequency..
         */
        migrate = mm->numa_next_scan;
        if (time_before(now, migrate))
                return;

        if (p->numa_scan_period == 0)
                p->numa_scan_period = sysctl_numa_balancing_scan_period_min;

        next_scan = now + msecs_to_jiffies(p->numa_scan_period);
        if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
                return;

        /*
         * Do not set pte_numa if the current running node is rate-limited.
         * This loses statistics on the fault but if we are unwilling to
         * migrate to this node, it is less likely we can do useful work
         */
        if (migrate_ratelimited(numa_node_id()))
                return;

        start = mm->numa_scan_offset;
        pages = sysctl_numa_balancing_scan_size;
        pages <<= 20 - PAGE_SHIFT; /* MB in pages */
        if (!pages)
                return;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, start);
        if (!vma) {
                reset_ptenuma_scan(p);
                start = 0;
                vma = mm->mmap;
        }
        for (; vma; vma = vma->vm_next) {
                if (!vma_migratable(vma))
                        continue;

                /* Skip small VMAs. They are not likely to be of relevance */
                if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
                        continue;

                do {
                        start = max(start, vma->vm_start);
                        end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
                        end = min(end, vma->vm_end);
                        pages -= change_prot_numa(vma, start, end);

                        start = end;
                        if (pages <= 0)
                                goto out;
                } while (end != vma->vm_end);
        }

out:
        /*
         * It is possible to reach the end of the VMA list but the last few
         * VMAs are not guaranteed to be vma_migratable. If they are not, we
         * would find the !migratable VMA on the next scan but not reset the
         * scanner to the start so check it now.
         */
        if (vma)
                mm->numa_scan_offset = start;
        else
                reset_ptenuma_scan(p);
        up_read(&mm->mmap_sem);
}

/*
 * Drive the periodic memory faults..
 */
void task_tick_numa(struct rq *rq, struct task_struct *curr)
{
        struct callback_head *work = &curr->numa_work;
        u64 period, now;

        /*
         * We don't care about NUMA placement if we don't have memory.
         */
        if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
                return;

        /*
         * Using runtime rather than walltime has the dual advantage that
         * we (mostly) drive the selection from busy threads and that the
         * task needs to have done some actual work before we bother with
         * NUMA placement.
         */
        now = curr->se.sum_exec_runtime;
        period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;

        if (now - curr->node_stamp > period) {
                if (!curr->node_stamp)
                        curr->numa_scan_period = sysctl_numa_balancing_scan_period_min;
                curr->node_stamp = now;

                if (!time_before(jiffies, curr->mm->numa_next_scan)) {
                        init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
                        task_work_add(curr, work, true);
                }
        }
}
#else
static void task_tick_numa(struct rq *rq, struct task_struct *curr)
{
}
#endif /* CONFIG_NUMA_BALANCING */

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        update_load_add(&cfs_rq->load, se->load.weight);
        if (!parent_entity(se))
                update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
#ifdef CONFIG_SMP
        if (entity_is_task(se))
                list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
#endif
        cfs_rq->nr_running++;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        update_load_sub(&cfs_rq->load, se->load.weight);
        if (!parent_entity(se))
                update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
        if (entity_is_task(se))
                list_del_init(&se->group_node);
        cfs_rq->nr_running--;
}

#ifdef CONFIG_FAIR_GROUP_SCHED
# ifdef CONFIG_SMP
static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
{
        long tg_weight;

        /*
         * Use this CPU's actual weight instead of the last load_contribution
         * to gain a more accurate current total weight. See
         * update_cfs_rq_load_contribution().
         */
        tg_weight = atomic_long_read(&tg->load_avg);
        tg_weight -= cfs_rq->tg_load_contrib;
        tg_weight += cfs_rq->load.weight;

        return tg_weight;
}

static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
        long tg_weight, load, shares;

        tg_weight = calc_tg_weight(tg, cfs_rq);
        load = cfs_rq->load.weight;

        shares = (tg->shares * load);
        if (tg_weight)
                shares /= tg_weight;

        if (shares < MIN_SHARES)
                shares = MIN_SHARES;
        if (shares > tg->shares)
                shares = tg->shares;

        return shares;
}
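/*
 * Illustrative worked example (not part of the original source): a task
 * group with tg->shares = 1024 that is runnable on two CPUs, with a
 * per-cpu cfs_rq weight of 1024 on each, has tg_weight ~= 2048, so each
 * cfs_rq's group entity gets shares = 1024 * 1024 / 2048 = 512. The
 * MIN_SHARES and tg->shares clamps bound the result when a cfs_rq
 * carries almost none, or almost all, of the group's load.
 */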
# else /* CONFIG_SMP */
static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
        return tg->shares;
}
# endif /* CONFIG_SMP */
static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
                            unsigned long weight)
{
        if (se->on_rq) {
                /* commit outstanding execution time */
                if (cfs_rq->curr == se)
                        update_curr(cfs_rq);
                account_entity_dequeue(cfs_rq, se);
        }

        update_load_set(&se->load, weight);

        if (se->on_rq)
                account_entity_enqueue(cfs_rq, se);
}

static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);

static void update_cfs_shares(struct cfs_rq *cfs_rq)
{
        struct task_group *tg;
        struct sched_entity *se;
        long shares;

        tg = cfs_rq->tg;
        se = tg->se[cpu_of(rq_of(cfs_rq))];
        if (!se || throttled_hierarchy(cfs_rq))
                return;
#ifndef CONFIG_SMP
        if (likely(se->load.weight == tg->shares))
                return;
#endif
        shares = calc_cfs_shares(cfs_rq, tg);

        reweight_entity(cfs_rq_of(se), se, shares);
}
#else /* CONFIG_FAIR_GROUP_SCHED */
static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
{
}
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_SMP
/*
 * We choose a half-life close to 1 scheduling period.
 * Note: The tables below are dependent on this value.
 */
#define LOAD_AVG_PERIOD 32
#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */

/* Precomputed fixed inverse multiplies for multiplication by y^n */
static const u32 runnable_avg_yN_inv[] = {
        0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
        0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
        0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
        0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
        0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
        0x85aac367, 0x82cd8698,
};

/*
 * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
 * over-estimates when re-combining.
 */
static const u32 runnable_avg_yN_sum[] = {
            0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
         9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
        17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
};

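/*
 * Illustrative note (not part of the original source): with y^32 = 0.5,
 * y = 2^(-1/32) ~= 0.97857, and runnable_avg_yN_inv[n] holds
 * floor(y^n * 2^32); e.g. runnable_avg_yN_inv[1] = 0xfa83b2da, and
 * 0xfa83b2da / 2^32 ~= 0.9785. Likewise runnable_avg_yN_sum[2] =
 * floor(1024*(y + y^2)) = floor(1024 * 1.9362) = 1982, matching the
 * table entry above.
 */
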
/*
 * Approximate:
 *   val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
 */
static __always_inline u64 decay_load(u64 val, u64 n)
{
        unsigned int local_n;

        if (!n)
                return val;
        else if (unlikely(n > LOAD_AVG_PERIOD * 63))
                return 0;

        /* after bounds checking we can collapse to 32-bit */
        local_n = n;

        /*
         * As y^PERIOD = 1/2, we can combine
         *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
         * With a look-up table which covers y^n (n<PERIOD)
         *
         * To achieve constant time decay_load.
         */
        if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
                val >>= local_n / LOAD_AVG_PERIOD;
                local_n %= LOAD_AVG_PERIOD;
        }

        val *= runnable_avg_yN_inv[local_n];
        /* We don't use SRR here since we always want to round down. */
        return val >> 32;
}

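/*
 * Illustrative worked example (not part of the original source):
 * decay_load(1024, 32) first shifts val right by 32/32 = 1 period
 * (512), then multiplies by runnable_avg_yN_inv[0] = 0xffffffff and
 * shifts down by 32, yielding 511 after round-down - i.e. one
 * half-life halves the load. For n = 48, val is halved once and then
 * scaled by y^16 ~= 0.7071 (runnable_avg_yN_inv[16] = 0xb504f333).
 */
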
/*
 * For updates fully spanning n periods, the contribution to runnable
 * average will be: \Sum 1024*y^n
 *
 * We can compute this reasonably efficiently by combining:
 *   y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n <PERIOD}
 */
static u32 __compute_runnable_contrib(u64 n)
{
        u32 contrib = 0;

        if (likely(n <= LOAD_AVG_PERIOD))
                return runnable_avg_yN_sum[n];
        else if (unlikely(n >= LOAD_AVG_MAX_N))
                return LOAD_AVG_MAX;

        /* Compute \Sum k^n combining precomputed values for k^i, \Sum k^j */
        do {
                contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
                contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];

                n -= LOAD_AVG_PERIOD;
        } while (n > LOAD_AVG_PERIOD);

        contrib = decay_load(contrib, n);
        return contrib + runnable_avg_yN_sum[n];
}

/*
 * We can represent the historical contribution to runnable average as the
 * coefficients of a geometric series. To do this we sub-divide our runnable
 * history into segments of approximately 1ms (1024us); label the segment that
 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
 *
 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
 *       p0            p1           p2
 *      (now)       (~1ms ago)  (~2ms ago)
 *
 * Let u_i denote the fraction of p_i that the entity was runnable.
 *
 * We then designate the fractions u_i as our co-efficients, yielding the
 * following representation of historical load:
 *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
 *
 * We choose y based on the width of a reasonable scheduling period, fixing:
 *   y^32 = 0.5
 *
 * This means that the contribution to load ~32ms ago (u_32) will be weighted
 * approximately half as much as the contribution to load within the last ms
 * (u_0).
 *
 * When a period "rolls over" and we have new u_0`, multiplying the previous
 * sum again by y is sufficient to update:
 *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
 *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
 */
static __always_inline int __update_entity_runnable_avg(u64 now,
                                                        struct sched_avg *sa,
                                                        int runnable)
{
        u64 delta, periods;
        u32 runnable_contrib;
        int delta_w, decayed = 0;

        delta = now - sa->last_runnable_update;
        /*
         * This should only happen when time goes backwards, which it
         * unfortunately does during sched clock init when we swap over to TSC.
         */
        if ((s64)delta < 0) {
                sa->last_runnable_update = now;
                return 0;
        }

        /*
         * Use 1024ns as the unit of measurement since it's a reasonable
         * approximation of 1us and fast to compute.
         */
        delta >>= 10;
        if (!delta)
                return 0;
        sa->last_runnable_update = now;

        /* delta_w is the amount already accumulated against our next period */
        delta_w = sa->runnable_avg_period % 1024;
        if (delta + delta_w >= 1024) {
                /* period roll-over */
                decayed = 1;

                /*
                 * Now that we know we're crossing a period boundary, figure
                 * out how much from delta we need to complete the current
                 * period and accrue it.
                 */
                delta_w = 1024 - delta_w;
                if (runnable)
                        sa->runnable_avg_sum += delta_w;
                sa->runnable_avg_period += delta_w;

                delta -= delta_w;

                /* Figure out how many additional periods this update spans */
                periods = delta / 1024;
                delta %= 1024;

                sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
                                                  periods + 1);
                sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
                                                     periods + 1);

                /* Efficiently calculate \sum (1..n_period) 1024*y^i */
                runnable_contrib = __compute_runnable_contrib(periods);
                if (runnable)
                        sa->runnable_avg_sum += runnable_contrib;
                sa->runnable_avg_period += runnable_contrib;
        }

        /* Remainder of delta accrued against u_0` */
        if (runnable)
                sa->runnable_avg_sum += delta;
        sa->runnable_avg_period += delta;

        return decayed;
}

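/*
 * Illustrative worked example (not part of the original source): suppose
 * an entity was last updated mid-period with delta_w = 512 and the next
 * update comes 3.5ms later, i.e. delta ~= 3584 (in 1024ns units). The
 * first 512 units close the current period, the remaining 3072 give
 * periods = 3 full periods with 0 left over. Both sums are decayed by
 * y^4 (periods + 1), the three whole periods contribute
 * __compute_runnable_contrib(3) = 2941 if runnable, and any remainder
 * accrues against the new current period.
 */
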
/* Synchronize an entity's decay with its parenting cfs_rq. */
static inline u64 __synchronize_entity_decay(struct sched_entity *se)
{
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
        u64 decays = atomic64_read(&cfs_rq->decay_counter);

        decays -= se->avg.decay_count;
        if (!decays)
                return 0;

        se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
        se->avg.decay_count = 0;

        return decays;
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
                                                   int force_update)
{
        struct task_group *tg = cfs_rq->tg;
        long tg_contrib;

        tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
        tg_contrib -= cfs_rq->tg_load_contrib;

        if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
                atomic_long_add(tg_contrib, &tg->load_avg);
                cfs_rq->tg_load_contrib += tg_contrib;
        }
}

/*
 * Aggregate cfs_rq runnable averages into an equivalent task_group
 * representation for computing load contributions.
 */
static inline void __update_tg_runnable_avg(struct sched_avg *sa,
                                            struct cfs_rq *cfs_rq)
{
        struct task_group *tg = cfs_rq->tg;
        long contrib;

        /* The fraction of a cpu used by this cfs_rq */
        contrib = div_u64(sa->runnable_avg_sum << NICE_0_SHIFT,
                          sa->runnable_avg_period + 1);
        contrib -= cfs_rq->tg_runnable_contrib;

        if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
                atomic_add(contrib, &tg->runnable_avg);
                cfs_rq->tg_runnable_contrib += contrib;
        }
}

static inline void __update_group_entity_contrib(struct sched_entity *se)
{
        struct cfs_rq *cfs_rq = group_cfs_rq(se);
        struct task_group *tg = cfs_rq->tg;
        int runnable_avg;

        u64 contrib;

        contrib = cfs_rq->tg_load_contrib * tg->shares;
        se->avg.load_avg_contrib = div_u64(contrib,
                                           atomic_long_read(&tg->load_avg) + 1);

        /*
         * For group entities we need to compute a correction term in the case
         * that they are consuming <1 cpu so that we would contribute the same
         * load as a task of equal weight.
         *
         * Explicitly co-ordinating this measurement would be expensive, but
         * fortunately the sum of each cpus contribution forms a usable
         * lower-bound on the true value.
         *
         * Consider the aggregate of 2 contributions. Either they are disjoint
         * (and the sum represents true value) or they overlap and we are
         * understating by the aggregate of their overlap.
         *
         * Extending this to N cpus, for a given overlap, the maximum amount we
         * understate is then n_i(n_i+1)/2 * w_i where n_i is the number of
         * cpus that overlap for this interval and w_i is the interval width.
         *
         * On a small machine; the first term is well-bounded which bounds the
         * total error since w_i is a subset of the period. Whereas on a
         * larger machine, while this first term can be larger, if w_i is of
         * consequential size it is guaranteed to see n_i*w_i quickly converge
         * to our upper bound of 1-cpu.
         */
        runnable_avg = atomic_read(&tg->runnable_avg);
        if (runnable_avg < NICE_0_LOAD) {
                se->avg.load_avg_contrib *= runnable_avg;
                se->avg.load_avg_contrib >>= NICE_0_SHIFT;
        }
}
#else
static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
                                                   int force_update) {}
static inline void __update_tg_runnable_avg(struct sched_avg *sa,
                                            struct cfs_rq *cfs_rq) {}
static inline void __update_group_entity_contrib(struct sched_entity *se) {}
#endif

static inline void __update_task_entity_contrib(struct sched_entity *se)
{
        u32 contrib;

        /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
        contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
        contrib /= (se->avg.runnable_avg_period + 1);
        se->avg.load_avg_contrib = scale_load(contrib);
}

/* Compute the current contribution to load_avg by se, return any delta */
static long __update_entity_load_avg_contrib(struct sched_entity *se)
{
        long old_contrib = se->avg.load_avg_contrib;

        if (entity_is_task(se)) {
                __update_task_entity_contrib(se);
        } else {
                __update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
                __update_group_entity_contrib(se);
        }

        return se->avg.load_avg_contrib - old_contrib;
}

static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
                                                 long load_contrib)
{
        if (likely(load_contrib < cfs_rq->blocked_load_avg))
                cfs_rq->blocked_load_avg -= load_contrib;
        else
                cfs_rq->blocked_load_avg = 0;
}

static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);

/* Update a sched_entity's runnable average */
static inline void update_entity_load_avg(struct sched_entity *se,
                                          int update_cfs_rq)
{
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
        long contrib_delta;
        u64 now;

        /*
         * For a group entity we need to use their owned cfs_rq_clock_task() in
         * case they are the parent of a throttled hierarchy.
         */
        if (entity_is_task(se))
                now = cfs_rq_clock_task(cfs_rq);
        else
                now = cfs_rq_clock_task(group_cfs_rq(se));

        if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
                return;

        contrib_delta = __update_entity_load_avg_contrib(se);

        if (!update_cfs_rq)
                return;

        if (se->on_rq)
                cfs_rq->runnable_load_avg += contrib_delta;
        else
                subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
}

/*
 * Decay the load contributed by all blocked children and account this so that
 * their contribution may be appropriately discounted when they wake up.
 */
static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
{
        u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
        u64 decays;

        decays = now - cfs_rq->last_decay;
        if (!decays && !force_update)
                return;

        if (atomic_long_read(&cfs_rq->removed_load)) {
                unsigned long removed_load;
                removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
                subtract_blocked_load_contrib(cfs_rq, removed_load);
        }

        if (decays) {
                cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
                                                      decays);
                atomic64_add(decays, &cfs_rq->decay_counter);
                cfs_rq->last_decay = now;
        }

        __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
}

static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
{
        __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
        __update_tg_runnable_avg(&rq->avg, &rq->cfs);
}

/* Add the load generated by se into cfs_rq's child load-average */
static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
                                           struct sched_entity *se,
                                           int wakeup)
{
        /*
         * We track migrations using entity decay_count <= 0, on a wake-up
         * migration we use a negative decay count to track the remote decays
         * accumulated while sleeping.
         *
         * Newly forked tasks are enqueued with se->avg.decay_count == 0, they
         * are seen by enqueue_entity_load_avg() as a migration with an already
         * constructed load_avg_contrib.
         */
        if (unlikely(se->avg.decay_count <= 0)) {
                se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
                if (se->avg.decay_count) {
                        /*
                         * In a wake-up migration we have to approximate the
                         * time sleeping. This is because we can't synchronize
                         * clock_task between the two cpus, and it is not
                         * guaranteed to be read-safe. Instead, we can
                         * approximate this using our carried decays, which are
                         * explicitly atomically readable.
                         */
                        se->avg.last_runnable_update -= (-se->avg.decay_count)
                                                        << 20;
                        update_entity_load_avg(se, 0);
                        /* Indicate that we're now synchronized and on-rq */
                        se->avg.decay_count = 0;
                }
                wakeup = 0;
        } else {
                /*
                 * Task re-woke on same cpu (or else migrate_task_rq_fair()
                 * would have made count negative); we must be careful to avoid
                 * double-accounting blocked time after synchronizing decays.
                 */
                se->avg.last_runnable_update += __synchronize_entity_decay(se)
                                                << 20;
        }

        /* migrated tasks did not contribute to our blocked load */
        if (wakeup) {
                subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
                update_entity_load_avg(se, 0);
        }

        cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
        /* we force update consideration on load-balancer moves */
        update_cfs_rq_blocked_load(cfs_rq, !wakeup);
}

/*
 * Remove se's load from this cfs_rq child load-average, if the entity is
 * transitioning to a blocked state we track its projected decay using
 * blocked_load_avg.
 */
static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
                                           struct sched_entity *se,
                                           int sleep)
{
        update_entity_load_avg(se, 1);
        /* we force update consideration on load-balancer moves */
        update_cfs_rq_blocked_load(cfs_rq, !sleep);

        cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
        if (sleep) {
                cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
                se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
        } /* migrations, e.g. sleep=0 leave decay_count == 0 */
}

/*
 * Update the rq's load with the elapsed running time before entering
 * idle. If the last scheduled task is not a CFS task, idle_enter will
 * be the only way to update the runnable statistic.
 */
void idle_enter_fair(struct rq *this_rq)
{
        update_rq_runnable_avg(this_rq, 1);
}

/*
 * Update the rq's load with the elapsed idle time before a task is
 * scheduled. If the newly scheduled task is not a CFS task, idle_exit will
 * be the only way to update the runnable statistic.
 */
void idle_exit_fair(struct rq *this_rq)
{
        update_rq_runnable_avg(this_rq, 0);
}

#else
static inline void update_entity_load_avg(struct sched_entity *se,
                                          int update_cfs_rq) {}
static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
                                           struct sched_entity *se,
                                           int wakeup) {}
static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
                                           struct sched_entity *se,
                                           int sleep) {}
static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
                                              int force_update) {}
#endif

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
        struct task_struct *tsk = NULL;

        if (entity_is_task(se))
                tsk = task_of(se);

        if (se->statistics.sleep_start) {
                u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > se->statistics.sleep_max))
                        se->statistics.sleep_max = delta;

                se->statistics.sleep_start = 0;
                se->statistics.sum_sleep_runtime += delta;

                if (tsk) {
                        account_scheduler_latency(tsk, delta >> 10, 1);
                        trace_sched_stat_sleep(tsk, delta);
                }
        }
        if (se->statistics.block_start) {
                u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > se->statistics.block_max))
                        se->statistics.block_max = delta;

                se->statistics.block_start = 0;
                se->statistics.sum_sleep_runtime += delta;

                if (tsk) {
                        if (tsk->in_iowait) {
                                se->statistics.iowait_sum += delta;
                                se->statistics.iowait_count++;
                                trace_sched_stat_iowait(tsk, delta);
                        }

                        trace_sched_stat_blocked(tsk, delta);

                        /*
                         * Blocking time is in units of nanosecs, so shift by
                         * 20 to get a milliseconds-range estimation of the
                         * amount of time that the task spent sleeping:
                         */
                        if (unlikely(prof_on == SLEEP_PROFILING)) {
                                profile_hits(SLEEP_PROFILING,
                                             (void *)get_wchan(tsk),
                                             delta >> 20);
                        }
                        account_scheduler_latency(tsk, delta >> 10, 0);
                }
        }
#endif
}

static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
        s64 d = se->vruntime - cfs_rq->min_vruntime;

        if (d < 0)
                d = -d;

        if (d > 3*sysctl_sched_latency)
                schedstat_inc(cfs_rq, nr_spread_over);
#endif
}

aeb73b04
PZ
1724static void
1725place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
1726{
1af5f730 1727 u64 vruntime = cfs_rq->min_vruntime;
94dfb5e7 1728
2cb8600e
PZ
1729 /*
1730 * The 'current' period is already promised to the current tasks,
1731 * however the extra weight of the new task will slow them down a
1732 * little, place the new task so that it fits in the slot that
1733 * stays open at the end.
1734 */
94dfb5e7 1735 if (initial && sched_feat(START_DEBIT))
f9c0b095 1736 vruntime += sched_vslice(cfs_rq, se);
aeb73b04 1737
a2e7a7eb 1738 /* sleeps up to a single latency don't count. */
5ca9880c 1739 if (!initial) {
a2e7a7eb 1740 unsigned long thresh = sysctl_sched_latency;
a7be37ac 1741
a2e7a7eb
MG
1742 /*
1743 * Halve the sleep-time credit, to allow
1744 * for a gentler treatment of sleepers:
1745 */
1746 if (sched_feat(GENTLE_FAIR_SLEEPERS))
1747 thresh >>= 1;
51e0304c 1748
a2e7a7eb 1749 vruntime -= thresh;
aeb73b04
PZ
1750 }
1751
b5d9d734 1752 /* ensure we never gain time by being placed backwards. */
16c8f1c7 1753 se->vruntime = max_vruntime(se->vruntime, vruntime);
aeb73b04
PZ
1754}
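
/*
 * Illustrative sketch (not part of fair.c): the placement rules above,
 * modelled with plain integers and assuming START_DEBIT is enabled. A
 * new task is debited one vslice, a waking sleeper is credited up to
 * one latency period (halved with GENTLE_FAIR_SLEEPERS), and an entity
 * never moves backwards in virtual time.
 */
static long long example_place(long long cfs_min_vruntime,
			       long long se_vruntime,
			       long long vslice, long long latency,
			       int initial, int gentle_sleepers)
{
	long long vruntime = cfs_min_vruntime;

	if (initial)
		vruntime += vslice;			/* START_DEBIT */
	else
		vruntime -= gentle_sleepers ? latency / 2 : latency;

	/* never gain time by being placed backwards */
	return se_vruntime > vruntime ? se_vruntime : vruntime;
}
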
1755
d3d9dc33
PT
1756static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
1757
bf0f6f24 1758static void
88ec22d3 1759enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bf0f6f24 1760{
88ec22d3
PZ
1761 /*
1762 * Update the normalized vruntime before updating min_vruntime
0fc576d5 1763 * through calling update_curr().
88ec22d3 1764 */
371fd7e7 1765 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
88ec22d3
PZ
1766 se->vruntime += cfs_rq->min_vruntime;
1767
bf0f6f24 1768 /*
a2a2d680 1769 * Update run-time statistics of the 'current'.
bf0f6f24 1770 */
b7cc0896 1771 update_curr(cfs_rq);
f269ae04 1772 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
17bc14b7
LT
1773 account_entity_enqueue(cfs_rq, se);
1774 update_cfs_shares(cfs_rq);
bf0f6f24 1775
88ec22d3 1776 if (flags & ENQUEUE_WAKEUP) {
aeb73b04 1777 place_entity(cfs_rq, se, 0);
2396af69 1778 enqueue_sleeper(cfs_rq, se);
e9acbff6 1779 }
bf0f6f24 1780
d2417e5a 1781 update_stats_enqueue(cfs_rq, se);
ddc97297 1782 check_spread(cfs_rq, se);
83b699ed
SV
1783 if (se != cfs_rq->curr)
1784 __enqueue_entity(cfs_rq, se);
2069dd75 1785 se->on_rq = 1;
3d4b47b4 1786
d3d9dc33 1787 if (cfs_rq->nr_running == 1) {
3d4b47b4 1788 list_add_leaf_cfs_rq(cfs_rq);
d3d9dc33
PT
1789 check_enqueue_throttle(cfs_rq);
1790 }
bf0f6f24
IM
1791}
1792
2c13c919 1793static void __clear_buddies_last(struct sched_entity *se)
2002c695 1794{
2c13c919
RR
1795 for_each_sched_entity(se) {
1796 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1797 if (cfs_rq->last == se)
1798 cfs_rq->last = NULL;
1799 else
1800 break;
1801 }
1802}
2002c695 1803
2c13c919
RR
1804static void __clear_buddies_next(struct sched_entity *se)
1805{
1806 for_each_sched_entity(se) {
1807 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1808 if (cfs_rq->next == se)
1809 cfs_rq->next = NULL;
1810 else
1811 break;
1812 }
2002c695
PZ
1813}
1814
ac53db59
RR
1815static void __clear_buddies_skip(struct sched_entity *se)
1816{
1817 for_each_sched_entity(se) {
1818 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1819 if (cfs_rq->skip == se)
1820 cfs_rq->skip = NULL;
1821 else
1822 break;
1823 }
1824}
1825
a571bbea
PZ
1826static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
1827{
2c13c919
RR
1828 if (cfs_rq->last == se)
1829 __clear_buddies_last(se);
1830
1831 if (cfs_rq->next == se)
1832 __clear_buddies_next(se);
ac53db59
RR
1833
1834 if (cfs_rq->skip == se)
1835 __clear_buddies_skip(se);
a571bbea
PZ
1836}
1837
6c16a6dc 1838static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
d8b4986d 1839
bf0f6f24 1840static void
371fd7e7 1841dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bf0f6f24 1842{
a2a2d680
DA
1843 /*
1844 * Update run-time statistics of the 'current'.
1845 */
1846 update_curr(cfs_rq);
17bc14b7 1847 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
a2a2d680 1848
19b6a2e3 1849 update_stats_dequeue(cfs_rq, se);
371fd7e7 1850 if (flags & DEQUEUE_SLEEP) {
67e9fb2a 1851#ifdef CONFIG_SCHEDSTATS
bf0f6f24
IM
1852 if (entity_is_task(se)) {
1853 struct task_struct *tsk = task_of(se);
1854
1855 if (tsk->state & TASK_INTERRUPTIBLE)
78becc27 1856 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
bf0f6f24 1857 if (tsk->state & TASK_UNINTERRUPTIBLE)
78becc27 1858 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
bf0f6f24 1859 }
db36cc7d 1860#endif
67e9fb2a
PZ
1861 }
1862
2002c695 1863 clear_buddies(cfs_rq, se);
4793241b 1864
83b699ed 1865 if (se != cfs_rq->curr)
30cfdcfc 1866 __dequeue_entity(cfs_rq, se);
17bc14b7 1867 se->on_rq = 0;
30cfdcfc 1868 account_entity_dequeue(cfs_rq, se);
88ec22d3
PZ
1869
1870 /*
1871 * Normalize the entity after updating the min_vruntime because the
1872 * update can refer to the ->curr item and we need to reflect this
1873 * movement in our normalized position.
1874 */
371fd7e7 1875 if (!(flags & DEQUEUE_SLEEP))
88ec22d3 1876 se->vruntime -= cfs_rq->min_vruntime;
1e876231 1877
d8b4986d
PT
1878 /* return excess runtime on last dequeue */
1879 return_cfs_rq_runtime(cfs_rq);
1880
1e876231 1881 update_min_vruntime(cfs_rq);
17bc14b7 1882 update_cfs_shares(cfs_rq);
bf0f6f24
IM
1883}
1884
1885/*
1886 * Preempt the current task with a newly woken task if needed:
1887 */
7c92e54f 1888static void
2e09bf55 1889check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
bf0f6f24 1890{
11697830 1891 unsigned long ideal_runtime, delta_exec;
f4cfb33e
WX
1892 struct sched_entity *se;
1893 s64 delta;
11697830 1894
6d0f0ebd 1895 ideal_runtime = sched_slice(cfs_rq, curr);
11697830 1896 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
a9f3e2b5 1897 if (delta_exec > ideal_runtime) {
bf0f6f24 1898 resched_task(rq_of(cfs_rq)->curr);
a9f3e2b5
MG
1899 /*
1900 * The current task ran long enough, ensure it doesn't get
1901 * re-elected due to buddy favours.
1902 */
1903 clear_buddies(cfs_rq, curr);
f685ceac
MG
1904 return;
1905 }
1906
1907 /*
1908 * Ensure that a task that missed wakeup preemption by a
1909 * narrow margin doesn't have to wait for a full slice.
1910 * This also mitigates buddy-induced latencies under load.
1911 */
f685ceac
MG
1912 if (delta_exec < sysctl_sched_min_granularity)
1913 return;
1914
f4cfb33e
WX
1915 se = __pick_first_entity(cfs_rq);
1916 delta = curr->vruntime - se->vruntime;
f685ceac 1917
f4cfb33e
WX
1918 if (delta < 0)
1919 return;
d7d82944 1920
f4cfb33e
WX
1921 if (delta > ideal_runtime)
1922 resched_task(rq_of(cfs_rq)->curr);
bf0f6f24
IM
1923}
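
/*
 * Minimal sketch (illustrative only) of the two resched conditions
 * above: either the slice is exhausted, or - once past
 * sysctl_sched_min_granularity - 'curr' leads the leftmost entity in
 * virtual time by more than one ideal slice.
 */
static int example_should_resched(long long delta_exec,
				  long long ideal_runtime,
				  long long min_granularity,
				  long long vruntime_lead)
{
	if (delta_exec > ideal_runtime)
		return 1;			/* slice used up */
	if (delta_exec < min_granularity)
		return 0;			/* too early to bother */
	return vruntime_lead > ideal_runtime;	/* wakeup-preemption miss */
}
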
1924
83b699ed 1925static void
8494f412 1926set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 1927{
83b699ed
SV
1928 /* 'current' is not kept within the tree. */
1929 if (se->on_rq) {
1930 /*
1931 * Any task has to be enqueued before it gets to execute on
1932 * a CPU. So account for the time it spent waiting on the
1933 * runqueue.
1934 */
1935 update_stats_wait_end(cfs_rq, se);
1936 __dequeue_entity(cfs_rq, se);
1937 }
1938
79303e9e 1939 update_stats_curr_start(cfs_rq, se);
429d43bc 1940 cfs_rq->curr = se;
eba1ed4b
IM
1941#ifdef CONFIG_SCHEDSTATS
1942 /*
1943 * Track our maximum slice length, if the CPU's load is at
1944 * least twice that of our own weight (i.e. don't track it
1945 * when there are only lesser-weight tasks around):
1946 */
495eca49 1947 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
41acab88 1948 se->statistics.slice_max = max(se->statistics.slice_max,
eba1ed4b
IM
1949 se->sum_exec_runtime - se->prev_sum_exec_runtime);
1950 }
1951#endif
4a55b450 1952 se->prev_sum_exec_runtime = se->sum_exec_runtime;
bf0f6f24
IM
1953}
1954
3f3a4904
PZ
1955static int
1956wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
1957
ac53db59
RR
1958/*
1959 * Pick the next process, keeping these things in mind, in this order:
1960 * 1) keep things fair between processes/task groups
1961 * 2) pick the "next" process, since someone really wants that to run
1962 * 3) pick the "last" process, for cache locality
1963 * 4) do not run the "skip" process, if something else is available
1964 */
f4b6755f 1965static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
aa2ac252 1966{
ac53db59 1967 struct sched_entity *se = __pick_first_entity(cfs_rq);
f685ceac 1968 struct sched_entity *left = se;
f4b6755f 1969
ac53db59
RR
1970 /*
1971 * Avoid running the skip buddy, if running something else can
1972 * be done without getting too unfair.
1973 */
1974 if (cfs_rq->skip == se) {
1975 struct sched_entity *second = __pick_next_entity(se);
1976 if (second && wakeup_preempt_entity(second, left) < 1)
1977 se = second;
1978 }
aa2ac252 1979
f685ceac
MG
1980 /*
1981 * Prefer last buddy, try to return the CPU to a preempted task.
1982 */
1983 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
1984 se = cfs_rq->last;
1985
ac53db59
RR
1986 /*
1987 * Someone really wants this to run. If it's not unfair, run it.
1988 */
1989 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
1990 se = cfs_rq->next;
1991
f685ceac 1992 clear_buddies(cfs_rq, se);
4793241b
PZ
1993
1994 return se;
aa2ac252
PZ
1995}
1996
d3d9dc33
PT
1997static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
1998
ab6cde26 1999static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
bf0f6f24
IM
2000{
2001 /*
2002 * If still on the runqueue then deactivate_task()
2003 * was not called and update_curr() has to be done:
2004 */
2005 if (prev->on_rq)
b7cc0896 2006 update_curr(cfs_rq);
bf0f6f24 2007
d3d9dc33
PT
2008 /* throttle cfs_rqs exceeding runtime */
2009 check_cfs_rq_runtime(cfs_rq);
2010
ddc97297 2011 check_spread(cfs_rq, prev);
30cfdcfc 2012 if (prev->on_rq) {
5870db5b 2013 update_stats_wait_start(cfs_rq, prev);
30cfdcfc
DA
2014 /* Put 'current' back into the tree. */
2015 __enqueue_entity(cfs_rq, prev);
9d85f21c 2016 /* in !on_rq case, update occurred at dequeue */
9ee474f5 2017 update_entity_load_avg(prev, 1);
30cfdcfc 2018 }
429d43bc 2019 cfs_rq->curr = NULL;
bf0f6f24
IM
2020}
2021
8f4d37ec
PZ
2022static void
2023entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
bf0f6f24 2024{
bf0f6f24 2025 /*
30cfdcfc 2026 * Update run-time statistics of the 'current'.
bf0f6f24 2027 */
30cfdcfc 2028 update_curr(cfs_rq);
bf0f6f24 2029
9d85f21c
PT
2030 /*
2031 * Ensure that runnable average is periodically updated.
2032 */
9ee474f5 2033 update_entity_load_avg(curr, 1);
aff3e498 2034 update_cfs_rq_blocked_load(cfs_rq, 1);
bf0bd948 2035 update_cfs_shares(cfs_rq);
9d85f21c 2036
8f4d37ec
PZ
2037#ifdef CONFIG_SCHED_HRTICK
2038 /*
2039 * queued ticks are scheduled to match the slice, so don't bother
2040 * validating it and just reschedule.
2041 */
983ed7a6
HH
2042 if (queued) {
2043 resched_task(rq_of(cfs_rq)->curr);
2044 return;
2045 }
8f4d37ec
PZ
2046 /*
2047 * don't let the period tick interfere with the hrtick preemption
2048 */
2049 if (!sched_feat(DOUBLE_TICK) &&
2050 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
2051 return;
2052#endif
2053
2c2efaed 2054 if (cfs_rq->nr_running > 1)
2e09bf55 2055 check_preempt_tick(cfs_rq, curr);
bf0f6f24
IM
2056}
2057
ab84d31e
PT
2058
2059/**************************************************
2060 * CFS bandwidth control machinery
2061 */
2062
2063#ifdef CONFIG_CFS_BANDWIDTH
029632fb
PZ
2064
2065#ifdef HAVE_JUMP_LABEL
c5905afb 2066static struct static_key __cfs_bandwidth_used;
029632fb
PZ
2067
2068static inline bool cfs_bandwidth_used(void)
2069{
c5905afb 2070 return static_key_false(&__cfs_bandwidth_used);
029632fb
PZ
2071}
2072
2073void account_cfs_bandwidth_used(int enabled, int was_enabled)
2074{
2075 /* only need to count groups transitioning between enabled/!enabled */
2076 if (enabled && !was_enabled)
c5905afb 2077 static_key_slow_inc(&__cfs_bandwidth_used);
029632fb 2078 else if (!enabled && was_enabled)
c5905afb 2079 static_key_slow_dec(&__cfs_bandwidth_used);
029632fb
PZ
2080}
2081#else /* HAVE_JUMP_LABEL */
2082static bool cfs_bandwidth_used(void)
2083{
2084 return true;
2085}
2086
2087void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
2088#endif /* HAVE_JUMP_LABEL */
2089
ab84d31e
PT
2090/*
2091 * default period for cfs group bandwidth.
2092 * default: 0.1s, units: nanoseconds
2093 */
2094static inline u64 default_cfs_period(void)
2095{
2096 return 100000000ULL;
2097}
ec12cb7f
PT
2098
2099static inline u64 sched_cfs_bandwidth_slice(void)
2100{
2101 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
2102}
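
/*
 * Worked example (illustrative, assuming the default 5000us value of
 * sysctl_sched_cfs_bandwidth_slice): the period above is 100ms, and
 * each per-cpu pool grabs runtime from the global pool in 5ms slices,
 * so a 20ms quota is exhausted after four grabs.
 */
static inline unsigned long long example_grabs_per_quota(
		unsigned long long quota_ns, unsigned long long slice_ns)
{
	return quota_ns / slice_ns;	/* 20000000 / 5000000 = 4 */
}
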
2103
a9cf55b2
PT
2104/*
2105 * Replenish runtime according to assigned quota and update expiration time.
2106 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
2107 * additional synchronization around rq->lock.
2108 *
2109 * requires cfs_b->lock
2110 */
029632fb 2111void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
a9cf55b2
PT
2112{
2113 u64 now;
2114
2115 if (cfs_b->quota == RUNTIME_INF)
2116 return;
2117
2118 now = sched_clock_cpu(smp_processor_id());
2119 cfs_b->runtime = cfs_b->quota;
2120 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
2121}
2122
029632fb
PZ
2123static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2124{
2125 return &tg->cfs_bandwidth;
2126}
2127
f1b17280
PT
2128/* rq->clock_task normalized against any time this cfs_rq has spent throttled */
2129static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2130{
2131 if (unlikely(cfs_rq->throttle_count))
2132 return cfs_rq->throttled_clock_task;
2133
78becc27 2134 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
f1b17280
PT
2135}
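
/*
 * Illustrative timeline for the clock above: a cfs_rq that has spent
 * 30ms of a 100ms rq clock_task window throttled reads 70ms here, and
 * the value freezes while throttle_count is non-zero.
 */
static inline unsigned long long example_cfs_rq_clock(
		unsigned long long rq_clock_task_ns,
		unsigned long long throttled_ns)
{
	return rq_clock_task_ns - throttled_ns;	/* 100ms - 30ms = 70ms */
}
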
2136
85dac906
PT
2137/* returns 0 on failure to allocate runtime */
2138static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
ec12cb7f
PT
2139{
2140 struct task_group *tg = cfs_rq->tg;
2141 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
a9cf55b2 2142 u64 amount = 0, min_amount, expires;
ec12cb7f
PT
2143
2144 /* note: this is a positive sum as runtime_remaining <= 0 */
2145 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
2146
2147 raw_spin_lock(&cfs_b->lock);
2148 if (cfs_b->quota == RUNTIME_INF)
2149 amount = min_amount;
58088ad0 2150 else {
a9cf55b2
PT
2151 /*
2152 * If the bandwidth pool has become inactive, then at least one
2153 * period must have elapsed since the last consumption.
2154 * Refresh the global state and ensure the bandwidth timer becomes
2155 * active.
2156 */
2157 if (!cfs_b->timer_active) {
2158 __refill_cfs_bandwidth_runtime(cfs_b);
58088ad0 2159 __start_cfs_bandwidth(cfs_b);
a9cf55b2 2160 }
58088ad0
PT
2161
2162 if (cfs_b->runtime > 0) {
2163 amount = min(cfs_b->runtime, min_amount);
2164 cfs_b->runtime -= amount;
2165 cfs_b->idle = 0;
2166 }
ec12cb7f 2167 }
a9cf55b2 2168 expires = cfs_b->runtime_expires;
ec12cb7f
PT
2169 raw_spin_unlock(&cfs_b->lock);
2170
2171 cfs_rq->runtime_remaining += amount;
a9cf55b2
PT
2172 /*
2173 * we may have advanced our local expiration to account for allowed
2174 * spread between our sched_clock and the one on which runtime was
2175 * issued.
2176 */
2177 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
2178 cfs_rq->runtime_expires = expires;
85dac906
PT
2179
2180 return cfs_rq->runtime_remaining > 0;
ec12cb7f
PT
2181}
2182
a9cf55b2
PT
2183/*
2184 * Note: This depends on the synchronization provided by sched_clock and the
2185 * fact that rq->clock snapshots this value.
2186 */
2187static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
ec12cb7f 2188{
a9cf55b2 2189 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
a9cf55b2
PT
2190
2191 /* if the deadline is ahead of our clock, nothing to do */
78becc27 2192 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
ec12cb7f
PT
2193 return;
2194
a9cf55b2
PT
2195 if (cfs_rq->runtime_remaining < 0)
2196 return;
2197
2198 /*
2199 * If the local deadline has passed we have to consider the
2200 * possibility that our sched_clock is 'fast' and the global deadline
2201 * has not truly expired.
2202 *
2203 * Fortunately we can determine whether this is the case by checking
2204 * whether the global deadline has advanced.
2205 */
2206
2207 if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
2208 /* extend local deadline, drift is bounded above by 2 ticks */
2209 cfs_rq->runtime_expires += TICK_NSEC;
2210 } else {
2211 /* global deadline is ahead, expiration has passed */
2212 cfs_rq->runtime_remaining = 0;
2213 }
2214}
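
/*
 * Illustrative sketch of the expiry decision above: when the local
 * deadline has passed, either the global pool moved on too (the quota
 * truly expired) or only our clock ran ahead, in which case the local
 * deadline is nudged forward by one tick.
 */
static void example_expire(long long *local_expires, long long *remaining,
			   long long global_expires, long long tick_ns)
{
	if (*local_expires - global_expires >= 0)
		*local_expires += tick_ns;	/* bounded clock drift */
	else
		*remaining = 0;			/* deadline really passed */
}
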
2215
2216static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2217 unsigned long delta_exec)
2218{
2219 /* dock delta_exec before expiring quota (as it could span periods) */
ec12cb7f 2220 cfs_rq->runtime_remaining -= delta_exec;
a9cf55b2
PT
2221 expire_cfs_rq_runtime(cfs_rq);
2222
2223 if (likely(cfs_rq->runtime_remaining > 0))
ec12cb7f
PT
2224 return;
2225
85dac906
PT
2226 /*
2227 * if we're unable to extend our runtime we resched so that the active
2228 * hierarchy can be throttled
2229 */
2230 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
2231 resched_task(rq_of(cfs_rq)->curr);
ec12cb7f
PT
2232}
2233
6c16a6dc
PZ
2234static __always_inline
2235void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
ec12cb7f 2236{
56f570e5 2237 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
ec12cb7f
PT
2238 return;
2239
2240 __account_cfs_rq_runtime(cfs_rq, delta_exec);
2241}
2242
85dac906
PT
2243static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2244{
56f570e5 2245 return cfs_bandwidth_used() && cfs_rq->throttled;
85dac906
PT
2246}
2247
64660c86
PT
2248/* check whether cfs_rq, or any parent, is throttled */
2249static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2250{
56f570e5 2251 return cfs_bandwidth_used() && cfs_rq->throttle_count;
64660c86
PT
2252}
2253
2254/*
2255 * Ensure that neither of the group entities corresponding to src_cpu or
2256 * dest_cpu are members of a throttled hierarchy when performing group
2257 * load-balance operations.
2258 */
2259static inline int throttled_lb_pair(struct task_group *tg,
2260 int src_cpu, int dest_cpu)
2261{
2262 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
2263
2264 src_cfs_rq = tg->cfs_rq[src_cpu];
2265 dest_cfs_rq = tg->cfs_rq[dest_cpu];
2266
2267 return throttled_hierarchy(src_cfs_rq) ||
2268 throttled_hierarchy(dest_cfs_rq);
2269}
2270
2271/* updated child weight may affect parent so we have to do this bottom up */
2272static int tg_unthrottle_up(struct task_group *tg, void *data)
2273{
2274 struct rq *rq = data;
2275 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2276
2277 cfs_rq->throttle_count--;
2278#ifdef CONFIG_SMP
2279 if (!cfs_rq->throttle_count) {
f1b17280 2280 /* adjust cfs_rq_clock_task() */
78becc27 2281 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
f1b17280 2282 cfs_rq->throttled_clock_task;
64660c86
PT
2283 }
2284#endif
2285
2286 return 0;
2287}
2288
2289static int tg_throttle_down(struct task_group *tg, void *data)
2290{
2291 struct rq *rq = data;
2292 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2293
82958366
PT
2294 /* group is entering throttled state, stop time */
2295 if (!cfs_rq->throttle_count)
78becc27 2296 cfs_rq->throttled_clock_task = rq_clock_task(rq);
64660c86
PT
2297 cfs_rq->throttle_count++;
2298
2299 return 0;
2300}
2301
d3d9dc33 2302static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
85dac906
PT
2303{
2304 struct rq *rq = rq_of(cfs_rq);
2305 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2306 struct sched_entity *se;
2307 long task_delta, dequeue = 1;
2308
2309 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
2310
f1b17280 2311 /* freeze hierarchy runnable averages while throttled */
64660c86
PT
2312 rcu_read_lock();
2313 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
2314 rcu_read_unlock();
85dac906
PT
2315
2316 task_delta = cfs_rq->h_nr_running;
2317 for_each_sched_entity(se) {
2318 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
2319 /* throttled entity or throttle-on-deactivate */
2320 if (!se->on_rq)
2321 break;
2322
2323 if (dequeue)
2324 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
2325 qcfs_rq->h_nr_running -= task_delta;
2326
2327 if (qcfs_rq->load.weight)
2328 dequeue = 0;
2329 }
2330
2331 if (!se)
2332 rq->nr_running -= task_delta;
2333
2334 cfs_rq->throttled = 1;
78becc27 2335 cfs_rq->throttled_clock = rq_clock(rq);
85dac906
PT
2336 raw_spin_lock(&cfs_b->lock);
2337 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
2338 raw_spin_unlock(&cfs_b->lock);
2339}
2340
029632fb 2341void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
671fd9da
PT
2342{
2343 struct rq *rq = rq_of(cfs_rq);
2344 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2345 struct sched_entity *se;
2346 int enqueue = 1;
2347 long task_delta;
2348
22b958d8 2349 se = cfs_rq->tg->se[cpu_of(rq)];
671fd9da
PT
2350
2351 cfs_rq->throttled = 0;
1a55af2e
FW
2352
2353 update_rq_clock(rq);
2354
671fd9da 2355 raw_spin_lock(&cfs_b->lock);
78becc27 2356 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
671fd9da
PT
2357 list_del_rcu(&cfs_rq->throttled_list);
2358 raw_spin_unlock(&cfs_b->lock);
2359
64660c86
PT
2360 /* update hierarchical throttle state */
2361 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
2362
671fd9da
PT
2363 if (!cfs_rq->load.weight)
2364 return;
2365
2366 task_delta = cfs_rq->h_nr_running;
2367 for_each_sched_entity(se) {
2368 if (se->on_rq)
2369 enqueue = 0;
2370
2371 cfs_rq = cfs_rq_of(se);
2372 if (enqueue)
2373 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
2374 cfs_rq->h_nr_running += task_delta;
2375
2376 if (cfs_rq_throttled(cfs_rq))
2377 break;
2378 }
2379
2380 if (!se)
2381 rq->nr_running += task_delta;
2382
2383 /* determine whether we need to wake up potentially idle cpu */
2384 if (rq->curr == rq->idle && rq->cfs.nr_running)
2385 resched_task(rq->curr);
2386}
2387
2388static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
2389 u64 remaining, u64 expires)
2390{
2391 struct cfs_rq *cfs_rq;
2392 u64 runtime = remaining;
2393
2394 rcu_read_lock();
2395 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
2396 throttled_list) {
2397 struct rq *rq = rq_of(cfs_rq);
2398
2399 raw_spin_lock(&rq->lock);
2400 if (!cfs_rq_throttled(cfs_rq))
2401 goto next;
2402
2403 runtime = -cfs_rq->runtime_remaining + 1;
2404 if (runtime > remaining)
2405 runtime = remaining;
2406 remaining -= runtime;
2407
2408 cfs_rq->runtime_remaining += runtime;
2409 cfs_rq->runtime_expires = expires;
2410
2411 /* we check whether we're throttled above */
2412 if (cfs_rq->runtime_remaining > 0)
2413 unthrottle_cfs_rq(cfs_rq);
2414
2415next:
2416 raw_spin_unlock(&rq->lock);
2417
2418 if (!remaining)
2419 break;
2420 }
2421 rcu_read_unlock();
2422
2423 return remaining;
2424}
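
/*
 * Sketch (illustrative) of one distribution step above: a throttled
 * cfs_rq whose runtime_remaining is, say, -3ms is topped up to just
 * past zero (3ms + 1ns), clamped by whatever is left in the pool.
 */
static unsigned long long example_topup(long long runtime_remaining /* <= 0 */,
					unsigned long long pool_remaining)
{
	unsigned long long want = (unsigned long long)(-runtime_remaining) + 1;

	return want > pool_remaining ? pool_remaining : want;
}
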
2425
58088ad0
PT
2426/*
2427 * Responsible for refilling a task_group's bandwidth and unthrottling its
2428 * cfs_rqs as appropriate. If there has been no activity within the last
2429 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
2430 * used to track this state.
2431 */
2432static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
2433{
671fd9da
PT
2434 u64 runtime, runtime_expires;
2435 int idle = 1, throttled;
58088ad0
PT
2436
2437 raw_spin_lock(&cfs_b->lock);
2438 /* no need to continue the timer with no bandwidth constraint */
2439 if (cfs_b->quota == RUNTIME_INF)
2440 goto out_unlock;
2441
671fd9da
PT
2442 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2443 /* idle depends on !throttled (for the case of a large deficit) */
2444 idle = cfs_b->idle && !throttled;
e8da1b18 2445 cfs_b->nr_periods += overrun;
671fd9da 2446
a9cf55b2
PT
2447 /* if we're going inactive then everything else can be deferred */
2448 if (idle)
2449 goto out_unlock;
2450
2451 __refill_cfs_bandwidth_runtime(cfs_b);
2452
671fd9da
PT
2453 if (!throttled) {
2454 /* mark as potentially idle for the upcoming period */
2455 cfs_b->idle = 1;
2456 goto out_unlock;
2457 }
2458
e8da1b18
NR
2459 /* account preceding periods in which throttling occurred */
2460 cfs_b->nr_throttled += overrun;
2461
671fd9da
PT
2462 /*
2463 * There are throttled entities so we must first use the new bandwidth
2464 * to unthrottle them before making it generally available. This
2465 * ensures that all existing debts will be paid before a new cfs_rq is
2466 * allowed to run.
2467 */
2468 runtime = cfs_b->runtime;
2469 runtime_expires = cfs_b->runtime_expires;
2470 cfs_b->runtime = 0;
2471
2472 /*
2473 * This check is repeated as we are holding onto the new bandwidth
2474 * while we unthrottle. This can potentially race with an unthrottled
2475 * group trying to acquire new bandwidth from the global pool.
2476 */
2477 while (throttled && runtime > 0) {
2478 raw_spin_unlock(&cfs_b->lock);
2479 /* we can't nest cfs_b->lock while distributing bandwidth */
2480 runtime = distribute_cfs_runtime(cfs_b, runtime,
2481 runtime_expires);
2482 raw_spin_lock(&cfs_b->lock);
2483
2484 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2485 }
58088ad0 2486
671fd9da
PT
2487 /* return (any) remaining runtime */
2488 cfs_b->runtime = runtime;
2489 /*
2490 * While we are ensured activity in the period following an
2491 * unthrottle, this also covers the case in which the new bandwidth is
2492 * insufficient to cover the existing bandwidth deficit. (Forcing the
2493 * timer to remain active while there are any throttled entities.)
2494 */
2495 cfs_b->idle = 0;
58088ad0
PT
2496out_unlock:
2497 if (idle)
2498 cfs_b->timer_active = 0;
2499 raw_spin_unlock(&cfs_b->lock);
2500
2501 return idle;
2502}
d3d9dc33 2503
d8b4986d
PT
2504/* a cfs_rq won't donate quota below this amount */
2505static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
2506/* minimum remaining period time to redistribute slack quota */
2507static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
2508/* how long we wait to gather additional slack before distributing */
2509static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
2510
2511/* are we near the end of the current quota period? */
2512static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
2513{
2514 struct hrtimer *refresh_timer = &cfs_b->period_timer;
2515 u64 remaining;
2516
2517 /* if the call-back is running a quota refresh is already occurring */
2518 if (hrtimer_callback_running(refresh_timer))
2519 return 1;
2520
2521 /* is a quota refresh about to occur? */
2522 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
2523 if (remaining < min_expire)
2524 return 1;
2525
2526 return 0;
2527}
2528
2529static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
2530{
2531 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
2532
2533 /* if there's a quota refresh soon don't bother with slack */
2534 if (runtime_refresh_within(cfs_b, min_left))
2535 return;
2536
2537 start_bandwidth_timer(&cfs_b->slack_timer,
2538 ns_to_ktime(cfs_bandwidth_slack_period));
2539}
2540
2541/* we know any runtime found here is valid as update_curr() precedes return */
2542static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2543{
2544 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2545 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
2546
2547 if (slack_runtime <= 0)
2548 return;
2549
2550 raw_spin_lock(&cfs_b->lock);
2551 if (cfs_b->quota != RUNTIME_INF &&
2552 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
2553 cfs_b->runtime += slack_runtime;
2554
2555 /* we are under rq->lock, defer unthrottling using a timer */
2556 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
2557 !list_empty(&cfs_b->throttled_cfs_rq))
2558 start_cfs_slack_bandwidth(cfs_b);
2559 }
2560 raw_spin_unlock(&cfs_b->lock);
2561
2562 /* even if it's not valid for return we don't want to try again */
2563 cfs_rq->runtime_remaining -= slack_runtime;
2564}
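
/*
 * Illustrative arithmetic for the slack return above: with the 1ms
 * min_cfs_rq_runtime floor, a cfs_rq going idle while holding 3ms of
 * local runtime donates 2ms back to the global pool and keeps the
 * minimum for a quick wakeup.
 */
static long long example_slack_runtime(long long runtime_remaining,
				       long long min_runtime)
{
	long long slack = runtime_remaining - min_runtime;

	return slack > 0 ? slack : 0;	/* 3ms - 1ms = 2ms donated */
}
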
2565
2566static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2567{
56f570e5
PT
2568 if (!cfs_bandwidth_used())
2569 return;
2570
fccfdc6f 2571 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
d8b4986d
PT
2572 return;
2573
2574 __return_cfs_rq_runtime(cfs_rq);
2575}
2576
2577/*
2578 * This is done with a timer (instead of inline with bandwidth return) since
2579 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
2580 */
2581static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
2582{
2583 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
2584 u64 expires;
2585
2586 /* confirm we're still not at a refresh boundary */
2587 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
2588 return;
2589
2590 raw_spin_lock(&cfs_b->lock);
2591 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
2592 runtime = cfs_b->runtime;
2593 cfs_b->runtime = 0;
2594 }
2595 expires = cfs_b->runtime_expires;
2596 raw_spin_unlock(&cfs_b->lock);
2597
2598 if (!runtime)
2599 return;
2600
2601 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
2602
2603 raw_spin_lock(&cfs_b->lock);
2604 if (expires == cfs_b->runtime_expires)
2605 cfs_b->runtime = runtime;
2606 raw_spin_unlock(&cfs_b->lock);
2607}
2608
d3d9dc33
PT
2609/*
2610 * When a group wakes up we want to make sure that its quota is not already
2611 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
2612 * runtime, as update_curr() throttling cannot trigger until it's on-rq.
2613 */
2614static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
2615{
56f570e5
PT
2616 if (!cfs_bandwidth_used())
2617 return;
2618
d3d9dc33
PT
2619 /* an active group must be handled by the update_curr()->put() path */
2620 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
2621 return;
2622
2623 /* ensure the group is not already throttled */
2624 if (cfs_rq_throttled(cfs_rq))
2625 return;
2626
2627 /* update runtime allocation */
2628 account_cfs_rq_runtime(cfs_rq, 0);
2629 if (cfs_rq->runtime_remaining <= 0)
2630 throttle_cfs_rq(cfs_rq);
2631}
2632
2633/* conditionally throttle active cfs_rq's from put_prev_entity() */
2634static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2635{
56f570e5
PT
2636 if (!cfs_bandwidth_used())
2637 return;
2638
d3d9dc33
PT
2639 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
2640 return;
2641
2642 /*
2643 * it's possible for a throttled entity to be forced into a running
2644 * state (e.g. set_curr_task); in this case we're finished.
2645 */
2646 if (cfs_rq_throttled(cfs_rq))
2647 return;
2648
2649 throttle_cfs_rq(cfs_rq);
2650}
029632fb 2651
029632fb
PZ
2652static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
2653{
2654 struct cfs_bandwidth *cfs_b =
2655 container_of(timer, struct cfs_bandwidth, slack_timer);
2656 do_sched_cfs_slack_timer(cfs_b);
2657
2658 return HRTIMER_NORESTART;
2659}
2660
2661static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
2662{
2663 struct cfs_bandwidth *cfs_b =
2664 container_of(timer, struct cfs_bandwidth, period_timer);
2665 ktime_t now;
2666 int overrun;
2667 int idle = 0;
2668
2669 for (;;) {
2670 now = hrtimer_cb_get_time(timer);
2671 overrun = hrtimer_forward(timer, now, cfs_b->period);
2672
2673 if (!overrun)
2674 break;
2675
2676 idle = do_sched_cfs_period_timer(cfs_b, overrun);
2677 }
2678
2679 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
2680}
2681
2682void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2683{
2684 raw_spin_lock_init(&cfs_b->lock);
2685 cfs_b->runtime = 0;
2686 cfs_b->quota = RUNTIME_INF;
2687 cfs_b->period = ns_to_ktime(default_cfs_period());
2688
2689 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
2690 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2691 cfs_b->period_timer.function = sched_cfs_period_timer;
2692 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2693 cfs_b->slack_timer.function = sched_cfs_slack_timer;
2694}
2695
2696static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2697{
2698 cfs_rq->runtime_enabled = 0;
2699 INIT_LIST_HEAD(&cfs_rq->throttled_list);
2700}
2701
2702/* requires cfs_b->lock, may release to reprogram timer */
2703void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2704{
2705 /*
2706 * The timer may be active because we're trying to set a new bandwidth
2707 * period or because we're racing with the tear-down path
2708 * (timer_active==0 becomes visible before the hrtimer call-back
2709 * terminates). In either case we ensure that it's re-programmed.
2710 */
2711 while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
2712 raw_spin_unlock(&cfs_b->lock);
2713 /* ensure cfs_b->lock is available while we wait */
2714 hrtimer_cancel(&cfs_b->period_timer);
2715
2716 raw_spin_lock(&cfs_b->lock);
2717 /* if someone else restarted the timer then we're done */
2718 if (cfs_b->timer_active)
2719 return;
2720 }
2721
2722 cfs_b->timer_active = 1;
2723 start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
2724}
2725
2726static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2727{
2728 hrtimer_cancel(&cfs_b->period_timer);
2729 hrtimer_cancel(&cfs_b->slack_timer);
2730}
2731
38dc3348 2732static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
029632fb
PZ
2733{
2734 struct cfs_rq *cfs_rq;
2735
2736 for_each_leaf_cfs_rq(rq, cfs_rq) {
2737 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2738
2739 if (!cfs_rq->runtime_enabled)
2740 continue;
2741
2742 /*
2743 * clock_task is not advancing so we just need to make sure
2744 * there's some valid quota amount
2745 */
2746 cfs_rq->runtime_remaining = cfs_b->quota;
2747 if (cfs_rq_throttled(cfs_rq))
2748 unthrottle_cfs_rq(cfs_rq);
2749 }
2750}
2751
2752#else /* CONFIG_CFS_BANDWIDTH */
f1b17280
PT
2753static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2754{
78becc27 2755 return rq_clock_task(rq_of(cfs_rq));
f1b17280
PT
2756}
2757
2758static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2759 unsigned long delta_exec) {}
d3d9dc33
PT
2760static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2761static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
6c16a6dc 2762static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
85dac906
PT
2763
2764static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2765{
2766 return 0;
2767}
64660c86
PT
2768
2769static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2770{
2771 return 0;
2772}
2773
2774static inline int throttled_lb_pair(struct task_group *tg,
2775 int src_cpu, int dest_cpu)
2776{
2777 return 0;
2778}
029632fb
PZ
2779
2780void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2781
2782#ifdef CONFIG_FAIR_GROUP_SCHED
2783static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
ab84d31e
PT
2784#endif
2785
029632fb
PZ
2786static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2787{
2788 return NULL;
2789}
2790static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
a4c96ae3 2791static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
029632fb
PZ
2792
2793#endif /* CONFIG_CFS_BANDWIDTH */
2794
bf0f6f24
IM
2795/**************************************************
2796 * CFS operations on tasks:
2797 */
2798
8f4d37ec
PZ
2799#ifdef CONFIG_SCHED_HRTICK
2800static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
2801{
8f4d37ec
PZ
2802 struct sched_entity *se = &p->se;
2803 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2804
2805 WARN_ON(task_rq(p) != rq);
2806
b39e66ea 2807 if (cfs_rq->nr_running > 1) {
8f4d37ec
PZ
2808 u64 slice = sched_slice(cfs_rq, se);
2809 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
2810 s64 delta = slice - ran;
2811
2812 if (delta < 0) {
2813 if (rq->curr == p)
2814 resched_task(p);
2815 return;
2816 }
2817
2818 /*
2819 * Don't schedule slices shorter than 10000ns, that just
2820 * doesn't make sense. Rely on vruntime for fairness.
2821 */
31656519 2822 if (rq->curr != p)
157124c1 2823 delta = max_t(s64, 10000LL, delta);
8f4d37ec 2824
31656519 2825 hrtick_start(rq, delta);
8f4d37ec
PZ
2826 }
2827}
a4c2f00f
PZ
2828
2829/*
2830 * called from enqueue/dequeue and updates the hrtick when the
2831 * current task is from our class and nr_running is low enough
2832 * to matter.
2833 */
2834static void hrtick_update(struct rq *rq)
2835{
2836 struct task_struct *curr = rq->curr;
2837
b39e66ea 2838 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
a4c2f00f
PZ
2839 return;
2840
2841 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
2842 hrtick_start_fair(rq, curr);
2843}
55e12e5e 2844#else /* !CONFIG_SCHED_HRTICK */
8f4d37ec
PZ
2845static inline void
2846hrtick_start_fair(struct rq *rq, struct task_struct *p)
2847{
2848}
a4c2f00f
PZ
2849
2850static inline void hrtick_update(struct rq *rq)
2851{
2852}
8f4d37ec
PZ
2853#endif
2854
bf0f6f24
IM
2855/*
2856 * The enqueue_task method is called before nr_running is
2857 * increased. Here we update the fair scheduling stats and
2858 * then put the task into the rbtree:
2859 */
ea87bb78 2860static void
371fd7e7 2861enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bf0f6f24
IM
2862{
2863 struct cfs_rq *cfs_rq;
62fb1851 2864 struct sched_entity *se = &p->se;
bf0f6f24
IM
2865
2866 for_each_sched_entity(se) {
62fb1851 2867 if (se->on_rq)
bf0f6f24
IM
2868 break;
2869 cfs_rq = cfs_rq_of(se);
88ec22d3 2870 enqueue_entity(cfs_rq, se, flags);
85dac906
PT
2871
2872 /*
2873 * end evaluation on encountering a throttled cfs_rq
2874 *
2875 * note: in the case of encountering a throttled cfs_rq we will
2876 * post the final h_nr_running increment below.
2877 */
2878 if (cfs_rq_throttled(cfs_rq))
2879 break;
953bfcd1 2880 cfs_rq->h_nr_running++;
85dac906 2881
88ec22d3 2882 flags = ENQUEUE_WAKEUP;
bf0f6f24 2883 }
8f4d37ec 2884
2069dd75 2885 for_each_sched_entity(se) {
0f317143 2886 cfs_rq = cfs_rq_of(se);
953bfcd1 2887 cfs_rq->h_nr_running++;
2069dd75 2888
85dac906
PT
2889 if (cfs_rq_throttled(cfs_rq))
2890 break;
2891
17bc14b7 2892 update_cfs_shares(cfs_rq);
9ee474f5 2893 update_entity_load_avg(se, 1);
2069dd75
PZ
2894 }
2895
18bf2805
BS
2896 if (!se) {
2897 update_rq_runnable_avg(rq, rq->nr_running);
85dac906 2898 inc_nr_running(rq);
18bf2805 2899 }
a4c2f00f 2900 hrtick_update(rq);
bf0f6f24
IM
2901}
2902
2f36825b
VP
2903static void set_next_buddy(struct sched_entity *se);
2904
bf0f6f24
IM
2905/*
2906 * The dequeue_task method is called before nr_running is
2907 * decreased. We remove the task from the rbtree and
2908 * update the fair scheduling stats:
2909 */
371fd7e7 2910static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bf0f6f24
IM
2911{
2912 struct cfs_rq *cfs_rq;
62fb1851 2913 struct sched_entity *se = &p->se;
2f36825b 2914 int task_sleep = flags & DEQUEUE_SLEEP;
bf0f6f24
IM
2915
2916 for_each_sched_entity(se) {
2917 cfs_rq = cfs_rq_of(se);
371fd7e7 2918 dequeue_entity(cfs_rq, se, flags);
85dac906
PT
2919
2920 /*
2921 * end evaluation on encountering a throttled cfs_rq
2922 *
2923 * note: in the case of encountering a throttled cfs_rq we will
2924 * post the final h_nr_running decrement below.
2925 */
2926 if (cfs_rq_throttled(cfs_rq))
2927 break;
953bfcd1 2928 cfs_rq->h_nr_running--;
2069dd75 2929
bf0f6f24 2930 /* Don't dequeue parent if it has other entities besides us */
2f36825b
VP
2931 if (cfs_rq->load.weight) {
2932 /*
2933 * Bias pick_next to pick a task from this cfs_rq, as
2934 * p is sleeping when it is within its sched_slice.
2935 */
2936 if (task_sleep && parent_entity(se))
2937 set_next_buddy(parent_entity(se));
9598c82d
PT
2938
2939 /* avoid re-evaluating load for this entity */
2940 se = parent_entity(se);
bf0f6f24 2941 break;
2f36825b 2942 }
371fd7e7 2943 flags |= DEQUEUE_SLEEP;
bf0f6f24 2944 }
8f4d37ec 2945
2069dd75 2946 for_each_sched_entity(se) {
0f317143 2947 cfs_rq = cfs_rq_of(se);
953bfcd1 2948 cfs_rq->h_nr_running--;
2069dd75 2949
85dac906
PT
2950 if (cfs_rq_throttled(cfs_rq))
2951 break;
2952
17bc14b7 2953 update_cfs_shares(cfs_rq);
9ee474f5 2954 update_entity_load_avg(se, 1);
2069dd75
PZ
2955 }
2956
18bf2805 2957 if (!se) {
85dac906 2958 dec_nr_running(rq);
18bf2805
BS
2959 update_rq_runnable_avg(rq, 1);
2960 }
a4c2f00f 2961 hrtick_update(rq);
bf0f6f24
IM
2962}
2963
e7693a36 2964#ifdef CONFIG_SMP
029632fb
PZ
2965/* Used instead of source_load when we know the type == 0 */
2966static unsigned long weighted_cpuload(const int cpu)
2967{
b92486cb 2968 return cpu_rq(cpu)->cfs.runnable_load_avg;
029632fb
PZ
2969}
2970
2971/*
2972 * Return a low guess at the load of a migration-source cpu weighted
2973 * according to the scheduling class and "nice" value.
2974 *
2975 * We want to under-estimate the load of migration sources, to
2976 * balance conservatively.
2977 */
2978static unsigned long source_load(int cpu, int type)
2979{
2980 struct rq *rq = cpu_rq(cpu);
2981 unsigned long total = weighted_cpuload(cpu);
2982
2983 if (type == 0 || !sched_feat(LB_BIAS))
2984 return total;
2985
2986 return min(rq->cpu_load[type-1], total);
2987}
2988
2989/*
2990 * Return a high guess at the load of a migration-target cpu weighted
2991 * according to the scheduling class and "nice" value.
2992 */
2993static unsigned long target_load(int cpu, int type)
2994{
2995 struct rq *rq = cpu_rq(cpu);
2996 unsigned long total = weighted_cpuload(cpu);
2997
2998 if (type == 0 || !sched_feat(LB_BIAS))
2999 return total;
3000
3001 return max(rq->cpu_load[type-1], total);
3002}
3003
3004static unsigned long power_of(int cpu)
3005{
3006 return cpu_rq(cpu)->cpu_power;
3007}
3008
3009static unsigned long cpu_avg_load_per_task(int cpu)
3010{
3011 struct rq *rq = cpu_rq(cpu);
3012 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
b92486cb 3013 unsigned long load_avg = rq->cfs.runnable_load_avg;
029632fb
PZ
3014
3015 if (nr_running)
b92486cb 3016 return load_avg / nr_running;
029632fb
PZ
3017
3018 return 0;
3019}
3020
62470419
MW
3021static void record_wakee(struct task_struct *p)
3022{
3023 /*
3024 * Rough decay (wiping) for cost saving; don't worry
3025 * about the boundary, a really active task won't care
3026 * about the loss.
3027 */
3028 if (jiffies > current->wakee_flip_decay_ts + HZ) {
3029 current->wakee_flips = 0;
3030 current->wakee_flip_decay_ts = jiffies;
3031 }
3032
3033 if (current->last_wakee != p) {
3034 current->last_wakee = p;
3035 current->wakee_flips++;
3036 }
3037}
098fb9db 3038
74f8e4b2 3039static void task_waking_fair(struct task_struct *p)
88ec22d3
PZ
3040{
3041 struct sched_entity *se = &p->se;
3042 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3fe1698b
PZ
3043 u64 min_vruntime;
3044
3045#ifndef CONFIG_64BIT
3046 u64 min_vruntime_copy;
88ec22d3 3047
3fe1698b
PZ
3048 do {
3049 min_vruntime_copy = cfs_rq->min_vruntime_copy;
3050 smp_rmb();
3051 min_vruntime = cfs_rq->min_vruntime;
3052 } while (min_vruntime != min_vruntime_copy);
3053#else
3054 min_vruntime = cfs_rq->min_vruntime;
3055#endif
88ec22d3 3056
3fe1698b 3057 se->vruntime -= min_vruntime;
62470419 3058 record_wakee(p);
88ec22d3
PZ
3059}
3060
bb3469ac 3061#ifdef CONFIG_FAIR_GROUP_SCHED
f5bfb7d9
PZ
3062/*
3063 * effective_load() calculates the load change as seen from the root_task_group
3064 *
3065 * Adding load to a group doesn't make a group heavier, but can cause movement
3066 * of group shares between cpus. Assuming the shares were perfectly aligned one
3067 * can calculate the shift in shares.
cf5f0acf
PZ
3068 *
3069 * Calculate the effective load difference if @wl is added (subtracted) to @tg
3070 * on this @cpu and results in a total addition (subtraction) of @wg to the
3071 * total group weight.
3072 *
3073 * Given a runqueue weight distribution (rw_i) we can compute a shares
3074 * distribution (s_i) using:
3075 *
3076 * s_i = rw_i / \Sum rw_j (1)
3077 *
3078 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
3079 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
3080 * shares distribution (s_i):
3081 *
3082 * rw_i = { 2, 4, 1, 0 }
3083 * s_i = { 2/7, 4/7, 1/7, 0 }
3084 *
3085 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
3086 * task used to run on and the CPU the waker is running on), we need to
3087 * compute the effect of waking a task on either CPU and, in case of a sync
3088 * wakeup, compute the effect of the current task going to sleep.
3089 *
3090 * So for a change of @wl to the local @cpu with an overall group weight change
3091 * of @wg we can compute the new shares distribution (s'_i) using:
3092 *
3093 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
3094 *
3095 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
3096 * differences in waking a task to CPU 0. The additional task changes the
3097 * weight and shares distributions like:
3098 *
3099 * rw'_i = { 3, 4, 1, 0 }
3100 * s'_i = { 3/8, 4/8, 1/8, 0 }
3101 *
3102 * We can then compute the difference in effective weight by using:
3103 *
3104 * dw_i = S * (s'_i - s_i) (3)
3105 *
3106 * Where 'S' is the group weight as seen by its parent.
3107 *
3108 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
3109 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
3110 * 4/7) times the weight of the group.
f5bfb7d9 3111 */
2069dd75 3112static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
bb3469ac 3113{
4be9daaa 3114 struct sched_entity *se = tg->se[cpu];
f1d239f7 3115
cf5f0acf 3116 if (!tg->parent) /* the trivial, non-cgroup case */
f1d239f7
PZ
3117 return wl;
3118
4be9daaa 3119 for_each_sched_entity(se) {
cf5f0acf 3120 long w, W;
4be9daaa 3121
977dda7c 3122 tg = se->my_q->tg;
bb3469ac 3123
cf5f0acf
PZ
3124 /*
3125 * W = @wg + \Sum rw_j
3126 */
3127 W = wg + calc_tg_weight(tg, se->my_q);
4be9daaa 3128
cf5f0acf
PZ
3129 /*
3130 * w = rw_i + @wl
3131 */
3132 w = se->my_q->load.weight + wl;
940959e9 3133
cf5f0acf
PZ
3134 /*
3135 * wl = S * s'_i; see (2)
3136 */
3137 if (W > 0 && w < W)
3138 wl = (w * tg->shares) / W;
977dda7c
PT
3139 else
3140 wl = tg->shares;
940959e9 3141
cf5f0acf
PZ
3142 /*
3143 * Per the above, wl is the new se->load.weight value; since
3144 * those are clipped to [MIN_SHARES, ...) do so now. See
3145 * calc_cfs_shares().
3146 */
977dda7c
PT
3147 if (wl < MIN_SHARES)
3148 wl = MIN_SHARES;
cf5f0acf
PZ
3149
3150 /*
3151 * wl = dw_i = S * (s'_i - s_i); see (3)
3152 */
977dda7c 3153 wl -= se->load.weight;
cf5f0acf
PZ
3154
3155 /*
3156 * Recursively apply this logic to all parent groups to compute
3157 * the final effective load change on the root group. Since
3158 * only the @tg group gets extra weight, all parent groups can
3159 * only redistribute existing shares. @wl is the shift in shares
3160 * resulting from this level per the above.
3161 */
4be9daaa 3162 wg = 0;
4be9daaa 3163 }
bb3469ac 3164
4be9daaa 3165 return wl;
bb3469ac
PZ
3166}
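
/*
 * Worked example (illustrative) of equations (1)-(3) above, using the
 * rw_i = { 2, 4, 1, 0 } distribution: waking one extra task on cpu0
 * gives s'_0 = 3/8, so dw_0 = S * (3/8 - 2/7) = 5S/56, while cpu1 sees
 * dw_1 = S * (4/8 - 4/7) = -4S/56. The sketch below computes
 * S*s'_i - S*s_i directly (integer division, so only indicative for
 * small S).
 */
static long example_shares_delta(long rw_i, long wl, long sum_rw, long wg,
				 long S)
{
	return (rw_i + wl) * S / (sum_rw + wg) - rw_i * S / sum_rw;
}
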
3167#else
4be9daaa 3168
83378269
PZ
3169static inline unsigned long effective_load(struct task_group *tg, int cpu,
3170 unsigned long wl, unsigned long wg)
4be9daaa 3171{
83378269 3172 return wl;
bb3469ac 3173}
4be9daaa 3174
bb3469ac
PZ
3175#endif
3176
62470419
MW
3177static int wake_wide(struct task_struct *p)
3178{
7d9ffa89 3179 int factor = this_cpu_read(sd_llc_size);
62470419
MW
3180
3181 /*
3182 * Yeah, it's the switching frequency: a high flip count can mean
3183 * many wakees or rapid switching. The LLC-size factor automatically
3184 * adjusts the looseness, so a bigger node leads to more pulling.
3185 */
3186 if (p->wakee_flips > factor) {
3187 /*
3188 * wakee is somewhat hot, it needs certain amount of cpu
3189 * resource, so if waker is far more hot, prefer to leave
3190 * it alone.
3191 */
3192 if (current->wakee_flips > (factor * p->wakee_flips))
3193 return 1;
3194 }
3195
3196 return 0;
3197}
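
/*
 * Illustrative decision sketch for the heuristic above: with an LLC
 * spanning 8 cpus (factor = 8), a wakee with 10 flips counts as
 * 'wide', and a waker with more than 80 flips is considered far
 * hotter, so the affine pull is skipped.
 */
static int example_wake_wide(unsigned int waker_flips,
			     unsigned int wakee_flips,
			     unsigned int factor)
{
	return wakee_flips > factor && waker_flips > factor * wakee_flips;
}
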
3198
c88d5910 3199static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
098fb9db 3200{
e37b6a7b 3201 s64 this_load, load;
c88d5910 3202 int idx, this_cpu, prev_cpu;
098fb9db 3203 unsigned long tl_per_task;
c88d5910 3204 struct task_group *tg;
83378269 3205 unsigned long weight;
b3137bc8 3206 int balanced;
098fb9db 3207
62470419
MW
3208 /*
3209 * If we wake multiple tasks be careful to not bounce
3210 * ourselves around too much.
3211 */
3212 if (wake_wide(p))
3213 return 0;
3214
c88d5910
PZ
3215 idx = sd->wake_idx;
3216 this_cpu = smp_processor_id();
3217 prev_cpu = task_cpu(p);
3218 load = source_load(prev_cpu, idx);
3219 this_load = target_load(this_cpu, idx);
098fb9db 3220
b3137bc8
MG
3221 /*
3222 * If sync wakeup then subtract the (maximum possible)
3223 * effect of the currently running task from the load
3224 * of the current CPU:
3225 */
83378269
PZ
3226 if (sync) {
3227 tg = task_group(current);
3228 weight = current->se.load.weight;
3229
c88d5910 3230 this_load += effective_load(tg, this_cpu, -weight, -weight);
83378269
PZ
3231 load += effective_load(tg, prev_cpu, 0, -weight);
3232 }
b3137bc8 3233
83378269
PZ
3234 tg = task_group(p);
3235 weight = p->se.load.weight;
b3137bc8 3236
71a29aa7
PZ
3237 /*
3238 * In low-load situations, where prev_cpu is idle and this_cpu is idle
c88d5910
PZ
3239 * due to the sync cause above having dropped this_load to 0, we'll
3240 * always have an imbalance, but there's really nothing you can do
3241 * about that, so that's good too.
71a29aa7
PZ
3242 *
3243 * Otherwise check if either cpus are near enough in load to allow this
3244 * task to be woken on this_cpu.
3245 */
e37b6a7b
PT
3246 if (this_load > 0) {
3247 s64 this_eff_load, prev_eff_load;
e51fd5e2
PZ
3248
3249 this_eff_load = 100;
3250 this_eff_load *= power_of(prev_cpu);
3251 this_eff_load *= this_load +
3252 effective_load(tg, this_cpu, weight, weight);
3253
3254 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
3255 prev_eff_load *= power_of(this_cpu);
3256 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
3257
3258 balanced = this_eff_load <= prev_eff_load;
3259 } else
3260 balanced = true;
b3137bc8 3261
098fb9db 3262 /*
4ae7d5ce
IM
3263 * If the currently running task will sleep within
3264 * a reasonable amount of time then attract this newly
3265 * woken task:
098fb9db 3266 */
2fb7635c
PZ
3267 if (sync && balanced)
3268 return 1;
098fb9db 3269
41acab88 3270 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
098fb9db
IM
3271 tl_per_task = cpu_avg_load_per_task(this_cpu);
3272
c88d5910
PZ
3273 if (balanced ||
3274 (this_load <= load &&
3275 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
098fb9db
IM
3276 /*
3277 * This domain has SD_WAKE_AFFINE and
3278 * p is cache cold in this domain, and
3279 * there is no bad imbalance.
3280 */
c88d5910 3281 schedstat_inc(sd, ttwu_move_affine);
41acab88 3282 schedstat_inc(p, se.statistics.nr_wakeups_affine);
098fb9db
IM
3283
3284 return 1;
3285 }
3286 return 0;
3287}
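
/*
 * Sketch (illustrative) of the balance test above: the wakeup counts
 * as balanced when this_cpu's effective load, weighted by 100 and
 * prev_cpu's power, does not exceed prev_cpu's effective load weighted
 * by the halved imbalance percentage and this_cpu's power. The loads
 * passed in are assumed to already include the effective_load() terms.
 */
static int example_balanced(long long this_load, long long prev_load,
			    unsigned long this_power,
			    unsigned long prev_power,
			    unsigned int imbalance_pct)
{
	long long this_eff = 100LL * prev_power * this_load;
	long long prev_eff = (100 + (imbalance_pct - 100) / 2) *
			     (long long)this_power * prev_load;

	return this_eff <= prev_eff;
}
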
3288
aaee1203
PZ
3289/*
3290 * find_idlest_group finds and returns the least busy CPU group within the
3291 * domain.
3292 */
3293static struct sched_group *
78e7ed53 3294find_idlest_group(struct sched_domain *sd, struct task_struct *p,
5158f4e4 3295 int this_cpu, int load_idx)
e7693a36 3296{
b3bd3de6 3297 struct sched_group *idlest = NULL, *group = sd->groups;
aaee1203 3298 unsigned long min_load = ULONG_MAX, this_load = 0;
aaee1203 3299 int imbalance = 100 + (sd->imbalance_pct-100)/2;
e7693a36 3300
aaee1203
PZ
3301 do {
3302 unsigned long load, avg_load;
3303 int local_group;
3304 int i;
e7693a36 3305
aaee1203
PZ
3306 /* Skip over this group if it has no CPUs allowed */
3307 if (!cpumask_intersects(sched_group_cpus(group),
fa17b507 3308 tsk_cpus_allowed(p)))
aaee1203
PZ
3309 continue;
3310
3311 local_group = cpumask_test_cpu(this_cpu,
3312 sched_group_cpus(group));
3313
3314 /* Tally up the load of all CPUs in the group */
3315 avg_load = 0;
3316
3317 for_each_cpu(i, sched_group_cpus(group)) {
3318 /* Bias balancing toward cpus of our domain */
3319 if (local_group)
3320 load = source_load(i, load_idx);
3321 else
3322 load = target_load(i, load_idx);
3323
3324 avg_load += load;
3325 }
3326
3327 /* Adjust by relative CPU power of the group */
9c3f75cb 3328 avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
aaee1203
PZ
3329
3330 if (local_group) {
3331 this_load = avg_load;
aaee1203
PZ
3332 } else if (avg_load < min_load) {
3333 min_load = avg_load;
3334 idlest = group;
3335 }
3336 } while (group = group->next, group != sd->groups);
3337
3338 if (!idlest || 100*this_load < imbalance*min_load)
3339 return NULL;
3340 return idlest;
3341}
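
/*
 * Worked example (illustrative) of the final comparison above: with
 * imbalance_pct = 125 the scaled threshold is 100 + 25/2 = 112, so the
 * local group is kept unless the idlest remote group is at least ~12%
 * less loaded.
 */
static int example_keep_local(unsigned long this_load,
			      unsigned long min_load,
			      unsigned int imbalance /* 100 + (pct-100)/2 */)
{
	return 100 * this_load < (unsigned long)imbalance * min_load;
}
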
3342
3343/*
3344 * find_idlest_cpu - find the idlest cpu among the cpus in group.
3345 */
3346static int
3347find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
3348{
3349 unsigned long load, min_load = ULONG_MAX;
3350 int idlest = -1;
3351 int i;
3352
3353 /* Traverse only the allowed CPUs */
fa17b507 3354 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
aaee1203
PZ
3355 load = weighted_cpuload(i);
3356
3357 if (load < min_load || (load == min_load && i == this_cpu)) {
3358 min_load = load;
3359 idlest = i;
e7693a36
GH
3360 }
3361 }
3362
aaee1203
PZ
3363 return idlest;
3364}
e7693a36 3365
a50bde51
PZ
3366/*
3367 * Try and locate an idle CPU in the sched_domain.
3368 */
99bd5e2f 3369static int select_idle_sibling(struct task_struct *p, int target)
a50bde51 3370{
99bd5e2f 3371 struct sched_domain *sd;
37407ea7 3372 struct sched_group *sg;
e0a79f52 3373 int i = task_cpu(p);
a50bde51 3374
e0a79f52
MG
3375 if (idle_cpu(target))
3376 return target;
99bd5e2f
SS
3377
3378 /*
e0a79f52 3379 * If the prevous cpu is cache affine and idle, don't be stupid.
99bd5e2f 3380 */
e0a79f52
MG
3381 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
3382 return i;
a50bde51
PZ
3383
3384 /*
37407ea7 3385 * Otherwise, iterate the domains and find an elegible idle cpu.
a50bde51 3386 */
518cd623 3387 sd = rcu_dereference(per_cpu(sd_llc, target));
970e1789 3388 for_each_lower_domain(sd) {
37407ea7
LT
3389 sg = sd->groups;
3390 do {
3391 if (!cpumask_intersects(sched_group_cpus(sg),
3392 tsk_cpus_allowed(p)))
3393 goto next;
3394
3395 for_each_cpu(i, sched_group_cpus(sg)) {
e0a79f52 3396 if (i == target || !idle_cpu(i))
37407ea7
LT
3397 goto next;
3398 }
970e1789 3399
37407ea7
LT
3400 target = cpumask_first_and(sched_group_cpus(sg),
3401 tsk_cpus_allowed(p));
3402 goto done;
3403next:
3404 sg = sg->next;
3405 } while (sg != sd->groups);
3406 }
3407done:
a50bde51
PZ
3408 return target;
3409}
3410
aaee1203
PZ
3411/*
3412 * select_task_rq_fair: balance the current task (running on cpu) in domains
3413 * that have the relevant sd_flag set. In practice, this is SD_BALANCE_WAKE,
3414 * SD_BALANCE_FORK and SD_BALANCE_EXEC.
3415 *
3416 * Balance, ie. select the least loaded group.
3417 *
3418 * Returns the target CPU number, or the same CPU if no balancing is needed.
3419 *
3420 * preempt must be disabled.
3421 */
0017d735 3422static int
7608dec2 3423select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
aaee1203 3424{
29cd8bae 3425 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
c88d5910
PZ
3426 int cpu = smp_processor_id();
3427 int prev_cpu = task_cpu(p);
3428 int new_cpu = cpu;
99bd5e2f 3429 int want_affine = 0;
5158f4e4 3430 int sync = wake_flags & WF_SYNC;
c88d5910 3431
29baa747 3432 if (p->nr_cpus_allowed == 1)
76854c7e
MG
3433 return prev_cpu;
3434
0763a660 3435 if (sd_flag & SD_BALANCE_WAKE) {
fa17b507 3436 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
c88d5910
PZ
3437 want_affine = 1;
3438 new_cpu = prev_cpu;
3439 }
aaee1203 3440
dce840a0 3441 rcu_read_lock();
aaee1203 3442 for_each_domain(cpu, tmp) {
e4f42888
PZ
3443 if (!(tmp->flags & SD_LOAD_BALANCE))
3444 continue;
3445
fe3bcfe1 3446 /*
99bd5e2f
SS
3447 * If both cpu and prev_cpu are part of this domain,
3448 * cpu is a valid SD_WAKE_AFFINE target.
fe3bcfe1 3449 */
99bd5e2f
SS
3450 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
3451 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
3452 affine_sd = tmp;
29cd8bae 3453 break;
f03542a7 3454 }
29cd8bae 3455
f03542a7 3456 if (tmp->flags & sd_flag)
29cd8bae
PZ
3457 sd = tmp;
3458 }
3459
8b911acd 3460 if (affine_sd) {
f03542a7 3461 if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
dce840a0
PZ
3462 prev_cpu = cpu;
3463
3464 new_cpu = select_idle_sibling(p, prev_cpu);
3465 goto unlock;
8b911acd 3466 }
e7693a36 3467
aaee1203 3468 while (sd) {
5158f4e4 3469 int load_idx = sd->forkexec_idx;
aaee1203 3470 struct sched_group *group;
c88d5910 3471 int weight;
098fb9db 3472
0763a660 3473 if (!(sd->flags & sd_flag)) {
aaee1203
PZ
3474 sd = sd->child;
3475 continue;
3476 }
098fb9db 3477
5158f4e4
PZ
3478 if (sd_flag & SD_BALANCE_WAKE)
3479 load_idx = sd->wake_idx;
098fb9db 3480
5158f4e4 3481 group = find_idlest_group(sd, p, cpu, load_idx);
aaee1203
PZ
3482 if (!group) {
3483 sd = sd->child;
3484 continue;
3485 }
4ae7d5ce 3486
d7c33c49 3487 new_cpu = find_idlest_cpu(group, p, cpu);
aaee1203
PZ
3488 if (new_cpu == -1 || new_cpu == cpu) {
3489 /* Now try balancing at a lower domain level of cpu */
3490 sd = sd->child;
3491 continue;
e7693a36 3492 }
aaee1203
PZ
3493
3494 /* Now try balancing at a lower domain level of new_cpu */
3495 cpu = new_cpu;
669c55e9 3496 weight = sd->span_weight;
aaee1203
PZ
3497 sd = NULL;
3498 for_each_domain(cpu, tmp) {
669c55e9 3499 if (weight <= tmp->span_weight)
aaee1203 3500 break;
0763a660 3501 if (tmp->flags & sd_flag)
aaee1203
PZ
3502 sd = tmp;
3503 }
3504 /* while loop will break here if sd == NULL */
e7693a36 3505 }
dce840a0
PZ
3506unlock:
3507 rcu_read_unlock();
e7693a36 3508
c88d5910 3509 return new_cpu;
e7693a36 3510}
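
The while(sd) walk above is a greedy descent: pick the least loaded group at the current level, then repeat one level down inside it. A user-space toy of that descent (the two-level layout, loads, and struct are invented for illustration, not the kernel's data structures):

#include <stdio.h>

struct toy_group {
	long load;	/* stand-in for find_idlest_group()'s metric */
	int cpu;	/* stand-in for find_idlest_cpu()'s result */
};

int main(void)
{
	/* toy hierarchy: 2 nodes of 2 cores each, loads made up */
	struct toy_group nodes[2] = { { 300, 0 }, { 100, 2 } };
	struct toy_group cores[2][2] = {
		{ { 200, 0 }, { 100, 1 } },	/* cpus 0,1 under node 0 */
		{ {  80, 2 }, {  20, 3 } },	/* cpus 2,3 under node 1 */
	};
	int n = nodes[0].load <= nodes[1].load ? 0 : 1;
	int c = cores[n][0].load <= cores[n][1].load ? 0 : 1;

	printf("selected cpu %d\n", cores[n][c].cpu);	/* -> cpu 3 */
	return 0;
}
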
0a74bef8
PT
3511
3512/*
3513 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
3514 * cfs_rq_of(p) references at time of call are still valid and identify the
3515 * previous cpu. However, the caller only guarantees p->pi_lock is held; no
3516 * other assumptions, including the state of rq->lock, should be made.
3517 */
3518static void
3519migrate_task_rq_fair(struct task_struct *p, int next_cpu)
3520{
aff3e498
PT
3521 struct sched_entity *se = &p->se;
3522 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3523
3524 /*
3525 * Load tracking: accumulate removed load so that it can be processed
3526 * when we next update owning cfs_rq under rq->lock. Tasks contribute
3527 * to blocked load iff they have a positive decay-count. It can never
3528 * be negative here since on-rq tasks have decay-count == 0.
3529 */
3530 if (se->avg.decay_count) {
3531 se->avg.decay_count = -__synchronize_entity_decay(se);
2509940f
AS
3532 atomic_long_add(se->avg.load_avg_contrib,
3533 &cfs_rq->removed_load);
aff3e498 3534 }
0a74bef8 3535}
e7693a36
GH
3536#endif /* CONFIG_SMP */
3537
e52fb7c0
PZ
3538static unsigned long
3539wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
0bbd3336
PZ
3540{
3541 unsigned long gran = sysctl_sched_wakeup_granularity;
3542
3543 /*
e52fb7c0
PZ
3544 * Since it's curr that is running now, convert the gran from
3545 * real-time to virtual-time in its units.
13814d42
MG
3546 *
3547 * By using 'se' instead of 'curr' we penalize light tasks, so
3548 * they get preempted more easily. That is, if 'se' < 'curr' then
3549 * the resulting gran will be larger, therefore penalizing the
3550 * lighter task; if on the other hand 'se' > 'curr' then the
3551 * resulting gran will be smaller, again penalizing the lighter task.
3552 *
3553 * This is especially important for buddies when the leftmost
3554 * task is higher priority than the buddy.
0bbd3336 3555 */
f4ad9bd2 3556 return calc_delta_fair(gran, se);
0bbd3336
PZ
3557}
3558
464b7527
PZ
3559/*
3560 * Should 'se' preempt 'curr'.
3561 *
3562 * |s1
3563 * |s2
3564 * |s3
3565 * g
3566 * |<--->|c
3567 *
3568 * w(c, s1) = -1
3569 * w(c, s2) = 0
3570 * w(c, s3) = 1
3571 *
3572 */
3573static int
3574wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
3575{
3576 s64 gran, vdiff = curr->vruntime - se->vruntime;
3577
3578 if (vdiff <= 0)
3579 return -1;
3580
e52fb7c0 3581 gran = wakeup_gran(curr, se);
464b7527
PZ
3582 if (vdiff > gran)
3583 return 1;
3584
3585 return 0;
3586}
3587
02479099
PZ
3588static void set_last_buddy(struct sched_entity *se)
3589{
69c80f3e
VP
3590 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3591 return;
3592
3593 for_each_sched_entity(se)
3594 cfs_rq_of(se)->last = se;
02479099
PZ
3595}
3596
3597static void set_next_buddy(struct sched_entity *se)
3598{
69c80f3e
VP
3599 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3600 return;
3601
3602 for_each_sched_entity(se)
3603 cfs_rq_of(se)->next = se;
02479099
PZ
3604}
3605
ac53db59
RR
3606static void set_skip_buddy(struct sched_entity *se)
3607{
69c80f3e
VP
3608 for_each_sched_entity(se)
3609 cfs_rq_of(se)->skip = se;
ac53db59
RR
3610}
3611
bf0f6f24
IM
3612/*
3613 * Preempt the current task with a newly woken task if needed:
3614 */
5a9b86f6 3615static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
bf0f6f24
IM
3616{
3617 struct task_struct *curr = rq->curr;
8651a86c 3618 struct sched_entity *se = &curr->se, *pse = &p->se;
03e89e45 3619 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
f685ceac 3620 int scale = cfs_rq->nr_running >= sched_nr_latency;
2f36825b 3621 int next_buddy_marked = 0;
bf0f6f24 3622
4ae7d5ce
IM
3623 if (unlikely(se == pse))
3624 return;
3625
5238cdd3 3626 /*
ddcdf6e7 3627 * This is possible from callers such as move_task(), in which we
5238cdd3
PT
3628 * unconditionally check_preempt_curr() after an enqueue (which may have
3629 * led to a throttle). This both saves work and prevents false
3630 * next-buddy nomination below.
3631 */
3632 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
3633 return;
3634
2f36825b 3635 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
3cb63d52 3636 set_next_buddy(pse);
2f36825b
VP
3637 next_buddy_marked = 1;
3638 }
57fdc26d 3639
aec0a514
BR
3640 /*
3641 * We can come here with TIF_NEED_RESCHED already set from new task
3642 * wake up path.
5238cdd3
PT
3643 *
3644 * Note: this also catches the edge-case of curr being in a throttled
3645 * group (e.g. via set_curr_task), since update_curr() (in the
3646 * enqueue of curr) will have resulted in resched being set. This
3647 * prevents us from potentially nominating it as a false LAST_BUDDY
3648 * below.
aec0a514
BR
3649 */
3650 if (test_tsk_need_resched(curr))
3651 return;
3652
a2f5c9ab
DH
3653 /* Idle tasks are by definition preempted by non-idle tasks. */
3654 if (unlikely(curr->policy == SCHED_IDLE) &&
3655 likely(p->policy != SCHED_IDLE))
3656 goto preempt;
3657
91c234b4 3658 /*
a2f5c9ab
DH
3659 * Batch and idle tasks do not preempt non-idle tasks (their preemption
3660 * is driven by the tick):
91c234b4 3661 */
8ed92e51 3662 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
91c234b4 3663 return;
bf0f6f24 3664
464b7527 3665 find_matching_se(&se, &pse);
9bbd7374 3666 update_curr(cfs_rq_of(se));
002f128b 3667 BUG_ON(!pse);
2f36825b
VP
3668 if (wakeup_preempt_entity(se, pse) == 1) {
3669 /*
3670 * Bias pick_next to pick the sched entity that is
3671 * triggering this preemption.
3672 */
3673 if (!next_buddy_marked)
3674 set_next_buddy(pse);
3a7e73a2 3675 goto preempt;
2f36825b 3676 }
464b7527 3677
3a7e73a2 3678 return;
a65ac745 3679
3a7e73a2
PZ
3680preempt:
3681 resched_task(curr);
3682 /*
3683 * Only set the backward buddy when the current task is still
3684 * on the rq. This can happen when a wakeup gets interleaved
3685 * with schedule on the ->pre_schedule() or idle_balance()
3686 * point, either of which can drop the rq lock.
3687 *
3688 * Also, during early boot the idle thread is in the fair class,
3689 * for obvious reasons it's a bad idea to schedule back to it.
3690 */
3691 if (unlikely(!se->on_rq || curr == rq->idle))
3692 return;
3693
3694 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
3695 set_last_buddy(se);
bf0f6f24
IM
3696}
3697
fb8d4724 3698static struct task_struct *pick_next_task_fair(struct rq *rq)
bf0f6f24 3699{
8f4d37ec 3700 struct task_struct *p;
bf0f6f24
IM
3701 struct cfs_rq *cfs_rq = &rq->cfs;
3702 struct sched_entity *se;
3703
36ace27e 3704 if (!cfs_rq->nr_running)
bf0f6f24
IM
3705 return NULL;
3706
3707 do {
9948f4b2 3708 se = pick_next_entity(cfs_rq);
f4b6755f 3709 set_next_entity(cfs_rq, se);
bf0f6f24
IM
3710 cfs_rq = group_cfs_rq(se);
3711 } while (cfs_rq);
3712
8f4d37ec 3713 p = task_of(se);
b39e66ea
MG
3714 if (hrtick_enabled(rq))
3715 hrtick_start_fair(rq, p);
8f4d37ec
PZ
3716
3717 return p;
bf0f6f24
IM
3718}
3719
3720/*
3721 * Account for a descheduled task:
3722 */
31ee529c 3723static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
bf0f6f24
IM
3724{
3725 struct sched_entity *se = &prev->se;
3726 struct cfs_rq *cfs_rq;
3727
3728 for_each_sched_entity(se) {
3729 cfs_rq = cfs_rq_of(se);
ab6cde26 3730 put_prev_entity(cfs_rq, se);
bf0f6f24
IM
3731 }
3732}
3733
ac53db59
RR
3734/*
3735 * sched_yield() is very simple
3736 *
3737 * The magic of dealing with the ->skip buddy is in pick_next_entity.
3738 */
3739static void yield_task_fair(struct rq *rq)
3740{
3741 struct task_struct *curr = rq->curr;
3742 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
3743 struct sched_entity *se = &curr->se;
3744
3745 /*
3746 * Are we the only task in the tree?
3747 */
3748 if (unlikely(rq->nr_running == 1))
3749 return;
3750
3751 clear_buddies(cfs_rq, se);
3752
3753 if (curr->policy != SCHED_BATCH) {
3754 update_rq_clock(rq);
3755 /*
3756 * Update run-time statistics of the 'current'.
3757 */
3758 update_curr(cfs_rq);
916671c0
MG
3759 /*
3760 * Tell update_rq_clock() that we've just updated,
3761 * so we don't do microscopic update in schedule()
3762 * and double the fastpath cost.
3763 */
3764 rq->skip_clock_update = 1;
ac53db59
RR
3765 }
3766
3767 set_skip_buddy(se);
3768}
3769
d95f4122
MG
3770static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
3771{
3772 struct sched_entity *se = &p->se;
3773
5238cdd3
PT
3774 /* throttled hierarchies are not runnable */
3775 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
d95f4122
MG
3776 return false;
3777
3778 /* Tell the scheduler that we'd really like pse to run next. */
3779 set_next_buddy(se);
3780
d95f4122
MG
3781 yield_task_fair(rq);
3782
3783 return true;
3784}
3785
681f3e68 3786#ifdef CONFIG_SMP
bf0f6f24 3787/**************************************************
e9c84cb8
PZ
3788 * Fair scheduling class load-balancing methods.
3789 *
3790 * BASICS
3791 *
3792 * The purpose of load-balancing is to achieve the same basic fairness the
3793 * per-cpu scheduler provides, namely provide a proportional amount of compute
3794 * time to each task. This is expressed in the following equation:
3795 *
3796 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
3797 *
3798 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
3799 * W_i,0 is defined as:
3800 *
3801 * W_i,0 = \Sum_j w_i,j (2)
3802 *
3803 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
3804 * is derived from the nice value as per prio_to_weight[].
3805 *
3806 * The weight average is an exponential decay average of the instantaneous
3807 * weight:
3808 *
3809 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
3810 *
3811 * P_i is the cpu power (or compute capacity) of cpu i, typically it is the
3812 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
3813 * can also include other factors [XXX].
3814 *
3815 * To achieve this balance we define a measure of imbalance which follows
3816 * directly from (1):
3817 *
3818 * imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j } (4)
3819 *
3820 * We then move tasks around to minimize the imbalance. In the continuous
3821 * function space it is obvious this converges, in the discrete case we get
3822 * a few fun cases generally called infeasible weight scenarios.
3823 *
3824 * [XXX expand on:
3825 * - infeasible weights;
3826 * - local vs global optima in the discrete case. ]
3827 *
3828 *
3829 * SCHED DOMAINS
3830 *
3831 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
3832 * for all i,j solution, we create a tree of cpus that follows the hardware
3833 * topology where each level pairs two lower groups (or better). This results
3834 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
3835 * tree to only the first of the previous level and we decrease the frequency
3836 * of load-balance at each level inv. proportional to the number of cpus in
3837 * the groups.
3838 *
3839 * This yields:
3840 *
3841 * log_2 n 1 n
3842 * \Sum { --- * --- * 2^i } = O(n) (5)
3843 * i = 0 2^i 2^i
3844 * `- size of each group
3845 * | | `- number of cpus doing load-balance
3846 * | `- freq
3847 * `- sum over all levels
3848 *
3849 * Coupled with a limit on how many tasks we can migrate every balance pass,
3850 * this makes (5) the runtime complexity of the balancer.
3851 *
3852 * An important property here is that each CPU is still (indirectly) connected
3853 * to every other cpu in at most O(log n) steps:
3854 *
3855 * The adjacency matrix of the resulting graph is given by:
3856 *
3857 * log_2 n
3858 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
3859 * k = 0
3860 *
3861 * And you'll find that:
3862 *
3863 * A^(log_2 n)_i,j != 0 for all i,j (7)
3864 *
3865 * Showing there's indeed a path between every cpu in at most O(log n) steps.
3866 * The task movement gives a factor of O(m), giving a convergence complexity
3867 * of:
3868 *
3869 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
3870 *
3871 *
3872 * WORK CONSERVING
3873 *
3874 * In order to avoid CPUs going idle while there's still work to do, new idle
3875 * balancing is more aggressive and has the newly idle cpu iterate up the domain
3876 * tree itself instead of relying on other CPUs to bring it work.
3877 *
3878 * This adds some complexity to both (5) and (8) but it reduces the total idle
3879 * time.
3880 *
3881 * [XXX more?]
3882 *
3883 *
3884 * CGROUPS
3885 *
3886 * Cgroups make a horror show out of (2), instead of a simple sum we get:
3887 *
3888 * s_k,i
3889 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9)
3890 * S_k
3891 *
3892 * Where
3893 *
3894 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
3895 *
3896 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
3897 *
3898 * The big problem is S_k, it's a global sum needed to compute a local (W_i)
3899 * property.
3900 *
3901 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
3902 * rewrite all of this once again.]
3903 */
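
A small worked instance of (4), assuming two cpus of equal power P_1 = P_2 = 1 and invented weights W_1 = 2048, W_2 = 1024:

\[
\begin{aligned}
\operatorname{avg}(W/P) &= \frac{2048 + 1024}{2} = 1536 \\
\text{imb}_{1,2} &= \max\{1536, 2048\} - \min\{1536, 1024\} = 1024
\end{aligned}
\]

Moving weight 512 from cpu 1 to cpu 2 leaves W_1 = W_2 = 1536 and drives the imbalance to zero, which is exactly what the task mover below aims for.
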
bf0f6f24 3904
ed387b78
HS
3905static unsigned long __read_mostly max_load_balance_interval = HZ/10;
3906
ddcdf6e7 3907#define LBF_ALL_PINNED 0x01
367456c7 3908#define LBF_NEED_BREAK 0x02
6263322c
PZ
3909#define LBF_DST_PINNED 0x04
3910#define LBF_SOME_PINNED 0x08
ddcdf6e7
PZ
3911
3912struct lb_env {
3913 struct sched_domain *sd;
3914
ddcdf6e7 3915 struct rq *src_rq;
85c1e7da 3916 int src_cpu;
ddcdf6e7
PZ
3917
3918 int dst_cpu;
3919 struct rq *dst_rq;
3920
88b8dac0
SV
3921 struct cpumask *dst_grpmask;
3922 int new_dst_cpu;
ddcdf6e7 3923 enum cpu_idle_type idle;
bd939f45 3924 long imbalance;
b9403130
MW
3925 /* The set of CPUs under consideration for load-balancing */
3926 struct cpumask *cpus;
3927
ddcdf6e7 3928 unsigned int flags;
367456c7
PZ
3929
3930 unsigned int loop;
3931 unsigned int loop_break;
3932 unsigned int loop_max;
ddcdf6e7
PZ
3933};
3934
1e3c88bd 3935/*
ddcdf6e7 3936 * move_task - move a task from one runqueue to another runqueue.
1e3c88bd
PZ
3937 * Both runqueues must be locked.
3938 */
ddcdf6e7 3939static void move_task(struct task_struct *p, struct lb_env *env)
1e3c88bd 3940{
ddcdf6e7
PZ
3941 deactivate_task(env->src_rq, p, 0);
3942 set_task_cpu(p, env->dst_cpu);
3943 activate_task(env->dst_rq, p, 0);
3944 check_preempt_curr(env->dst_rq, p, 0);
1e3c88bd
PZ
3945}
3946
029632fb
PZ
3947/*
3948 * Is this task likely cache-hot:
3949 */
3950static int
3951task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
3952{
3953 s64 delta;
3954
3955 if (p->sched_class != &fair_sched_class)
3956 return 0;
3957
3958 if (unlikely(p->policy == SCHED_IDLE))
3959 return 0;
3960
3961 /*
3962 * Buddy candidates are cache hot:
3963 */
3964 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
3965 (&p->se == cfs_rq_of(&p->se)->next ||
3966 &p->se == cfs_rq_of(&p->se)->last))
3967 return 1;
3968
3969 if (sysctl_sched_migration_cost == -1)
3970 return 1;
3971 if (sysctl_sched_migration_cost == 0)
3972 return 0;
3973
3974 delta = now - p->se.exec_start;
3975
3976 return delta < (s64)sysctl_sched_migration_cost;
3977}
3978
1e3c88bd
PZ
3979/*
3980 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
3981 */
3982static
8e45cb54 3983int can_migrate_task(struct task_struct *p, struct lb_env *env)
1e3c88bd
PZ
3984{
3985 int tsk_cache_hot = 0;
3986 /*
3987 * We do not migrate tasks that are:
d3198084 3988 * 1) throttled_lb_pair, or
1e3c88bd 3989 * 2) cannot be migrated to this CPU due to cpus_allowed, or
d3198084
JK
3990 * 3) running (obviously), or
3991 * 4) are cache-hot on their current CPU.
1e3c88bd 3992 */
d3198084
JK
3993 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
3994 return 0;
3995
ddcdf6e7 3996 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
e02e60c1 3997 int cpu;
88b8dac0 3998
41acab88 3999 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
88b8dac0 4000
6263322c
PZ
4001 env->flags |= LBF_SOME_PINNED;
4002
88b8dac0
SV
4003 /*
4004 * Remember if this task can be migrated to any other cpu in
4005 * our sched_group. We may want to revisit it if we couldn't
4006 * meet load balance goals by pulling other tasks on src_cpu.
4007 *
4008 * Also avoid computing new_dst_cpu if we have already computed
4009 * one in current iteration.
4010 */
6263322c 4011 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
88b8dac0
SV
4012 return 0;
4013
e02e60c1
JK
4014 /* Prevent re-selecting dst_cpu via env's cpus */
4015 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
4016 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
6263322c 4017 env->flags |= LBF_DST_PINNED;
e02e60c1
JK
4018 env->new_dst_cpu = cpu;
4019 break;
4020 }
88b8dac0 4021 }
e02e60c1 4022
1e3c88bd
PZ
4023 return 0;
4024 }
88b8dac0
SV
4025
4026 /* Record that we found at least one task that could run on dst_cpu */
8e45cb54 4027 env->flags &= ~LBF_ALL_PINNED;
1e3c88bd 4028
ddcdf6e7 4029 if (task_running(env->src_rq, p)) {
41acab88 4030 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
1e3c88bd
PZ
4031 return 0;
4032 }
4033
4034 /*
4035 * Aggressive migration if:
4036 * 1) task is cache cold, or
4037 * 2) too many balance attempts have failed.
4038 */
4039
78becc27 4040 tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd);
1e3c88bd 4041 if (!tsk_cache_hot ||
8e45cb54 4042 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
4e2dcb73 4043
1e3c88bd 4044 if (tsk_cache_hot) {
8e45cb54 4045 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
41acab88 4046 schedstat_inc(p, se.statistics.nr_forced_migrations);
1e3c88bd 4047 }
4e2dcb73 4048
1e3c88bd
PZ
4049 return 1;
4050 }
4051
4e2dcb73
ZH
4052 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
4053 return 0;
1e3c88bd
PZ
4054}
4055
897c395f
PZ
4056/*
4057 * move_one_task tries to move exactly one task from busiest to this_rq, as
4058 * part of active balancing operations within "domain".
4059 * Returns 1 if successful and 0 otherwise.
4060 *
4061 * Called with both runqueues locked.
4062 */
8e45cb54 4063static int move_one_task(struct lb_env *env)
897c395f
PZ
4064{
4065 struct task_struct *p, *n;
897c395f 4066
367456c7 4067 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
367456c7
PZ
4068 if (!can_migrate_task(p, env))
4069 continue;
897c395f 4070
367456c7
PZ
4071 move_task(p, env);
4072 /*
4073 * Right now, this is only the second place move_task()
4074 * is called, so we can safely collect move_task()
4075 * stats here rather than inside move_task().
4076 */
4077 schedstat_inc(env->sd, lb_gained[env->idle]);
4078 return 1;
897c395f 4079 }
897c395f
PZ
4080 return 0;
4081}
4082
367456c7
PZ
4083static unsigned long task_h_load(struct task_struct *p);
4084
eb95308e
PZ
4085static const unsigned int sched_nr_migrate_break = 32;
4086
5d6523eb 4087/*
bd939f45 4088 * move_tasks tries to move up to imbalance weighted load from busiest to
5d6523eb
PZ
4089 * this_rq, as part of a balancing operation within domain "sd".
4090 * Returns 1 if successful and 0 otherwise.
4091 *
4092 * Called with both runqueues locked.
4093 */
4094static int move_tasks(struct lb_env *env)
1e3c88bd 4095{
5d6523eb
PZ
4096 struct list_head *tasks = &env->src_rq->cfs_tasks;
4097 struct task_struct *p;
367456c7
PZ
4098 unsigned long load;
4099 int pulled = 0;
1e3c88bd 4100
bd939f45 4101 if (env->imbalance <= 0)
5d6523eb 4102 return 0;
1e3c88bd 4103
5d6523eb
PZ
4104 while (!list_empty(tasks)) {
4105 p = list_first_entry(tasks, struct task_struct, se.group_node);
1e3c88bd 4106
367456c7
PZ
4107 env->loop++;
4108 /* We've more or less seen every task there is, call it quits */
5d6523eb 4109 if (env->loop > env->loop_max)
367456c7 4110 break;
5d6523eb
PZ
4111
4112 /* take a breather every nr_migrate tasks */
367456c7 4113 if (env->loop > env->loop_break) {
eb95308e 4114 env->loop_break += sched_nr_migrate_break;
8e45cb54 4115 env->flags |= LBF_NEED_BREAK;
ee00e66f 4116 break;
a195f004 4117 }
1e3c88bd 4118
d3198084 4119 if (!can_migrate_task(p, env))
367456c7
PZ
4120 goto next;
4121
4122 load = task_h_load(p);
5d6523eb 4123
eb95308e 4124 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
367456c7
PZ
4125 goto next;
4126
bd939f45 4127 if ((load / 2) > env->imbalance)
367456c7 4128 goto next;
1e3c88bd 4129
ddcdf6e7 4130 move_task(p, env);
ee00e66f 4131 pulled++;
bd939f45 4132 env->imbalance -= load;
1e3c88bd
PZ
4133
4134#ifdef CONFIG_PREEMPT
ee00e66f
PZ
4135 /*
4136 * NEWIDLE balancing is a source of latency, so preemptible
4137 * kernels will stop after the first task is pulled to minimize
4138 * the critical section.
4139 */
5d6523eb 4140 if (env->idle == CPU_NEWLY_IDLE)
ee00e66f 4141 break;
1e3c88bd
PZ
4142#endif
4143
ee00e66f
PZ
4144 /*
4145 * We only want to steal up to the prescribed amount of
4146 * weighted load.
4147 */
bd939f45 4148 if (env->imbalance <= 0)
ee00e66f 4149 break;
367456c7
PZ
4150
4151 continue;
4152next:
5d6523eb 4153 list_move_tail(&p->se.group_node, tasks);
1e3c88bd 4154 }
5d6523eb 4155
1e3c88bd 4156 /*
ddcdf6e7
PZ
4157 * Right now, this is one of only two places move_task() is called,
4158 * so we can safely collect move_task() stats here rather than
4159 * inside move_task().
1e3c88bd 4160 */
8e45cb54 4161 schedstat_add(env->sd, lb_gained[env->idle], pulled);
1e3c88bd 4162
5d6523eb 4163 return pulled;
1e3c88bd
PZ
4164}
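
The half-load test above means a pull may overshoot the target by at most half a task's load. A user-space toy of that greedy rule (loads invented, not kernel code):

#include <stdio.h>

int main(void)
{
	long imbalance = 1800;
	long loads[] = { 1024, 2048, 512, 300 };
	int i;

	for (i = 0; i < 4 && imbalance > 0; i++) {
		/* skip a task whose half-load exceeds what remains */
		if (loads[i] / 2 > imbalance) {
			printf("skip %ld\n", loads[i]);
			continue;
		}
		imbalance -= loads[i];
		printf("pull %ld, imbalance now %ld\n", loads[i], imbalance);
	}
	return 0;
}

Running it pulls 1024, skips 2048, then pulls 512 and 300, ending at imbalance -36: a small overshoot is accepted rather than leaving the domain unbalanced.
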
4165
230059de 4166#ifdef CONFIG_FAIR_GROUP_SCHED
9e3081ca
PZ
4167/*
4168 * update tg->load_weight by folding this cpu's load_avg
4169 */
48a16753 4170static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
9e3081ca 4171{
48a16753
PT
4172 struct sched_entity *se = tg->se[cpu];
4173 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
9e3081ca 4174
48a16753
PT
4175 /* throttled entities do not contribute to load */
4176 if (throttled_hierarchy(cfs_rq))
4177 return;
9e3081ca 4178
aff3e498 4179 update_cfs_rq_blocked_load(cfs_rq, 1);
9e3081ca 4180
82958366
PT
4181 if (se) {
4182 update_entity_load_avg(se, 1);
4183 /*
4184 * We pivot on our runnable average having decayed to zero for
4185 * list removal. This generally implies that all our children
4186 * have also been removed (modulo rounding error or bandwidth
4187 * control); however, such cases are rare and we can fix these
4188 * at enqueue.
4189 *
4190 * TODO: fix up out-of-order children on enqueue.
4191 */
4192 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
4193 list_del_leaf_cfs_rq(cfs_rq);
4194 } else {
48a16753 4195 struct rq *rq = rq_of(cfs_rq);
82958366
PT
4196 update_rq_runnable_avg(rq, rq->nr_running);
4197 }
9e3081ca
PZ
4198}
4199
48a16753 4200static void update_blocked_averages(int cpu)
9e3081ca 4201{
9e3081ca 4202 struct rq *rq = cpu_rq(cpu);
48a16753
PT
4203 struct cfs_rq *cfs_rq;
4204 unsigned long flags;
9e3081ca 4205
48a16753
PT
4206 raw_spin_lock_irqsave(&rq->lock, flags);
4207 update_rq_clock(rq);
9763b67f
PZ
4208 /*
4209 * Iterates the task_group tree in a bottom up fashion, see
4210 * list_add_leaf_cfs_rq() for details.
4211 */
64660c86 4212 for_each_leaf_cfs_rq(rq, cfs_rq) {
48a16753
PT
4213 /*
4214 * Note: We may want to consider periodically releasing
4215 * rq->lock about these updates so that creating many task
4216 * groups does not result in continually extending hold time.
4217 */
4218 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
64660c86 4219 }
48a16753
PT
4220
4221 raw_spin_unlock_irqrestore(&rq->lock, flags);
9e3081ca
PZ
4222}
4223
9763b67f 4224/*
68520796 4225 * Compute the hierarchical load factor for cfs_rq and all its ancestors.
9763b67f
PZ
4226 * This needs to be done in a top-down fashion because the load of a child
4227 * group is a fraction of its parent's load.
4228 */
68520796 4229static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
9763b67f 4230{
68520796
VD
4231 struct rq *rq = rq_of(cfs_rq);
4232 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
a35b6466 4233 unsigned long now = jiffies;
68520796 4234 unsigned long load;
a35b6466 4235
68520796 4236 if (cfs_rq->last_h_load_update == now)
a35b6466
PZ
4237 return;
4238
68520796
VD
4239 cfs_rq->h_load_next = NULL;
4240 for_each_sched_entity(se) {
4241 cfs_rq = cfs_rq_of(se);
4242 cfs_rq->h_load_next = se;
4243 if (cfs_rq->last_h_load_update == now)
4244 break;
4245 }
a35b6466 4246
68520796
VD
4247 if (!se) {
4248 cfs_rq->h_load = rq->avg.load_avg_contrib;
4249 cfs_rq->last_h_load_update = now;
4250 }
4251
4252 while ((se = cfs_rq->h_load_next) != NULL) {
4253 load = cfs_rq->h_load;
4254 load = div64_ul(load * se->avg.load_avg_contrib,
4255 cfs_rq->runnable_load_avg + 1);
4256 cfs_rq = group_cfs_rq(se);
4257 cfs_rq->h_load = load;
4258 cfs_rq->last_h_load_update = now;
4259 }
9763b67f
PZ
4260}
4261
367456c7 4262static unsigned long task_h_load(struct task_struct *p)
230059de 4263{
367456c7 4264 struct cfs_rq *cfs_rq = task_cfs_rq(p);
230059de 4265
68520796 4266 update_cfs_rq_h_load(cfs_rq);
a003a25b
AS
4267 return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
4268 cfs_rq->runnable_load_avg + 1);
230059de
PZ
4269}
4270#else
48a16753 4271static inline void update_blocked_averages(int cpu)
9e3081ca
PZ
4272{
4273}
4274
367456c7 4275static unsigned long task_h_load(struct task_struct *p)
1e3c88bd 4276{
a003a25b 4277 return p->se.avg.load_avg_contrib;
1e3c88bd 4278}
230059de 4279#endif
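
For example (numbers invented): in the group-scheduling case, a task with load_avg_contrib = 512 on a group cfs_rq whose h_load is 2048 and whose runnable_load_avg is 1024 is attributed

\[
\text{task\_h\_load}(p) = \frac{512 \cdot 2048}{1024 + 1} \approx 1023
\]

i.e. roughly half the group's hierarchical load, matching the task's share of the group's runnable load.
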
1e3c88bd 4280
1e3c88bd 4281/********** Helpers for find_busiest_group ************************/
1e3c88bd
PZ
4282/*
4283 * sg_lb_stats - stats of a sched_group required for load_balancing
4284 */
4285struct sg_lb_stats {
4286 unsigned long avg_load; /*Avg load across the CPUs of the group */
4287 unsigned long group_load; /* Total load over the CPUs of the group */
1e3c88bd 4288 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
56cf515b 4289 unsigned long load_per_task;
3ae11c90 4290 unsigned long group_power;
147c5fc2
PZ
4291 unsigned int sum_nr_running; /* Nr tasks running in the group */
4292 unsigned int group_capacity;
4293 unsigned int idle_cpus;
4294 unsigned int group_weight;
1e3c88bd 4295 int group_imb; /* Is there an imbalance in the group ? */
fab47622 4296 int group_has_capacity; /* Is there extra capacity in the group? */
1e3c88bd
PZ
4297};
4298
56cf515b
JK
4299/*
4300 * sd_lb_stats - Structure to store the statistics of a sched_domain
4301 * during load balancing.
4302 */
4303struct sd_lb_stats {
4304 struct sched_group *busiest; /* Busiest group in this sd */
4305 struct sched_group *local; /* Local group in this sd */
4306 unsigned long total_load; /* Total load of all groups in sd */
4307 unsigned long total_pwr; /* Total power of all groups in sd */
4308 unsigned long avg_load; /* Average load across all groups in sd */
4309
56cf515b 4310 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
147c5fc2 4311 struct sg_lb_stats local_stat; /* Statistics of the local group */
56cf515b
JK
4312};
4313
147c5fc2
PZ
4314static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
4315{
4316 /*
4317 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
4318 * local_stat because update_sg_lb_stats() does a full clear/assignment.
4319 * We must however clear busiest_stat::avg_load because
4320 * update_sd_pick_busiest() reads this before assignment.
4321 */
4322 *sds = (struct sd_lb_stats){
4323 .busiest = NULL,
4324 .local = NULL,
4325 .total_load = 0UL,
4326 .total_pwr = 0UL,
4327 .busiest_stat = {
4328 .avg_load = 0UL,
4329 },
4330 };
4331}
4332
1e3c88bd
PZ
4333/**
4334 * get_sd_load_idx - Obtain the load index for a given sched domain.
4335 * @sd: The sched_domain whose load_idx is to be obtained.
4336 * @idle: The idle status of the CPU whose sd load_idx is obtained.
e69f6186
YB
4337 *
4338 * Return: The load index.
1e3c88bd
PZ
4339 */
4340static inline int get_sd_load_idx(struct sched_domain *sd,
4341 enum cpu_idle_type idle)
4342{
4343 int load_idx;
4344
4345 switch (idle) {
4346 case CPU_NOT_IDLE:
4347 load_idx = sd->busy_idx;
4348 break;
4349
4350 case CPU_NEWLY_IDLE:
4351 load_idx = sd->newidle_idx;
4352 break;
4353 default:
4354 load_idx = sd->idle_idx;
4355 break;
4356 }
4357
4358 return load_idx;
4359}
4360
15f803c9 4361static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
1e3c88bd 4362{
1399fa78 4363 return SCHED_POWER_SCALE;
1e3c88bd
PZ
4364}
4365
4366unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
4367{
4368 return default_scale_freq_power(sd, cpu);
4369}
4370
15f803c9 4371static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
1e3c88bd 4372{
669c55e9 4373 unsigned long weight = sd->span_weight;
1e3c88bd
PZ
4374 unsigned long smt_gain = sd->smt_gain;
4375
4376 smt_gain /= weight;
4377
4378 return smt_gain;
4379}
4380
4381unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
4382{
4383 return default_scale_smt_power(sd, cpu);
4384}
4385
15f803c9 4386static unsigned long scale_rt_power(int cpu)
1e3c88bd
PZ
4387{
4388 struct rq *rq = cpu_rq(cpu);
b654f7de 4389 u64 total, available, age_stamp, avg;
1e3c88bd 4390
b654f7de
PZ
4391 /*
4392 * Since we're reading these variables without serialization, make sure
4393 * we read them once before doing sanity checks on them.
4394 */
4395 age_stamp = ACCESS_ONCE(rq->age_stamp);
4396 avg = ACCESS_ONCE(rq->rt_avg);
4397
78becc27 4398 total = sched_avg_period() + (rq_clock(rq) - age_stamp);
aa483808 4399
b654f7de 4400 if (unlikely(total < avg)) {
aa483808
VP
4401 /* Ensures that power won't end up being negative */
4402 available = 0;
4403 } else {
b654f7de 4404 available = total - avg;
aa483808 4405 }
1e3c88bd 4406
1399fa78
NR
4407 if (unlikely((s64)total < SCHED_POWER_SCALE))
4408 total = SCHED_POWER_SCALE;
1e3c88bd 4409
1399fa78 4410 total >>= SCHED_POWER_SHIFT;
1e3c88bd
PZ
4411
4412 return div_u64(available, total);
4413}
4414
4415static void update_cpu_power(struct sched_domain *sd, int cpu)
4416{
669c55e9 4417 unsigned long weight = sd->span_weight;
1399fa78 4418 unsigned long power = SCHED_POWER_SCALE;
1e3c88bd
PZ
4419 struct sched_group *sdg = sd->groups;
4420
1e3c88bd
PZ
4421 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
4422 if (sched_feat(ARCH_POWER))
4423 power *= arch_scale_smt_power(sd, cpu);
4424 else
4425 power *= default_scale_smt_power(sd, cpu);
4426
1399fa78 4427 power >>= SCHED_POWER_SHIFT;
1e3c88bd
PZ
4428 }
4429
9c3f75cb 4430 sdg->sgp->power_orig = power;
9d5efe05
SV
4431
4432 if (sched_feat(ARCH_POWER))
4433 power *= arch_scale_freq_power(sd, cpu);
4434 else
4435 power *= default_scale_freq_power(sd, cpu);
4436
1399fa78 4437 power >>= SCHED_POWER_SHIFT;
9d5efe05 4438
1e3c88bd 4439 power *= scale_rt_power(cpu);
1399fa78 4440 power >>= SCHED_POWER_SHIFT;
1e3c88bd
PZ
4441
4442 if (!power)
4443 power = 1;
4444
e51fd5e2 4445 cpu_rq(cpu)->cpu_power = power;
9c3f75cb 4446 sdg->sgp->power = power;
1e3c88bd
PZ
4447}
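
Numerically, the chain above multiplies SCHED_POWER_SCALE by an SMT factor, a frequency factor and an RT-availability factor, shifting back down by SCHED_POWER_SHIFT after each step. With illustrative inputs (an smt_gain of 1178 shared by two siblings, full frequency scaling, and ~10% of recent time consumed by RT tasks; 1178 is a common smt_gain default):

\[
\text{power} \approx 1024 \cdot \frac{1178/2}{1024} \cdot \frac{1024}{1024} \cdot \frac{9}{10} \approx 530
\]

so each SMT thread advertises roughly half a nominal cpu's compute capacity, further discounted by RT pressure.
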
4448
029632fb 4449void update_group_power(struct sched_domain *sd, int cpu)
1e3c88bd
PZ
4450{
4451 struct sched_domain *child = sd->child;
4452 struct sched_group *group, *sdg = sd->groups;
4453 unsigned long power;
4ec4412e
VG
4454 unsigned long interval;
4455
4456 interval = msecs_to_jiffies(sd->balance_interval);
4457 interval = clamp(interval, 1UL, max_load_balance_interval);
4458 sdg->sgp->next_update = jiffies + interval;
1e3c88bd
PZ
4459
4460 if (!child) {
4461 update_cpu_power(sd, cpu);
4462 return;
4463 }
4464
4465 power = 0;
4466
74a5ce20
PZ
4467 if (child->flags & SD_OVERLAP) {
4468 /*
4469 * SD_OVERLAP domains cannot assume that child groups
4470 * span the current group.
4471 */
4472
4473 for_each_cpu(cpu, sched_group_cpus(sdg))
4474 power += power_of(cpu);
4475 } else {
4476 /*
4477 * !SD_OVERLAP domains can assume that child groups
4478 * span the current group.
4479 */
4480
4481 group = child->groups;
4482 do {
4483 power += group->sgp->power;
4484 group = group->next;
4485 } while (group != child->groups);
4486 }
1e3c88bd 4487
c3decf0d 4488 sdg->sgp->power_orig = sdg->sgp->power = power;
1e3c88bd
PZ
4489}
4490
9d5efe05
SV
4491/*
4492 * Try and fix up capacity for tiny siblings; this is needed when
4493 * things like SD_ASYM_PACKING need f_b_g to select another sibling
4494 * which on its own isn't powerful enough.
4495 *
4496 * See update_sd_pick_busiest() and check_asym_packing().
4497 */
4498static inline int
4499fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
4500{
4501 /*
1399fa78 4502 * Only siblings can have significantly less than SCHED_POWER_SCALE
9d5efe05 4503 */
a6c75f2f 4504 if (!(sd->flags & SD_SHARE_CPUPOWER))
9d5efe05
SV
4505 return 0;
4506
4507 /*
4508 * If ~90% of the cpu_power is still there, we're good.
4509 */
9c3f75cb 4510 if (group->sgp->power * 32 > group->sgp->power_orig * 29)
9d5efe05
SV
4511 return 1;
4512
4513 return 0;
4514}
4515
30ce5dab
PZ
4516/*
4517 * Group imbalance indicates (and tries to solve) the problem where balancing
4518 * groups is inadequate due to tsk_cpus_allowed() constraints.
4519 *
4520 * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
4521 * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
4522 * Something like:
4523 *
4524 * { 0 1 2 3 } { 4 5 6 7 }
4525 * * * * *
4526 *
4527 * If we were to balance group-wise we'd place two tasks in the first group and
4528 * two tasks in the second group. Clearly this is undesired as it will overload
4529 * cpu 3 and leave one of the cpus in the second group unused.
4530 *
4531 * The current solution to this issue is detecting the skew in the first group
6263322c
PZ
4532 * by noticing the lower domain failed to reach balance and had difficulty
4533 * moving tasks due to affinity constraints.
30ce5dab
PZ
4534 *
4535 * When this is detected, this group becomes a candidate for busiest; see
4536 * update_sd_pick_busiest(). And calculate_imbalance() and
6263322c 4537 * find_busiest_group() avoid some of the usual balance conditions to allow it
30ce5dab
PZ
4538 * to create an effective group imbalance.
4539 *
4540 * This is a somewhat tricky proposition since the next run might not find the
4541 * group imbalance and decide the groups need to be balanced again. A most
4542 * subtle and fragile situation.
4543 */
4544
6263322c 4545static inline int sg_imbalanced(struct sched_group *group)
30ce5dab 4546{
6263322c 4547 return group->sgp->imbalance;
30ce5dab
PZ
4548}
4549
1e3c88bd
PZ
4550/**
4551 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
cd96891d 4552 * @env: The load balancing environment.
1e3c88bd 4553 * @group: sched_group whose statistics are to be updated.
1e3c88bd 4554 * @load_idx: Load index of sched_domain of this_cpu for load calc.
1e3c88bd 4555 * @local_group: Does group contain this_cpu.
1e3c88bd
PZ
4556 * @sgs: variable to hold the statistics for this group.
4557 */
bd939f45
PZ
4558static inline void update_sg_lb_stats(struct lb_env *env,
4559 struct sched_group *group, int load_idx,
23f0d209 4560 int local_group, struct sg_lb_stats *sgs)
1e3c88bd 4561{
30ce5dab
PZ
4562 unsigned long nr_running;
4563 unsigned long load;
bd939f45 4564 int i;
1e3c88bd 4565
b72ff13c
PZ
4566 memset(sgs, 0, sizeof(*sgs));
4567
b9403130 4568 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
1e3c88bd
PZ
4569 struct rq *rq = cpu_rq(i);
4570
e44bc5c5
PZ
4571 nr_running = rq->nr_running;
4572
1e3c88bd 4573 /* Bias balancing toward cpus of our domain */
6263322c 4574 if (local_group)
04f733b4 4575 load = target_load(i, load_idx);
6263322c 4576 else
1e3c88bd 4577 load = source_load(i, load_idx);
1e3c88bd
PZ
4578
4579 sgs->group_load += load;
e44bc5c5 4580 sgs->sum_nr_running += nr_running;
1e3c88bd 4581 sgs->sum_weighted_load += weighted_cpuload(i);
aae6d3dd
SS
4582 if (idle_cpu(i))
4583 sgs->idle_cpus++;
1e3c88bd
PZ
4584 }
4585
1e3c88bd 4586 /* Adjust by relative CPU power of the group */
3ae11c90
PZ
4587 sgs->group_power = group->sgp->power;
4588 sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_power;
1e3c88bd 4589
dd5feea1 4590 if (sgs->sum_nr_running)
38d0f770 4591 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
1e3c88bd 4592
6263322c 4593 sgs->group_imb = sg_imbalanced(group);
1e3c88bd 4594
56cf515b 4595 sgs->group_capacity =
3ae11c90 4596 DIV_ROUND_CLOSEST(sgs->group_power, SCHED_POWER_SCALE);
1e3c88bd 4597
9d5efe05 4598 if (!sgs->group_capacity)
bd939f45 4599 sgs->group_capacity = fix_small_capacity(env->sd, group);
56cf515b 4600
aae6d3dd 4601 sgs->group_weight = group->group_weight;
fab47622
NR
4602
4603 if (sgs->group_capacity > sgs->sum_nr_running)
4604 sgs->group_has_capacity = 1;
1e3c88bd
PZ
4605}
4606
532cb4c4
MN
4607/**
4608 * update_sd_pick_busiest - return 1 on busiest group
cd96891d 4609 * @env: The load balancing environment.
532cb4c4
MN
4610 * @sds: sched_domain statistics
4611 * @sg: sched_group candidate to be checked for being the busiest
b6b12294 4612 * @sgs: sched_group statistics
532cb4c4
MN
4613 *
4614 * Determine if @sg is a busier group than the previously selected
4615 * busiest group.
e69f6186
YB
4616 *
4617 * Return: %true if @sg is a busier group than the previously selected
4618 * busiest group. %false otherwise.
532cb4c4 4619 */
bd939f45 4620static bool update_sd_pick_busiest(struct lb_env *env,
532cb4c4
MN
4621 struct sd_lb_stats *sds,
4622 struct sched_group *sg,
bd939f45 4623 struct sg_lb_stats *sgs)
532cb4c4 4624{
56cf515b 4625 if (sgs->avg_load <= sds->busiest_stat.avg_load)
532cb4c4
MN
4626 return false;
4627
4628 if (sgs->sum_nr_running > sgs->group_capacity)
4629 return true;
4630
4631 if (sgs->group_imb)
4632 return true;
4633
4634 /*
4635 * ASYM_PACKING needs to move all the work to the lowest
4636 * numbered CPUs in the group, therefore mark all groups
4637 * higher than ourself as busy.
4638 */
bd939f45
PZ
4639 if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
4640 env->dst_cpu < group_first_cpu(sg)) {
532cb4c4
MN
4641 if (!sds->busiest)
4642 return true;
4643
4644 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
4645 return true;
4646 }
4647
4648 return false;
4649}
4650
1e3c88bd 4651/**
461819ac 4652 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
cd96891d 4653 * @env: The load balancing environment.
1e3c88bd
PZ
4655 * @sds: variable to hold the statistics for this sched_domain.
4656 */
bd939f45 4657static inline void update_sd_lb_stats(struct lb_env *env,
23f0d209 4658 struct sd_lb_stats *sds)
1e3c88bd 4659{
bd939f45
PZ
4660 struct sched_domain *child = env->sd->child;
4661 struct sched_group *sg = env->sd->groups;
56cf515b 4662 struct sg_lb_stats tmp_sgs;
1e3c88bd
PZ
4663 int load_idx, prefer_sibling = 0;
4664
4665 if (child && child->flags & SD_PREFER_SIBLING)
4666 prefer_sibling = 1;
4667
bd939f45 4668 load_idx = get_sd_load_idx(env->sd, env->idle);
1e3c88bd
PZ
4669
4670 do {
56cf515b 4671 struct sg_lb_stats *sgs = &tmp_sgs;
1e3c88bd
PZ
4672 int local_group;
4673
bd939f45 4674 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
56cf515b
JK
4675 if (local_group) {
4676 sds->local = sg;
4677 sgs = &sds->local_stat;
b72ff13c
PZ
4678
4679 if (env->idle != CPU_NEWLY_IDLE ||
4680 time_after_eq(jiffies, sg->sgp->next_update))
4681 update_group_power(env->sd, env->dst_cpu);
56cf515b 4682 }
1e3c88bd 4683
56cf515b 4684 update_sg_lb_stats(env, sg, load_idx, local_group, sgs);
1e3c88bd 4685
b72ff13c
PZ
4686 if (local_group)
4687 goto next_group;
4688
1e3c88bd
PZ
4689 /*
4690 * In case the child domain prefers tasks go to siblings
532cb4c4 4691 * first, lower the sg capacity to one so that we'll try
75dd321d
NR
4692 * and move all the excess tasks away. We lower the capacity
4693 * of a group only if the local group has the capacity to fit
4694 * these excess tasks, i.e. nr_running < group_capacity. The
4695 * extra check prevents the case where you always pull from the
4696 * heaviest group when it is already under-utilized (possible
4697 * when a large weight task outweighs the tasks on the system).
1e3c88bd 4698 */
b72ff13c
PZ
4699 if (prefer_sibling && sds->local &&
4700 sds->local_stat.group_has_capacity)
147c5fc2 4701 sgs->group_capacity = min(sgs->group_capacity, 1U);
1e3c88bd 4702
b72ff13c 4703 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
532cb4c4 4704 sds->busiest = sg;
56cf515b 4705 sds->busiest_stat = *sgs;
1e3c88bd
PZ
4706 }
4707
b72ff13c
PZ
4708next_group:
4709 /* Now, start updating sd_lb_stats */
4710 sds->total_load += sgs->group_load;
4711 sds->total_pwr += sgs->group_power;
4712
532cb4c4 4713 sg = sg->next;
bd939f45 4714 } while (sg != env->sd->groups);
532cb4c4
MN
4715}
4716
532cb4c4
MN
4717/**
4718 * check_asym_packing - Check to see if the group is packed into the
4719 * sched domain.
4720 *
4721 * This is primarily intended to be used at the sibling level. Some
4722 * cores like POWER7 prefer to use lower numbered SMT threads. In the
4723 * case of POWER7, it can move to lower SMT modes only when higher
4724 * threads are idle. When in lower SMT modes, the threads will
4725 * perform better since they share less core resources. Hence when we
4726 * have idle threads, we want them to be the higher ones.
4727 *
4728 * This packing function is run on idle threads. It checks to see if
4729 * the busiest CPU in this domain (core in the P7 case) has a higher
4730 * CPU number than the packing function is being run on. Here we are
4731 * assuming a lower CPU number will be equivalent to a lower SMT
4732 * thread number.
4733 *
e69f6186 4734 * Return: 1 when packing is required and a task should be moved to
b6b12294
MN
4735 * this CPU. The amount of the imbalance is returned in env->imbalance.
4736 *
cd96891d 4737 * @env: The load balancing environment.
532cb4c4 4738 * @sds: Statistics of the sched_domain which is to be packed
532cb4c4 4739 */
bd939f45 4740static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
532cb4c4
MN
4741{
4742 int busiest_cpu;
4743
bd939f45 4744 if (!(env->sd->flags & SD_ASYM_PACKING))
532cb4c4
MN
4745 return 0;
4746
4747 if (!sds->busiest)
4748 return 0;
4749
4750 busiest_cpu = group_first_cpu(sds->busiest);
bd939f45 4751 if (env->dst_cpu > busiest_cpu)
532cb4c4
MN
4752 return 0;
4753
bd939f45 4754 env->imbalance = DIV_ROUND_CLOSEST(
3ae11c90
PZ
4755 sds->busiest_stat.avg_load * sds->busiest_stat.group_power,
4756 SCHED_POWER_SCALE);
bd939f45 4757
532cb4c4 4758 return 1;
1e3c88bd
PZ
4759}
4760
4761/**
4762 * fix_small_imbalance - Calculate the minor imbalance that exists
4763 * amongst the groups of a sched_domain, during
4764 * load balancing.
cd96891d 4765 * @env: The load balancing environment.
1e3c88bd 4766 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
1e3c88bd 4767 */
bd939f45
PZ
4768static inline
4769void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
1e3c88bd
PZ
4770{
4771 unsigned long tmp, pwr_now = 0, pwr_move = 0;
4772 unsigned int imbn = 2;
dd5feea1 4773 unsigned long scaled_busy_load_per_task;
56cf515b 4774 struct sg_lb_stats *local, *busiest;
1e3c88bd 4775
56cf515b
JK
4776 local = &sds->local_stat;
4777 busiest = &sds->busiest_stat;
1e3c88bd 4778
56cf515b
JK
4779 if (!local->sum_nr_running)
4780 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
4781 else if (busiest->load_per_task > local->load_per_task)
4782 imbn = 1;
dd5feea1 4783
56cf515b
JK
4784 scaled_busy_load_per_task =
4785 (busiest->load_per_task * SCHED_POWER_SCALE) /
3ae11c90 4786 busiest->group_power;
56cf515b
JK
4787
4788 if (busiest->avg_load - local->avg_load + scaled_busy_load_per_task >=
4789 (scaled_busy_load_per_task * imbn)) {
4790 env->imbalance = busiest->load_per_task;
1e3c88bd
PZ
4791 return;
4792 }
4793
4794 /*
4795 * OK, we don't have enough imbalance to justify moving tasks,
4796 * however we may be able to increase total CPU power used by
4797 * moving them.
4798 */
4799
3ae11c90 4800 pwr_now += busiest->group_power *
56cf515b 4801 min(busiest->load_per_task, busiest->avg_load);
3ae11c90 4802 pwr_now += local->group_power *
56cf515b 4803 min(local->load_per_task, local->avg_load);
1399fa78 4804 pwr_now /= SCHED_POWER_SCALE;
1e3c88bd
PZ
4805
4806 /* Amount of load we'd subtract */
56cf515b 4807 tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
3ae11c90 4808 busiest->group_power;
56cf515b 4809 if (busiest->avg_load > tmp) {
3ae11c90 4810 pwr_move += busiest->group_power *
56cf515b
JK
4811 min(busiest->load_per_task,
4812 busiest->avg_load - tmp);
4813 }
1e3c88bd
PZ
4814
4815 /* Amount of load we'd add */
3ae11c90 4816 if (busiest->avg_load * busiest->group_power <
56cf515b 4817 busiest->load_per_task * SCHED_POWER_SCALE) {
3ae11c90
PZ
4818 tmp = (busiest->avg_load * busiest->group_power) /
4819 local->group_power;
56cf515b
JK
4820 } else {
4821 tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
3ae11c90 4822 local->group_power;
56cf515b 4823 }
3ae11c90
PZ
4824 pwr_move += local->group_power *
4825 min(local->load_per_task, local->avg_load + tmp);
1399fa78 4826 pwr_move /= SCHED_POWER_SCALE;
1e3c88bd
PZ
4827
4828 /* Move if we gain throughput */
4829 if (pwr_move > pwr_now)
56cf515b 4830 env->imbalance = busiest->load_per_task;
1e3c88bd
PZ
4831}
4832
4833/**
4834 * calculate_imbalance - Calculate the amount of imbalance present within the
4835 * groups of a given sched_domain during load balance.
bd939f45 4836 * @env: load balance environment
1e3c88bd 4837 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
1e3c88bd 4838 */
bd939f45 4839static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
1e3c88bd 4840{
dd5feea1 4841 unsigned long max_pull, load_above_capacity = ~0UL;
56cf515b
JK
4842 struct sg_lb_stats *local, *busiest;
4843
4844 local = &sds->local_stat;
56cf515b 4845 busiest = &sds->busiest_stat;
dd5feea1 4846
56cf515b 4847 if (busiest->group_imb) {
30ce5dab
PZ
4848 /*
4849 * In the group_imb case we cannot rely on group-wide averages
4850 * to ensure cpu-load equilibrium, look at wider averages. XXX
4851 */
56cf515b
JK
4852 busiest->load_per_task =
4853 min(busiest->load_per_task, sds->avg_load);
dd5feea1
SS
4854 }
4855
1e3c88bd
PZ
4856 /*
4857 * In the presence of smp nice balancing, certain scenarios can have
4858 * max load less than avg load (as we skip the groups at or below
4859 * their cpu_power while calculating max_load).
4860 */
56cf515b 4861 if (busiest->avg_load < sds->avg_load) {
bd939f45
PZ
4862 env->imbalance = 0;
4863 return fix_small_imbalance(env, sds);
1e3c88bd
PZ
4864 }
4865
56cf515b 4866 if (!busiest->group_imb) {
dd5feea1
SS
4867 /*
4868 * Don't want to pull so many tasks that a group would go idle.
30ce5dab
PZ
4869 * Except of course for the group_imb case, since then we might
4870 * have to drop below capacity to reach cpu-load equilibrium.
dd5feea1 4871 */
56cf515b
JK
4872 load_above_capacity =
4873 (busiest->sum_nr_running - busiest->group_capacity);
dd5feea1 4874
1399fa78 4875 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
3ae11c90 4876 load_above_capacity /= busiest->group_power;
dd5feea1
SS
4877 }
4878
4879 /*
4880 * We're trying to get all the cpus to the average_load, so we don't
4881 * want to push ourselves above the average load, nor do we wish to
4882 * reduce the max loaded cpu below the average load. At the same time,
4883 * we also don't want to reduce the group load below the group capacity
4884 * (so that we can implement power-savings policies etc). Thus we look
4885 * for the minimum possible imbalance.
dd5feea1 4886 */
30ce5dab 4887 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
1e3c88bd
PZ
4888
4889 /* How much load to actually move to equalise the imbalance */
56cf515b 4890 env->imbalance = min(
3ae11c90
PZ
4891 max_pull * busiest->group_power,
4892 (sds->avg_load - local->avg_load) * local->group_power
56cf515b 4893 ) / SCHED_POWER_SCALE;
1e3c88bd
PZ
4894
4895 /*
4896 * If *imbalance is less than the average load per runnable task,
25985edc 4897 * there is no guarantee that any tasks will be moved, so we'll
1e3c88bd
PZ
4898 * consider bumping its value to force at least one task to be
4899 * moved.
4900 */
56cf515b 4901 if (env->imbalance < busiest->load_per_task)
bd939f45 4902 return fix_small_imbalance(env, sds);
1e3c88bd 4903}
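
A worked pass with invented stats, equal group powers of 1024 and load_above_capacity not limiting: busiest avg_load = 1536, local avg_load = 1024, and a domain avg_load of 1280 give

\[
\begin{aligned}
\text{max\_pull} &= \min(1536 - 1280,\ \text{load\_above\_capacity}) = 256 \\
\text{imbalance} &= \frac{\min(256 \cdot 1024,\ (1280 - 1024) \cdot 1024)}{1024} = 256
\end{aligned}
\]

i.e. the busiest group is asked to shed just enough load to meet the domain average without pushing the local group above it.
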
fab47622 4904
1e3c88bd
PZ
4905/******* find_busiest_group() helpers end here *********************/
4906
4907/**
4908 * find_busiest_group - Returns the busiest group within the sched_domain
4909 * if there is an imbalance. If there isn't an imbalance, and
4910 * the user has opted for power-savings, it returns a group whose
4911 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
4912 * such a group exists.
4913 *
4914 * Also calculates the amount of weighted load which should be moved
4915 * to restore balance.
4916 *
cd96891d 4917 * @env: The load balancing environment.
1e3c88bd 4918 *
e69f6186 4919 * Return: - The busiest group if imbalance exists.
1e3c88bd
PZ
4920 * - If no imbalance and user has opted for power-savings balance,
4921 * return the least loaded group whose CPUs can be
4922 * put to idle by rebalancing its tasks onto our group.
4923 */
56cf515b 4924static struct sched_group *find_busiest_group(struct lb_env *env)
1e3c88bd 4925{
56cf515b 4926 struct sg_lb_stats *local, *busiest;
1e3c88bd
PZ
4927 struct sd_lb_stats sds;
4928
147c5fc2 4929 init_sd_lb_stats(&sds);
1e3c88bd
PZ
4930
4931 /*
4932 * Compute the various statistics relevant for load balancing at
4933 * this level.
4934 */
23f0d209 4935 update_sd_lb_stats(env, &sds);
56cf515b
JK
4936 local = &sds.local_stat;
4937 busiest = &sds.busiest_stat;
1e3c88bd 4938
bd939f45
PZ
4939 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
4940 check_asym_packing(env, &sds))
532cb4c4
MN
4941 return sds.busiest;
4942
cc57aa8f 4943 /* There is no busy sibling group to pull tasks from */
56cf515b 4944 if (!sds.busiest || busiest->sum_nr_running == 0)
1e3c88bd
PZ
4945 goto out_balanced;
4946
1399fa78 4947 sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
b0432d8f 4948
866ab43e
PZ
4949 /*
4950 * If the busiest group is imbalanced, the checks below don't
30ce5dab 4951 * work because they assume all things are equal, which typically
866ab43e
PZ
4952 * isn't true due to cpus_allowed constraints and the like.
4953 */
56cf515b 4954 if (busiest->group_imb)
866ab43e
PZ
4955 goto force_balance;
4956
cc57aa8f 4957 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
56cf515b
JK
4958 if (env->idle == CPU_NEWLY_IDLE && local->group_has_capacity &&
4959 !busiest->group_has_capacity)
fab47622
NR
4960 goto force_balance;
4961
cc57aa8f
PZ
4962 /*
4963 * If the local group is more busy than the selected busiest group
4964 * don't try and pull any tasks.
4965 */
56cf515b 4966 if (local->avg_load >= busiest->avg_load)
1e3c88bd
PZ
4967 goto out_balanced;
4968
cc57aa8f
PZ
4969 /*
4970 * Don't pull any tasks if this group is already above the domain
4971 * average load.
4972 */
56cf515b 4973 if (local->avg_load >= sds.avg_load)
1e3c88bd
PZ
4974 goto out_balanced;
4975
bd939f45 4976 if (env->idle == CPU_IDLE) {
aae6d3dd
SS
4977 /*
4978 * This cpu is idle. If the busiest group doesn't have
4979 * more tasks than the number of available cpus and
4980 * there is no imbalance between this and the busiest
4981 * group wrt idle cpus, it is balanced.
4982 */
56cf515b
JK
4983 if ((local->idle_cpus < busiest->idle_cpus) &&
4984 busiest->sum_nr_running <= busiest->group_weight)
aae6d3dd 4985 goto out_balanced;
c186fafe
PZ
4986 } else {
4987 /*
4988 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
4989 * imbalance_pct to be conservative.
4990 */
56cf515b
JK
4991 if (100 * busiest->avg_load <=
4992 env->sd->imbalance_pct * local->avg_load)
c186fafe 4993 goto out_balanced;
aae6d3dd 4994 }
1e3c88bd 4995
fab47622 4996force_balance:
1e3c88bd 4997 /* Looks like there is an imbalance. Compute it */
bd939f45 4998 calculate_imbalance(env, &sds);
1e3c88bd
PZ
4999 return sds.busiest;
5000
5001out_balanced:
bd939f45 5002 env->imbalance = 0;
1e3c88bd
PZ
5003 return NULL;
5004}
5005
5006/*
5007 * find_busiest_queue - find the busiest runqueue among the cpus in group.
5008 */
bd939f45 5009static struct rq *find_busiest_queue(struct lb_env *env,
b9403130 5010 struct sched_group *group)
1e3c88bd
PZ
5011{
5012 struct rq *busiest = NULL, *rq;
95a79b80 5013 unsigned long busiest_load = 0, busiest_power = 1;
1e3c88bd
PZ
5014 int i;
5015
6906a408 5016 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
1e3c88bd 5017 unsigned long power = power_of(i);
1399fa78
NR
5018 unsigned long capacity = DIV_ROUND_CLOSEST(power,
5019 SCHED_POWER_SCALE);
1e3c88bd
PZ
5020 unsigned long wl;
5021
9d5efe05 5022 if (!capacity)
bd939f45 5023 capacity = fix_small_capacity(env->sd, group);
9d5efe05 5024
1e3c88bd 5025 rq = cpu_rq(i);
6e40f5bb 5026 wl = weighted_cpuload(i);
1e3c88bd 5027
6e40f5bb
TG
5028 /*
5029 * When comparing with imbalance, use weighted_cpuload()
5030 * which is not scaled with the cpu power.
5031 */
bd939f45 5032 if (capacity && rq->nr_running == 1 && wl > env->imbalance)
1e3c88bd
PZ
5033 continue;
5034
6e40f5bb
TG
5035 /*
5036 * For the load comparisons with the other cpu's, consider
5037 * the weighted_cpuload() scaled with the cpu power, so that
5038 * the load can be moved away from the cpu that is potentially
5039 * running at a lower capacity.
95a79b80
JK
5040 *
5041 * Thus we're looking for max(wl_i / power_i); crosswise
5042 * multiplication to rid ourselves of the division works out
5043 * to: wl_i * power_j > wl_j * power_i, where j is our
5044 * previous maximum.
6e40f5bb 5045 */
95a79b80
JK
5046 if (wl * busiest_power > busiest_load * power) {
5047 busiest_load = wl;
5048 busiest_power = power;
1e3c88bd
PZ
5049 busiest = rq;
5050 }
5051 }
5052
5053 return busiest;
5054}
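
The crosswise test can be checked in isolation. A stand-alone sketch with invented loads and powers (cpu i runs at half power, so its smaller raw load is the larger power-scaled load):

#include <stdio.h>

/* wl_i / power_i > wl_j / power_j, rewritten without the division */
static int busier(unsigned long wl_i, unsigned long power_i,
		  unsigned long wl_j, unsigned long power_j)
{
	return wl_i * power_j > wl_j * power_i;
}

int main(void)
{
	/* candidate i: wl 600 at power 512; current max j: wl 1000 at 1024 */
	printf("%d\n", busier(600, 512, 1000, 1024));	/* 1: 1.17 > 0.98 */
	return 0;
}
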
5055
5056/*
5057 * Max backoff if we encounter pinned tasks. Pretty arbitrary value;
5058 * the exact value doesn't matter so long as it is large enough.
5059 */
5060#define MAX_PINNED_INTERVAL 512
5061
5062/* Working cpumask for load_balance and load_balance_newidle. */
e6252c3e 5063DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
1e3c88bd 5064
bd939f45 5065static int need_active_balance(struct lb_env *env)
1af3ed3d 5066{
bd939f45
PZ
5067 struct sched_domain *sd = env->sd;
5068
5069 if (env->idle == CPU_NEWLY_IDLE) {
532cb4c4
MN
5070
5071 /*
5072 * ASYM_PACKING needs to force migrate tasks from busy but
5073 * higher numbered CPUs in order to pack all tasks in the
5074 * lowest numbered CPUs.
5075 */
bd939f45 5076 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
532cb4c4 5077 return 1;
1af3ed3d
PZ
5078 }
5079
5080 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
5081}
5082
969c7921
TH
5083static int active_load_balance_cpu_stop(void *data);
5084
23f0d209
JK
5085static int should_we_balance(struct lb_env *env)
5086{
5087 struct sched_group *sg = env->sd->groups;
5088 struct cpumask *sg_cpus, *sg_mask;
5089 int cpu, balance_cpu = -1;
5090
5091 /*
5092 * In the newly idle case, we will allow all the cpus
5093 * to do the newly idle load balance.
5094 */
5095 if (env->idle == CPU_NEWLY_IDLE)
5096 return 1;
5097
5098 sg_cpus = sched_group_cpus(sg);
5099 sg_mask = sched_group_mask(sg);
5100 /* Try to find first idle cpu */
5101 for_each_cpu_and(cpu, sg_cpus, env->cpus) {
5102 if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
5103 continue;
5104
5105 balance_cpu = cpu;
5106 break;
5107 }
5108
5109 if (balance_cpu == -1)
5110 balance_cpu = group_balance_cpu(sg);
5111
5112 /*
5113 * The first idle cpu or the first cpu (busiest) in this sched group
5114 * is eligible for doing load balancing at this and above domains.
5115 */
b0cff9d8 5116 return balance_cpu == env->dst_cpu;
23f0d209
JK
5117}
5118
1e3c88bd
PZ
5119/*
5120 * Check this_cpu to ensure it is balanced within domain. Attempt to move
5121 * tasks if there is an imbalance.
5122 */
5123static int load_balance(int this_cpu, struct rq *this_rq,
5124 struct sched_domain *sd, enum cpu_idle_type idle,
23f0d209 5125 int *continue_balancing)
1e3c88bd 5126{
88b8dac0 5127 int ld_moved, cur_ld_moved, active_balance = 0;
6263322c 5128 struct sched_domain *sd_parent = sd->parent;
1e3c88bd 5129 struct sched_group *group;
1e3c88bd
PZ
5130 struct rq *busiest;
5131 unsigned long flags;
e6252c3e 5132 struct cpumask *cpus = __get_cpu_var(load_balance_mask);
1e3c88bd 5133
8e45cb54
PZ
5134 struct lb_env env = {
5135 .sd = sd,
ddcdf6e7
PZ
5136 .dst_cpu = this_cpu,
5137 .dst_rq = this_rq,
88b8dac0 5138 .dst_grpmask = sched_group_cpus(sd->groups),
8e45cb54 5139 .idle = idle,
eb95308e 5140 .loop_break = sched_nr_migrate_break,
b9403130 5141 .cpus = cpus,
8e45cb54
PZ
5142 };
5143
cfc03118
JK
5144 /*
5145 * For NEWLY_IDLE load_balancing, we don't need to consider
5146 * other cpus in our group
5147 */
e02e60c1 5148 if (idle == CPU_NEWLY_IDLE)
cfc03118 5149 env.dst_grpmask = NULL;
cfc03118 5150
1e3c88bd
PZ
5151 cpumask_copy(cpus, cpu_active_mask);
5152
1e3c88bd
PZ
5153 schedstat_inc(sd, lb_count[idle]);
5154
5155redo:
23f0d209
JK
5156 if (!should_we_balance(&env)) {
5157 *continue_balancing = 0;
1e3c88bd 5158 goto out_balanced;
23f0d209 5159 }
1e3c88bd 5160
23f0d209 5161 group = find_busiest_group(&env);
1e3c88bd
PZ
5162 if (!group) {
5163 schedstat_inc(sd, lb_nobusyg[idle]);
5164 goto out_balanced;
5165 }
5166
b9403130 5167 busiest = find_busiest_queue(&env, group);
1e3c88bd
PZ
5168 if (!busiest) {
5169 schedstat_inc(sd, lb_nobusyq[idle]);
5170 goto out_balanced;
5171 }
5172
78feefc5 5173 BUG_ON(busiest == env.dst_rq);
1e3c88bd 5174
bd939f45 5175 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
1e3c88bd
PZ
5176
5177 ld_moved = 0;
5178 if (busiest->nr_running > 1) {
5179 /*
5180 * Attempt to move tasks. If find_busiest_group has found
5181 * an imbalance but busiest->nr_running <= 1, the group is
5182 * still unbalanced. ld_moved simply stays zero, so it is
5183 * correctly treated as an imbalance.
5184 */
8e45cb54 5185 env.flags |= LBF_ALL_PINNED;
c82513e5
PZ
5186 env.src_cpu = busiest->cpu;
5187 env.src_rq = busiest;
5188 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
8e45cb54 5189
5d6523eb 5190more_balance:
1e3c88bd 5191 local_irq_save(flags);
78feefc5 5192 double_rq_lock(env.dst_rq, busiest);
88b8dac0
SV
5193
5194 /*
5195 * cur_ld_moved - load moved in current iteration
5196 * ld_moved - cumulative load moved across iterations
5197 */
5198 cur_ld_moved = move_tasks(&env);
5199 ld_moved += cur_ld_moved;
78feefc5 5200 double_rq_unlock(env.dst_rq, busiest);
1e3c88bd
PZ
5201 local_irq_restore(flags);
5202
5203 /*
5204 * some other cpu did the load balance for us.
5205 */
88b8dac0
SV
5206 if (cur_ld_moved && env.dst_cpu != smp_processor_id())
5207 resched_cpu(env.dst_cpu);
5208
f1cd0858
JK
5209 if (env.flags & LBF_NEED_BREAK) {
5210 env.flags &= ~LBF_NEED_BREAK;
5211 goto more_balance;
5212 }
5213
88b8dac0
SV
5214 /*
5215 * Revisit (affine) tasks on src_cpu that couldn't be moved to
5216 * us and move them to an alternate dst_cpu in our sched_group
5217	 * where they can run. The upper limit on how many times we
5218	 * iterate on the same src_cpu depends on the number of cpus
5219	 * in our sched_group.
5220	 *
5221	 * This changes load balance semantics a bit on who can move
5222	 * load to a given_cpu. In addition to the given_cpu itself
5223	 * (or an ilb_cpu acting on its behalf where given_cpu is
5224	 * nohz-idle), we now have balance_cpu in a position to move
5225	 * load to given_cpu. In rare situations, this may cause
5226	 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
5227	 * _independently_ and at the _same_ time to move some load to
5228	 * given_cpu), causing excess load to be moved to given_cpu.
5229	 * This, however, should rarely happen in practice, and
5230	 * subsequent load balance cycles should correct the
5231	 * excess load moved.
5232 */
6263322c 5233 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
88b8dac0 5234
78feefc5 5235 env.dst_rq = cpu_rq(env.new_dst_cpu);
88b8dac0 5236 env.dst_cpu = env.new_dst_cpu;
6263322c 5237 env.flags &= ~LBF_DST_PINNED;
88b8dac0
SV
5238 env.loop = 0;
5239 env.loop_break = sched_nr_migrate_break;
e02e60c1
JK
5240
5241	 /* Prevent re-selecting dst_cpu via env's cpus */
5242 cpumask_clear_cpu(env.dst_cpu, env.cpus);
5243
88b8dac0
SV
5244 /*
5245 * Go back to "more_balance" rather than "redo" since we
5246 * need to continue with same src_cpu.
5247 */
5248 goto more_balance;
5249 }
1e3c88bd 5250
6263322c
PZ
5251 /*
5252 * We failed to reach balance because of affinity.
5253 */
5254 if (sd_parent) {
5255 int *group_imbalance = &sd_parent->groups->sgp->imbalance;
5256
5257 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
5258 *group_imbalance = 1;
5259 } else if (*group_imbalance)
5260 *group_imbalance = 0;
5261 }
5262
1e3c88bd 5263 /* All tasks on this runqueue were pinned by CPU affinity */
8e45cb54 5264 if (unlikely(env.flags & LBF_ALL_PINNED)) {
1e3c88bd 5265 cpumask_clear_cpu(cpu_of(busiest), cpus);
bbf18b19
PN
5266 if (!cpumask_empty(cpus)) {
5267 env.loop = 0;
5268 env.loop_break = sched_nr_migrate_break;
1e3c88bd 5269 goto redo;
bbf18b19 5270 }
1e3c88bd
PZ
5271 goto out_balanced;
5272 }
5273 }
5274
5275 if (!ld_moved) {
5276 schedstat_inc(sd, lb_failed[idle]);
58b26c4c
VP
5277 /*
5278 * Increment the failure counter only on periodic balance.
5279	 * We do not want newidle balance, which can be very
5280	 * frequent, to pollute the failure counter and cause
5281	 * excessive cache_hot migrations and active balances.
5282 */
5283 if (idle != CPU_NEWLY_IDLE)
5284 sd->nr_balance_failed++;
1e3c88bd 5285
bd939f45 5286 if (need_active_balance(&env)) {
1e3c88bd
PZ
5287 raw_spin_lock_irqsave(&busiest->lock, flags);
5288
969c7921
TH
5289	 /* don't kick the active_load_balance_cpu_stop
5290	 * if the curr task on the busiest cpu can't be
5291	 * moved to this_cpu
1e3c88bd
PZ
5292 */
5293 if (!cpumask_test_cpu(this_cpu,
fa17b507 5294 tsk_cpus_allowed(busiest->curr))) {
1e3c88bd
PZ
5295 raw_spin_unlock_irqrestore(&busiest->lock,
5296 flags);
8e45cb54 5297 env.flags |= LBF_ALL_PINNED;
1e3c88bd
PZ
5298 goto out_one_pinned;
5299 }
5300
969c7921
TH
5301 /*
5302 * ->active_balance synchronizes accesses to
5303 * ->active_balance_work. Once set, it's cleared
5304 * only after active load balance is finished.
5305 */
1e3c88bd
PZ
5306 if (!busiest->active_balance) {
5307 busiest->active_balance = 1;
5308 busiest->push_cpu = this_cpu;
5309 active_balance = 1;
5310 }
5311 raw_spin_unlock_irqrestore(&busiest->lock, flags);
969c7921 5312
bd939f45 5313 if (active_balance) {
969c7921
TH
5314 stop_one_cpu_nowait(cpu_of(busiest),
5315 active_load_balance_cpu_stop, busiest,
5316 &busiest->active_balance_work);
bd939f45 5317 }
1e3c88bd
PZ
5318
5319 /*
5320 * We've kicked active balancing, reset the failure
5321 * counter.
5322 */
5323 sd->nr_balance_failed = sd->cache_nice_tries+1;
5324 }
5325 } else
5326 sd->nr_balance_failed = 0;
5327
5328 if (likely(!active_balance)) {
5329 /* We were unbalanced, so reset the balancing interval */
5330 sd->balance_interval = sd->min_interval;
5331 } else {
5332 /*
5333 * If we've begun active balancing, start to back off. This
5334 * case may not be covered by the all_pinned logic if there
5335 * is only 1 task on the busy runqueue (because we don't call
5336 * move_tasks).
5337 */
5338 if (sd->balance_interval < sd->max_interval)
5339 sd->balance_interval *= 2;
5340 }
5341
1e3c88bd
PZ
5342 goto out;
5343
5344out_balanced:
5345 schedstat_inc(sd, lb_balanced[idle]);
5346
5347 sd->nr_balance_failed = 0;
5348
5349out_one_pinned:
5350 /* tune up the balancing interval */
8e45cb54 5351 if (((env.flags & LBF_ALL_PINNED) &&
5b54b56b 5352 sd->balance_interval < MAX_PINNED_INTERVAL) ||
1e3c88bd
PZ
5353 (sd->balance_interval < sd->max_interval))
5354 sd->balance_interval *= 2;
5355
46e49b38 5356 ld_moved = 0;
1e3c88bd 5357out:
1e3c88bd
PZ
5358 return ld_moved;
5359}
5360
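The interval handling in load_balance() amounts to exponential backoff: reset
to sd->min_interval once balancing makes progress, double (bounded) when tasks
are pinned or active balancing kicks in. A hedged sketch of that policy in
isolation; the function and parameter names are illustrative, and 512 mirrors
MAX_PINNED_INTERVAL above.

	static unsigned long next_interval(unsigned long cur, unsigned long min,
					   unsigned long max, int made_progress,
					   int all_pinned)
	{
		if (made_progress)
			return min;		/* we were unbalanced: retry soon */
		if ((all_pinned && cur < 512) || cur < max)
			return cur * 2;		/* back off, bounded */
		return cur;
	}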
1e3c88bd
PZ
5361/*
5362 * idle_balance is called by schedule() if this_cpu is about to become
5363 * idle. Attempts to pull tasks from other CPUs.
5364 */
029632fb 5365void idle_balance(int this_cpu, struct rq *this_rq)
1e3c88bd
PZ
5366{
5367 struct sched_domain *sd;
5368 int pulled_task = 0;
5369 unsigned long next_balance = jiffies + HZ;
5370
78becc27 5371 this_rq->idle_stamp = rq_clock(this_rq);
1e3c88bd
PZ
5372
5373 if (this_rq->avg_idle < sysctl_sched_migration_cost)
5374 return;
5375
f492e12e
PZ
5376 /*
5377 * Drop the rq->lock, but keep IRQ/preempt disabled.
5378 */
5379 raw_spin_unlock(&this_rq->lock);
5380
48a16753 5381 update_blocked_averages(this_cpu);
dce840a0 5382 rcu_read_lock();
1e3c88bd
PZ
5383 for_each_domain(this_cpu, sd) {
5384 unsigned long interval;
23f0d209 5385 int continue_balancing = 1;
1e3c88bd
PZ
5386
5387 if (!(sd->flags & SD_LOAD_BALANCE))
5388 continue;
5389
f492e12e 5390 if (sd->flags & SD_BALANCE_NEWIDLE) {
1e3c88bd 5391 /* If we've pulled tasks over stop searching: */
f492e12e 5392 pulled_task = load_balance(this_cpu, this_rq,
23f0d209
JK
5393 sd, CPU_NEWLY_IDLE,
5394 &continue_balancing);
f492e12e 5395 }
1e3c88bd
PZ
5396
5397 interval = msecs_to_jiffies(sd->balance_interval);
5398 if (time_after(next_balance, sd->last_balance + interval))
5399 next_balance = sd->last_balance + interval;
d5ad140b
NR
5400 if (pulled_task) {
5401 this_rq->idle_stamp = 0;
1e3c88bd 5402 break;
d5ad140b 5403 }
1e3c88bd 5404 }
dce840a0 5405 rcu_read_unlock();
f492e12e
PZ
5406
5407 raw_spin_lock(&this_rq->lock);
5408
1e3c88bd
PZ
5409 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
5410 /*
5411 * We are going idle. next_balance may be set based on
5412 * a busy processor. So reset next_balance.
5413 */
5414 this_rq->next_balance = next_balance;
5415 }
5416}
5417
5418/*
969c7921
TH
5419	 * active_load_balance_cpu_stop is run by the cpu stopper. It pushes
5420 * running tasks off the busiest CPU onto idle CPUs. It requires at
5421 * least 1 task to be running on each physical CPU where possible, and
5422 * avoids physical / logical imbalances.
1e3c88bd 5423 */
969c7921 5424static int active_load_balance_cpu_stop(void *data)
1e3c88bd 5425{
969c7921
TH
5426 struct rq *busiest_rq = data;
5427 int busiest_cpu = cpu_of(busiest_rq);
1e3c88bd 5428 int target_cpu = busiest_rq->push_cpu;
969c7921 5429 struct rq *target_rq = cpu_rq(target_cpu);
1e3c88bd 5430 struct sched_domain *sd;
969c7921
TH
5431
5432 raw_spin_lock_irq(&busiest_rq->lock);
5433
5434 /* make sure the requested cpu hasn't gone down in the meantime */
5435 if (unlikely(busiest_cpu != smp_processor_id() ||
5436 !busiest_rq->active_balance))
5437 goto out_unlock;
1e3c88bd
PZ
5438
5439 /* Is there any task to move? */
5440 if (busiest_rq->nr_running <= 1)
969c7921 5441 goto out_unlock;
1e3c88bd
PZ
5442
5443 /*
5444	 * This condition is "impossible"; if it occurs
5445 * we need to fix it. Originally reported by
5446 * Bjorn Helgaas on a 128-cpu setup.
5447 */
5448 BUG_ON(busiest_rq == target_rq);
5449
5450 /* move a task from busiest_rq to target_rq */
5451 double_lock_balance(busiest_rq, target_rq);
1e3c88bd
PZ
5452
5453 /* Search for an sd spanning us and the target CPU. */
dce840a0 5454 rcu_read_lock();
1e3c88bd
PZ
5455 for_each_domain(target_cpu, sd) {
5456 if ((sd->flags & SD_LOAD_BALANCE) &&
5457 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
5458 break;
5459 }
5460
5461 if (likely(sd)) {
8e45cb54
PZ
5462 struct lb_env env = {
5463 .sd = sd,
ddcdf6e7
PZ
5464 .dst_cpu = target_cpu,
5465 .dst_rq = target_rq,
5466 .src_cpu = busiest_rq->cpu,
5467 .src_rq = busiest_rq,
8e45cb54
PZ
5468 .idle = CPU_IDLE,
5469 };
5470
1e3c88bd
PZ
5471 schedstat_inc(sd, alb_count);
5472
8e45cb54 5473 if (move_one_task(&env))
1e3c88bd
PZ
5474 schedstat_inc(sd, alb_pushed);
5475 else
5476 schedstat_inc(sd, alb_failed);
5477 }
dce840a0 5478 rcu_read_unlock();
1e3c88bd 5479 double_unlock_balance(busiest_rq, target_rq);
969c7921
TH
5480out_unlock:
5481 busiest_rq->active_balance = 0;
5482 raw_spin_unlock_irq(&busiest_rq->lock);
5483 return 0;
1e3c88bd
PZ
5484}
5485
3451d024 5486#ifdef CONFIG_NO_HZ_COMMON
83cd4fe2
VP
5487/*
5488 * idle load balancing details
83cd4fe2
VP
5489	 * - When one of the busy CPUs notices that idle rebalancing may be
5490	 *   needed, it will kick the idle load balancer, which then does idle
5491 * load balancing for all the idle CPUs.
5492 */
1e3c88bd 5493static struct {
83cd4fe2 5494 cpumask_var_t idle_cpus_mask;
0b005cf5 5495 atomic_t nr_cpus;
83cd4fe2
VP
5496 unsigned long next_balance; /* in jiffy units */
5497} nohz ____cacheline_aligned;
1e3c88bd 5498
8e7fbcbc 5499static inline int find_new_ilb(int call_cpu)
1e3c88bd 5500{
0b005cf5 5501 int ilb = cpumask_first(nohz.idle_cpus_mask);
1e3c88bd 5502
786d6dc7
SS
5503 if (ilb < nr_cpu_ids && idle_cpu(ilb))
5504 return ilb;
5505
5506 return nr_cpu_ids;
1e3c88bd 5507}
1e3c88bd 5508
83cd4fe2
VP
5509/*
5510	 * Kick a CPU to do the nohz balancing if it is time for it. We pick the
5511	 * nohz_load_balancer CPU (if there is one), otherwise fall back to any idle
5512 * CPU (if there is one).
5513 */
5514static void nohz_balancer_kick(int cpu)
5515{
5516 int ilb_cpu;
5517
5518 nohz.next_balance++;
5519
0b005cf5 5520 ilb_cpu = find_new_ilb(cpu);
83cd4fe2 5521
0b005cf5
SS
5522 if (ilb_cpu >= nr_cpu_ids)
5523 return;
83cd4fe2 5524
cd490c5b 5525 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
1c792db7
SS
5526 return;
5527 /*
5528 * Use smp_send_reschedule() instead of resched_cpu().
5529 * This way we generate a sched IPI on the target cpu which
5530 * is idle. And the softirq performing nohz idle load balance
5531 * will be run before returning from the IPI.
5532 */
5533 smp_send_reschedule(ilb_cpu);
83cd4fe2
VP
5534 return;
5535}
5536
c1cc017c 5537static inline void nohz_balance_exit_idle(int cpu)
71325960
SS
5538{
5539 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
5540 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
5541 atomic_dec(&nohz.nr_cpus);
5542 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
5543 }
5544}
5545
69e1e811
SS
5546static inline void set_cpu_sd_state_busy(void)
5547{
5548 struct sched_domain *sd;
69e1e811 5549
69e1e811 5550 rcu_read_lock();
424c93fe 5551 sd = rcu_dereference_check_sched_domain(this_rq()->sd);
25f55d9d
VG
5552
5553 if (!sd || !sd->nohz_idle)
5554 goto unlock;
5555 sd->nohz_idle = 0;
5556
5557 for (; sd; sd = sd->parent)
69e1e811 5558 atomic_inc(&sd->groups->sgp->nr_busy_cpus);
25f55d9d 5559unlock:
69e1e811
SS
5560 rcu_read_unlock();
5561}
5562
5563void set_cpu_sd_state_idle(void)
5564{
5565 struct sched_domain *sd;
69e1e811 5566
69e1e811 5567 rcu_read_lock();
424c93fe 5568 sd = rcu_dereference_check_sched_domain(this_rq()->sd);
25f55d9d
VG
5569
5570 if (!sd || sd->nohz_idle)
5571 goto unlock;
5572 sd->nohz_idle = 1;
5573
5574 for (; sd; sd = sd->parent)
69e1e811 5575 atomic_dec(&sd->groups->sgp->nr_busy_cpus);
25f55d9d 5576unlock:
69e1e811
SS
5577 rcu_read_unlock();
5578}
5579
1e3c88bd 5580/*
c1cc017c	5581	 * This routine records that the cpu is going idle with the tick stopped.
0b005cf5	5582	 * This info will be used when performing idle load balancing in the future.
1e3c88bd 5583 */
c1cc017c 5584void nohz_balance_enter_idle(int cpu)
1e3c88bd 5585{
71325960
SS
5586 /*
5587 * If this cpu is going down, then nothing needs to be done.
5588 */
5589 if (!cpu_active(cpu))
5590 return;
5591
c1cc017c
AS
5592 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
5593 return;
1e3c88bd 5594
c1cc017c
AS
5595 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
5596 atomic_inc(&nohz.nr_cpus);
5597 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
1e3c88bd 5598}
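nohz_balance_enter_idle() and nohz_balance_exit_idle() keep three views of the
same fact consistent: a cpumask to iterate over, an atomic counter for a cheap
"any tickless cpus?" test, and a per-cpu flag bit that makes both transitions
idempotent. A minimal userspace sketch of the pattern, using C11 atomics and
illustrative names rather than the kernel primitives:

	#include <stdatomic.h>
	#include <stdbool.h>

	#define NCPUS 64

	static unsigned long idle_mask;		/* which cpus are tickless */
	static atomic_int nr_idle;		/* cheap emptiness test */
	static bool tick_stopped[NCPUS];	/* idempotence flag */

	static void enter_idle(int cpu)
	{
		if (tick_stopped[cpu])
			return;			/* already recorded */
		idle_mask |= 1UL << cpu;
		atomic_fetch_add(&nr_idle, 1);
		tick_stopped[cpu] = true;
	}

	static void exit_idle(int cpu)
	{
		if (!tick_stopped[cpu])
			return;
		idle_mask &= ~(1UL << cpu);
		atomic_fetch_sub(&nr_idle, 1);
		tick_stopped[cpu] = false;
	}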
71325960 5599
0db0628d 5600static int sched_ilb_notifier(struct notifier_block *nfb,
71325960
SS
5601 unsigned long action, void *hcpu)
5602{
5603 switch (action & ~CPU_TASKS_FROZEN) {
5604 case CPU_DYING:
c1cc017c 5605 nohz_balance_exit_idle(smp_processor_id());
71325960
SS
5606 return NOTIFY_OK;
5607 default:
5608 return NOTIFY_DONE;
5609 }
5610}
1e3c88bd
PZ
5611#endif
5612
5613static DEFINE_SPINLOCK(balancing);
5614
49c022e6
PZ
5615/*
5616 * Scale the max load_balance interval with the number of CPUs in the system.
5617 * This trades load-balance latency on larger machines for less cross talk.
5618 */
029632fb 5619void update_max_interval(void)
49c022e6
PZ
5620{
5621 max_load_balance_interval = HZ*num_online_cpus()/10;
5622}
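For concreteness, the cap computed above scales linearly with the machine:
with HZ=1000, a 4-cpu box allows at most 400 jiffies (0.4s) between balances
of any one domain, while a 64-cpu box allows 6400 jiffies (6.4s). A hedged
restatement of the same arithmetic:

	/* Illustrative only: the same formula as update_max_interval(). */
	static unsigned long max_balance_interval(unsigned long hz,
						  unsigned int online_cpus)
	{
		return hz * online_cpus / 10;
	}
	/* max_balance_interval(1000, 64) == 6400 jiffies, i.e. 6.4 seconds */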
5623
1e3c88bd
PZ
5624/*
5625 * It checks each scheduling domain to see if it is due to be balanced,
5626 * and initiates a balancing operation if so.
5627 *
b9b0853a 5628 * Balancing parameters are set up in init_sched_domains.
1e3c88bd
PZ
5629 */
5630static void rebalance_domains(int cpu, enum cpu_idle_type idle)
5631{
23f0d209 5632 int continue_balancing = 1;
1e3c88bd
PZ
5633 struct rq *rq = cpu_rq(cpu);
5634 unsigned long interval;
04f733b4 5635 struct sched_domain *sd;
1e3c88bd
PZ
5636 /* Earliest time when we have to do rebalance again */
5637 unsigned long next_balance = jiffies + 60*HZ;
5638 int update_next_balance = 0;
5639 int need_serialize;
5640
48a16753 5641 update_blocked_averages(cpu);
2069dd75 5642
dce840a0 5643 rcu_read_lock();
1e3c88bd
PZ
5644 for_each_domain(cpu, sd) {
5645 if (!(sd->flags & SD_LOAD_BALANCE))
5646 continue;
5647
5648 interval = sd->balance_interval;
5649 if (idle != CPU_IDLE)
5650 interval *= sd->busy_factor;
5651
5652 /* scale ms to jiffies */
5653 interval = msecs_to_jiffies(interval);
49c022e6 5654 interval = clamp(interval, 1UL, max_load_balance_interval);
1e3c88bd
PZ
5655
5656 need_serialize = sd->flags & SD_SERIALIZE;
5657
5658 if (need_serialize) {
5659 if (!spin_trylock(&balancing))
5660 goto out;
5661 }
5662
5663 if (time_after_eq(jiffies, sd->last_balance + interval)) {
23f0d209 5664 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
1e3c88bd 5665 /*
6263322c 5666 * The LBF_DST_PINNED logic could have changed
de5eb2dd
JK
5667 * env->dst_cpu, so we can't know our idle
5668 * state even if we migrated tasks. Update it.
1e3c88bd 5669 */
de5eb2dd 5670 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
1e3c88bd
PZ
5671 }
5672 sd->last_balance = jiffies;
5673 }
5674 if (need_serialize)
5675 spin_unlock(&balancing);
5676out:
5677 if (time_after(next_balance, sd->last_balance + interval)) {
5678 next_balance = sd->last_balance + interval;
5679 update_next_balance = 1;
5680 }
5681
5682 /*
5683 * Stop the load balance at this level. There is another
5684 * CPU in our sched group which is doing load balancing more
5685 * actively.
5686 */
23f0d209 5687 if (!continue_balancing)
1e3c88bd
PZ
5688 break;
5689 }
dce840a0 5690 rcu_read_unlock();
1e3c88bd
PZ
5691
5692 /*
5693 * next_balance will be updated only when there is a need.
5694 * When the cpu is attached to null domain for ex, it will not be
5695 * updated.
5696 */
5697 if (likely(update_next_balance))
5698 rq->next_balance = next_balance;
5699}
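The per-domain interval computed at the top of the loop above is stretched by
sd->busy_factor when the cpu is not idle, converted from milliseconds to
jiffies, and clamped to [1, max_load_balance_interval]. A small sketch of that
computation with illustrative names; msecs_to_jiffies() is modelled here as an
identity under an assumed HZ=1000:

	static unsigned long domain_interval(unsigned long base_ms,
					     int cpu_busy,
					     unsigned int busy_factor,
					     unsigned long max_jiffies)
	{
		unsigned long interval = base_ms;

		if (cpu_busy)
			interval *= busy_factor;

		/* ms -> jiffies; with HZ=1000 they are 1:1 */
		if (interval < 1)
			interval = 1;
		if (interval > max_jiffies)
			interval = max_jiffies;
		return interval;
	}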
5700
3451d024 5701#ifdef CONFIG_NO_HZ_COMMON
1e3c88bd 5702/*
3451d024	5703	 * In the CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
1e3c88bd
PZ
5704	 * rebalancing for all the cpus whose scheduler ticks are stopped.
5705 */
83cd4fe2
VP
5706static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
5707{
5708 struct rq *this_rq = cpu_rq(this_cpu);
5709 struct rq *rq;
5710 int balance_cpu;
5711
1c792db7
SS
5712 if (idle != CPU_IDLE ||
5713 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
5714 goto end;
83cd4fe2
VP
5715
5716 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
8a6d42d1 5717 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
83cd4fe2
VP
5718 continue;
5719
5720 /*
5721 * If this cpu gets work to do, stop the load balancing
5722	 * work being done for other cpus. The next load
5723	 * balancing owner will pick it up.
5724 */
1c792db7 5725 if (need_resched())
83cd4fe2 5726 break;
83cd4fe2 5727
5ed4f1d9
VG
5728 rq = cpu_rq(balance_cpu);
5729
5730 raw_spin_lock_irq(&rq->lock);
5731 update_rq_clock(rq);
5732 update_idle_cpu_load(rq);
5733 raw_spin_unlock_irq(&rq->lock);
83cd4fe2
VP
5734
5735 rebalance_domains(balance_cpu, CPU_IDLE);
5736
83cd4fe2
VP
5737 if (time_after(this_rq->next_balance, rq->next_balance))
5738 this_rq->next_balance = rq->next_balance;
5739 }
5740 nohz.next_balance = this_rq->next_balance;
1c792db7
SS
5741end:
5742 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
83cd4fe2
VP
5743}
5744
5745/*
0b005cf5
SS
5746	 * Current heuristic for kicking the idle load balancer in the presence
5747	 * of an idle cpu in the system:
5748	 * - This rq has more than one task.
5749	 * - At any scheduler domain level, this cpu's scheduler group has multiple
5750	 *   busy cpus exceeding the group's power.
5751	 * - For SD_ASYM_PACKING, if the lower numbered cpus in the scheduler
5752	 *   domain span are idle.
83cd4fe2
VP
5753 */
5754static inline int nohz_kick_needed(struct rq *rq, int cpu)
5755{
5756 unsigned long now = jiffies;
0b005cf5 5757 struct sched_domain *sd;
83cd4fe2 5758
1c792db7 5759 if (unlikely(idle_cpu(cpu)))
83cd4fe2
VP
5760 return 0;
5761
1c792db7
SS
5762 /*
5763	 * We may recently have been in ticked or tickless idle mode. At the first
5764 * busy tick after returning from idle, we will update the busy stats.
5765 */
69e1e811 5766 set_cpu_sd_state_busy();
c1cc017c 5767 nohz_balance_exit_idle(cpu);
0b005cf5
SS
5768
5769 /*
5770 * None are in tickless mode and hence no need for NOHZ idle load
5771 * balancing.
5772 */
5773 if (likely(!atomic_read(&nohz.nr_cpus)))
5774 return 0;
1c792db7
SS
5775
5776 if (time_before(now, nohz.next_balance))
83cd4fe2
VP
5777 return 0;
5778
0b005cf5
SS
5779 if (rq->nr_running >= 2)
5780 goto need_kick;
83cd4fe2 5781
067491b7 5782 rcu_read_lock();
0b005cf5
SS
5783 for_each_domain(cpu, sd) {
5784 struct sched_group *sg = sd->groups;
5785 struct sched_group_power *sgp = sg->sgp;
5786 int nr_busy = atomic_read(&sgp->nr_busy_cpus);
83cd4fe2 5787
0b005cf5 5788 if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
067491b7 5789 goto need_kick_unlock;
0b005cf5
SS
5790
5791 if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
5792 && (cpumask_first_and(nohz.idle_cpus_mask,
5793 sched_domain_span(sd)) < cpu))
067491b7 5794 goto need_kick_unlock;
0b005cf5
SS
5795
5796 if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
5797 break;
83cd4fe2 5798 }
067491b7 5799 rcu_read_unlock();
83cd4fe2 5800 return 0;
067491b7
PZ
5801
5802need_kick_unlock:
5803 rcu_read_unlock();
0b005cf5
SS
5804need_kick:
5805 return 1;
83cd4fe2
VP
5806}
5807#else
5808static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
5809#endif
5810
5811/*
5812 * run_rebalance_domains is triggered when needed from the scheduler tick.
5813 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
5814 */
1e3c88bd
PZ
5815static void run_rebalance_domains(struct softirq_action *h)
5816{
5817 int this_cpu = smp_processor_id();
5818 struct rq *this_rq = cpu_rq(this_cpu);
6eb57e0d 5819 enum cpu_idle_type idle = this_rq->idle_balance ?
1e3c88bd
PZ
5820 CPU_IDLE : CPU_NOT_IDLE;
5821
5822 rebalance_domains(this_cpu, idle);
5823
1e3c88bd 5824 /*
83cd4fe2 5825 * If this cpu has a pending nohz_balance_kick, then do the
1e3c88bd
PZ
5826 * balancing on behalf of the other idle cpus whose ticks are
5827 * stopped.
5828 */
83cd4fe2 5829 nohz_idle_balance(this_cpu, idle);
1e3c88bd
PZ
5830}
5831
5832static inline int on_null_domain(int cpu)
5833{
90a6501f 5834 return !rcu_dereference_sched(cpu_rq(cpu)->sd);
1e3c88bd
PZ
5835}
5836
5837/*
5838 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
1e3c88bd 5839 */
029632fb 5840void trigger_load_balance(struct rq *rq, int cpu)
1e3c88bd 5841{
1e3c88bd
PZ
5842 /* Don't need to rebalance while attached to NULL domain */
5843 if (time_after_eq(jiffies, rq->next_balance) &&
5844 likely(!on_null_domain(cpu)))
5845 raise_softirq(SCHED_SOFTIRQ);
3451d024 5846#ifdef CONFIG_NO_HZ_COMMON
1c792db7 5847 if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
83cd4fe2
VP
5848 nohz_balancer_kick(cpu);
5849#endif
1e3c88bd
PZ
5850}
5851
0bcdcf28
CE
5852static void rq_online_fair(struct rq *rq)
5853{
5854 update_sysctl();
5855}
5856
5857static void rq_offline_fair(struct rq *rq)
5858{
5859 update_sysctl();
a4c96ae3
PB
5860
5861 /* Ensure any throttled groups are reachable by pick_next_task */
5862 unthrottle_offline_cfs_rqs(rq);
0bcdcf28
CE
5863}
5864
55e12e5e 5865#endif /* CONFIG_SMP */
e1d1484f 5866
bf0f6f24
IM
5867/*
5868 * scheduler tick hitting a task of our scheduling class:
5869 */
8f4d37ec 5870static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
bf0f6f24
IM
5871{
5872 struct cfs_rq *cfs_rq;
5873 struct sched_entity *se = &curr->se;
5874
5875 for_each_sched_entity(se) {
5876 cfs_rq = cfs_rq_of(se);
8f4d37ec 5877 entity_tick(cfs_rq, se, queued);
bf0f6f24 5878 }
18bf2805 5879
10e84b97 5880 if (numabalancing_enabled)
cbee9f88 5881 task_tick_numa(rq, curr);
3d59eebc 5882
18bf2805 5883 update_rq_runnable_avg(rq, 1);
bf0f6f24
IM
5884}
5885
5886/*
cd29fe6f
PZ
5887 * called on fork with the child task as argument from the parent's context
5888 * - child not yet on the tasklist
5889 * - preemption disabled
bf0f6f24 5890 */
cd29fe6f 5891static void task_fork_fair(struct task_struct *p)
bf0f6f24 5892{
4fc420c9
DN
5893 struct cfs_rq *cfs_rq;
5894 struct sched_entity *se = &p->se, *curr;
00bf7bfc 5895 int this_cpu = smp_processor_id();
cd29fe6f
PZ
5896 struct rq *rq = this_rq();
5897 unsigned long flags;
5898
05fa785c 5899 raw_spin_lock_irqsave(&rq->lock, flags);
bf0f6f24 5900
861d034e
PZ
5901 update_rq_clock(rq);
5902
4fc420c9
DN
5903 cfs_rq = task_cfs_rq(current);
5904 curr = cfs_rq->curr;
5905
6c9a27f5
DN
5906 /*
5907 * Not only the cpu but also the task_group of the parent might have
5908 * been changed after parent->se.parent,cfs_rq were copied to
5909 * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
5910	 * of the child point to valid ones.
5911 */
5912 rcu_read_lock();
5913 __set_task_cpu(p, this_cpu);
5914 rcu_read_unlock();
bf0f6f24 5915
7109c442 5916 update_curr(cfs_rq);
cd29fe6f 5917
b5d9d734
MG
5918 if (curr)
5919 se->vruntime = curr->vruntime;
aeb73b04 5920 place_entity(cfs_rq, se, 1);
4d78e7b6 5921
cd29fe6f 5922 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
87fefa38 5923 /*
edcb60a3
IM
5924 * Upon rescheduling, sched_class::put_prev_task() will place
5925 * 'current' within the tree based on its new key value.
5926 */
4d78e7b6 5927 swap(curr->vruntime, se->vruntime);
aec0a514 5928 resched_task(rq->curr);
4d78e7b6 5929 }
bf0f6f24 5930
88ec22d3
PZ
5931 se->vruntime -= cfs_rq->min_vruntime;
5932
05fa785c 5933 raw_spin_unlock_irqrestore(&rq->lock, flags);
bf0f6f24
IM
5934}
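The swap in task_fork_fair() is what makes sched_child_runs_first effective:
after swapping, the child's vruntime is the smaller of the two, so the CFS
leftmost-entity pick selects it next. A toy illustration with plain integers
standing in for vruntime; the type and function names are illustrative only.

	struct toy_se { unsigned long long vruntime; };

	/* Ensure the child ends up with the smaller (earlier) vruntime. */
	static void child_runs_first(struct toy_se *parent, struct toy_se *child)
	{
		if (parent->vruntime < child->vruntime) {
			unsigned long long tmp = parent->vruntime;

			parent->vruntime = child->vruntime;
			child->vruntime = tmp;
		}
	}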
5935
cb469845
SR
5936/*
5937 * Priority of the task has changed. Check to see if we preempt
5938 * the current task.
5939 */
da7a735e
PZ
5940static void
5941prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
cb469845 5942{
da7a735e
PZ
5943 if (!p->se.on_rq)
5944 return;
5945
cb469845
SR
5946 /*
5947 * Reschedule if we are currently running on this runqueue and
5948 * our priority decreased, or if we are not currently running on
5949 * this runqueue and our priority is higher than the current's
5950 */
da7a735e 5951 if (rq->curr == p) {
cb469845
SR
5952 if (p->prio > oldprio)
5953 resched_task(rq->curr);
5954 } else
15afe09b 5955 check_preempt_curr(rq, p, 0);
cb469845
SR
5956}
5957
da7a735e
PZ
5958static void switched_from_fair(struct rq *rq, struct task_struct *p)
5959{
5960 struct sched_entity *se = &p->se;
5961 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5962
5963 /*
5964 * Ensure the task's vruntime is normalized, so that when its
5965 * switched back to the fair class the enqueue_entity(.flags=0) will
5966 * do the right thing.
5967 *
5968 * If it was on_rq, then the dequeue_entity(.flags=0) will already
5969 * have normalized the vruntime, if it was !on_rq, then only when
5970 * the task is sleeping will it still have non-normalized vruntime.
5971 */
5972 if (!se->on_rq && p->state != TASK_RUNNING) {
5973 /*
5974 * Fix up our vruntime so that the current sleep doesn't
5975 * cause 'unlimited' sleep bonus.
5976 */
5977 place_entity(cfs_rq, se, 0);
5978 se->vruntime -= cfs_rq->min_vruntime;
5979 }
9ee474f5 5980
141965c7 5981#ifdef CONFIG_SMP
9ee474f5
PT
5982 /*
5983 * Remove our load from contribution when we leave sched_fair
5984 * and ensure we don't carry in an old decay_count if we
5985 * switch back.
5986 */
87e3c8ae
KT
5987 if (se->avg.decay_count) {
5988 __synchronize_entity_decay(se);
5989 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
9ee474f5
PT
5990 }
5991#endif
da7a735e
PZ
5992}
5993
cb469845
SR
5994/*
5995 * We switched to the sched_fair class.
5996 */
da7a735e 5997static void switched_to_fair(struct rq *rq, struct task_struct *p)
cb469845 5998{
da7a735e
PZ
5999 if (!p->se.on_rq)
6000 return;
6001
cb469845
SR
6002 /*
6003 * We were most likely switched from sched_rt, so
6004 * kick off the schedule if running, otherwise just see
6005 * if we can still preempt the current task.
6006 */
da7a735e 6007 if (rq->curr == p)
cb469845
SR
6008 resched_task(rq->curr);
6009 else
15afe09b 6010 check_preempt_curr(rq, p, 0);
cb469845
SR
6011}
6012
83b699ed
SV
6013/* Account for a task changing its policy or group.
6014 *
6015 * This routine is mostly called to set cfs_rq->curr field when a task
6016 * migrates between groups/classes.
6017 */
6018static void set_curr_task_fair(struct rq *rq)
6019{
6020 struct sched_entity *se = &rq->curr->se;
6021
ec12cb7f
PT
6022 for_each_sched_entity(se) {
6023 struct cfs_rq *cfs_rq = cfs_rq_of(se);
6024
6025 set_next_entity(cfs_rq, se);
6026 /* ensure bandwidth has been allocated on our new cfs_rq */
6027 account_cfs_rq_runtime(cfs_rq, 0);
6028 }
83b699ed
SV
6029}
6030
029632fb
PZ
6031void init_cfs_rq(struct cfs_rq *cfs_rq)
6032{
6033 cfs_rq->tasks_timeline = RB_ROOT;
029632fb
PZ
6034 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
6035#ifndef CONFIG_64BIT
6036 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
6037#endif
141965c7 6038#ifdef CONFIG_SMP
9ee474f5 6039 atomic64_set(&cfs_rq->decay_counter, 1);
2509940f 6040 atomic_long_set(&cfs_rq->removed_load, 0);
9ee474f5 6041#endif
029632fb
PZ
6042}
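A note on the (u64)(-(1LL << 20)) above: it starts min_vruntime roughly one
millisecond (2^20 ns) below the point where the unsigned 64-bit clock wraps,
presumably so wraparound is exercised almost immediately after boot rather than
after centuries of uptime; comparisons must therefore use signed differences to
stay correct. A standalone check of that property (illustrative, compiles with
any C99 compiler):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Same initial value as init_cfs_rq() above. */
		uint64_t min_vruntime = (uint64_t)(-(1LL << 20));
		/* ~2ms of nanoseconds later, the clock has wrapped past 0. */
		uint64_t later = min_vruntime + (2ULL << 20);

		/* later < min_vruntime as plain u64, yet it is "after": */
		printf("naive: %d signed-delta: %d\n",
		       later > min_vruntime,
		       (int64_t)(later - min_vruntime) > 0);
		return 0;
	}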
6043
810b3817 6044#ifdef CONFIG_FAIR_GROUP_SCHED
b2b5ce02 6045static void task_move_group_fair(struct task_struct *p, int on_rq)
810b3817 6046{
aff3e498 6047 struct cfs_rq *cfs_rq;
b2b5ce02
PZ
6048 /*
6049 * If the task was not on the rq at the time of this cgroup movement
6050	 * it must have been asleep; sleeping tasks keep their ->vruntime
6051 * absolute on their old rq until wakeup (needed for the fair sleeper
6052 * bonus in place_entity()).
6053 *
6054 * If it was on the rq, we've just 'preempted' it, which does convert
6055 * ->vruntime to a relative base.
6056 *
6057 * Make sure both cases convert their relative position when migrating
6058 * to another cgroup's rq. This does somewhat interfere with the
6059 * fair sleeper stuff for the first placement, but who cares.
6060 */
7ceff013
DN
6061 /*
6062 * When !on_rq, vruntime of the task has usually NOT been normalized.
6063 * But there are some cases where it has already been normalized:
6064 *
6065	 * - Moving a forked child which is waiting to be woken up by
6066	 *   wake_up_new_task().
62af3783
DN
6067	 * - Moving a task which has been woken up by try_to_wake_up() and
6068	 *   is waiting to actually be woken up by sched_ttwu_pending().
7ceff013
DN
6069 *
6070 * To prevent boost or penalty in the new cfs_rq caused by delta
6071 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
6072 */
62af3783 6073 if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
7ceff013
DN
6074 on_rq = 1;
6075
b2b5ce02
PZ
6076 if (!on_rq)
6077 p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
6078 set_task_rq(p, task_cpu(p));
aff3e498
PT
6079 if (!on_rq) {
6080 cfs_rq = cfs_rq_of(&p->se);
6081 p->se.vruntime += cfs_rq->min_vruntime;
6082#ifdef CONFIG_SMP
6083 /*
6084 * migrate_task_rq_fair() will have removed our previous
6085 * contribution, but we must synchronize for ongoing future
6086 * decay.
6087 */
6088 p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
6089 cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
6090#endif
6091 }
810b3817 6092}
029632fb
PZ
6093
6094void free_fair_sched_group(struct task_group *tg)
6095{
6096 int i;
6097
6098 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
6099
6100 for_each_possible_cpu(i) {
6101 if (tg->cfs_rq)
6102 kfree(tg->cfs_rq[i]);
6103 if (tg->se)
6104 kfree(tg->se[i]);
6105 }
6106
6107 kfree(tg->cfs_rq);
6108 kfree(tg->se);
6109}
6110
6111int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
6112{
6113 struct cfs_rq *cfs_rq;
6114 struct sched_entity *se;
6115 int i;
6116
6117 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
6118 if (!tg->cfs_rq)
6119 goto err;
6120 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
6121 if (!tg->se)
6122 goto err;
6123
6124 tg->shares = NICE_0_LOAD;
6125
6126 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
6127
6128 for_each_possible_cpu(i) {
6129 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
6130 GFP_KERNEL, cpu_to_node(i));
6131 if (!cfs_rq)
6132 goto err;
6133
6134 se = kzalloc_node(sizeof(struct sched_entity),
6135 GFP_KERNEL, cpu_to_node(i));
6136 if (!se)
6137 goto err_free_rq;
6138
6139 init_cfs_rq(cfs_rq);
6140 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
6141 }
6142
6143 return 1;
6144
6145err_free_rq:
6146 kfree(cfs_rq);
6147err:
6148 return 0;
6149}
6150
6151void unregister_fair_sched_group(struct task_group *tg, int cpu)
6152{
6153 struct rq *rq = cpu_rq(cpu);
6154 unsigned long flags;
6155
6156 /*
6157	 * Only empty task groups can be destroyed, so we can speculatively
6158 * check on_list without danger of it being re-added.
6159 */
6160 if (!tg->cfs_rq[cpu]->on_list)
6161 return;
6162
6163 raw_spin_lock_irqsave(&rq->lock, flags);
6164 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
6165 raw_spin_unlock_irqrestore(&rq->lock, flags);
6166}
6167
6168void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
6169 struct sched_entity *se, int cpu,
6170 struct sched_entity *parent)
6171{
6172 struct rq *rq = cpu_rq(cpu);
6173
6174 cfs_rq->tg = tg;
6175 cfs_rq->rq = rq;
029632fb
PZ
6176 init_cfs_rq_runtime(cfs_rq);
6177
6178 tg->cfs_rq[cpu] = cfs_rq;
6179 tg->se[cpu] = se;
6180
6181 /* se could be NULL for root_task_group */
6182 if (!se)
6183 return;
6184
6185 if (!parent)
6186 se->cfs_rq = &rq->cfs;
6187 else
6188 se->cfs_rq = parent->my_q;
6189
6190 se->my_q = cfs_rq;
6191 update_load_set(&se->load, 0);
6192 se->parent = parent;
6193}
6194
6195static DEFINE_MUTEX(shares_mutex);
6196
6197int sched_group_set_shares(struct task_group *tg, unsigned long shares)
6198{
6199 int i;
6200 unsigned long flags;
6201
6202 /*
6203 * We can't change the weight of the root cgroup.
6204 */
6205 if (!tg->se[0])
6206 return -EINVAL;
6207
6208 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
6209
6210 mutex_lock(&shares_mutex);
6211 if (tg->shares == shares)
6212 goto done;
6213
6214 tg->shares = shares;
6215 for_each_possible_cpu(i) {
6216 struct rq *rq = cpu_rq(i);
6217 struct sched_entity *se;
6218
6219 se = tg->se[i];
6220 /* Propagate contribution to hierarchy */
6221 raw_spin_lock_irqsave(&rq->lock, flags);
71b1da46
FW
6222
6223 /* Possible calls to update_curr() need rq clock */
6224 update_rq_clock(rq);
17bc14b7 6225 for_each_sched_entity(se)
029632fb
PZ
6226 update_cfs_shares(group_cfs_rq(se));
6227 raw_spin_unlock_irqrestore(&rq->lock, flags);
6228 }
6229
6230done:
6231 mutex_unlock(&shares_mutex);
6232 return 0;
6233}
6234#else /* CONFIG_FAIR_GROUP_SCHED */
6235
6236void free_fair_sched_group(struct task_group *tg) { }
6237
6238int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
6239{
6240 return 1;
6241}
6242
6243void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
6244
6245#endif /* CONFIG_FAIR_GROUP_SCHED */
6246
810b3817 6247
6d686f45 6248static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
0d721cea
PW
6249{
6250 struct sched_entity *se = &task->se;
0d721cea
PW
6251 unsigned int rr_interval = 0;
6252
6253 /*
6254 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
6255 * idle runqueue:
6256 */
0d721cea 6257 if (rq->cfs.load.weight)
a59f4e07 6258 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
0d721cea
PW
6259
6260 return rr_interval;
6261}
6262
bf0f6f24
IM
6263/*
6264 * All the scheduling class methods:
6265 */
029632fb 6266const struct sched_class fair_sched_class = {
5522d5d5 6267 .next = &idle_sched_class,
bf0f6f24
IM
6268 .enqueue_task = enqueue_task_fair,
6269 .dequeue_task = dequeue_task_fair,
6270 .yield_task = yield_task_fair,
d95f4122 6271 .yield_to_task = yield_to_task_fair,
bf0f6f24 6272
2e09bf55 6273 .check_preempt_curr = check_preempt_wakeup,
bf0f6f24
IM
6274
6275 .pick_next_task = pick_next_task_fair,
6276 .put_prev_task = put_prev_task_fair,
6277
681f3e68 6278#ifdef CONFIG_SMP
4ce72a2c 6279 .select_task_rq = select_task_rq_fair,
0a74bef8 6280 .migrate_task_rq = migrate_task_rq_fair,
141965c7 6281
0bcdcf28
CE
6282 .rq_online = rq_online_fair,
6283 .rq_offline = rq_offline_fair,
88ec22d3
PZ
6284
6285 .task_waking = task_waking_fair,
681f3e68 6286#endif
bf0f6f24 6287
83b699ed 6288 .set_curr_task = set_curr_task_fair,
bf0f6f24 6289 .task_tick = task_tick_fair,
cd29fe6f 6290 .task_fork = task_fork_fair,
cb469845
SR
6291
6292 .prio_changed = prio_changed_fair,
da7a735e 6293 .switched_from = switched_from_fair,
cb469845 6294 .switched_to = switched_to_fair,
810b3817 6295
0d721cea
PW
6296 .get_rr_interval = get_rr_interval_fair,
6297
810b3817 6298#ifdef CONFIG_FAIR_GROUP_SCHED
b2b5ce02 6299 .task_move_group = task_move_group_fair,
810b3817 6300#endif
bf0f6f24
IM
6301};
6302
6303#ifdef CONFIG_SCHED_DEBUG
029632fb 6304void print_cfs_stats(struct seq_file *m, int cpu)
bf0f6f24 6305{
bf0f6f24
IM
6306 struct cfs_rq *cfs_rq;
6307
5973e5b9 6308 rcu_read_lock();
c3b64f1e 6309 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
5cef9eca 6310 print_cfs_rq(m, cpu, cfs_rq);
5973e5b9 6311 rcu_read_unlock();
bf0f6f24
IM
6312}
6313#endif
029632fb
PZ
6314
6315__init void init_sched_fair_class(void)
6316{
6317#ifdef CONFIG_SMP
6318 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
6319
3451d024 6320#ifdef CONFIG_NO_HZ_COMMON
554cecaf 6321 nohz.next_balance = jiffies;
029632fb 6322 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
71325960 6323 cpu_notifier(sched_ilb_notifier, 0);
029632fb
PZ
6324#endif
6325#endif /* SMP */
6326
6327}