sched: Add an rq migration call-back to sched_class
[linux-2.6-block.git] / kernel / sched / fair.c
CommitLineData
bf0f6f24
IM
1/*
2 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3 *
4 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5 *
6 * Interactivity improvements by Mike Galbraith
7 * (C) 2007 Mike Galbraith <efault@gmx.de>
8 *
9 * Various enhancements by Dmitry Adamushko.
10 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11 *
12 * Group scheduling enhancements by Srivatsa Vaddagiri
13 * Copyright IBM Corporation, 2007
14 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15 *
16 * Scaled math optimizations by Thomas Gleixner
17 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
21805085
PZ
18 *
19 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
bf0f6f24
IM
21 */
22
9745512c 23#include <linux/latencytop.h>
1983a922 24#include <linux/sched.h>
3436ae12 25#include <linux/cpumask.h>
029632fb
PZ
26#include <linux/slab.h>
27#include <linux/profile.h>
28#include <linux/interrupt.h>
29
30#include <trace/events/sched.h>
31
32#include "sched.h"
9745512c 33
bf0f6f24 34/*
21805085 35 * Targeted preemption latency for CPU-bound tasks:
864616ee 36 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
bf0f6f24 37 *
21805085 38 * NOTE: this latency value is not the same as the concept of
d274a4ce
IM
39 * 'timeslice length' - timeslices in CFS are of variable length
40 * and have no persistent notion like in traditional, time-slice
41 * based scheduling concepts.
bf0f6f24 42 *
d274a4ce
IM
43 * (to see the precise effective timeslice length of your workload,
44 * run vmstat and monitor the context-switches (cs) field)
bf0f6f24 45 */
21406928
MG
46unsigned int sysctl_sched_latency = 6000000ULL;
47unsigned int normalized_sysctl_sched_latency = 6000000ULL;
2bd8e6d4 48
1983a922
CE
49/*
50 * The initial- and re-scaling of tunables is configurable
51 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
52 *
53 * Options are:
54 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
55 * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
56 * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
57 */
58enum sched_tunable_scaling sysctl_sched_tunable_scaling
59 = SCHED_TUNABLESCALING_LOG;
60
2bd8e6d4 61/*
b2be5e96 62 * Minimal preemption granularity for CPU-bound tasks:
864616ee 63 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
2bd8e6d4 64 */
0bf377bb
IM
65unsigned int sysctl_sched_min_granularity = 750000ULL;
66unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
21805085
PZ
67
68/*
b2be5e96
PZ
69 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
70 */
0bf377bb 71static unsigned int sched_nr_latency = 8;
b2be5e96
PZ
72
73/*
2bba22c5 74 * After fork, child runs first. If set to 0 (default) then
b2be5e96 75 * parent will (try to) run first.
21805085 76 */
2bba22c5 77unsigned int sysctl_sched_child_runs_first __read_mostly;
bf0f6f24 78
bf0f6f24
IM
79/*
80 * SCHED_OTHER wake-up granularity.
172e082a 81 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
bf0f6f24
IM
82 *
83 * This option delays the preemption effects of decoupled workloads
84 * and reduces their over-scheduling. Synchronous workloads will still
85 * have immediate wakeup/sleep latencies.
86 */
172e082a 87unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
0bcdcf28 88unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
bf0f6f24 89
da84d961
IM
90const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
91
a7a4f8a7
PT
92/*
93 * The exponential sliding window over which load is averaged for shares
94 * distribution.
95 * (default: 10msec)
96 */
97unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
98
ec12cb7f
PT
99#ifdef CONFIG_CFS_BANDWIDTH
100/*
101 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
102 * each time a cfs_rq requests quota.
103 *
104 * Note: in the case that the slice exceeds the runtime remaining (either due
105 * to consumption or the quota being specified to be smaller than the slice)
106 * we will always only issue the remaining available time.
107 *
108 * default: 5 msec, units: microseconds
109 */
110unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
111#endif
112
029632fb
PZ
113/*
114 * Increase the granularity value when there are more CPUs,
115 * because with more CPUs the 'effective latency' as visible
116 * to users decreases. But the relationship is not linear,
117 * so pick a second-best guess by going with the log2 of the
118 * number of CPUs.
119 *
120 * This idea comes from the SD scheduler of Con Kolivas:
121 */
122static int get_update_sysctl_factor(void)
123{
124 unsigned int cpus = min_t(int, num_online_cpus(), 8);
125 unsigned int factor;
126
127 switch (sysctl_sched_tunable_scaling) {
128 case SCHED_TUNABLESCALING_NONE:
129 factor = 1;
130 break;
131 case SCHED_TUNABLESCALING_LINEAR:
132 factor = cpus;
133 break;
134 case SCHED_TUNABLESCALING_LOG:
135 default:
136 factor = 1 + ilog2(cpus);
137 break;
138 }
139
140 return factor;
141}
142
143static void update_sysctl(void)
144{
145 unsigned int factor = get_update_sysctl_factor();
146
147#define SET_SYSCTL(name) \
148 (sysctl_##name = (factor) * normalized_sysctl_##name)
149 SET_SYSCTL(sched_min_granularity);
150 SET_SYSCTL(sched_latency);
151 SET_SYSCTL(sched_wakeup_granularity);
152#undef SET_SYSCTL
153}
154
155void sched_init_granularity(void)
156{
157 update_sysctl();
158}
159
160#if BITS_PER_LONG == 32
161# define WMULT_CONST (~0UL)
162#else
163# define WMULT_CONST (1UL << 32)
164#endif
165
166#define WMULT_SHIFT 32
167
168/*
169 * Shift right and round:
170 */
171#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
172
173/*
174 * delta *= weight / lw
175 */
176static unsigned long
177calc_delta_mine(unsigned long delta_exec, unsigned long weight,
178 struct load_weight *lw)
179{
180 u64 tmp;
181
182 /*
183 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
184 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
185 * 2^SCHED_LOAD_RESOLUTION.
186 */
187 if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
188 tmp = (u64)delta_exec * scale_load_down(weight);
189 else
190 tmp = (u64)delta_exec;
191
192 if (!lw->inv_weight) {
193 unsigned long w = scale_load_down(lw->weight);
194
195 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
196 lw->inv_weight = 1;
197 else if (unlikely(!w))
198 lw->inv_weight = WMULT_CONST;
199 else
200 lw->inv_weight = WMULT_CONST / w;
201 }
202
203 /*
204 * Check whether we'd overflow the 64-bit multiplication:
205 */
206 if (unlikely(tmp > WMULT_CONST))
207 tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
208 WMULT_SHIFT/2);
209 else
210 tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
211
212 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
213}
214
215
216const struct sched_class fair_sched_class;
a4c2f00f 217
bf0f6f24
IM
218/**************************************************************
219 * CFS operations on generic schedulable entities:
220 */
221
62160e3f 222#ifdef CONFIG_FAIR_GROUP_SCHED
bf0f6f24 223
62160e3f 224/* cpu runqueue to which this cfs_rq is attached */
bf0f6f24
IM
225static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
226{
62160e3f 227 return cfs_rq->rq;
bf0f6f24
IM
228}
229
62160e3f
IM
230/* An entity is a task if it doesn't "own" a runqueue */
231#define entity_is_task(se) (!se->my_q)
bf0f6f24 232
8f48894f
PZ
233static inline struct task_struct *task_of(struct sched_entity *se)
234{
235#ifdef CONFIG_SCHED_DEBUG
236 WARN_ON_ONCE(!entity_is_task(se));
237#endif
238 return container_of(se, struct task_struct, se);
239}
240
b758149c
PZ
241/* Walk up scheduling entities hierarchy */
242#define for_each_sched_entity(se) \
243 for (; se; se = se->parent)
244
245static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
246{
247 return p->se.cfs_rq;
248}
249
250/* runqueue on which this entity is (to be) queued */
251static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
252{
253 return se->cfs_rq;
254}
255
256/* runqueue "owned" by this group */
257static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
258{
259 return grp->my_q;
260}
261
9ee474f5
PT
262static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq);
263
3d4b47b4
PZ
264static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
265{
266 if (!cfs_rq->on_list) {
67e86250
PT
267 /*
268 * Ensure we either appear before our parent (if already
269 * enqueued) or force our parent to appear after us when it is
270 * enqueued. The fact that we always enqueue bottom-up
271 * reduces this to two cases.
272 */
273 if (cfs_rq->tg->parent &&
274 cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
275 list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
276 &rq_of(cfs_rq)->leaf_cfs_rq_list);
277 } else {
278 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
3d4b47b4 279 &rq_of(cfs_rq)->leaf_cfs_rq_list);
67e86250 280 }
3d4b47b4
PZ
281
282 cfs_rq->on_list = 1;
9ee474f5
PT
283 /* We should have no load, but we need to update last_decay. */
284 update_cfs_rq_blocked_load(cfs_rq);
3d4b47b4
PZ
285 }
286}
287
288static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
289{
290 if (cfs_rq->on_list) {
291 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
292 cfs_rq->on_list = 0;
293 }
294}
295
b758149c
PZ
296/* Iterate thr' all leaf cfs_rq's on a runqueue */
297#define for_each_leaf_cfs_rq(rq, cfs_rq) \
298 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
299
300/* Do the two (enqueued) entities belong to the same group ? */
301static inline int
302is_same_group(struct sched_entity *se, struct sched_entity *pse)
303{
304 if (se->cfs_rq == pse->cfs_rq)
305 return 1;
306
307 return 0;
308}
309
310static inline struct sched_entity *parent_entity(struct sched_entity *se)
311{
312 return se->parent;
313}
314
464b7527
PZ
315/* return depth at which a sched entity is present in the hierarchy */
316static inline int depth_se(struct sched_entity *se)
317{
318 int depth = 0;
319
320 for_each_sched_entity(se)
321 depth++;
322
323 return depth;
324}
325
326static void
327find_matching_se(struct sched_entity **se, struct sched_entity **pse)
328{
329 int se_depth, pse_depth;
330
331 /*
332 * preemption test can be made between sibling entities who are in the
333 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
334 * both tasks until we find their ancestors who are siblings of common
335 * parent.
336 */
337
338 /* First walk up until both entities are at same depth */
339 se_depth = depth_se(*se);
340 pse_depth = depth_se(*pse);
341
342 while (se_depth > pse_depth) {
343 se_depth--;
344 *se = parent_entity(*se);
345 }
346
347 while (pse_depth > se_depth) {
348 pse_depth--;
349 *pse = parent_entity(*pse);
350 }
351
352 while (!is_same_group(*se, *pse)) {
353 *se = parent_entity(*se);
354 *pse = parent_entity(*pse);
355 }
356}
357
8f48894f
PZ
358#else /* !CONFIG_FAIR_GROUP_SCHED */
359
360static inline struct task_struct *task_of(struct sched_entity *se)
361{
362 return container_of(se, struct task_struct, se);
363}
bf0f6f24 364
62160e3f
IM
365static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
366{
367 return container_of(cfs_rq, struct rq, cfs);
bf0f6f24
IM
368}
369
370#define entity_is_task(se) 1
371
b758149c
PZ
372#define for_each_sched_entity(se) \
373 for (; se; se = NULL)
bf0f6f24 374
b758149c 375static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
bf0f6f24 376{
b758149c 377 return &task_rq(p)->cfs;
bf0f6f24
IM
378}
379
b758149c
PZ
380static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
381{
382 struct task_struct *p = task_of(se);
383 struct rq *rq = task_rq(p);
384
385 return &rq->cfs;
386}
387
388/* runqueue "owned" by this group */
389static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
390{
391 return NULL;
392}
393
3d4b47b4
PZ
394static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
395{
396}
397
398static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
399{
400}
401
b758149c
PZ
402#define for_each_leaf_cfs_rq(rq, cfs_rq) \
403 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
404
405static inline int
406is_same_group(struct sched_entity *se, struct sched_entity *pse)
407{
408 return 1;
409}
410
411static inline struct sched_entity *parent_entity(struct sched_entity *se)
412{
413 return NULL;
414}
415
464b7527
PZ
416static inline void
417find_matching_se(struct sched_entity **se, struct sched_entity **pse)
418{
419}
420
b758149c
PZ
421#endif /* CONFIG_FAIR_GROUP_SCHED */
422
6c16a6dc
PZ
423static __always_inline
424void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
bf0f6f24
IM
425
426/**************************************************************
427 * Scheduling class tree data structure manipulation methods:
428 */
429
0702e3eb 430static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
02e0431a 431{
368059a9
PZ
432 s64 delta = (s64)(vruntime - min_vruntime);
433 if (delta > 0)
02e0431a
PZ
434 min_vruntime = vruntime;
435
436 return min_vruntime;
437}
438
0702e3eb 439static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
b0ffd246
PZ
440{
441 s64 delta = (s64)(vruntime - min_vruntime);
442 if (delta < 0)
443 min_vruntime = vruntime;
444
445 return min_vruntime;
446}
447
54fdc581
FC
448static inline int entity_before(struct sched_entity *a,
449 struct sched_entity *b)
450{
451 return (s64)(a->vruntime - b->vruntime) < 0;
452}
453
1af5f730
PZ
454static void update_min_vruntime(struct cfs_rq *cfs_rq)
455{
456 u64 vruntime = cfs_rq->min_vruntime;
457
458 if (cfs_rq->curr)
459 vruntime = cfs_rq->curr->vruntime;
460
461 if (cfs_rq->rb_leftmost) {
462 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
463 struct sched_entity,
464 run_node);
465
e17036da 466 if (!cfs_rq->curr)
1af5f730
PZ
467 vruntime = se->vruntime;
468 else
469 vruntime = min_vruntime(vruntime, se->vruntime);
470 }
471
472 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
3fe1698b
PZ
473#ifndef CONFIG_64BIT
474 smp_wmb();
475 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
476#endif
1af5f730
PZ
477}
478
bf0f6f24
IM
479/*
480 * Enqueue an entity into the rb-tree:
481 */
0702e3eb 482static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24
IM
483{
484 struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
485 struct rb_node *parent = NULL;
486 struct sched_entity *entry;
bf0f6f24
IM
487 int leftmost = 1;
488
489 /*
490 * Find the right place in the rbtree:
491 */
492 while (*link) {
493 parent = *link;
494 entry = rb_entry(parent, struct sched_entity, run_node);
495 /*
496 * We dont care about collisions. Nodes with
497 * the same key stay together.
498 */
2bd2d6f2 499 if (entity_before(se, entry)) {
bf0f6f24
IM
500 link = &parent->rb_left;
501 } else {
502 link = &parent->rb_right;
503 leftmost = 0;
504 }
505 }
506
507 /*
508 * Maintain a cache of leftmost tree entries (it is frequently
509 * used):
510 */
1af5f730 511 if (leftmost)
57cb499d 512 cfs_rq->rb_leftmost = &se->run_node;
bf0f6f24
IM
513
514 rb_link_node(&se->run_node, parent, link);
515 rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
bf0f6f24
IM
516}
517
0702e3eb 518static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 519{
3fe69747
PZ
520 if (cfs_rq->rb_leftmost == &se->run_node) {
521 struct rb_node *next_node;
3fe69747
PZ
522
523 next_node = rb_next(&se->run_node);
524 cfs_rq->rb_leftmost = next_node;
3fe69747 525 }
e9acbff6 526
bf0f6f24 527 rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
bf0f6f24
IM
528}
529
029632fb 530struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
bf0f6f24 531{
f4b6755f
PZ
532 struct rb_node *left = cfs_rq->rb_leftmost;
533
534 if (!left)
535 return NULL;
536
537 return rb_entry(left, struct sched_entity, run_node);
bf0f6f24
IM
538}
539
ac53db59
RR
540static struct sched_entity *__pick_next_entity(struct sched_entity *se)
541{
542 struct rb_node *next = rb_next(&se->run_node);
543
544 if (!next)
545 return NULL;
546
547 return rb_entry(next, struct sched_entity, run_node);
548}
549
550#ifdef CONFIG_SCHED_DEBUG
029632fb 551struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
aeb73b04 552{
7eee3e67 553 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
aeb73b04 554
70eee74b
BS
555 if (!last)
556 return NULL;
7eee3e67
IM
557
558 return rb_entry(last, struct sched_entity, run_node);
aeb73b04
PZ
559}
560
bf0f6f24
IM
561/**************************************************************
562 * Scheduling class statistics methods:
563 */
564
acb4a848 565int sched_proc_update_handler(struct ctl_table *table, int write,
8d65af78 566 void __user *buffer, size_t *lenp,
b2be5e96
PZ
567 loff_t *ppos)
568{
8d65af78 569 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
acb4a848 570 int factor = get_update_sysctl_factor();
b2be5e96
PZ
571
572 if (ret || !write)
573 return ret;
574
575 sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
576 sysctl_sched_min_granularity);
577
acb4a848
CE
578#define WRT_SYSCTL(name) \
579 (normalized_sysctl_##name = sysctl_##name / (factor))
580 WRT_SYSCTL(sched_min_granularity);
581 WRT_SYSCTL(sched_latency);
582 WRT_SYSCTL(sched_wakeup_granularity);
acb4a848
CE
583#undef WRT_SYSCTL
584
b2be5e96
PZ
585 return 0;
586}
587#endif
647e7cac 588
a7be37ac 589/*
f9c0b095 590 * delta /= w
a7be37ac
PZ
591 */
592static inline unsigned long
593calc_delta_fair(unsigned long delta, struct sched_entity *se)
594{
f9c0b095
PZ
595 if (unlikely(se->load.weight != NICE_0_LOAD))
596 delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
a7be37ac
PZ
597
598 return delta;
599}
600
647e7cac
IM
601/*
602 * The idea is to set a period in which each task runs once.
603 *
532b1858 604 * When there are too many tasks (sched_nr_latency) we have to stretch
647e7cac
IM
605 * this period because otherwise the slices get too small.
606 *
607 * p = (nr <= nl) ? l : l*nr/nl
608 */
4d78e7b6
PZ
609static u64 __sched_period(unsigned long nr_running)
610{
611 u64 period = sysctl_sched_latency;
b2be5e96 612 unsigned long nr_latency = sched_nr_latency;
4d78e7b6
PZ
613
614 if (unlikely(nr_running > nr_latency)) {
4bf0b771 615 period = sysctl_sched_min_granularity;
4d78e7b6 616 period *= nr_running;
4d78e7b6
PZ
617 }
618
619 return period;
620}
621
647e7cac
IM
622/*
623 * We calculate the wall-time slice from the period by taking a part
624 * proportional to the weight.
625 *
f9c0b095 626 * s = p*P[w/rw]
647e7cac 627 */
6d0f0ebd 628static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
21805085 629{
0a582440 630 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
f9c0b095 631
0a582440 632 for_each_sched_entity(se) {
6272d68c 633 struct load_weight *load;
3104bf03 634 struct load_weight lw;
6272d68c
LM
635
636 cfs_rq = cfs_rq_of(se);
637 load = &cfs_rq->load;
f9c0b095 638
0a582440 639 if (unlikely(!se->on_rq)) {
3104bf03 640 lw = cfs_rq->load;
0a582440
MG
641
642 update_load_add(&lw, se->load.weight);
643 load = &lw;
644 }
645 slice = calc_delta_mine(slice, se->load.weight, load);
646 }
647 return slice;
bf0f6f24
IM
648}
649
647e7cac 650/*
ac884dec 651 * We calculate the vruntime slice of a to be inserted task
647e7cac 652 *
f9c0b095 653 * vs = s/w
647e7cac 654 */
f9c0b095 655static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
67e9fb2a 656{
f9c0b095 657 return calc_delta_fair(sched_slice(cfs_rq, se), se);
a7be37ac
PZ
658}
659
d6b55918 660static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
6d5ab293 661static void update_cfs_shares(struct cfs_rq *cfs_rq);
3b3d190e 662
bf0f6f24
IM
663/*
664 * Update the current task's runtime statistics. Skip current tasks that
665 * are not in our scheduling class.
666 */
667static inline void
8ebc91d9
IM
668__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
669 unsigned long delta_exec)
bf0f6f24 670{
bbdba7c0 671 unsigned long delta_exec_weighted;
bf0f6f24 672
41acab88
LDM
673 schedstat_set(curr->statistics.exec_max,
674 max((u64)delta_exec, curr->statistics.exec_max));
bf0f6f24
IM
675
676 curr->sum_exec_runtime += delta_exec;
7a62eabc 677 schedstat_add(cfs_rq, exec_clock, delta_exec);
a7be37ac 678 delta_exec_weighted = calc_delta_fair(delta_exec, curr);
88ec22d3 679
e9acbff6 680 curr->vruntime += delta_exec_weighted;
1af5f730 681 update_min_vruntime(cfs_rq);
3b3d190e 682
70caf8a6 683#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
3b3d190e 684 cfs_rq->load_unacc_exec_time += delta_exec;
3b3d190e 685#endif
bf0f6f24
IM
686}
687
b7cc0896 688static void update_curr(struct cfs_rq *cfs_rq)
bf0f6f24 689{
429d43bc 690 struct sched_entity *curr = cfs_rq->curr;
305e6835 691 u64 now = rq_of(cfs_rq)->clock_task;
bf0f6f24
IM
692 unsigned long delta_exec;
693
694 if (unlikely(!curr))
695 return;
696
697 /*
698 * Get the amount of time the current task was running
699 * since the last time we changed load (this cannot
700 * overflow on 32 bits):
701 */
8ebc91d9 702 delta_exec = (unsigned long)(now - curr->exec_start);
34f28ecd
PZ
703 if (!delta_exec)
704 return;
bf0f6f24 705
8ebc91d9
IM
706 __update_curr(cfs_rq, curr, delta_exec);
707 curr->exec_start = now;
d842de87
SV
708
709 if (entity_is_task(curr)) {
710 struct task_struct *curtask = task_of(curr);
711
f977bb49 712 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
d842de87 713 cpuacct_charge(curtask, delta_exec);
f06febc9 714 account_group_exec_runtime(curtask, delta_exec);
d842de87 715 }
ec12cb7f
PT
716
717 account_cfs_rq_runtime(cfs_rq, delta_exec);
bf0f6f24
IM
718}
719
720static inline void
5870db5b 721update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 722{
41acab88 723 schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
bf0f6f24
IM
724}
725
bf0f6f24
IM
726/*
727 * Task is being enqueued - update stats:
728 */
d2417e5a 729static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 730{
bf0f6f24
IM
731 /*
732 * Are we enqueueing a waiting task? (for current tasks
733 * a dequeue/enqueue event is a NOP)
734 */
429d43bc 735 if (se != cfs_rq->curr)
5870db5b 736 update_stats_wait_start(cfs_rq, se);
bf0f6f24
IM
737}
738
bf0f6f24 739static void
9ef0a961 740update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 741{
41acab88
LDM
742 schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
743 rq_of(cfs_rq)->clock - se->statistics.wait_start));
744 schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
745 schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
746 rq_of(cfs_rq)->clock - se->statistics.wait_start);
768d0c27
PZ
747#ifdef CONFIG_SCHEDSTATS
748 if (entity_is_task(se)) {
749 trace_sched_stat_wait(task_of(se),
41acab88 750 rq_of(cfs_rq)->clock - se->statistics.wait_start);
768d0c27
PZ
751 }
752#endif
41acab88 753 schedstat_set(se->statistics.wait_start, 0);
bf0f6f24
IM
754}
755
756static inline void
19b6a2e3 757update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 758{
bf0f6f24
IM
759 /*
760 * Mark the end of the wait period if dequeueing a
761 * waiting task:
762 */
429d43bc 763 if (se != cfs_rq->curr)
9ef0a961 764 update_stats_wait_end(cfs_rq, se);
bf0f6f24
IM
765}
766
767/*
768 * We are picking a new current task - update its stats:
769 */
770static inline void
79303e9e 771update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24
IM
772{
773 /*
774 * We are starting a new run period:
775 */
305e6835 776 se->exec_start = rq_of(cfs_rq)->clock_task;
bf0f6f24
IM
777}
778
bf0f6f24
IM
779/**************************************************
780 * Scheduling class queueing methods:
781 */
782
30cfdcfc
DA
783static void
784account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
785{
786 update_load_add(&cfs_rq->load, se->load.weight);
c09595f6 787 if (!parent_entity(se))
029632fb 788 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
367456c7
PZ
789#ifdef CONFIG_SMP
790 if (entity_is_task(se))
eb95308e 791 list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
367456c7 792#endif
30cfdcfc 793 cfs_rq->nr_running++;
30cfdcfc
DA
794}
795
796static void
797account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
798{
799 update_load_sub(&cfs_rq->load, se->load.weight);
c09595f6 800 if (!parent_entity(se))
029632fb 801 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
367456c7 802 if (entity_is_task(se))
b87f1724 803 list_del_init(&se->group_node);
30cfdcfc 804 cfs_rq->nr_running--;
30cfdcfc
DA
805}
806
3ff6dcac 807#ifdef CONFIG_FAIR_GROUP_SCHED
64660c86
PT
808/* we need this in update_cfs_load and load-balance functions below */
809static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
3ff6dcac 810# ifdef CONFIG_SMP
d6b55918
PT
811static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
812 int global_update)
813{
814 struct task_group *tg = cfs_rq->tg;
815 long load_avg;
816
817 load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
818 load_avg -= cfs_rq->load_contribution;
819
820 if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
821 atomic_add(load_avg, &tg->load_weight);
822 cfs_rq->load_contribution += load_avg;
823 }
824}
825
826static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
2069dd75 827{
a7a4f8a7 828 u64 period = sysctl_sched_shares_window;
2069dd75 829 u64 now, delta;
e33078ba 830 unsigned long load = cfs_rq->load.weight;
2069dd75 831
64660c86 832 if (cfs_rq->tg == &root_task_group || throttled_hierarchy(cfs_rq))
2069dd75
PZ
833 return;
834
05ca62c6 835 now = rq_of(cfs_rq)->clock_task;
2069dd75
PZ
836 delta = now - cfs_rq->load_stamp;
837
e33078ba
PT
838 /* truncate load history at 4 idle periods */
839 if (cfs_rq->load_stamp > cfs_rq->load_last &&
840 now - cfs_rq->load_last > 4 * period) {
841 cfs_rq->load_period = 0;
842 cfs_rq->load_avg = 0;
f07333bf 843 delta = period - 1;
e33078ba
PT
844 }
845
2069dd75 846 cfs_rq->load_stamp = now;
3b3d190e 847 cfs_rq->load_unacc_exec_time = 0;
2069dd75 848 cfs_rq->load_period += delta;
e33078ba
PT
849 if (load) {
850 cfs_rq->load_last = now;
851 cfs_rq->load_avg += delta * load;
852 }
2069dd75 853
d6b55918
PT
854 /* consider updating load contribution on each fold or truncate */
855 if (global_update || cfs_rq->load_period > period
856 || !cfs_rq->load_period)
857 update_cfs_rq_load_contribution(cfs_rq, global_update);
858
2069dd75
PZ
859 while (cfs_rq->load_period > period) {
860 /*
861 * Inline assembly required to prevent the compiler
862 * optimising this loop into a divmod call.
863 * See __iter_div_u64_rem() for another example of this.
864 */
865 asm("" : "+rm" (cfs_rq->load_period));
866 cfs_rq->load_period /= 2;
867 cfs_rq->load_avg /= 2;
868 }
3d4b47b4 869
e33078ba
PT
870 if (!cfs_rq->curr && !cfs_rq->nr_running && !cfs_rq->load_avg)
871 list_del_leaf_cfs_rq(cfs_rq);
2069dd75
PZ
872}
873
cf5f0acf
PZ
874static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
875{
876 long tg_weight;
877
878 /*
879 * Use this CPU's actual weight instead of the last load_contribution
880 * to gain a more accurate current total weight. See
881 * update_cfs_rq_load_contribution().
882 */
883 tg_weight = atomic_read(&tg->load_weight);
884 tg_weight -= cfs_rq->load_contribution;
885 tg_weight += cfs_rq->load.weight;
886
887 return tg_weight;
888}
889
6d5ab293 890static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
3ff6dcac 891{
cf5f0acf 892 long tg_weight, load, shares;
3ff6dcac 893
cf5f0acf 894 tg_weight = calc_tg_weight(tg, cfs_rq);
6d5ab293 895 load = cfs_rq->load.weight;
3ff6dcac 896
3ff6dcac 897 shares = (tg->shares * load);
cf5f0acf
PZ
898 if (tg_weight)
899 shares /= tg_weight;
3ff6dcac
YZ
900
901 if (shares < MIN_SHARES)
902 shares = MIN_SHARES;
903 if (shares > tg->shares)
904 shares = tg->shares;
905
906 return shares;
907}
908
909static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
910{
911 if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
912 update_cfs_load(cfs_rq, 0);
6d5ab293 913 update_cfs_shares(cfs_rq);
3ff6dcac
YZ
914 }
915}
916# else /* CONFIG_SMP */
917static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
918{
919}
920
6d5ab293 921static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
3ff6dcac
YZ
922{
923 return tg->shares;
924}
925
926static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
927{
928}
929# endif /* CONFIG_SMP */
2069dd75
PZ
930static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
931 unsigned long weight)
932{
19e5eebb
PT
933 if (se->on_rq) {
934 /* commit outstanding execution time */
935 if (cfs_rq->curr == se)
936 update_curr(cfs_rq);
2069dd75 937 account_entity_dequeue(cfs_rq, se);
19e5eebb 938 }
2069dd75
PZ
939
940 update_load_set(&se->load, weight);
941
942 if (se->on_rq)
943 account_entity_enqueue(cfs_rq, se);
944}
945
6d5ab293 946static void update_cfs_shares(struct cfs_rq *cfs_rq)
2069dd75
PZ
947{
948 struct task_group *tg;
949 struct sched_entity *se;
3ff6dcac 950 long shares;
2069dd75 951
2069dd75
PZ
952 tg = cfs_rq->tg;
953 se = tg->se[cpu_of(rq_of(cfs_rq))];
64660c86 954 if (!se || throttled_hierarchy(cfs_rq))
2069dd75 955 return;
3ff6dcac
YZ
956#ifndef CONFIG_SMP
957 if (likely(se->load.weight == tg->shares))
958 return;
959#endif
6d5ab293 960 shares = calc_cfs_shares(cfs_rq, tg);
2069dd75
PZ
961
962 reweight_entity(cfs_rq_of(se), se, shares);
963}
964#else /* CONFIG_FAIR_GROUP_SCHED */
d6b55918 965static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
2069dd75
PZ
966{
967}
968
6d5ab293 969static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
2069dd75
PZ
970{
971}
43365bd7
PT
972
973static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
974{
975}
2069dd75
PZ
976#endif /* CONFIG_FAIR_GROUP_SCHED */
977
9d85f21c
PT
978#ifdef CONFIG_SMP
979/*
980 * Approximate:
981 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
982 */
983static __always_inline u64 decay_load(u64 val, u64 n)
984{
985 for (; n && val; n--) {
986 val *= 4008;
987 val >>= 12;
988 }
989
990 return val;
991}
992
993/*
994 * We can represent the historical contribution to runnable average as the
995 * coefficients of a geometric series. To do this we sub-divide our runnable
996 * history into segments of approximately 1ms (1024us); label the segment that
997 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
998 *
999 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
1000 * p0 p1 p2
1001 * (now) (~1ms ago) (~2ms ago)
1002 *
1003 * Let u_i denote the fraction of p_i that the entity was runnable.
1004 *
1005 * We then designate the fractions u_i as our co-efficients, yielding the
1006 * following representation of historical load:
1007 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
1008 *
1009 * We choose y based on the with of a reasonably scheduling period, fixing:
1010 * y^32 = 0.5
1011 *
1012 * This means that the contribution to load ~32ms ago (u_32) will be weighted
1013 * approximately half as much as the contribution to load within the last ms
1014 * (u_0).
1015 *
1016 * When a period "rolls over" and we have new u_0`, multiplying the previous
1017 * sum again by y is sufficient to update:
1018 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
1019 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
1020 */
1021static __always_inline int __update_entity_runnable_avg(u64 now,
1022 struct sched_avg *sa,
1023 int runnable)
1024{
1025 u64 delta;
1026 int delta_w, decayed = 0;
1027
1028 delta = now - sa->last_runnable_update;
1029 /*
1030 * This should only happen when time goes backwards, which it
1031 * unfortunately does during sched clock init when we swap over to TSC.
1032 */
1033 if ((s64)delta < 0) {
1034 sa->last_runnable_update = now;
1035 return 0;
1036 }
1037
1038 /*
1039 * Use 1024ns as the unit of measurement since it's a reasonable
1040 * approximation of 1us and fast to compute.
1041 */
1042 delta >>= 10;
1043 if (!delta)
1044 return 0;
1045 sa->last_runnable_update = now;
1046
1047 /* delta_w is the amount already accumulated against our next period */
1048 delta_w = sa->runnable_avg_period % 1024;
1049 if (delta + delta_w >= 1024) {
1050 /* period roll-over */
1051 decayed = 1;
1052
1053 /*
1054 * Now that we know we're crossing a period boundary, figure
1055 * out how much from delta we need to complete the current
1056 * period and accrue it.
1057 */
1058 delta_w = 1024 - delta_w;
1059 BUG_ON(delta_w > delta);
1060 do {
1061 if (runnable)
1062 sa->runnable_avg_sum += delta_w;
1063 sa->runnable_avg_period += delta_w;
1064
1065 /*
1066 * Remainder of delta initiates a new period, roll over
1067 * the previous.
1068 */
1069 sa->runnable_avg_sum =
1070 decay_load(sa->runnable_avg_sum, 1);
1071 sa->runnable_avg_period =
1072 decay_load(sa->runnable_avg_period, 1);
1073
1074 delta -= delta_w;
1075 /* New period is empty */
1076 delta_w = 1024;
1077 } while (delta >= 1024);
1078 }
1079
1080 /* Remainder of delta accrued against u_0` */
1081 if (runnable)
1082 sa->runnable_avg_sum += delta;
1083 sa->runnable_avg_period += delta;
1084
1085 return decayed;
1086}
1087
9ee474f5
PT
1088/* Synchronize an entity's decay with its parenting cfs_rq.*/
1089static inline void __synchronize_entity_decay(struct sched_entity *se)
1090{
1091 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1092 u64 decays = atomic64_read(&cfs_rq->decay_counter);
1093
1094 decays -= se->avg.decay_count;
1095 if (!decays)
1096 return;
1097
1098 se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
1099 se->avg.decay_count = 0;
1100}
1101
2dac754e
PT
1102/* Compute the current contribution to load_avg by se, return any delta */
1103static long __update_entity_load_avg_contrib(struct sched_entity *se)
1104{
1105 long old_contrib = se->avg.load_avg_contrib;
1106
1107 if (!entity_is_task(se))
1108 return 0;
1109
1110 se->avg.load_avg_contrib = div64_u64(se->avg.runnable_avg_sum *
1111 se->load.weight,
1112 se->avg.runnable_avg_period + 1);
1113
1114 return se->avg.load_avg_contrib - old_contrib;
1115}
1116
9ee474f5
PT
1117static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
1118 long load_contrib)
1119{
1120 if (likely(load_contrib < cfs_rq->blocked_load_avg))
1121 cfs_rq->blocked_load_avg -= load_contrib;
1122 else
1123 cfs_rq->blocked_load_avg = 0;
1124}
1125
9d85f21c 1126/* Update a sched_entity's runnable average */
9ee474f5
PT
1127static inline void update_entity_load_avg(struct sched_entity *se,
1128 int update_cfs_rq)
9d85f21c 1129{
2dac754e
PT
1130 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1131 long contrib_delta;
1132
1133 if (!__update_entity_runnable_avg(rq_of(cfs_rq)->clock_task, &se->avg,
1134 se->on_rq))
1135 return;
1136
1137 contrib_delta = __update_entity_load_avg_contrib(se);
9ee474f5
PT
1138
1139 if (!update_cfs_rq)
1140 return;
1141
2dac754e
PT
1142 if (se->on_rq)
1143 cfs_rq->runnable_load_avg += contrib_delta;
9ee474f5
PT
1144 else
1145 subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
1146}
1147
1148/*
1149 * Decay the load contributed by all blocked children and account this so that
1150 * their contribution may appropriately discounted when they wake up.
1151 */
1152static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq)
1153{
1154 u64 now = rq_of(cfs_rq)->clock_task >> 20;
1155 u64 decays;
1156
1157 decays = now - cfs_rq->last_decay;
1158 if (!decays)
1159 return;
1160
1161 cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
1162 decays);
1163 atomic64_add(decays, &cfs_rq->decay_counter);
1164
1165 cfs_rq->last_decay = now;
9d85f21c 1166}
18bf2805
BS
1167
1168static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
1169{
1170 __update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable);
1171}
2dac754e
PT
1172
1173/* Add the load generated by se into cfs_rq's child load-average */
1174static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
9ee474f5
PT
1175 struct sched_entity *se,
1176 int wakeup)
2dac754e 1177{
9ee474f5
PT
1178 /* we track migrations using entity decay_count == 0 */
1179 if (unlikely(!se->avg.decay_count)) {
1180 se->avg.last_runnable_update = rq_of(cfs_rq)->clock_task;
1181 wakeup = 0;
1182 } else {
1183 __synchronize_entity_decay(se);
1184 }
1185
1186 if (wakeup)
1187 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
1188
1189 update_entity_load_avg(se, 0);
2dac754e 1190 cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
9ee474f5 1191 update_cfs_rq_blocked_load(cfs_rq);
2dac754e
PT
1192}
1193
9ee474f5
PT
1194/*
1195 * Remove se's load from this cfs_rq child load-average, if the entity is
1196 * transitioning to a blocked state we track its projected decay using
1197 * blocked_load_avg.
1198 */
2dac754e 1199static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
9ee474f5
PT
1200 struct sched_entity *se,
1201 int sleep)
2dac754e 1202{
9ee474f5
PT
1203 update_entity_load_avg(se, 1);
1204
2dac754e 1205 cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
9ee474f5
PT
1206 if (sleep) {
1207 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
1208 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
1209 } /* migrations, e.g. sleep=0 leave decay_count == 0 */
2dac754e 1210}
9d85f21c 1211#else
9ee474f5
PT
1212static inline void update_entity_load_avg(struct sched_entity *se,
1213 int update_cfs_rq) {}
18bf2805 1214static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
2dac754e 1215static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
9ee474f5
PT
1216 struct sched_entity *se,
1217 int wakeup) {}
2dac754e 1218static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
9ee474f5
PT
1219 struct sched_entity *se,
1220 int sleep) {}
1221static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq) {}
9d85f21c
PT
1222#endif
1223
2396af69 1224static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 1225{
bf0f6f24 1226#ifdef CONFIG_SCHEDSTATS
e414314c
PZ
1227 struct task_struct *tsk = NULL;
1228
1229 if (entity_is_task(se))
1230 tsk = task_of(se);
1231
41acab88
LDM
1232 if (se->statistics.sleep_start) {
1233 u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;
bf0f6f24
IM
1234
1235 if ((s64)delta < 0)
1236 delta = 0;
1237
41acab88
LDM
1238 if (unlikely(delta > se->statistics.sleep_max))
1239 se->statistics.sleep_max = delta;
bf0f6f24 1240
8c79a045 1241 se->statistics.sleep_start = 0;
41acab88 1242 se->statistics.sum_sleep_runtime += delta;
9745512c 1243
768d0c27 1244 if (tsk) {
e414314c 1245 account_scheduler_latency(tsk, delta >> 10, 1);
768d0c27
PZ
1246 trace_sched_stat_sleep(tsk, delta);
1247 }
bf0f6f24 1248 }
41acab88
LDM
1249 if (se->statistics.block_start) {
1250 u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;
bf0f6f24
IM
1251
1252 if ((s64)delta < 0)
1253 delta = 0;
1254
41acab88
LDM
1255 if (unlikely(delta > se->statistics.block_max))
1256 se->statistics.block_max = delta;
bf0f6f24 1257
8c79a045 1258 se->statistics.block_start = 0;
41acab88 1259 se->statistics.sum_sleep_runtime += delta;
30084fbd 1260
e414314c 1261 if (tsk) {
8f0dfc34 1262 if (tsk->in_iowait) {
41acab88
LDM
1263 se->statistics.iowait_sum += delta;
1264 se->statistics.iowait_count++;
768d0c27 1265 trace_sched_stat_iowait(tsk, delta);
8f0dfc34
AV
1266 }
1267
b781a602
AV
1268 trace_sched_stat_blocked(tsk, delta);
1269
e414314c
PZ
1270 /*
1271 * Blocking time is in units of nanosecs, so shift by
1272 * 20 to get a milliseconds-range estimation of the
1273 * amount of time that the task spent sleeping:
1274 */
1275 if (unlikely(prof_on == SLEEP_PROFILING)) {
1276 profile_hits(SLEEP_PROFILING,
1277 (void *)get_wchan(tsk),
1278 delta >> 20);
1279 }
1280 account_scheduler_latency(tsk, delta >> 10, 0);
30084fbd 1281 }
bf0f6f24
IM
1282 }
1283#endif
1284}
1285
ddc97297
PZ
1286static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
1287{
1288#ifdef CONFIG_SCHED_DEBUG
1289 s64 d = se->vruntime - cfs_rq->min_vruntime;
1290
1291 if (d < 0)
1292 d = -d;
1293
1294 if (d > 3*sysctl_sched_latency)
1295 schedstat_inc(cfs_rq, nr_spread_over);
1296#endif
1297}
1298
aeb73b04
PZ
1299static void
1300place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
1301{
1af5f730 1302 u64 vruntime = cfs_rq->min_vruntime;
94dfb5e7 1303
2cb8600e
PZ
1304 /*
1305 * The 'current' period is already promised to the current tasks,
1306 * however the extra weight of the new task will slow them down a
1307 * little, place the new task so that it fits in the slot that
1308 * stays open at the end.
1309 */
94dfb5e7 1310 if (initial && sched_feat(START_DEBIT))
f9c0b095 1311 vruntime += sched_vslice(cfs_rq, se);
aeb73b04 1312
a2e7a7eb 1313 /* sleeps up to a single latency don't count. */
5ca9880c 1314 if (!initial) {
a2e7a7eb 1315 unsigned long thresh = sysctl_sched_latency;
a7be37ac 1316
a2e7a7eb
MG
1317 /*
1318 * Halve their sleep time's effect, to allow
1319 * for a gentler effect of sleepers:
1320 */
1321 if (sched_feat(GENTLE_FAIR_SLEEPERS))
1322 thresh >>= 1;
51e0304c 1323
a2e7a7eb 1324 vruntime -= thresh;
aeb73b04
PZ
1325 }
1326
b5d9d734
MG
1327 /* ensure we never gain time by being placed backwards. */
1328 vruntime = max_vruntime(se->vruntime, vruntime);
1329
67e9fb2a 1330 se->vruntime = vruntime;
aeb73b04
PZ
1331}
1332
d3d9dc33
PT
1333static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
1334
bf0f6f24 1335static void
88ec22d3 1336enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bf0f6f24 1337{
88ec22d3
PZ
1338 /*
1339 * Update the normalized vruntime before updating min_vruntime
1340 * through callig update_curr().
1341 */
371fd7e7 1342 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
88ec22d3
PZ
1343 se->vruntime += cfs_rq->min_vruntime;
1344
bf0f6f24 1345 /*
a2a2d680 1346 * Update run-time statistics of the 'current'.
bf0f6f24 1347 */
b7cc0896 1348 update_curr(cfs_rq);
d6b55918 1349 update_cfs_load(cfs_rq, 0);
9ee474f5 1350 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
a992241d 1351 account_entity_enqueue(cfs_rq, se);
6d5ab293 1352 update_cfs_shares(cfs_rq);
bf0f6f24 1353
88ec22d3 1354 if (flags & ENQUEUE_WAKEUP) {
aeb73b04 1355 place_entity(cfs_rq, se, 0);
2396af69 1356 enqueue_sleeper(cfs_rq, se);
e9acbff6 1357 }
bf0f6f24 1358
d2417e5a 1359 update_stats_enqueue(cfs_rq, se);
ddc97297 1360 check_spread(cfs_rq, se);
83b699ed
SV
1361 if (se != cfs_rq->curr)
1362 __enqueue_entity(cfs_rq, se);
2069dd75 1363 se->on_rq = 1;
3d4b47b4 1364
d3d9dc33 1365 if (cfs_rq->nr_running == 1) {
3d4b47b4 1366 list_add_leaf_cfs_rq(cfs_rq);
d3d9dc33
PT
1367 check_enqueue_throttle(cfs_rq);
1368 }
bf0f6f24
IM
1369}
1370
2c13c919 1371static void __clear_buddies_last(struct sched_entity *se)
2002c695 1372{
2c13c919
RR
1373 for_each_sched_entity(se) {
1374 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1375 if (cfs_rq->last == se)
1376 cfs_rq->last = NULL;
1377 else
1378 break;
1379 }
1380}
2002c695 1381
2c13c919
RR
1382static void __clear_buddies_next(struct sched_entity *se)
1383{
1384 for_each_sched_entity(se) {
1385 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1386 if (cfs_rq->next == se)
1387 cfs_rq->next = NULL;
1388 else
1389 break;
1390 }
2002c695
PZ
1391}
1392
ac53db59
RR
1393static void __clear_buddies_skip(struct sched_entity *se)
1394{
1395 for_each_sched_entity(se) {
1396 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1397 if (cfs_rq->skip == se)
1398 cfs_rq->skip = NULL;
1399 else
1400 break;
1401 }
1402}
1403
a571bbea
PZ
1404static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
1405{
2c13c919
RR
1406 if (cfs_rq->last == se)
1407 __clear_buddies_last(se);
1408
1409 if (cfs_rq->next == se)
1410 __clear_buddies_next(se);
ac53db59
RR
1411
1412 if (cfs_rq->skip == se)
1413 __clear_buddies_skip(se);
a571bbea
PZ
1414}
1415
6c16a6dc 1416static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
d8b4986d 1417
bf0f6f24 1418static void
371fd7e7 1419dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bf0f6f24 1420{
a2a2d680
DA
1421 /*
1422 * Update run-time statistics of the 'current'.
1423 */
1424 update_curr(cfs_rq);
9ee474f5 1425 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
a2a2d680 1426
19b6a2e3 1427 update_stats_dequeue(cfs_rq, se);
371fd7e7 1428 if (flags & DEQUEUE_SLEEP) {
67e9fb2a 1429#ifdef CONFIG_SCHEDSTATS
bf0f6f24
IM
1430 if (entity_is_task(se)) {
1431 struct task_struct *tsk = task_of(se);
1432
1433 if (tsk->state & TASK_INTERRUPTIBLE)
41acab88 1434 se->statistics.sleep_start = rq_of(cfs_rq)->clock;
bf0f6f24 1435 if (tsk->state & TASK_UNINTERRUPTIBLE)
41acab88 1436 se->statistics.block_start = rq_of(cfs_rq)->clock;
bf0f6f24 1437 }
db36cc7d 1438#endif
67e9fb2a
PZ
1439 }
1440
2002c695 1441 clear_buddies(cfs_rq, se);
4793241b 1442
83b699ed 1443 if (se != cfs_rq->curr)
30cfdcfc 1444 __dequeue_entity(cfs_rq, se);
2069dd75 1445 se->on_rq = 0;
d6b55918 1446 update_cfs_load(cfs_rq, 0);
30cfdcfc 1447 account_entity_dequeue(cfs_rq, se);
88ec22d3
PZ
1448
1449 /*
1450 * Normalize the entity after updating the min_vruntime because the
1451 * update can refer to the ->curr item and we need to reflect this
1452 * movement in our normalized position.
1453 */
371fd7e7 1454 if (!(flags & DEQUEUE_SLEEP))
88ec22d3 1455 se->vruntime -= cfs_rq->min_vruntime;
1e876231 1456
d8b4986d
PT
1457 /* return excess runtime on last dequeue */
1458 return_cfs_rq_runtime(cfs_rq);
1459
1e876231
PZ
1460 update_min_vruntime(cfs_rq);
1461 update_cfs_shares(cfs_rq);
bf0f6f24
IM
1462}
1463
1464/*
1465 * Preempt the current task with a newly woken task if needed:
1466 */
7c92e54f 1467static void
2e09bf55 1468check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
bf0f6f24 1469{
11697830 1470 unsigned long ideal_runtime, delta_exec;
f4cfb33e
WX
1471 struct sched_entity *se;
1472 s64 delta;
11697830 1473
6d0f0ebd 1474 ideal_runtime = sched_slice(cfs_rq, curr);
11697830 1475 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
a9f3e2b5 1476 if (delta_exec > ideal_runtime) {
bf0f6f24 1477 resched_task(rq_of(cfs_rq)->curr);
a9f3e2b5
MG
1478 /*
1479 * The current task ran long enough, ensure it doesn't get
1480 * re-elected due to buddy favours.
1481 */
1482 clear_buddies(cfs_rq, curr);
f685ceac
MG
1483 return;
1484 }
1485
1486 /*
1487 * Ensure that a task that missed wakeup preemption by a
1488 * narrow margin doesn't have to wait for a full slice.
1489 * This also mitigates buddy induced latencies under load.
1490 */
f685ceac
MG
1491 if (delta_exec < sysctl_sched_min_granularity)
1492 return;
1493
f4cfb33e
WX
1494 se = __pick_first_entity(cfs_rq);
1495 delta = curr->vruntime - se->vruntime;
f685ceac 1496
f4cfb33e
WX
1497 if (delta < 0)
1498 return;
d7d82944 1499
f4cfb33e
WX
1500 if (delta > ideal_runtime)
1501 resched_task(rq_of(cfs_rq)->curr);
bf0f6f24
IM
1502}
1503
83b699ed 1504static void
8494f412 1505set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 1506{
83b699ed
SV
1507 /* 'current' is not kept within the tree. */
1508 if (se->on_rq) {
1509 /*
1510 * Any task has to be enqueued before it get to execute on
1511 * a CPU. So account for the time it spent waiting on the
1512 * runqueue.
1513 */
1514 update_stats_wait_end(cfs_rq, se);
1515 __dequeue_entity(cfs_rq, se);
1516 }
1517
79303e9e 1518 update_stats_curr_start(cfs_rq, se);
429d43bc 1519 cfs_rq->curr = se;
eba1ed4b
IM
1520#ifdef CONFIG_SCHEDSTATS
1521 /*
1522 * Track our maximum slice length, if the CPU's load is at
1523 * least twice that of our own weight (i.e. dont track it
1524 * when there are only lesser-weight tasks around):
1525 */
495eca49 1526 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
41acab88 1527 se->statistics.slice_max = max(se->statistics.slice_max,
eba1ed4b
IM
1528 se->sum_exec_runtime - se->prev_sum_exec_runtime);
1529 }
1530#endif
4a55b450 1531 se->prev_sum_exec_runtime = se->sum_exec_runtime;
bf0f6f24
IM
1532}
1533
3f3a4904
PZ
1534static int
1535wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
1536
ac53db59
RR
1537/*
1538 * Pick the next process, keeping these things in mind, in this order:
1539 * 1) keep things fair between processes/task groups
1540 * 2) pick the "next" process, since someone really wants that to run
1541 * 3) pick the "last" process, for cache locality
1542 * 4) do not run the "skip" process, if something else is available
1543 */
f4b6755f 1544static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
aa2ac252 1545{
ac53db59 1546 struct sched_entity *se = __pick_first_entity(cfs_rq);
f685ceac 1547 struct sched_entity *left = se;
f4b6755f 1548
ac53db59
RR
1549 /*
1550 * Avoid running the skip buddy, if running something else can
1551 * be done without getting too unfair.
1552 */
1553 if (cfs_rq->skip == se) {
1554 struct sched_entity *second = __pick_next_entity(se);
1555 if (second && wakeup_preempt_entity(second, left) < 1)
1556 se = second;
1557 }
aa2ac252 1558
f685ceac
MG
1559 /*
1560 * Prefer last buddy, try to return the CPU to a preempted task.
1561 */
1562 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
1563 se = cfs_rq->last;
1564
ac53db59
RR
1565 /*
1566 * Someone really wants this to run. If it's not unfair, run it.
1567 */
1568 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
1569 se = cfs_rq->next;
1570
f685ceac 1571 clear_buddies(cfs_rq, se);
4793241b
PZ
1572
1573 return se;
aa2ac252
PZ
1574}
1575
d3d9dc33
PT
1576static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
1577
ab6cde26 1578static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
bf0f6f24
IM
1579{
1580 /*
1581 * If still on the runqueue then deactivate_task()
1582 * was not called and update_curr() has to be done:
1583 */
1584 if (prev->on_rq)
b7cc0896 1585 update_curr(cfs_rq);
bf0f6f24 1586
d3d9dc33
PT
1587 /* throttle cfs_rqs exceeding runtime */
1588 check_cfs_rq_runtime(cfs_rq);
1589
ddc97297 1590 check_spread(cfs_rq, prev);
30cfdcfc 1591 if (prev->on_rq) {
5870db5b 1592 update_stats_wait_start(cfs_rq, prev);
30cfdcfc
DA
1593 /* Put 'current' back into the tree. */
1594 __enqueue_entity(cfs_rq, prev);
9d85f21c 1595 /* in !on_rq case, update occurred at dequeue */
9ee474f5 1596 update_entity_load_avg(prev, 1);
30cfdcfc 1597 }
429d43bc 1598 cfs_rq->curr = NULL;
bf0f6f24
IM
1599}
1600
8f4d37ec
PZ
1601static void
1602entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
bf0f6f24 1603{
bf0f6f24 1604 /*
30cfdcfc 1605 * Update run-time statistics of the 'current'.
bf0f6f24 1606 */
30cfdcfc 1607 update_curr(cfs_rq);
bf0f6f24 1608
9d85f21c
PT
1609 /*
1610 * Ensure that runnable average is periodically updated.
1611 */
9ee474f5
PT
1612 update_entity_load_avg(curr, 1);
1613 update_cfs_rq_blocked_load(cfs_rq);
9d85f21c 1614
43365bd7
PT
1615 /*
1616 * Update share accounting for long-running entities.
1617 */
1618 update_entity_shares_tick(cfs_rq);
1619
8f4d37ec
PZ
1620#ifdef CONFIG_SCHED_HRTICK
1621 /*
1622 * queued ticks are scheduled to match the slice, so don't bother
1623 * validating it and just reschedule.
1624 */
983ed7a6
HH
1625 if (queued) {
1626 resched_task(rq_of(cfs_rq)->curr);
1627 return;
1628 }
8f4d37ec
PZ
1629 /*
1630 * don't let the period tick interfere with the hrtick preemption
1631 */
1632 if (!sched_feat(DOUBLE_TICK) &&
1633 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
1634 return;
1635#endif
1636
2c2efaed 1637 if (cfs_rq->nr_running > 1)
2e09bf55 1638 check_preempt_tick(cfs_rq, curr);
bf0f6f24
IM
1639}
1640
ab84d31e
PT
1641
1642/**************************************************
1643 * CFS bandwidth control machinery
1644 */
1645
1646#ifdef CONFIG_CFS_BANDWIDTH
029632fb
PZ
1647
1648#ifdef HAVE_JUMP_LABEL
c5905afb 1649static struct static_key __cfs_bandwidth_used;
029632fb
PZ
1650
1651static inline bool cfs_bandwidth_used(void)
1652{
c5905afb 1653 return static_key_false(&__cfs_bandwidth_used);
029632fb
PZ
1654}
1655
1656void account_cfs_bandwidth_used(int enabled, int was_enabled)
1657{
1658 /* only need to count groups transitioning between enabled/!enabled */
1659 if (enabled && !was_enabled)
c5905afb 1660 static_key_slow_inc(&__cfs_bandwidth_used);
029632fb 1661 else if (!enabled && was_enabled)
c5905afb 1662 static_key_slow_dec(&__cfs_bandwidth_used);
029632fb
PZ
1663}
1664#else /* HAVE_JUMP_LABEL */
1665static bool cfs_bandwidth_used(void)
1666{
1667 return true;
1668}
1669
1670void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
1671#endif /* HAVE_JUMP_LABEL */
1672
ab84d31e
PT
1673/*
1674 * default period for cfs group bandwidth.
1675 * default: 0.1s, units: nanoseconds
1676 */
1677static inline u64 default_cfs_period(void)
1678{
1679 return 100000000ULL;
1680}
ec12cb7f
PT
1681
1682static inline u64 sched_cfs_bandwidth_slice(void)
1683{
1684 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
1685}
1686
a9cf55b2
PT
1687/*
1688 * Replenish runtime according to assigned quota and update expiration time.
1689 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
1690 * additional synchronization around rq->lock.
1691 *
1692 * requires cfs_b->lock
1693 */
029632fb 1694void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
a9cf55b2
PT
1695{
1696 u64 now;
1697
1698 if (cfs_b->quota == RUNTIME_INF)
1699 return;
1700
1701 now = sched_clock_cpu(smp_processor_id());
1702 cfs_b->runtime = cfs_b->quota;
1703 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
1704}
1705
029632fb
PZ
1706static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
1707{
1708 return &tg->cfs_bandwidth;
1709}
1710
85dac906
PT
1711/* returns 0 on failure to allocate runtime */
1712static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
ec12cb7f
PT
1713{
1714 struct task_group *tg = cfs_rq->tg;
1715 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
a9cf55b2 1716 u64 amount = 0, min_amount, expires;
ec12cb7f
PT
1717
1718 /* note: this is a positive sum as runtime_remaining <= 0 */
1719 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
1720
1721 raw_spin_lock(&cfs_b->lock);
1722 if (cfs_b->quota == RUNTIME_INF)
1723 amount = min_amount;
58088ad0 1724 else {
a9cf55b2
PT
1725 /*
1726 * If the bandwidth pool has become inactive, then at least one
1727 * period must have elapsed since the last consumption.
1728 * Refresh the global state and ensure bandwidth timer becomes
1729 * active.
1730 */
1731 if (!cfs_b->timer_active) {
1732 __refill_cfs_bandwidth_runtime(cfs_b);
58088ad0 1733 __start_cfs_bandwidth(cfs_b);
a9cf55b2 1734 }
58088ad0
PT
1735
1736 if (cfs_b->runtime > 0) {
1737 amount = min(cfs_b->runtime, min_amount);
1738 cfs_b->runtime -= amount;
1739 cfs_b->idle = 0;
1740 }
ec12cb7f 1741 }
a9cf55b2 1742 expires = cfs_b->runtime_expires;
ec12cb7f
PT
1743 raw_spin_unlock(&cfs_b->lock);
1744
1745 cfs_rq->runtime_remaining += amount;
a9cf55b2
PT
1746 /*
1747 * we may have advanced our local expiration to account for allowed
1748 * spread between our sched_clock and the one on which runtime was
1749 * issued.
1750 */
1751 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
1752 cfs_rq->runtime_expires = expires;
85dac906
PT
1753
1754 return cfs_rq->runtime_remaining > 0;
ec12cb7f
PT
1755}
1756
a9cf55b2
PT
1757/*
1758 * Note: This depends on the synchronization provided by sched_clock and the
1759 * fact that rq->clock snapshots this value.
1760 */
1761static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
ec12cb7f 1762{
a9cf55b2
PT
1763 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
1764 struct rq *rq = rq_of(cfs_rq);
1765
1766 /* if the deadline is ahead of our clock, nothing to do */
1767 if (likely((s64)(rq->clock - cfs_rq->runtime_expires) < 0))
ec12cb7f
PT
1768 return;
1769
a9cf55b2
PT
1770 if (cfs_rq->runtime_remaining < 0)
1771 return;
1772
1773 /*
1774 * If the local deadline has passed we have to consider the
1775 * possibility that our sched_clock is 'fast' and the global deadline
1776 * has not truly expired.
1777 *
1778 * Fortunately we can check determine whether this the case by checking
1779 * whether the global deadline has advanced.
1780 */
1781
1782 if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
1783 /* extend local deadline, drift is bounded above by 2 ticks */
1784 cfs_rq->runtime_expires += TICK_NSEC;
1785 } else {
1786 /* global deadline is ahead, expiration has passed */
1787 cfs_rq->runtime_remaining = 0;
1788 }
1789}
1790
1791static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
1792 unsigned long delta_exec)
1793{
1794 /* dock delta_exec before expiring quota (as it could span periods) */
ec12cb7f 1795 cfs_rq->runtime_remaining -= delta_exec;
a9cf55b2
PT
1796 expire_cfs_rq_runtime(cfs_rq);
1797
1798 if (likely(cfs_rq->runtime_remaining > 0))
ec12cb7f
PT
1799 return;
1800
85dac906
PT
1801 /*
1802 * if we're unable to extend our runtime we resched so that the active
1803 * hierarchy can be throttled
1804 */
1805 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
1806 resched_task(rq_of(cfs_rq)->curr);
ec12cb7f
PT
1807}
1808
6c16a6dc
PZ
1809static __always_inline
1810void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
ec12cb7f 1811{
56f570e5 1812 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
ec12cb7f
PT
1813 return;
1814
1815 __account_cfs_rq_runtime(cfs_rq, delta_exec);
1816}
1817
85dac906
PT
1818static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
1819{
56f570e5 1820 return cfs_bandwidth_used() && cfs_rq->throttled;
85dac906
PT
1821}
1822
64660c86
PT
1823/* check whether cfs_rq, or any parent, is throttled */
1824static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
1825{
56f570e5 1826 return cfs_bandwidth_used() && cfs_rq->throttle_count;
64660c86
PT
1827}
1828
1829/*
1830 * Ensure that neither of the group entities corresponding to src_cpu or
1831 * dest_cpu are members of a throttled hierarchy when performing group
1832 * load-balance operations.
1833 */
1834static inline int throttled_lb_pair(struct task_group *tg,
1835 int src_cpu, int dest_cpu)
1836{
1837 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
1838
1839 src_cfs_rq = tg->cfs_rq[src_cpu];
1840 dest_cfs_rq = tg->cfs_rq[dest_cpu];
1841
1842 return throttled_hierarchy(src_cfs_rq) ||
1843 throttled_hierarchy(dest_cfs_rq);
1844}
1845
1846/* updated child weight may affect parent so we have to do this bottom up */
1847static int tg_unthrottle_up(struct task_group *tg, void *data)
1848{
1849 struct rq *rq = data;
1850 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
1851
1852 cfs_rq->throttle_count--;
1853#ifdef CONFIG_SMP
1854 if (!cfs_rq->throttle_count) {
1855 u64 delta = rq->clock_task - cfs_rq->load_stamp;
1856
1857 /* leaving throttled state, advance shares averaging windows */
1858 cfs_rq->load_stamp += delta;
1859 cfs_rq->load_last += delta;
1860
1861 /* update entity weight now that we are on_rq again */
1862 update_cfs_shares(cfs_rq);
1863 }
1864#endif
1865
1866 return 0;
1867}
1868
1869static int tg_throttle_down(struct task_group *tg, void *data)
1870{
1871 struct rq *rq = data;
1872 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
1873
1874 /* group is entering throttled state, record last load */
1875 if (!cfs_rq->throttle_count)
1876 update_cfs_load(cfs_rq, 0);
1877 cfs_rq->throttle_count++;
1878
1879 return 0;
1880}
1881
d3d9dc33 1882static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
85dac906
PT
1883{
1884 struct rq *rq = rq_of(cfs_rq);
1885 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
1886 struct sched_entity *se;
1887 long task_delta, dequeue = 1;
1888
1889 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
1890
1891 /* account load preceding throttle */
64660c86
PT
1892 rcu_read_lock();
1893 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
1894 rcu_read_unlock();
85dac906
PT
1895
1896 task_delta = cfs_rq->h_nr_running;
1897 for_each_sched_entity(se) {
1898 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
1899 /* throttled entity or throttle-on-deactivate */
1900 if (!se->on_rq)
1901 break;
1902
1903 if (dequeue)
1904 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
1905 qcfs_rq->h_nr_running -= task_delta;
1906
1907 if (qcfs_rq->load.weight)
1908 dequeue = 0;
1909 }
1910
1911 if (!se)
1912 rq->nr_running -= task_delta;
1913
1914 cfs_rq->throttled = 1;
e8da1b18 1915 cfs_rq->throttled_timestamp = rq->clock;
85dac906
PT
1916 raw_spin_lock(&cfs_b->lock);
1917 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
1918 raw_spin_unlock(&cfs_b->lock);
1919}
1920
029632fb 1921void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
671fd9da
PT
1922{
1923 struct rq *rq = rq_of(cfs_rq);
1924 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
1925 struct sched_entity *se;
1926 int enqueue = 1;
1927 long task_delta;
1928
1929 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
1930
1931 cfs_rq->throttled = 0;
1932 raw_spin_lock(&cfs_b->lock);
e8da1b18 1933 cfs_b->throttled_time += rq->clock - cfs_rq->throttled_timestamp;
671fd9da
PT
1934 list_del_rcu(&cfs_rq->throttled_list);
1935 raw_spin_unlock(&cfs_b->lock);
e8da1b18 1936 cfs_rq->throttled_timestamp = 0;
671fd9da 1937
64660c86
PT
1938 update_rq_clock(rq);
1939 /* update hierarchical throttle state */
1940 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
1941
671fd9da
PT
1942 if (!cfs_rq->load.weight)
1943 return;
1944
1945 task_delta = cfs_rq->h_nr_running;
1946 for_each_sched_entity(se) {
1947 if (se->on_rq)
1948 enqueue = 0;
1949
1950 cfs_rq = cfs_rq_of(se);
1951 if (enqueue)
1952 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
1953 cfs_rq->h_nr_running += task_delta;
1954
1955 if (cfs_rq_throttled(cfs_rq))
1956 break;
1957 }
1958
1959 if (!se)
1960 rq->nr_running += task_delta;
1961
1962 /* determine whether we need to wake up potentially idle cpu */
1963 if (rq->curr == rq->idle && rq->cfs.nr_running)
1964 resched_task(rq->curr);
1965}
1966
1967static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
1968 u64 remaining, u64 expires)
1969{
1970 struct cfs_rq *cfs_rq;
1971 u64 runtime = remaining;
1972
1973 rcu_read_lock();
1974 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
1975 throttled_list) {
1976 struct rq *rq = rq_of(cfs_rq);
1977
1978 raw_spin_lock(&rq->lock);
1979 if (!cfs_rq_throttled(cfs_rq))
1980 goto next;
1981
1982 runtime = -cfs_rq->runtime_remaining + 1;
1983 if (runtime > remaining)
1984 runtime = remaining;
1985 remaining -= runtime;
1986
1987 cfs_rq->runtime_remaining += runtime;
1988 cfs_rq->runtime_expires = expires;
1989
1990 /* we check whether we're throttled above */
1991 if (cfs_rq->runtime_remaining > 0)
1992 unthrottle_cfs_rq(cfs_rq);
1993
1994next:
1995 raw_spin_unlock(&rq->lock);
1996
1997 if (!remaining)
1998 break;
1999 }
2000 rcu_read_unlock();
2001
2002 return remaining;
2003}
2004
58088ad0
PT
2005/*
2006 * Responsible for refilling a task_group's bandwidth and unthrottling its
2007 * cfs_rqs as appropriate. If there has been no activity within the last
2008 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
2009 * used to track this state.
2010 */
2011static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
2012{
671fd9da
PT
2013 u64 runtime, runtime_expires;
2014 int idle = 1, throttled;
58088ad0
PT
2015
2016 raw_spin_lock(&cfs_b->lock);
2017 /* no need to continue the timer with no bandwidth constraint */
2018 if (cfs_b->quota == RUNTIME_INF)
2019 goto out_unlock;
2020
671fd9da
PT
2021 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2022 /* idle depends on !throttled (for the case of a large deficit) */
2023 idle = cfs_b->idle && !throttled;
e8da1b18 2024 cfs_b->nr_periods += overrun;
671fd9da 2025
a9cf55b2
PT
2026 /* if we're going inactive then everything else can be deferred */
2027 if (idle)
2028 goto out_unlock;
2029
2030 __refill_cfs_bandwidth_runtime(cfs_b);
2031
671fd9da
PT
2032 if (!throttled) {
2033 /* mark as potentially idle for the upcoming period */
2034 cfs_b->idle = 1;
2035 goto out_unlock;
2036 }
2037
e8da1b18
NR
2038 /* account preceding periods in which throttling occurred */
2039 cfs_b->nr_throttled += overrun;
2040
671fd9da
PT
2041 /*
2042 * There are throttled entities so we must first use the new bandwidth
2043 * to unthrottle them before making it generally available. This
2044 * ensures that all existing debts will be paid before a new cfs_rq is
2045 * allowed to run.
2046 */
2047 runtime = cfs_b->runtime;
2048 runtime_expires = cfs_b->runtime_expires;
2049 cfs_b->runtime = 0;
2050
2051 /*
2052 * This check is repeated as we are holding onto the new bandwidth
2053 * while we unthrottle. This can potentially race with an unthrottled
2054 * group trying to acquire new bandwidth from the global pool.
2055 */
2056 while (throttled && runtime > 0) {
2057 raw_spin_unlock(&cfs_b->lock);
2058 /* we can't nest cfs_b->lock while distributing bandwidth */
2059 runtime = distribute_cfs_runtime(cfs_b, runtime,
2060 runtime_expires);
2061 raw_spin_lock(&cfs_b->lock);
2062
2063 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2064 }
58088ad0 2065
671fd9da
PT
2066 /* return (any) remaining runtime */
2067 cfs_b->runtime = runtime;
2068 /*
2069 * While we are ensured activity in the period following an
2070 * unthrottle, this also covers the case in which the new bandwidth is
2071 * insufficient to cover the existing bandwidth deficit. (Forcing the
2072 * timer to remain active while there are any throttled entities.)
2073 */
2074 cfs_b->idle = 0;
58088ad0
PT
2075out_unlock:
2076 if (idle)
2077 cfs_b->timer_active = 0;
2078 raw_spin_unlock(&cfs_b->lock);
2079
2080 return idle;
2081}
d3d9dc33 2082
d8b4986d
PT
2083/* a cfs_rq won't donate quota below this amount */
2084static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
2085/* minimum remaining period time to redistribute slack quota */
2086static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
2087/* how long we wait to gather additional slack before distributing */
2088static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
2089
2090/* are we near the end of the current quota period? */
2091static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
2092{
2093 struct hrtimer *refresh_timer = &cfs_b->period_timer;
2094 u64 remaining;
2095
2096 /* if the call-back is running a quota refresh is already occurring */
2097 if (hrtimer_callback_running(refresh_timer))
2098 return 1;
2099
2100 /* is a quota refresh about to occur? */
2101 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
2102 if (remaining < min_expire)
2103 return 1;
2104
2105 return 0;
2106}
2107
2108static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
2109{
2110 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
2111
2112 /* if there's a quota refresh soon don't bother with slack */
2113 if (runtime_refresh_within(cfs_b, min_left))
2114 return;
2115
2116 start_bandwidth_timer(&cfs_b->slack_timer,
2117 ns_to_ktime(cfs_bandwidth_slack_period));
2118}
2119
2120/* we know any runtime found here is valid as update_curr() precedes return */
2121static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2122{
2123 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2124 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
2125
2126 if (slack_runtime <= 0)
2127 return;
2128
2129 raw_spin_lock(&cfs_b->lock);
2130 if (cfs_b->quota != RUNTIME_INF &&
2131 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
2132 cfs_b->runtime += slack_runtime;
2133
2134 /* we are under rq->lock, defer unthrottling using a timer */
2135 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
2136 !list_empty(&cfs_b->throttled_cfs_rq))
2137 start_cfs_slack_bandwidth(cfs_b);
2138 }
2139 raw_spin_unlock(&cfs_b->lock);
2140
2141 /* even if it's not valid for return we don't want to try again */
2142 cfs_rq->runtime_remaining -= slack_runtime;
2143}
2144
2145static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2146{
56f570e5
PT
2147 if (!cfs_bandwidth_used())
2148 return;
2149
fccfdc6f 2150 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
d8b4986d
PT
2151 return;
2152
2153 __return_cfs_rq_runtime(cfs_rq);
2154}
2155
2156/*
2157 * This is done with a timer (instead of inline with bandwidth return) since
2158 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
2159 */
2160static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
2161{
2162 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
2163 u64 expires;
2164
2165 /* confirm we're still not at a refresh boundary */
2166 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
2167 return;
2168
2169 raw_spin_lock(&cfs_b->lock);
2170 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
2171 runtime = cfs_b->runtime;
2172 cfs_b->runtime = 0;
2173 }
2174 expires = cfs_b->runtime_expires;
2175 raw_spin_unlock(&cfs_b->lock);
2176
2177 if (!runtime)
2178 return;
2179
2180 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
2181
2182 raw_spin_lock(&cfs_b->lock);
2183 if (expires == cfs_b->runtime_expires)
2184 cfs_b->runtime = runtime;
2185 raw_spin_unlock(&cfs_b->lock);
2186}
2187
d3d9dc33
PT
2188/*
2189 * When a group wakes up we want to make sure that its quota is not already
2190 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
2191 * runtime as update_curr() throttling can not not trigger until it's on-rq.
2192 */
2193static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
2194{
56f570e5
PT
2195 if (!cfs_bandwidth_used())
2196 return;
2197
d3d9dc33
PT
2198 /* an active group must be handled by the update_curr()->put() path */
2199 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
2200 return;
2201
2202 /* ensure the group is not already throttled */
2203 if (cfs_rq_throttled(cfs_rq))
2204 return;
2205
2206 /* update runtime allocation */
2207 account_cfs_rq_runtime(cfs_rq, 0);
2208 if (cfs_rq->runtime_remaining <= 0)
2209 throttle_cfs_rq(cfs_rq);
2210}
2211
2212/* conditionally throttle active cfs_rq's from put_prev_entity() */
2213static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2214{
56f570e5
PT
2215 if (!cfs_bandwidth_used())
2216 return;
2217
d3d9dc33
PT
2218 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
2219 return;
2220
2221 /*
2222 * it's possible for a throttled entity to be forced into a running
2223 * state (e.g. set_curr_task), in this case we're finished.
2224 */
2225 if (cfs_rq_throttled(cfs_rq))
2226 return;
2227
2228 throttle_cfs_rq(cfs_rq);
2229}
029632fb
PZ
2230
2231static inline u64 default_cfs_period(void);
2232static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun);
2233static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b);
2234
2235static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
2236{
2237 struct cfs_bandwidth *cfs_b =
2238 container_of(timer, struct cfs_bandwidth, slack_timer);
2239 do_sched_cfs_slack_timer(cfs_b);
2240
2241 return HRTIMER_NORESTART;
2242}
2243
2244static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
2245{
2246 struct cfs_bandwidth *cfs_b =
2247 container_of(timer, struct cfs_bandwidth, period_timer);
2248 ktime_t now;
2249 int overrun;
2250 int idle = 0;
2251
2252 for (;;) {
2253 now = hrtimer_cb_get_time(timer);
2254 overrun = hrtimer_forward(timer, now, cfs_b->period);
2255
2256 if (!overrun)
2257 break;
2258
2259 idle = do_sched_cfs_period_timer(cfs_b, overrun);
2260 }
2261
2262 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
2263}
2264
2265void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2266{
2267 raw_spin_lock_init(&cfs_b->lock);
2268 cfs_b->runtime = 0;
2269 cfs_b->quota = RUNTIME_INF;
2270 cfs_b->period = ns_to_ktime(default_cfs_period());
2271
2272 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
2273 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2274 cfs_b->period_timer.function = sched_cfs_period_timer;
2275 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2276 cfs_b->slack_timer.function = sched_cfs_slack_timer;
2277}
2278
2279static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2280{
2281 cfs_rq->runtime_enabled = 0;
2282 INIT_LIST_HEAD(&cfs_rq->throttled_list);
2283}
2284
2285/* requires cfs_b->lock, may release to reprogram timer */
2286void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2287{
2288 /*
2289 * The timer may be active because we're trying to set a new bandwidth
2290 * period or because we're racing with the tear-down path
2291 * (timer_active==0 becomes visible before the hrtimer call-back
2292 * terminates). In either case we ensure that it's re-programmed
2293 */
2294 while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
2295 raw_spin_unlock(&cfs_b->lock);
2296 /* ensure cfs_b->lock is available while we wait */
2297 hrtimer_cancel(&cfs_b->period_timer);
2298
2299 raw_spin_lock(&cfs_b->lock);
2300 /* if someone else restarted the timer then we're done */
2301 if (cfs_b->timer_active)
2302 return;
2303 }
2304
2305 cfs_b->timer_active = 1;
2306 start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
2307}
2308
2309static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2310{
2311 hrtimer_cancel(&cfs_b->period_timer);
2312 hrtimer_cancel(&cfs_b->slack_timer);
2313}
2314
a4c96ae3 2315static void unthrottle_offline_cfs_rqs(struct rq *rq)
029632fb
PZ
2316{
2317 struct cfs_rq *cfs_rq;
2318
2319 for_each_leaf_cfs_rq(rq, cfs_rq) {
2320 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2321
2322 if (!cfs_rq->runtime_enabled)
2323 continue;
2324
2325 /*
2326 * clock_task is not advancing so we just need to make sure
2327 * there's some valid quota amount
2328 */
2329 cfs_rq->runtime_remaining = cfs_b->quota;
2330 if (cfs_rq_throttled(cfs_rq))
2331 unthrottle_cfs_rq(cfs_rq);
2332 }
2333}
2334
2335#else /* CONFIG_CFS_BANDWIDTH */
6c16a6dc
PZ
2336static __always_inline
2337void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) {}
d3d9dc33
PT
2338static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2339static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
6c16a6dc 2340static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
85dac906
PT
2341
2342static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2343{
2344 return 0;
2345}
64660c86
PT
2346
2347static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2348{
2349 return 0;
2350}
2351
2352static inline int throttled_lb_pair(struct task_group *tg,
2353 int src_cpu, int dest_cpu)
2354{
2355 return 0;
2356}
029632fb
PZ
2357
2358void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2359
2360#ifdef CONFIG_FAIR_GROUP_SCHED
2361static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
ab84d31e
PT
2362#endif
2363
029632fb
PZ
2364static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2365{
2366 return NULL;
2367}
2368static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
a4c96ae3 2369static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
029632fb
PZ
2370
2371#endif /* CONFIG_CFS_BANDWIDTH */
2372
bf0f6f24
IM
2373/**************************************************
2374 * CFS operations on tasks:
2375 */
2376
8f4d37ec
PZ
2377#ifdef CONFIG_SCHED_HRTICK
2378static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
2379{
8f4d37ec
PZ
2380 struct sched_entity *se = &p->se;
2381 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2382
2383 WARN_ON(task_rq(p) != rq);
2384
b39e66ea 2385 if (cfs_rq->nr_running > 1) {
8f4d37ec
PZ
2386 u64 slice = sched_slice(cfs_rq, se);
2387 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
2388 s64 delta = slice - ran;
2389
2390 if (delta < 0) {
2391 if (rq->curr == p)
2392 resched_task(p);
2393 return;
2394 }
2395
2396 /*
2397 * Don't schedule slices shorter than 10000ns, that just
2398 * doesn't make sense. Rely on vruntime for fairness.
2399 */
31656519 2400 if (rq->curr != p)
157124c1 2401 delta = max_t(s64, 10000LL, delta);
8f4d37ec 2402
31656519 2403 hrtick_start(rq, delta);
8f4d37ec
PZ
2404 }
2405}
a4c2f00f
PZ
2406
2407/*
2408 * called from enqueue/dequeue and updates the hrtick when the
2409 * current task is from our class and nr_running is low enough
2410 * to matter.
2411 */
2412static void hrtick_update(struct rq *rq)
2413{
2414 struct task_struct *curr = rq->curr;
2415
b39e66ea 2416 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
a4c2f00f
PZ
2417 return;
2418
2419 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
2420 hrtick_start_fair(rq, curr);
2421}
55e12e5e 2422#else /* !CONFIG_SCHED_HRTICK */
8f4d37ec
PZ
2423static inline void
2424hrtick_start_fair(struct rq *rq, struct task_struct *p)
2425{
2426}
a4c2f00f
PZ
2427
2428static inline void hrtick_update(struct rq *rq)
2429{
2430}
8f4d37ec
PZ
2431#endif
2432
bf0f6f24
IM
2433/*
2434 * The enqueue_task method is called before nr_running is
2435 * increased. Here we update the fair scheduling stats and
2436 * then put the task into the rbtree:
2437 */
ea87bb78 2438static void
371fd7e7 2439enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bf0f6f24
IM
2440{
2441 struct cfs_rq *cfs_rq;
62fb1851 2442 struct sched_entity *se = &p->se;
bf0f6f24
IM
2443
2444 for_each_sched_entity(se) {
62fb1851 2445 if (se->on_rq)
bf0f6f24
IM
2446 break;
2447 cfs_rq = cfs_rq_of(se);
88ec22d3 2448 enqueue_entity(cfs_rq, se, flags);
85dac906
PT
2449
2450 /*
2451 * end evaluation on encountering a throttled cfs_rq
2452 *
2453 * note: in the case of encountering a throttled cfs_rq we will
2454 * post the final h_nr_running increment below.
2455 */
2456 if (cfs_rq_throttled(cfs_rq))
2457 break;
953bfcd1 2458 cfs_rq->h_nr_running++;
85dac906 2459
88ec22d3 2460 flags = ENQUEUE_WAKEUP;
bf0f6f24 2461 }
8f4d37ec 2462
2069dd75 2463 for_each_sched_entity(se) {
0f317143 2464 cfs_rq = cfs_rq_of(se);
953bfcd1 2465 cfs_rq->h_nr_running++;
2069dd75 2466
85dac906
PT
2467 if (cfs_rq_throttled(cfs_rq))
2468 break;
2469
d6b55918 2470 update_cfs_load(cfs_rq, 0);
6d5ab293 2471 update_cfs_shares(cfs_rq);
9ee474f5 2472 update_entity_load_avg(se, 1);
2069dd75
PZ
2473 }
2474
18bf2805
BS
2475 if (!se) {
2476 update_rq_runnable_avg(rq, rq->nr_running);
85dac906 2477 inc_nr_running(rq);
18bf2805 2478 }
a4c2f00f 2479 hrtick_update(rq);
bf0f6f24
IM
2480}
2481
2f36825b
VP
2482static void set_next_buddy(struct sched_entity *se);
2483
bf0f6f24
IM
2484/*
2485 * The dequeue_task method is called before nr_running is
2486 * decreased. We remove the task from the rbtree and
2487 * update the fair scheduling stats:
2488 */
371fd7e7 2489static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bf0f6f24
IM
2490{
2491 struct cfs_rq *cfs_rq;
62fb1851 2492 struct sched_entity *se = &p->se;
2f36825b 2493 int task_sleep = flags & DEQUEUE_SLEEP;
bf0f6f24
IM
2494
2495 for_each_sched_entity(se) {
2496 cfs_rq = cfs_rq_of(se);
371fd7e7 2497 dequeue_entity(cfs_rq, se, flags);
85dac906
PT
2498
2499 /*
2500 * end evaluation on encountering a throttled cfs_rq
2501 *
2502 * note: in the case of encountering a throttled cfs_rq we will
2503 * post the final h_nr_running decrement below.
2504 */
2505 if (cfs_rq_throttled(cfs_rq))
2506 break;
953bfcd1 2507 cfs_rq->h_nr_running--;
2069dd75 2508
bf0f6f24 2509 /* Don't dequeue parent if it has other entities besides us */
2f36825b
VP
2510 if (cfs_rq->load.weight) {
2511 /*
2512 * Bias pick_next to pick a task from this cfs_rq, as
2513 * p is sleeping when it is within its sched_slice.
2514 */
2515 if (task_sleep && parent_entity(se))
2516 set_next_buddy(parent_entity(se));
9598c82d
PT
2517
2518 /* avoid re-evaluating load for this entity */
2519 se = parent_entity(se);
bf0f6f24 2520 break;
2f36825b 2521 }
371fd7e7 2522 flags |= DEQUEUE_SLEEP;
bf0f6f24 2523 }
8f4d37ec 2524
2069dd75 2525 for_each_sched_entity(se) {
0f317143 2526 cfs_rq = cfs_rq_of(se);
953bfcd1 2527 cfs_rq->h_nr_running--;
2069dd75 2528
85dac906
PT
2529 if (cfs_rq_throttled(cfs_rq))
2530 break;
2531
d6b55918 2532 update_cfs_load(cfs_rq, 0);
6d5ab293 2533 update_cfs_shares(cfs_rq);
9ee474f5 2534 update_entity_load_avg(se, 1);
2069dd75
PZ
2535 }
2536
18bf2805 2537 if (!se) {
85dac906 2538 dec_nr_running(rq);
18bf2805
BS
2539 update_rq_runnable_avg(rq, 1);
2540 }
a4c2f00f 2541 hrtick_update(rq);
bf0f6f24
IM
2542}
2543
e7693a36 2544#ifdef CONFIG_SMP
029632fb
PZ
2545/* Used instead of source_load when we know the type == 0 */
2546static unsigned long weighted_cpuload(const int cpu)
2547{
2548 return cpu_rq(cpu)->load.weight;
2549}
2550
2551/*
2552 * Return a low guess at the load of a migration-source cpu weighted
2553 * according to the scheduling class and "nice" value.
2554 *
2555 * We want to under-estimate the load of migration sources, to
2556 * balance conservatively.
2557 */
2558static unsigned long source_load(int cpu, int type)
2559{
2560 struct rq *rq = cpu_rq(cpu);
2561 unsigned long total = weighted_cpuload(cpu);
2562
2563 if (type == 0 || !sched_feat(LB_BIAS))
2564 return total;
2565
2566 return min(rq->cpu_load[type-1], total);
2567}
2568
2569/*
2570 * Return a high guess at the load of a migration-target cpu weighted
2571 * according to the scheduling class and "nice" value.
2572 */
2573static unsigned long target_load(int cpu, int type)
2574{
2575 struct rq *rq = cpu_rq(cpu);
2576 unsigned long total = weighted_cpuload(cpu);
2577
2578 if (type == 0 || !sched_feat(LB_BIAS))
2579 return total;
2580
2581 return max(rq->cpu_load[type-1], total);
2582}
2583
2584static unsigned long power_of(int cpu)
2585{
2586 return cpu_rq(cpu)->cpu_power;
2587}
2588
2589static unsigned long cpu_avg_load_per_task(int cpu)
2590{
2591 struct rq *rq = cpu_rq(cpu);
2592 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
2593
2594 if (nr_running)
2595 return rq->load.weight / nr_running;
2596
2597 return 0;
2598}
2599
098fb9db 2600
74f8e4b2 2601static void task_waking_fair(struct task_struct *p)
88ec22d3
PZ
2602{
2603 struct sched_entity *se = &p->se;
2604 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3fe1698b
PZ
2605 u64 min_vruntime;
2606
2607#ifndef CONFIG_64BIT
2608 u64 min_vruntime_copy;
88ec22d3 2609
3fe1698b
PZ
2610 do {
2611 min_vruntime_copy = cfs_rq->min_vruntime_copy;
2612 smp_rmb();
2613 min_vruntime = cfs_rq->min_vruntime;
2614 } while (min_vruntime != min_vruntime_copy);
2615#else
2616 min_vruntime = cfs_rq->min_vruntime;
2617#endif
88ec22d3 2618
3fe1698b 2619 se->vruntime -= min_vruntime;
88ec22d3
PZ
2620}
2621
bb3469ac 2622#ifdef CONFIG_FAIR_GROUP_SCHED
f5bfb7d9
PZ
2623/*
2624 * effective_load() calculates the load change as seen from the root_task_group
2625 *
2626 * Adding load to a group doesn't make a group heavier, but can cause movement
2627 * of group shares between cpus. Assuming the shares were perfectly aligned one
2628 * can calculate the shift in shares.
cf5f0acf
PZ
2629 *
2630 * Calculate the effective load difference if @wl is added (subtracted) to @tg
2631 * on this @cpu and results in a total addition (subtraction) of @wg to the
2632 * total group weight.
2633 *
2634 * Given a runqueue weight distribution (rw_i) we can compute a shares
2635 * distribution (s_i) using:
2636 *
2637 * s_i = rw_i / \Sum rw_j (1)
2638 *
2639 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
2640 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
2641 * shares distribution (s_i):
2642 *
2643 * rw_i = { 2, 4, 1, 0 }
2644 * s_i = { 2/7, 4/7, 1/7, 0 }
2645 *
2646 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
2647 * task used to run on and the CPU the waker is running on), we need to
2648 * compute the effect of waking a task on either CPU and, in case of a sync
2649 * wakeup, compute the effect of the current task going to sleep.
2650 *
2651 * So for a change of @wl to the local @cpu with an overall group weight change
2652 * of @wl we can compute the new shares distribution (s'_i) using:
2653 *
2654 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
2655 *
2656 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
2657 * differences in waking a task to CPU 0. The additional task changes the
2658 * weight and shares distributions like:
2659 *
2660 * rw'_i = { 3, 4, 1, 0 }
2661 * s'_i = { 3/8, 4/8, 1/8, 0 }
2662 *
2663 * We can then compute the difference in effective weight by using:
2664 *
2665 * dw_i = S * (s'_i - s_i) (3)
2666 *
2667 * Where 'S' is the group weight as seen by its parent.
2668 *
2669 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
2670 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
2671 * 4/7) times the weight of the group.
f5bfb7d9 2672 */
2069dd75 2673static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
bb3469ac 2674{
4be9daaa 2675 struct sched_entity *se = tg->se[cpu];
f1d239f7 2676
cf5f0acf 2677 if (!tg->parent) /* the trivial, non-cgroup case */
f1d239f7
PZ
2678 return wl;
2679
4be9daaa 2680 for_each_sched_entity(se) {
cf5f0acf 2681 long w, W;
4be9daaa 2682
977dda7c 2683 tg = se->my_q->tg;
bb3469ac 2684
cf5f0acf
PZ
2685 /*
2686 * W = @wg + \Sum rw_j
2687 */
2688 W = wg + calc_tg_weight(tg, se->my_q);
4be9daaa 2689
cf5f0acf
PZ
2690 /*
2691 * w = rw_i + @wl
2692 */
2693 w = se->my_q->load.weight + wl;
940959e9 2694
cf5f0acf
PZ
2695 /*
2696 * wl = S * s'_i; see (2)
2697 */
2698 if (W > 0 && w < W)
2699 wl = (w * tg->shares) / W;
977dda7c
PT
2700 else
2701 wl = tg->shares;
940959e9 2702
cf5f0acf
PZ
2703 /*
2704 * Per the above, wl is the new se->load.weight value; since
2705 * those are clipped to [MIN_SHARES, ...) do so now. See
2706 * calc_cfs_shares().
2707 */
977dda7c
PT
2708 if (wl < MIN_SHARES)
2709 wl = MIN_SHARES;
cf5f0acf
PZ
2710
2711 /*
2712 * wl = dw_i = S * (s'_i - s_i); see (3)
2713 */
977dda7c 2714 wl -= se->load.weight;
cf5f0acf
PZ
2715
2716 /*
2717 * Recursively apply this logic to all parent groups to compute
2718 * the final effective load change on the root group. Since
2719 * only the @tg group gets extra weight, all parent groups can
2720 * only redistribute existing shares. @wl is the shift in shares
2721 * resulting from this level per the above.
2722 */
4be9daaa 2723 wg = 0;
4be9daaa 2724 }
bb3469ac 2725
4be9daaa 2726 return wl;
bb3469ac
PZ
2727}
2728#else
4be9daaa 2729
83378269
PZ
2730static inline unsigned long effective_load(struct task_group *tg, int cpu,
2731 unsigned long wl, unsigned long wg)
4be9daaa 2732{
83378269 2733 return wl;
bb3469ac 2734}
4be9daaa 2735
bb3469ac
PZ
2736#endif
2737
c88d5910 2738static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
098fb9db 2739{
e37b6a7b 2740 s64 this_load, load;
c88d5910 2741 int idx, this_cpu, prev_cpu;
098fb9db 2742 unsigned long tl_per_task;
c88d5910 2743 struct task_group *tg;
83378269 2744 unsigned long weight;
b3137bc8 2745 int balanced;
098fb9db 2746
c88d5910
PZ
2747 idx = sd->wake_idx;
2748 this_cpu = smp_processor_id();
2749 prev_cpu = task_cpu(p);
2750 load = source_load(prev_cpu, idx);
2751 this_load = target_load(this_cpu, idx);
098fb9db 2752
b3137bc8
MG
2753 /*
2754 * If sync wakeup then subtract the (maximum possible)
2755 * effect of the currently running task from the load
2756 * of the current CPU:
2757 */
83378269
PZ
2758 if (sync) {
2759 tg = task_group(current);
2760 weight = current->se.load.weight;
2761
c88d5910 2762 this_load += effective_load(tg, this_cpu, -weight, -weight);
83378269
PZ
2763 load += effective_load(tg, prev_cpu, 0, -weight);
2764 }
b3137bc8 2765
83378269
PZ
2766 tg = task_group(p);
2767 weight = p->se.load.weight;
b3137bc8 2768
71a29aa7
PZ
2769 /*
2770 * In low-load situations, where prev_cpu is idle and this_cpu is idle
c88d5910
PZ
2771 * due to the sync cause above having dropped this_load to 0, we'll
2772 * always have an imbalance, but there's really nothing you can do
2773 * about that, so that's good too.
71a29aa7
PZ
2774 *
2775 * Otherwise check if either cpus are near enough in load to allow this
2776 * task to be woken on this_cpu.
2777 */
e37b6a7b
PT
2778 if (this_load > 0) {
2779 s64 this_eff_load, prev_eff_load;
e51fd5e2
PZ
2780
2781 this_eff_load = 100;
2782 this_eff_load *= power_of(prev_cpu);
2783 this_eff_load *= this_load +
2784 effective_load(tg, this_cpu, weight, weight);
2785
2786 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
2787 prev_eff_load *= power_of(this_cpu);
2788 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
2789
2790 balanced = this_eff_load <= prev_eff_load;
2791 } else
2792 balanced = true;
b3137bc8 2793
098fb9db 2794 /*
4ae7d5ce
IM
2795 * If the currently running task will sleep within
2796 * a reasonable amount of time then attract this newly
2797 * woken task:
098fb9db 2798 */
2fb7635c
PZ
2799 if (sync && balanced)
2800 return 1;
098fb9db 2801
41acab88 2802 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
098fb9db
IM
2803 tl_per_task = cpu_avg_load_per_task(this_cpu);
2804
c88d5910
PZ
2805 if (balanced ||
2806 (this_load <= load &&
2807 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
098fb9db
IM
2808 /*
2809 * This domain has SD_WAKE_AFFINE and
2810 * p is cache cold in this domain, and
2811 * there is no bad imbalance.
2812 */
c88d5910 2813 schedstat_inc(sd, ttwu_move_affine);
41acab88 2814 schedstat_inc(p, se.statistics.nr_wakeups_affine);
098fb9db
IM
2815
2816 return 1;
2817 }
2818 return 0;
2819}
2820
aaee1203
PZ
2821/*
2822 * find_idlest_group finds and returns the least busy CPU group within the
2823 * domain.
2824 */
2825static struct sched_group *
78e7ed53 2826find_idlest_group(struct sched_domain *sd, struct task_struct *p,
5158f4e4 2827 int this_cpu, int load_idx)
e7693a36 2828{
b3bd3de6 2829 struct sched_group *idlest = NULL, *group = sd->groups;
aaee1203 2830 unsigned long min_load = ULONG_MAX, this_load = 0;
aaee1203 2831 int imbalance = 100 + (sd->imbalance_pct-100)/2;
e7693a36 2832
aaee1203
PZ
2833 do {
2834 unsigned long load, avg_load;
2835 int local_group;
2836 int i;
e7693a36 2837
aaee1203
PZ
2838 /* Skip over this group if it has no CPUs allowed */
2839 if (!cpumask_intersects(sched_group_cpus(group),
fa17b507 2840 tsk_cpus_allowed(p)))
aaee1203
PZ
2841 continue;
2842
2843 local_group = cpumask_test_cpu(this_cpu,
2844 sched_group_cpus(group));
2845
2846 /* Tally up the load of all CPUs in the group */
2847 avg_load = 0;
2848
2849 for_each_cpu(i, sched_group_cpus(group)) {
2850 /* Bias balancing toward cpus of our domain */
2851 if (local_group)
2852 load = source_load(i, load_idx);
2853 else
2854 load = target_load(i, load_idx);
2855
2856 avg_load += load;
2857 }
2858
2859 /* Adjust by relative CPU power of the group */
9c3f75cb 2860 avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
aaee1203
PZ
2861
2862 if (local_group) {
2863 this_load = avg_load;
aaee1203
PZ
2864 } else if (avg_load < min_load) {
2865 min_load = avg_load;
2866 idlest = group;
2867 }
2868 } while (group = group->next, group != sd->groups);
2869
2870 if (!idlest || 100*this_load < imbalance*min_load)
2871 return NULL;
2872 return idlest;
2873}
2874
2875/*
2876 * find_idlest_cpu - find the idlest cpu among the cpus in group.
2877 */
2878static int
2879find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
2880{
2881 unsigned long load, min_load = ULONG_MAX;
2882 int idlest = -1;
2883 int i;
2884
2885 /* Traverse only the allowed CPUs */
fa17b507 2886 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
aaee1203
PZ
2887 load = weighted_cpuload(i);
2888
2889 if (load < min_load || (load == min_load && i == this_cpu)) {
2890 min_load = load;
2891 idlest = i;
e7693a36
GH
2892 }
2893 }
2894
aaee1203
PZ
2895 return idlest;
2896}
e7693a36 2897
a50bde51
PZ
2898/*
2899 * Try and locate an idle CPU in the sched_domain.
2900 */
99bd5e2f 2901static int select_idle_sibling(struct task_struct *p, int target)
a50bde51
PZ
2902{
2903 int cpu = smp_processor_id();
2904 int prev_cpu = task_cpu(p);
99bd5e2f 2905 struct sched_domain *sd;
37407ea7
LT
2906 struct sched_group *sg;
2907 int i;
a50bde51
PZ
2908
2909 /*
99bd5e2f
SS
2910 * If the task is going to be woken-up on this cpu and if it is
2911 * already idle, then it is the right target.
a50bde51 2912 */
99bd5e2f
SS
2913 if (target == cpu && idle_cpu(cpu))
2914 return cpu;
2915
2916 /*
2917 * If the task is going to be woken-up on the cpu where it previously
2918 * ran and if it is currently idle, then it the right target.
2919 */
2920 if (target == prev_cpu && idle_cpu(prev_cpu))
fe3bcfe1 2921 return prev_cpu;
a50bde51
PZ
2922
2923 /*
37407ea7 2924 * Otherwise, iterate the domains and find an elegible idle cpu.
a50bde51 2925 */
518cd623 2926 sd = rcu_dereference(per_cpu(sd_llc, target));
970e1789 2927 for_each_lower_domain(sd) {
37407ea7
LT
2928 sg = sd->groups;
2929 do {
2930 if (!cpumask_intersects(sched_group_cpus(sg),
2931 tsk_cpus_allowed(p)))
2932 goto next;
2933
2934 for_each_cpu(i, sched_group_cpus(sg)) {
2935 if (!idle_cpu(i))
2936 goto next;
2937 }
970e1789 2938
37407ea7
LT
2939 target = cpumask_first_and(sched_group_cpus(sg),
2940 tsk_cpus_allowed(p));
2941 goto done;
2942next:
2943 sg = sg->next;
2944 } while (sg != sd->groups);
2945 }
2946done:
a50bde51
PZ
2947 return target;
2948}
2949
aaee1203
PZ
2950/*
2951 * sched_balance_self: balance the current task (running on cpu) in domains
2952 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
2953 * SD_BALANCE_EXEC.
2954 *
2955 * Balance, ie. select the least loaded group.
2956 *
2957 * Returns the target CPU number, or the same CPU if no balancing is needed.
2958 *
2959 * preempt must be disabled.
2960 */
0017d735 2961static int
7608dec2 2962select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
aaee1203 2963{
29cd8bae 2964 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
c88d5910
PZ
2965 int cpu = smp_processor_id();
2966 int prev_cpu = task_cpu(p);
2967 int new_cpu = cpu;
99bd5e2f 2968 int want_affine = 0;
5158f4e4 2969 int sync = wake_flags & WF_SYNC;
c88d5910 2970
29baa747 2971 if (p->nr_cpus_allowed == 1)
76854c7e
MG
2972 return prev_cpu;
2973
0763a660 2974 if (sd_flag & SD_BALANCE_WAKE) {
fa17b507 2975 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
c88d5910
PZ
2976 want_affine = 1;
2977 new_cpu = prev_cpu;
2978 }
aaee1203 2979
dce840a0 2980 rcu_read_lock();
aaee1203 2981 for_each_domain(cpu, tmp) {
e4f42888
PZ
2982 if (!(tmp->flags & SD_LOAD_BALANCE))
2983 continue;
2984
fe3bcfe1 2985 /*
99bd5e2f
SS
2986 * If both cpu and prev_cpu are part of this domain,
2987 * cpu is a valid SD_WAKE_AFFINE target.
fe3bcfe1 2988 */
99bd5e2f
SS
2989 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
2990 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
2991 affine_sd = tmp;
29cd8bae 2992 break;
f03542a7 2993 }
29cd8bae 2994
f03542a7 2995 if (tmp->flags & sd_flag)
29cd8bae
PZ
2996 sd = tmp;
2997 }
2998
8b911acd 2999 if (affine_sd) {
f03542a7 3000 if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
dce840a0
PZ
3001 prev_cpu = cpu;
3002
3003 new_cpu = select_idle_sibling(p, prev_cpu);
3004 goto unlock;
8b911acd 3005 }
e7693a36 3006
aaee1203 3007 while (sd) {
5158f4e4 3008 int load_idx = sd->forkexec_idx;
aaee1203 3009 struct sched_group *group;
c88d5910 3010 int weight;
098fb9db 3011
0763a660 3012 if (!(sd->flags & sd_flag)) {
aaee1203
PZ
3013 sd = sd->child;
3014 continue;
3015 }
098fb9db 3016
5158f4e4
PZ
3017 if (sd_flag & SD_BALANCE_WAKE)
3018 load_idx = sd->wake_idx;
098fb9db 3019
5158f4e4 3020 group = find_idlest_group(sd, p, cpu, load_idx);
aaee1203
PZ
3021 if (!group) {
3022 sd = sd->child;
3023 continue;
3024 }
4ae7d5ce 3025
d7c33c49 3026 new_cpu = find_idlest_cpu(group, p, cpu);
aaee1203
PZ
3027 if (new_cpu == -1 || new_cpu == cpu) {
3028 /* Now try balancing at a lower domain level of cpu */
3029 sd = sd->child;
3030 continue;
e7693a36 3031 }
aaee1203
PZ
3032
3033 /* Now try balancing at a lower domain level of new_cpu */
3034 cpu = new_cpu;
669c55e9 3035 weight = sd->span_weight;
aaee1203
PZ
3036 sd = NULL;
3037 for_each_domain(cpu, tmp) {
669c55e9 3038 if (weight <= tmp->span_weight)
aaee1203 3039 break;
0763a660 3040 if (tmp->flags & sd_flag)
aaee1203
PZ
3041 sd = tmp;
3042 }
3043 /* while loop will break here if sd == NULL */
e7693a36 3044 }
dce840a0
PZ
3045unlock:
3046 rcu_read_unlock();
e7693a36 3047
c88d5910 3048 return new_cpu;
e7693a36 3049}
0a74bef8
PT
3050
3051/*
3052 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
3053 * cfs_rq_of(p) references at time of call are still valid and identify the
3054 * previous cpu. However, the caller only guarantees p->pi_lock is held; no
3055 * other assumptions, including the state of rq->lock, should be made.
3056 */
3057static void
3058migrate_task_rq_fair(struct task_struct *p, int next_cpu)
3059{
3060}
e7693a36
GH
3061#endif /* CONFIG_SMP */
3062
e52fb7c0
PZ
3063static unsigned long
3064wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
0bbd3336
PZ
3065{
3066 unsigned long gran = sysctl_sched_wakeup_granularity;
3067
3068 /*
e52fb7c0
PZ
3069 * Since its curr running now, convert the gran from real-time
3070 * to virtual-time in his units.
13814d42
MG
3071 *
3072 * By using 'se' instead of 'curr' we penalize light tasks, so
3073 * they get preempted easier. That is, if 'se' < 'curr' then
3074 * the resulting gran will be larger, therefore penalizing the
3075 * lighter, if otoh 'se' > 'curr' then the resulting gran will
3076 * be smaller, again penalizing the lighter task.
3077 *
3078 * This is especially important for buddies when the leftmost
3079 * task is higher priority than the buddy.
0bbd3336 3080 */
f4ad9bd2 3081 return calc_delta_fair(gran, se);
0bbd3336
PZ
3082}
3083
464b7527
PZ
3084/*
3085 * Should 'se' preempt 'curr'.
3086 *
3087 * |s1
3088 * |s2
3089 * |s3
3090 * g
3091 * |<--->|c
3092 *
3093 * w(c, s1) = -1
3094 * w(c, s2) = 0
3095 * w(c, s3) = 1
3096 *
3097 */
3098static int
3099wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
3100{
3101 s64 gran, vdiff = curr->vruntime - se->vruntime;
3102
3103 if (vdiff <= 0)
3104 return -1;
3105
e52fb7c0 3106 gran = wakeup_gran(curr, se);
464b7527
PZ
3107 if (vdiff > gran)
3108 return 1;
3109
3110 return 0;
3111}
3112
02479099
PZ
3113static void set_last_buddy(struct sched_entity *se)
3114{
69c80f3e
VP
3115 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3116 return;
3117
3118 for_each_sched_entity(se)
3119 cfs_rq_of(se)->last = se;
02479099
PZ
3120}
3121
3122static void set_next_buddy(struct sched_entity *se)
3123{
69c80f3e
VP
3124 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3125 return;
3126
3127 for_each_sched_entity(se)
3128 cfs_rq_of(se)->next = se;
02479099
PZ
3129}
3130
ac53db59
RR
3131static void set_skip_buddy(struct sched_entity *se)
3132{
69c80f3e
VP
3133 for_each_sched_entity(se)
3134 cfs_rq_of(se)->skip = se;
ac53db59
RR
3135}
3136
bf0f6f24
IM
3137/*
3138 * Preempt the current task with a newly woken task if needed:
3139 */
5a9b86f6 3140static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
bf0f6f24
IM
3141{
3142 struct task_struct *curr = rq->curr;
8651a86c 3143 struct sched_entity *se = &curr->se, *pse = &p->se;
03e89e45 3144 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
f685ceac 3145 int scale = cfs_rq->nr_running >= sched_nr_latency;
2f36825b 3146 int next_buddy_marked = 0;
bf0f6f24 3147
4ae7d5ce
IM
3148 if (unlikely(se == pse))
3149 return;
3150
5238cdd3 3151 /*
ddcdf6e7 3152 * This is possible from callers such as move_task(), in which we
5238cdd3
PT
3153 * unconditionally check_prempt_curr() after an enqueue (which may have
3154 * lead to a throttle). This both saves work and prevents false
3155 * next-buddy nomination below.
3156 */
3157 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
3158 return;
3159
2f36825b 3160 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
3cb63d52 3161 set_next_buddy(pse);
2f36825b
VP
3162 next_buddy_marked = 1;
3163 }
57fdc26d 3164
aec0a514
BR
3165 /*
3166 * We can come here with TIF_NEED_RESCHED already set from new task
3167 * wake up path.
5238cdd3
PT
3168 *
3169 * Note: this also catches the edge-case of curr being in a throttled
3170 * group (e.g. via set_curr_task), since update_curr() (in the
3171 * enqueue of curr) will have resulted in resched being set. This
3172 * prevents us from potentially nominating it as a false LAST_BUDDY
3173 * below.
aec0a514
BR
3174 */
3175 if (test_tsk_need_resched(curr))
3176 return;
3177
a2f5c9ab
DH
3178 /* Idle tasks are by definition preempted by non-idle tasks. */
3179 if (unlikely(curr->policy == SCHED_IDLE) &&
3180 likely(p->policy != SCHED_IDLE))
3181 goto preempt;
3182
91c234b4 3183 /*
a2f5c9ab
DH
3184 * Batch and idle tasks do not preempt non-idle tasks (their preemption
3185 * is driven by the tick):
91c234b4 3186 */
6bc912b7 3187 if (unlikely(p->policy != SCHED_NORMAL))
91c234b4 3188 return;
bf0f6f24 3189
464b7527 3190 find_matching_se(&se, &pse);
9bbd7374 3191 update_curr(cfs_rq_of(se));
002f128b 3192 BUG_ON(!pse);
2f36825b
VP
3193 if (wakeup_preempt_entity(se, pse) == 1) {
3194 /*
3195 * Bias pick_next to pick the sched entity that is
3196 * triggering this preemption.
3197 */
3198 if (!next_buddy_marked)
3199 set_next_buddy(pse);
3a7e73a2 3200 goto preempt;
2f36825b 3201 }
464b7527 3202
3a7e73a2 3203 return;
a65ac745 3204
3a7e73a2
PZ
3205preempt:
3206 resched_task(curr);
3207 /*
3208 * Only set the backward buddy when the current task is still
3209 * on the rq. This can happen when a wakeup gets interleaved
3210 * with schedule on the ->pre_schedule() or idle_balance()
3211 * point, either of which can * drop the rq lock.
3212 *
3213 * Also, during early boot the idle thread is in the fair class,
3214 * for obvious reasons its a bad idea to schedule back to it.
3215 */
3216 if (unlikely(!se->on_rq || curr == rq->idle))
3217 return;
3218
3219 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
3220 set_last_buddy(se);
bf0f6f24
IM
3221}
3222
fb8d4724 3223static struct task_struct *pick_next_task_fair(struct rq *rq)
bf0f6f24 3224{
8f4d37ec 3225 struct task_struct *p;
bf0f6f24
IM
3226 struct cfs_rq *cfs_rq = &rq->cfs;
3227 struct sched_entity *se;
3228
36ace27e 3229 if (!cfs_rq->nr_running)
bf0f6f24
IM
3230 return NULL;
3231
3232 do {
9948f4b2 3233 se = pick_next_entity(cfs_rq);
f4b6755f 3234 set_next_entity(cfs_rq, se);
bf0f6f24
IM
3235 cfs_rq = group_cfs_rq(se);
3236 } while (cfs_rq);
3237
8f4d37ec 3238 p = task_of(se);
b39e66ea
MG
3239 if (hrtick_enabled(rq))
3240 hrtick_start_fair(rq, p);
8f4d37ec
PZ
3241
3242 return p;
bf0f6f24
IM
3243}
3244
3245/*
3246 * Account for a descheduled task:
3247 */
31ee529c 3248static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
bf0f6f24
IM
3249{
3250 struct sched_entity *se = &prev->se;
3251 struct cfs_rq *cfs_rq;
3252
3253 for_each_sched_entity(se) {
3254 cfs_rq = cfs_rq_of(se);
ab6cde26 3255 put_prev_entity(cfs_rq, se);
bf0f6f24
IM
3256 }
3257}
3258
ac53db59
RR
3259/*
3260 * sched_yield() is very simple
3261 *
3262 * The magic of dealing with the ->skip buddy is in pick_next_entity.
3263 */
3264static void yield_task_fair(struct rq *rq)
3265{
3266 struct task_struct *curr = rq->curr;
3267 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
3268 struct sched_entity *se = &curr->se;
3269
3270 /*
3271 * Are we the only task in the tree?
3272 */
3273 if (unlikely(rq->nr_running == 1))
3274 return;
3275
3276 clear_buddies(cfs_rq, se);
3277
3278 if (curr->policy != SCHED_BATCH) {
3279 update_rq_clock(rq);
3280 /*
3281 * Update run-time statistics of the 'current'.
3282 */
3283 update_curr(cfs_rq);
916671c0
MG
3284 /*
3285 * Tell update_rq_clock() that we've just updated,
3286 * so we don't do microscopic update in schedule()
3287 * and double the fastpath cost.
3288 */
3289 rq->skip_clock_update = 1;
ac53db59
RR
3290 }
3291
3292 set_skip_buddy(se);
3293}
3294
d95f4122
MG
3295static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
3296{
3297 struct sched_entity *se = &p->se;
3298
5238cdd3
PT
3299 /* throttled hierarchies are not runnable */
3300 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
d95f4122
MG
3301 return false;
3302
3303 /* Tell the scheduler that we'd really like pse to run next. */
3304 set_next_buddy(se);
3305
d95f4122
MG
3306 yield_task_fair(rq);
3307
3308 return true;
3309}
3310
681f3e68 3311#ifdef CONFIG_SMP
bf0f6f24
IM
3312/**************************************************
3313 * Fair scheduling class load-balancing methods:
3314 */
3315
ed387b78
HS
3316static unsigned long __read_mostly max_load_balance_interval = HZ/10;
3317
ddcdf6e7 3318#define LBF_ALL_PINNED 0x01
367456c7 3319#define LBF_NEED_BREAK 0x02
88b8dac0 3320#define LBF_SOME_PINNED 0x04
ddcdf6e7
PZ
3321
3322struct lb_env {
3323 struct sched_domain *sd;
3324
ddcdf6e7 3325 struct rq *src_rq;
85c1e7da 3326 int src_cpu;
ddcdf6e7
PZ
3327
3328 int dst_cpu;
3329 struct rq *dst_rq;
3330
88b8dac0
SV
3331 struct cpumask *dst_grpmask;
3332 int new_dst_cpu;
ddcdf6e7 3333 enum cpu_idle_type idle;
bd939f45 3334 long imbalance;
b9403130
MW
3335 /* The set of CPUs under consideration for load-balancing */
3336 struct cpumask *cpus;
3337
ddcdf6e7 3338 unsigned int flags;
367456c7
PZ
3339
3340 unsigned int loop;
3341 unsigned int loop_break;
3342 unsigned int loop_max;
ddcdf6e7
PZ
3343};
3344
1e3c88bd 3345/*
ddcdf6e7 3346 * move_task - move a task from one runqueue to another runqueue.
1e3c88bd
PZ
3347 * Both runqueues must be locked.
3348 */
ddcdf6e7 3349static void move_task(struct task_struct *p, struct lb_env *env)
1e3c88bd 3350{
ddcdf6e7
PZ
3351 deactivate_task(env->src_rq, p, 0);
3352 set_task_cpu(p, env->dst_cpu);
3353 activate_task(env->dst_rq, p, 0);
3354 check_preempt_curr(env->dst_rq, p, 0);
1e3c88bd
PZ
3355}
3356
029632fb
PZ
3357/*
3358 * Is this task likely cache-hot:
3359 */
3360static int
3361task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
3362{
3363 s64 delta;
3364
3365 if (p->sched_class != &fair_sched_class)
3366 return 0;
3367
3368 if (unlikely(p->policy == SCHED_IDLE))
3369 return 0;
3370
3371 /*
3372 * Buddy candidates are cache hot:
3373 */
3374 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
3375 (&p->se == cfs_rq_of(&p->se)->next ||
3376 &p->se == cfs_rq_of(&p->se)->last))
3377 return 1;
3378
3379 if (sysctl_sched_migration_cost == -1)
3380 return 1;
3381 if (sysctl_sched_migration_cost == 0)
3382 return 0;
3383
3384 delta = now - p->se.exec_start;
3385
3386 return delta < (s64)sysctl_sched_migration_cost;
3387}
3388
1e3c88bd
PZ
3389/*
3390 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
3391 */
3392static
8e45cb54 3393int can_migrate_task(struct task_struct *p, struct lb_env *env)
1e3c88bd
PZ
3394{
3395 int tsk_cache_hot = 0;
3396 /*
3397 * We do not migrate tasks that are:
3398 * 1) running (obviously), or
3399 * 2) cannot be migrated to this CPU due to cpus_allowed, or
3400 * 3) are cache-hot on their current CPU.
3401 */
ddcdf6e7 3402 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
88b8dac0
SV
3403 int new_dst_cpu;
3404
41acab88 3405 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
88b8dac0
SV
3406
3407 /*
3408 * Remember if this task can be migrated to any other cpu in
3409 * our sched_group. We may want to revisit it if we couldn't
3410 * meet load balance goals by pulling other tasks on src_cpu.
3411 *
3412 * Also avoid computing new_dst_cpu if we have already computed
3413 * one in current iteration.
3414 */
3415 if (!env->dst_grpmask || (env->flags & LBF_SOME_PINNED))
3416 return 0;
3417
3418 new_dst_cpu = cpumask_first_and(env->dst_grpmask,
3419 tsk_cpus_allowed(p));
3420 if (new_dst_cpu < nr_cpu_ids) {
3421 env->flags |= LBF_SOME_PINNED;
3422 env->new_dst_cpu = new_dst_cpu;
3423 }
1e3c88bd
PZ
3424 return 0;
3425 }
88b8dac0
SV
3426
3427 /* Record that we found atleast one task that could run on dst_cpu */
8e45cb54 3428 env->flags &= ~LBF_ALL_PINNED;
1e3c88bd 3429
ddcdf6e7 3430 if (task_running(env->src_rq, p)) {
41acab88 3431 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
1e3c88bd
PZ
3432 return 0;
3433 }
3434
3435 /*
3436 * Aggressive migration if:
3437 * 1) task is cache cold, or
3438 * 2) too many balance attempts have failed.
3439 */
3440
ddcdf6e7 3441 tsk_cache_hot = task_hot(p, env->src_rq->clock_task, env->sd);
1e3c88bd 3442 if (!tsk_cache_hot ||
8e45cb54 3443 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
1e3c88bd
PZ
3444#ifdef CONFIG_SCHEDSTATS
3445 if (tsk_cache_hot) {
8e45cb54 3446 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
41acab88 3447 schedstat_inc(p, se.statistics.nr_forced_migrations);
1e3c88bd
PZ
3448 }
3449#endif
3450 return 1;
3451 }
3452
3453 if (tsk_cache_hot) {
41acab88 3454 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
1e3c88bd
PZ
3455 return 0;
3456 }
3457 return 1;
3458}
3459
897c395f
PZ
3460/*
3461 * move_one_task tries to move exactly one task from busiest to this_rq, as
3462 * part of active balancing operations within "domain".
3463 * Returns 1 if successful and 0 otherwise.
3464 *
3465 * Called with both runqueues locked.
3466 */
8e45cb54 3467static int move_one_task(struct lb_env *env)
897c395f
PZ
3468{
3469 struct task_struct *p, *n;
897c395f 3470
367456c7
PZ
3471 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
3472 if (throttled_lb_pair(task_group(p), env->src_rq->cpu, env->dst_cpu))
3473 continue;
897c395f 3474
367456c7
PZ
3475 if (!can_migrate_task(p, env))
3476 continue;
897c395f 3477
367456c7
PZ
3478 move_task(p, env);
3479 /*
3480 * Right now, this is only the second place move_task()
3481 * is called, so we can safely collect move_task()
3482 * stats here rather than inside move_task().
3483 */
3484 schedstat_inc(env->sd, lb_gained[env->idle]);
3485 return 1;
897c395f 3486 }
897c395f
PZ
3487 return 0;
3488}
3489
367456c7
PZ
3490static unsigned long task_h_load(struct task_struct *p);
3491
eb95308e
PZ
3492static const unsigned int sched_nr_migrate_break = 32;
3493
5d6523eb 3494/*
bd939f45 3495 * move_tasks tries to move up to imbalance weighted load from busiest to
5d6523eb
PZ
3496 * this_rq, as part of a balancing operation within domain "sd".
3497 * Returns 1 if successful and 0 otherwise.
3498 *
3499 * Called with both runqueues locked.
3500 */
3501static int move_tasks(struct lb_env *env)
1e3c88bd 3502{
5d6523eb
PZ
3503 struct list_head *tasks = &env->src_rq->cfs_tasks;
3504 struct task_struct *p;
367456c7
PZ
3505 unsigned long load;
3506 int pulled = 0;
1e3c88bd 3507
bd939f45 3508 if (env->imbalance <= 0)
5d6523eb 3509 return 0;
1e3c88bd 3510
5d6523eb
PZ
3511 while (!list_empty(tasks)) {
3512 p = list_first_entry(tasks, struct task_struct, se.group_node);
1e3c88bd 3513
367456c7
PZ
3514 env->loop++;
3515 /* We've more or less seen every task there is, call it quits */
5d6523eb 3516 if (env->loop > env->loop_max)
367456c7 3517 break;
5d6523eb
PZ
3518
3519 /* take a breather every nr_migrate tasks */
367456c7 3520 if (env->loop > env->loop_break) {
eb95308e 3521 env->loop_break += sched_nr_migrate_break;
8e45cb54 3522 env->flags |= LBF_NEED_BREAK;
ee00e66f 3523 break;
a195f004 3524 }
1e3c88bd 3525
5d6523eb 3526 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
367456c7
PZ
3527 goto next;
3528
3529 load = task_h_load(p);
5d6523eb 3530
eb95308e 3531 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
367456c7
PZ
3532 goto next;
3533
bd939f45 3534 if ((load / 2) > env->imbalance)
367456c7 3535 goto next;
1e3c88bd 3536
367456c7
PZ
3537 if (!can_migrate_task(p, env))
3538 goto next;
1e3c88bd 3539
ddcdf6e7 3540 move_task(p, env);
ee00e66f 3541 pulled++;
bd939f45 3542 env->imbalance -= load;
1e3c88bd
PZ
3543
3544#ifdef CONFIG_PREEMPT
ee00e66f
PZ
3545 /*
3546 * NEWIDLE balancing is a source of latency, so preemptible
3547 * kernels will stop after the first task is pulled to minimize
3548 * the critical section.
3549 */
5d6523eb 3550 if (env->idle == CPU_NEWLY_IDLE)
ee00e66f 3551 break;
1e3c88bd
PZ
3552#endif
3553
ee00e66f
PZ
3554 /*
3555 * We only want to steal up to the prescribed amount of
3556 * weighted load.
3557 */
bd939f45 3558 if (env->imbalance <= 0)
ee00e66f 3559 break;
367456c7
PZ
3560
3561 continue;
3562next:
5d6523eb 3563 list_move_tail(&p->se.group_node, tasks);
1e3c88bd 3564 }
5d6523eb 3565
1e3c88bd 3566 /*
ddcdf6e7
PZ
3567 * Right now, this is one of only two places move_task() is called,
3568 * so we can safely collect move_task() stats here rather than
3569 * inside move_task().
1e3c88bd 3570 */
8e45cb54 3571 schedstat_add(env->sd, lb_gained[env->idle], pulled);
1e3c88bd 3572
5d6523eb 3573 return pulled;
1e3c88bd
PZ
3574}
3575
230059de 3576#ifdef CONFIG_FAIR_GROUP_SCHED
9e3081ca
PZ
3577/*
3578 * update tg->load_weight by folding this cpu's load_avg
3579 */
67e86250 3580static int update_shares_cpu(struct task_group *tg, int cpu)
9e3081ca
PZ
3581{
3582 struct cfs_rq *cfs_rq;
3583 unsigned long flags;
3584 struct rq *rq;
9e3081ca
PZ
3585
3586 if (!tg->se[cpu])
3587 return 0;
3588
3589 rq = cpu_rq(cpu);
3590 cfs_rq = tg->cfs_rq[cpu];
3591
3592 raw_spin_lock_irqsave(&rq->lock, flags);
3593
3594 update_rq_clock(rq);
d6b55918 3595 update_cfs_load(cfs_rq, 1);
9ee474f5 3596 update_cfs_rq_blocked_load(cfs_rq);
9e3081ca
PZ
3597
3598 /*
3599 * We need to update shares after updating tg->load_weight in
3600 * order to adjust the weight of groups with long running tasks.
3601 */
6d5ab293 3602 update_cfs_shares(cfs_rq);
9e3081ca
PZ
3603
3604 raw_spin_unlock_irqrestore(&rq->lock, flags);
3605
3606 return 0;
3607}
3608
3609static void update_shares(int cpu)
3610{
3611 struct cfs_rq *cfs_rq;
3612 struct rq *rq = cpu_rq(cpu);
3613
3614 rcu_read_lock();
9763b67f
PZ
3615 /*
3616 * Iterates the task_group tree in a bottom up fashion, see
3617 * list_add_leaf_cfs_rq() for details.
3618 */
64660c86
PT
3619 for_each_leaf_cfs_rq(rq, cfs_rq) {
3620 /* throttled entities do not contribute to load */
3621 if (throttled_hierarchy(cfs_rq))
3622 continue;
3623
67e86250 3624 update_shares_cpu(cfs_rq->tg, cpu);
64660c86 3625 }
9e3081ca
PZ
3626 rcu_read_unlock();
3627}
3628
9763b67f
PZ
3629/*
3630 * Compute the cpu's hierarchical load factor for each task group.
3631 * This needs to be done in a top-down fashion because the load of a child
3632 * group is a fraction of its parents load.
3633 */
3634static int tg_load_down(struct task_group *tg, void *data)
3635{
3636 unsigned long load;
3637 long cpu = (long)data;
3638
3639 if (!tg->parent) {
3640 load = cpu_rq(cpu)->load.weight;
3641 } else {
3642 load = tg->parent->cfs_rq[cpu]->h_load;
3643 load *= tg->se[cpu]->load.weight;
3644 load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
3645 }
3646
3647 tg->cfs_rq[cpu]->h_load = load;
3648
3649 return 0;
3650}
3651
3652static void update_h_load(long cpu)
3653{
a35b6466
PZ
3654 struct rq *rq = cpu_rq(cpu);
3655 unsigned long now = jiffies;
3656
3657 if (rq->h_load_throttle == now)
3658 return;
3659
3660 rq->h_load_throttle = now;
3661
367456c7 3662 rcu_read_lock();
9763b67f 3663 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
367456c7 3664 rcu_read_unlock();
9763b67f
PZ
3665}
3666
367456c7 3667static unsigned long task_h_load(struct task_struct *p)
230059de 3668{
367456c7
PZ
3669 struct cfs_rq *cfs_rq = task_cfs_rq(p);
3670 unsigned long load;
230059de 3671
367456c7
PZ
3672 load = p->se.load.weight;
3673 load = div_u64(load * cfs_rq->h_load, cfs_rq->load.weight + 1);
230059de 3674
367456c7 3675 return load;
230059de
PZ
3676}
3677#else
9e3081ca
PZ
3678static inline void update_shares(int cpu)
3679{
3680}
3681
367456c7 3682static inline void update_h_load(long cpu)
230059de 3683{
230059de 3684}
230059de 3685
367456c7 3686static unsigned long task_h_load(struct task_struct *p)
1e3c88bd 3687{
367456c7 3688 return p->se.load.weight;
1e3c88bd 3689}
230059de 3690#endif
1e3c88bd 3691
1e3c88bd
PZ
3692/********** Helpers for find_busiest_group ************************/
3693/*
3694 * sd_lb_stats - Structure to store the statistics of a sched_domain
3695 * during load balancing.
3696 */
3697struct sd_lb_stats {
3698 struct sched_group *busiest; /* Busiest group in this sd */
3699 struct sched_group *this; /* Local group in this sd */
3700 unsigned long total_load; /* Total load of all groups in sd */
3701 unsigned long total_pwr; /* Total power of all groups in sd */
3702 unsigned long avg_load; /* Average load across all groups in sd */
3703
3704 /** Statistics of this group */
3705 unsigned long this_load;
3706 unsigned long this_load_per_task;
3707 unsigned long this_nr_running;
fab47622 3708 unsigned long this_has_capacity;
aae6d3dd 3709 unsigned int this_idle_cpus;
1e3c88bd
PZ
3710
3711 /* Statistics of the busiest group */
aae6d3dd 3712 unsigned int busiest_idle_cpus;
1e3c88bd
PZ
3713 unsigned long max_load;
3714 unsigned long busiest_load_per_task;
3715 unsigned long busiest_nr_running;
dd5feea1 3716 unsigned long busiest_group_capacity;
fab47622 3717 unsigned long busiest_has_capacity;
aae6d3dd 3718 unsigned int busiest_group_weight;
1e3c88bd
PZ
3719
3720 int group_imb; /* Is there imbalance in this sd */
1e3c88bd
PZ
3721};
3722
3723/*
3724 * sg_lb_stats - stats of a sched_group required for load_balancing
3725 */
3726struct sg_lb_stats {
3727 unsigned long avg_load; /*Avg load across the CPUs of the group */
3728 unsigned long group_load; /* Total load over the CPUs of the group */
3729 unsigned long sum_nr_running; /* Nr tasks running in the group */
3730 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
3731 unsigned long group_capacity;
aae6d3dd
SS
3732 unsigned long idle_cpus;
3733 unsigned long group_weight;
1e3c88bd 3734 int group_imb; /* Is there an imbalance in the group ? */
fab47622 3735 int group_has_capacity; /* Is there extra capacity in the group? */
1e3c88bd
PZ
3736};
3737
1e3c88bd
PZ
3738/**
3739 * get_sd_load_idx - Obtain the load index for a given sched domain.
3740 * @sd: The sched_domain whose load_idx is to be obtained.
3741 * @idle: The Idle status of the CPU for whose sd load_icx is obtained.
3742 */
3743static inline int get_sd_load_idx(struct sched_domain *sd,
3744 enum cpu_idle_type idle)
3745{
3746 int load_idx;
3747
3748 switch (idle) {
3749 case CPU_NOT_IDLE:
3750 load_idx = sd->busy_idx;
3751 break;
3752
3753 case CPU_NEWLY_IDLE:
3754 load_idx = sd->newidle_idx;
3755 break;
3756 default:
3757 load_idx = sd->idle_idx;
3758 break;
3759 }
3760
3761 return load_idx;
3762}
3763
1e3c88bd
PZ
3764unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
3765{
1399fa78 3766 return SCHED_POWER_SCALE;
1e3c88bd
PZ
3767}
3768
3769unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
3770{
3771 return default_scale_freq_power(sd, cpu);
3772}
3773
3774unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
3775{
669c55e9 3776 unsigned long weight = sd->span_weight;
1e3c88bd
PZ
3777 unsigned long smt_gain = sd->smt_gain;
3778
3779 smt_gain /= weight;
3780
3781 return smt_gain;
3782}
3783
3784unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
3785{
3786 return default_scale_smt_power(sd, cpu);
3787}
3788
3789unsigned long scale_rt_power(int cpu)
3790{
3791 struct rq *rq = cpu_rq(cpu);
b654f7de 3792 u64 total, available, age_stamp, avg;
1e3c88bd 3793
b654f7de
PZ
3794 /*
3795 * Since we're reading these variables without serialization make sure
3796 * we read them once before doing sanity checks on them.
3797 */
3798 age_stamp = ACCESS_ONCE(rq->age_stamp);
3799 avg = ACCESS_ONCE(rq->rt_avg);
3800
3801 total = sched_avg_period() + (rq->clock - age_stamp);
aa483808 3802
b654f7de 3803 if (unlikely(total < avg)) {
aa483808
VP
3804 /* Ensures that power won't end up being negative */
3805 available = 0;
3806 } else {
b654f7de 3807 available = total - avg;
aa483808 3808 }
1e3c88bd 3809
1399fa78
NR
3810 if (unlikely((s64)total < SCHED_POWER_SCALE))
3811 total = SCHED_POWER_SCALE;
1e3c88bd 3812
1399fa78 3813 total >>= SCHED_POWER_SHIFT;
1e3c88bd
PZ
3814
3815 return div_u64(available, total);
3816}
3817
3818static void update_cpu_power(struct sched_domain *sd, int cpu)
3819{
669c55e9 3820 unsigned long weight = sd->span_weight;
1399fa78 3821 unsigned long power = SCHED_POWER_SCALE;
1e3c88bd
PZ
3822 struct sched_group *sdg = sd->groups;
3823
1e3c88bd
PZ
3824 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
3825 if (sched_feat(ARCH_POWER))
3826 power *= arch_scale_smt_power(sd, cpu);
3827 else
3828 power *= default_scale_smt_power(sd, cpu);
3829
1399fa78 3830 power >>= SCHED_POWER_SHIFT;
1e3c88bd
PZ
3831 }
3832
9c3f75cb 3833 sdg->sgp->power_orig = power;
9d5efe05
SV
3834
3835 if (sched_feat(ARCH_POWER))
3836 power *= arch_scale_freq_power(sd, cpu);
3837 else
3838 power *= default_scale_freq_power(sd, cpu);
3839
1399fa78 3840 power >>= SCHED_POWER_SHIFT;
9d5efe05 3841
1e3c88bd 3842 power *= scale_rt_power(cpu);
1399fa78 3843 power >>= SCHED_POWER_SHIFT;
1e3c88bd
PZ
3844
3845 if (!power)
3846 power = 1;
3847
e51fd5e2 3848 cpu_rq(cpu)->cpu_power = power;
9c3f75cb 3849 sdg->sgp->power = power;
1e3c88bd
PZ
3850}
3851
029632fb 3852void update_group_power(struct sched_domain *sd, int cpu)
1e3c88bd
PZ
3853{
3854 struct sched_domain *child = sd->child;
3855 struct sched_group *group, *sdg = sd->groups;
3856 unsigned long power;
4ec4412e
VG
3857 unsigned long interval;
3858
3859 interval = msecs_to_jiffies(sd->balance_interval);
3860 interval = clamp(interval, 1UL, max_load_balance_interval);
3861 sdg->sgp->next_update = jiffies + interval;
1e3c88bd
PZ
3862
3863 if (!child) {
3864 update_cpu_power(sd, cpu);
3865 return;
3866 }
3867
3868 power = 0;
3869
74a5ce20
PZ
3870 if (child->flags & SD_OVERLAP) {
3871 /*
3872 * SD_OVERLAP domains cannot assume that child groups
3873 * span the current group.
3874 */
3875
3876 for_each_cpu(cpu, sched_group_cpus(sdg))
3877 power += power_of(cpu);
3878 } else {
3879 /*
3880 * !SD_OVERLAP domains can assume that child groups
3881 * span the current group.
3882 */
3883
3884 group = child->groups;
3885 do {
3886 power += group->sgp->power;
3887 group = group->next;
3888 } while (group != child->groups);
3889 }
1e3c88bd 3890
c3decf0d 3891 sdg->sgp->power_orig = sdg->sgp->power = power;
1e3c88bd
PZ
3892}
3893
9d5efe05
SV
3894/*
3895 * Try and fix up capacity for tiny siblings, this is needed when
3896 * things like SD_ASYM_PACKING need f_b_g to select another sibling
3897 * which on its own isn't powerful enough.
3898 *
3899 * See update_sd_pick_busiest() and check_asym_packing().
3900 */
3901static inline int
3902fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
3903{
3904 /*
1399fa78 3905 * Only siblings can have significantly less than SCHED_POWER_SCALE
9d5efe05 3906 */
a6c75f2f 3907 if (!(sd->flags & SD_SHARE_CPUPOWER))
9d5efe05
SV
3908 return 0;
3909
3910 /*
3911 * If ~90% of the cpu_power is still there, we're good.
3912 */
9c3f75cb 3913 if (group->sgp->power * 32 > group->sgp->power_orig * 29)
9d5efe05
SV
3914 return 1;
3915
3916 return 0;
3917}
3918
1e3c88bd
PZ
3919/**
3920 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
cd96891d 3921 * @env: The load balancing environment.
1e3c88bd 3922 * @group: sched_group whose statistics are to be updated.
1e3c88bd 3923 * @load_idx: Load index of sched_domain of this_cpu for load calc.
1e3c88bd 3924 * @local_group: Does group contain this_cpu.
1e3c88bd
PZ
3925 * @balance: Should we balance.
3926 * @sgs: variable to hold the statistics for this group.
3927 */
bd939f45
PZ
3928static inline void update_sg_lb_stats(struct lb_env *env,
3929 struct sched_group *group, int load_idx,
b9403130 3930 int local_group, int *balance, struct sg_lb_stats *sgs)
1e3c88bd 3931{
e44bc5c5
PZ
3932 unsigned long nr_running, max_nr_running, min_nr_running;
3933 unsigned long load, max_cpu_load, min_cpu_load;
04f733b4 3934 unsigned int balance_cpu = -1, first_idle_cpu = 0;
dd5feea1 3935 unsigned long avg_load_per_task = 0;
bd939f45 3936 int i;
1e3c88bd 3937
871e35bc 3938 if (local_group)
c1174876 3939 balance_cpu = group_balance_cpu(group);
1e3c88bd
PZ
3940
3941 /* Tally up the load of all CPUs in the group */
1e3c88bd
PZ
3942 max_cpu_load = 0;
3943 min_cpu_load = ~0UL;
2582f0eb 3944 max_nr_running = 0;
e44bc5c5 3945 min_nr_running = ~0UL;
1e3c88bd 3946
b9403130 3947 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
1e3c88bd
PZ
3948 struct rq *rq = cpu_rq(i);
3949
e44bc5c5
PZ
3950 nr_running = rq->nr_running;
3951
1e3c88bd
PZ
3952 /* Bias balancing toward cpus of our domain */
3953 if (local_group) {
c1174876
PZ
3954 if (idle_cpu(i) && !first_idle_cpu &&
3955 cpumask_test_cpu(i, sched_group_mask(group))) {
04f733b4 3956 first_idle_cpu = 1;
1e3c88bd
PZ
3957 balance_cpu = i;
3958 }
04f733b4
PZ
3959
3960 load = target_load(i, load_idx);
1e3c88bd
PZ
3961 } else {
3962 load = source_load(i, load_idx);
e44bc5c5 3963 if (load > max_cpu_load)
1e3c88bd
PZ
3964 max_cpu_load = load;
3965 if (min_cpu_load > load)
3966 min_cpu_load = load;
e44bc5c5
PZ
3967
3968 if (nr_running > max_nr_running)
3969 max_nr_running = nr_running;
3970 if (min_nr_running > nr_running)
3971 min_nr_running = nr_running;
1e3c88bd
PZ
3972 }
3973
3974 sgs->group_load += load;
e44bc5c5 3975 sgs->sum_nr_running += nr_running;
1e3c88bd 3976 sgs->sum_weighted_load += weighted_cpuload(i);
aae6d3dd
SS
3977 if (idle_cpu(i))
3978 sgs->idle_cpus++;
1e3c88bd
PZ
3979 }
3980
3981 /*
3982 * First idle cpu or the first cpu(busiest) in this sched group
3983 * is eligible for doing load balancing at this and above
3984 * domains. In the newly idle case, we will allow all the cpu's
3985 * to do the newly idle load balance.
3986 */
4ec4412e 3987 if (local_group) {
bd939f45 3988 if (env->idle != CPU_NEWLY_IDLE) {
04f733b4 3989 if (balance_cpu != env->dst_cpu) {
4ec4412e
VG
3990 *balance = 0;
3991 return;
3992 }
bd939f45 3993 update_group_power(env->sd, env->dst_cpu);
4ec4412e 3994 } else if (time_after_eq(jiffies, group->sgp->next_update))
bd939f45 3995 update_group_power(env->sd, env->dst_cpu);
1e3c88bd
PZ
3996 }
3997
3998 /* Adjust by relative CPU power of the group */
9c3f75cb 3999 sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
1e3c88bd 4000
1e3c88bd
PZ
4001 /*
4002 * Consider the group unbalanced when the imbalance is larger
866ab43e 4003 * than the average weight of a task.
1e3c88bd
PZ
4004 *
4005 * APZ: with cgroup the avg task weight can vary wildly and
4006 * might not be a suitable number - should we keep a
4007 * normalized nr_running number somewhere that negates
4008 * the hierarchy?
4009 */
dd5feea1
SS
4010 if (sgs->sum_nr_running)
4011 avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
1e3c88bd 4012
e44bc5c5
PZ
4013 if ((max_cpu_load - min_cpu_load) >= avg_load_per_task &&
4014 (max_nr_running - min_nr_running) > 1)
1e3c88bd
PZ
4015 sgs->group_imb = 1;
4016
9c3f75cb 4017 sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
1399fa78 4018 SCHED_POWER_SCALE);
9d5efe05 4019 if (!sgs->group_capacity)
bd939f45 4020 sgs->group_capacity = fix_small_capacity(env->sd, group);
aae6d3dd 4021 sgs->group_weight = group->group_weight;
fab47622
NR
4022
4023 if (sgs->group_capacity > sgs->sum_nr_running)
4024 sgs->group_has_capacity = 1;
1e3c88bd
PZ
4025}
4026
532cb4c4
MN
4027/**
4028 * update_sd_pick_busiest - return 1 on busiest group
cd96891d 4029 * @env: The load balancing environment.
532cb4c4
MN
4030 * @sds: sched_domain statistics
4031 * @sg: sched_group candidate to be checked for being the busiest
b6b12294 4032 * @sgs: sched_group statistics
532cb4c4
MN
4033 *
4034 * Determine if @sg is a busier group than the previously selected
4035 * busiest group.
4036 */
bd939f45 4037static bool update_sd_pick_busiest(struct lb_env *env,
532cb4c4
MN
4038 struct sd_lb_stats *sds,
4039 struct sched_group *sg,
bd939f45 4040 struct sg_lb_stats *sgs)
532cb4c4
MN
4041{
4042 if (sgs->avg_load <= sds->max_load)
4043 return false;
4044
4045 if (sgs->sum_nr_running > sgs->group_capacity)
4046 return true;
4047
4048 if (sgs->group_imb)
4049 return true;
4050
4051 /*
4052 * ASYM_PACKING needs to move all the work to the lowest
4053 * numbered CPUs in the group, therefore mark all groups
4054 * higher than ourself as busy.
4055 */
bd939f45
PZ
4056 if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
4057 env->dst_cpu < group_first_cpu(sg)) {
532cb4c4
MN
4058 if (!sds->busiest)
4059 return true;
4060
4061 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
4062 return true;
4063 }
4064
4065 return false;
4066}
4067
1e3c88bd 4068/**
461819ac 4069 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
cd96891d 4070 * @env: The load balancing environment.
1e3c88bd
PZ
4071 * @balance: Should we balance.
4072 * @sds: variable to hold the statistics for this sched_domain.
4073 */
bd939f45 4074static inline void update_sd_lb_stats(struct lb_env *env,
b9403130 4075 int *balance, struct sd_lb_stats *sds)
1e3c88bd 4076{
bd939f45
PZ
4077 struct sched_domain *child = env->sd->child;
4078 struct sched_group *sg = env->sd->groups;
1e3c88bd
PZ
4079 struct sg_lb_stats sgs;
4080 int load_idx, prefer_sibling = 0;
4081
4082 if (child && child->flags & SD_PREFER_SIBLING)
4083 prefer_sibling = 1;
4084
bd939f45 4085 load_idx = get_sd_load_idx(env->sd, env->idle);
1e3c88bd
PZ
4086
4087 do {
4088 int local_group;
4089
bd939f45 4090 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
1e3c88bd 4091 memset(&sgs, 0, sizeof(sgs));
b9403130 4092 update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
1e3c88bd 4093
8f190fb3 4094 if (local_group && !(*balance))
1e3c88bd
PZ
4095 return;
4096
4097 sds->total_load += sgs.group_load;
9c3f75cb 4098 sds->total_pwr += sg->sgp->power;
1e3c88bd
PZ
4099
4100 /*
4101 * In case the child domain prefers tasks go to siblings
532cb4c4 4102 * first, lower the sg capacity to one so that we'll try
75dd321d
NR
4103 * and move all the excess tasks away. We lower the capacity
4104 * of a group only if the local group has the capacity to fit
4105 * these excess tasks, i.e. nr_running < group_capacity. The
4106 * extra check prevents the case where you always pull from the
4107 * heaviest group when it is already under-utilized (possible
4108 * with a large weight task outweighs the tasks on the system).
1e3c88bd 4109 */
75dd321d 4110 if (prefer_sibling && !local_group && sds->this_has_capacity)
1e3c88bd
PZ
4111 sgs.group_capacity = min(sgs.group_capacity, 1UL);
4112
4113 if (local_group) {
4114 sds->this_load = sgs.avg_load;
532cb4c4 4115 sds->this = sg;
1e3c88bd
PZ
4116 sds->this_nr_running = sgs.sum_nr_running;
4117 sds->this_load_per_task = sgs.sum_weighted_load;
fab47622 4118 sds->this_has_capacity = sgs.group_has_capacity;
aae6d3dd 4119 sds->this_idle_cpus = sgs.idle_cpus;
bd939f45 4120 } else if (update_sd_pick_busiest(env, sds, sg, &sgs)) {
1e3c88bd 4121 sds->max_load = sgs.avg_load;
532cb4c4 4122 sds->busiest = sg;
1e3c88bd 4123 sds->busiest_nr_running = sgs.sum_nr_running;
aae6d3dd 4124 sds->busiest_idle_cpus = sgs.idle_cpus;
dd5feea1 4125 sds->busiest_group_capacity = sgs.group_capacity;
1e3c88bd 4126 sds->busiest_load_per_task = sgs.sum_weighted_load;
fab47622 4127 sds->busiest_has_capacity = sgs.group_has_capacity;
aae6d3dd 4128 sds->busiest_group_weight = sgs.group_weight;
1e3c88bd
PZ
4129 sds->group_imb = sgs.group_imb;
4130 }
4131
532cb4c4 4132 sg = sg->next;
bd939f45 4133 } while (sg != env->sd->groups);
532cb4c4
MN
4134}
4135
532cb4c4
MN
4136/**
4137 * check_asym_packing - Check to see if the group is packed into the
4138 * sched doman.
4139 *
4140 * This is primarily intended to used at the sibling level. Some
4141 * cores like POWER7 prefer to use lower numbered SMT threads. In the
4142 * case of POWER7, it can move to lower SMT modes only when higher
4143 * threads are idle. When in lower SMT modes, the threads will
4144 * perform better since they share less core resources. Hence when we
4145 * have idle threads, we want them to be the higher ones.
4146 *
4147 * This packing function is run on idle threads. It checks to see if
4148 * the busiest CPU in this domain (core in the P7 case) has a higher
4149 * CPU number than the packing function is being run on. Here we are
4150 * assuming lower CPU number will be equivalent to lower a SMT thread
4151 * number.
4152 *
b6b12294
MN
4153 * Returns 1 when packing is required and a task should be moved to
4154 * this CPU. The amount of the imbalance is returned in *imbalance.
4155 *
cd96891d 4156 * @env: The load balancing environment.
532cb4c4 4157 * @sds: Statistics of the sched_domain which is to be packed
532cb4c4 4158 */
bd939f45 4159static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
532cb4c4
MN
4160{
4161 int busiest_cpu;
4162
bd939f45 4163 if (!(env->sd->flags & SD_ASYM_PACKING))
532cb4c4
MN
4164 return 0;
4165
4166 if (!sds->busiest)
4167 return 0;
4168
4169 busiest_cpu = group_first_cpu(sds->busiest);
bd939f45 4170 if (env->dst_cpu > busiest_cpu)
532cb4c4
MN
4171 return 0;
4172
bd939f45
PZ
4173 env->imbalance = DIV_ROUND_CLOSEST(
4174 sds->max_load * sds->busiest->sgp->power, SCHED_POWER_SCALE);
4175
532cb4c4 4176 return 1;
1e3c88bd
PZ
4177}
4178
4179/**
4180 * fix_small_imbalance - Calculate the minor imbalance that exists
4181 * amongst the groups of a sched_domain, during
4182 * load balancing.
cd96891d 4183 * @env: The load balancing environment.
1e3c88bd 4184 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
1e3c88bd 4185 */
bd939f45
PZ
4186static inline
4187void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
1e3c88bd
PZ
4188{
4189 unsigned long tmp, pwr_now = 0, pwr_move = 0;
4190 unsigned int imbn = 2;
dd5feea1 4191 unsigned long scaled_busy_load_per_task;
1e3c88bd
PZ
4192
4193 if (sds->this_nr_running) {
4194 sds->this_load_per_task /= sds->this_nr_running;
4195 if (sds->busiest_load_per_task >
4196 sds->this_load_per_task)
4197 imbn = 1;
bd939f45 4198 } else {
1e3c88bd 4199 sds->this_load_per_task =
bd939f45
PZ
4200 cpu_avg_load_per_task(env->dst_cpu);
4201 }
1e3c88bd 4202
dd5feea1 4203 scaled_busy_load_per_task = sds->busiest_load_per_task
1399fa78 4204 * SCHED_POWER_SCALE;
9c3f75cb 4205 scaled_busy_load_per_task /= sds->busiest->sgp->power;
dd5feea1
SS
4206
4207 if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
4208 (scaled_busy_load_per_task * imbn)) {
bd939f45 4209 env->imbalance = sds->busiest_load_per_task;
1e3c88bd
PZ
4210 return;
4211 }
4212
4213 /*
4214 * OK, we don't have enough imbalance to justify moving tasks,
4215 * however we may be able to increase total CPU power used by
4216 * moving them.
4217 */
4218
9c3f75cb 4219 pwr_now += sds->busiest->sgp->power *
1e3c88bd 4220 min(sds->busiest_load_per_task, sds->max_load);
9c3f75cb 4221 pwr_now += sds->this->sgp->power *
1e3c88bd 4222 min(sds->this_load_per_task, sds->this_load);
1399fa78 4223 pwr_now /= SCHED_POWER_SCALE;
1e3c88bd
PZ
4224
4225 /* Amount of load we'd subtract */
1399fa78 4226 tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
9c3f75cb 4227 sds->busiest->sgp->power;
1e3c88bd 4228 if (sds->max_load > tmp)
9c3f75cb 4229 pwr_move += sds->busiest->sgp->power *
1e3c88bd
PZ
4230 min(sds->busiest_load_per_task, sds->max_load - tmp);
4231
4232 /* Amount of load we'd add */
9c3f75cb 4233 if (sds->max_load * sds->busiest->sgp->power <
1399fa78 4234 sds->busiest_load_per_task * SCHED_POWER_SCALE)
9c3f75cb
PZ
4235 tmp = (sds->max_load * sds->busiest->sgp->power) /
4236 sds->this->sgp->power;
1e3c88bd 4237 else
1399fa78 4238 tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
9c3f75cb
PZ
4239 sds->this->sgp->power;
4240 pwr_move += sds->this->sgp->power *
1e3c88bd 4241 min(sds->this_load_per_task, sds->this_load + tmp);
1399fa78 4242 pwr_move /= SCHED_POWER_SCALE;
1e3c88bd
PZ
4243
4244 /* Move if we gain throughput */
4245 if (pwr_move > pwr_now)
bd939f45 4246 env->imbalance = sds->busiest_load_per_task;
1e3c88bd
PZ
4247}
4248
4249/**
4250 * calculate_imbalance - Calculate the amount of imbalance present within the
4251 * groups of a given sched_domain during load balance.
bd939f45 4252 * @env: load balance environment
1e3c88bd 4253 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
1e3c88bd 4254 */
bd939f45 4255static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
1e3c88bd 4256{
dd5feea1
SS
4257 unsigned long max_pull, load_above_capacity = ~0UL;
4258
4259 sds->busiest_load_per_task /= sds->busiest_nr_running;
4260 if (sds->group_imb) {
4261 sds->busiest_load_per_task =
4262 min(sds->busiest_load_per_task, sds->avg_load);
4263 }
4264
1e3c88bd
PZ
4265 /*
4266 * In the presence of smp nice balancing, certain scenarios can have
4267 * max load less than avg load(as we skip the groups at or below
4268 * its cpu_power, while calculating max_load..)
4269 */
4270 if (sds->max_load < sds->avg_load) {
bd939f45
PZ
4271 env->imbalance = 0;
4272 return fix_small_imbalance(env, sds);
1e3c88bd
PZ
4273 }
4274
dd5feea1
SS
4275 if (!sds->group_imb) {
4276 /*
4277 * Don't want to pull so many tasks that a group would go idle.
4278 */
4279 load_above_capacity = (sds->busiest_nr_running -
4280 sds->busiest_group_capacity);
4281
1399fa78 4282 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
dd5feea1 4283
9c3f75cb 4284 load_above_capacity /= sds->busiest->sgp->power;
dd5feea1
SS
4285 }
4286
4287 /*
4288 * We're trying to get all the cpus to the average_load, so we don't
4289 * want to push ourselves above the average load, nor do we wish to
4290 * reduce the max loaded cpu below the average load. At the same time,
4291 * we also don't want to reduce the group load below the group capacity
4292 * (so that we can implement power-savings policies etc). Thus we look
4293 * for the minimum possible imbalance.
4294 * Be careful of negative numbers as they'll appear as very large values
4295 * with unsigned longs.
4296 */
4297 max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
1e3c88bd
PZ
4298
4299 /* How much load to actually move to equalise the imbalance */
bd939f45 4300 env->imbalance = min(max_pull * sds->busiest->sgp->power,
9c3f75cb 4301 (sds->avg_load - sds->this_load) * sds->this->sgp->power)
1399fa78 4302 / SCHED_POWER_SCALE;
1e3c88bd
PZ
4303
4304 /*
4305 * if *imbalance is less than the average load per runnable task
25985edc 4306 * there is no guarantee that any tasks will be moved so we'll have
1e3c88bd
PZ
4307 * a think about bumping its value to force at least one task to be
4308 * moved
4309 */
bd939f45
PZ
4310 if (env->imbalance < sds->busiest_load_per_task)
4311 return fix_small_imbalance(env, sds);
1e3c88bd
PZ
4312
4313}
fab47622 4314
1e3c88bd
PZ
4315/******* find_busiest_group() helpers end here *********************/
4316
4317/**
4318 * find_busiest_group - Returns the busiest group within the sched_domain
4319 * if there is an imbalance. If there isn't an imbalance, and
4320 * the user has opted for power-savings, it returns a group whose
4321 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
4322 * such a group exists.
4323 *
4324 * Also calculates the amount of weighted load which should be moved
4325 * to restore balance.
4326 *
cd96891d 4327 * @env: The load balancing environment.
1e3c88bd
PZ
4328 * @balance: Pointer to a variable indicating if this_cpu
4329 * is the appropriate cpu to perform load balancing at this_level.
4330 *
4331 * Returns: - the busiest group if imbalance exists.
4332 * - If no imbalance and user has opted for power-savings balance,
4333 * return the least loaded group whose CPUs can be
4334 * put to idle by rebalancing its tasks onto our group.
4335 */
4336static struct sched_group *
b9403130 4337find_busiest_group(struct lb_env *env, int *balance)
1e3c88bd
PZ
4338{
4339 struct sd_lb_stats sds;
4340
4341 memset(&sds, 0, sizeof(sds));
4342
4343 /*
4344 * Compute the various statistics relavent for load balancing at
4345 * this level.
4346 */
b9403130 4347 update_sd_lb_stats(env, balance, &sds);
1e3c88bd 4348
cc57aa8f
PZ
4349 /*
4350 * this_cpu is not the appropriate cpu to perform load balancing at
4351 * this level.
1e3c88bd 4352 */
8f190fb3 4353 if (!(*balance))
1e3c88bd
PZ
4354 goto ret;
4355
bd939f45
PZ
4356 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
4357 check_asym_packing(env, &sds))
532cb4c4
MN
4358 return sds.busiest;
4359
cc57aa8f 4360 /* There is no busy sibling group to pull tasks from */
1e3c88bd
PZ
4361 if (!sds.busiest || sds.busiest_nr_running == 0)
4362 goto out_balanced;
4363
1399fa78 4364 sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
b0432d8f 4365
866ab43e
PZ
4366 /*
4367 * If the busiest group is imbalanced the below checks don't
4368 * work because they assumes all things are equal, which typically
4369 * isn't true due to cpus_allowed constraints and the like.
4370 */
4371 if (sds.group_imb)
4372 goto force_balance;
4373
cc57aa8f 4374 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
bd939f45 4375 if (env->idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
fab47622
NR
4376 !sds.busiest_has_capacity)
4377 goto force_balance;
4378
cc57aa8f
PZ
4379 /*
4380 * If the local group is more busy than the selected busiest group
4381 * don't try and pull any tasks.
4382 */
1e3c88bd
PZ
4383 if (sds.this_load >= sds.max_load)
4384 goto out_balanced;
4385
cc57aa8f
PZ
4386 /*
4387 * Don't pull any tasks if this group is already above the domain
4388 * average load.
4389 */
1e3c88bd
PZ
4390 if (sds.this_load >= sds.avg_load)
4391 goto out_balanced;
4392
bd939f45 4393 if (env->idle == CPU_IDLE) {
aae6d3dd
SS
4394 /*
4395 * This cpu is idle. If the busiest group load doesn't
4396 * have more tasks than the number of available cpu's and
4397 * there is no imbalance between this and busiest group
4398 * wrt to idle cpu's, it is balanced.
4399 */
c186fafe 4400 if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
aae6d3dd
SS
4401 sds.busiest_nr_running <= sds.busiest_group_weight)
4402 goto out_balanced;
c186fafe
PZ
4403 } else {
4404 /*
4405 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
4406 * imbalance_pct to be conservative.
4407 */
bd939f45 4408 if (100 * sds.max_load <= env->sd->imbalance_pct * sds.this_load)
c186fafe 4409 goto out_balanced;
aae6d3dd 4410 }
1e3c88bd 4411
fab47622 4412force_balance:
1e3c88bd 4413 /* Looks like there is an imbalance. Compute it */
bd939f45 4414 calculate_imbalance(env, &sds);
1e3c88bd
PZ
4415 return sds.busiest;
4416
4417out_balanced:
1e3c88bd 4418ret:
bd939f45 4419 env->imbalance = 0;
1e3c88bd
PZ
4420 return NULL;
4421}
4422
4423/*
4424 * find_busiest_queue - find the busiest runqueue among the cpus in group.
4425 */
bd939f45 4426static struct rq *find_busiest_queue(struct lb_env *env,
b9403130 4427 struct sched_group *group)
1e3c88bd
PZ
4428{
4429 struct rq *busiest = NULL, *rq;
4430 unsigned long max_load = 0;
4431 int i;
4432
4433 for_each_cpu(i, sched_group_cpus(group)) {
4434 unsigned long power = power_of(i);
1399fa78
NR
4435 unsigned long capacity = DIV_ROUND_CLOSEST(power,
4436 SCHED_POWER_SCALE);
1e3c88bd
PZ
4437 unsigned long wl;
4438
9d5efe05 4439 if (!capacity)
bd939f45 4440 capacity = fix_small_capacity(env->sd, group);
9d5efe05 4441
b9403130 4442 if (!cpumask_test_cpu(i, env->cpus))
1e3c88bd
PZ
4443 continue;
4444
4445 rq = cpu_rq(i);
6e40f5bb 4446 wl = weighted_cpuload(i);
1e3c88bd 4447
6e40f5bb
TG
4448 /*
4449 * When comparing with imbalance, use weighted_cpuload()
4450 * which is not scaled with the cpu power.
4451 */
bd939f45 4452 if (capacity && rq->nr_running == 1 && wl > env->imbalance)
1e3c88bd
PZ
4453 continue;
4454
6e40f5bb
TG
4455 /*
4456 * For the load comparisons with the other cpu's, consider
4457 * the weighted_cpuload() scaled with the cpu power, so that
4458 * the load can be moved away from the cpu that is potentially
4459 * running at a lower capacity.
4460 */
1399fa78 4461 wl = (wl * SCHED_POWER_SCALE) / power;
6e40f5bb 4462
1e3c88bd
PZ
4463 if (wl > max_load) {
4464 max_load = wl;
4465 busiest = rq;
4466 }
4467 }
4468
4469 return busiest;
4470}
4471
4472/*
4473 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
4474 * so long as it is large enough.
4475 */
4476#define MAX_PINNED_INTERVAL 512
4477
4478/* Working cpumask for load_balance and load_balance_newidle. */
029632fb 4479DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
1e3c88bd 4480
bd939f45 4481static int need_active_balance(struct lb_env *env)
1af3ed3d 4482{
bd939f45
PZ
4483 struct sched_domain *sd = env->sd;
4484
4485 if (env->idle == CPU_NEWLY_IDLE) {
532cb4c4
MN
4486
4487 /*
4488 * ASYM_PACKING needs to force migrate tasks from busy but
4489 * higher numbered CPUs in order to pack all tasks in the
4490 * lowest numbered CPUs.
4491 */
bd939f45 4492 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
532cb4c4 4493 return 1;
1af3ed3d
PZ
4494 }
4495
4496 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
4497}
4498
969c7921
TH
4499static int active_load_balance_cpu_stop(void *data);
4500
1e3c88bd
PZ
4501/*
4502 * Check this_cpu to ensure it is balanced within domain. Attempt to move
4503 * tasks if there is an imbalance.
4504 */
4505static int load_balance(int this_cpu, struct rq *this_rq,
4506 struct sched_domain *sd, enum cpu_idle_type idle,
4507 int *balance)
4508{
88b8dac0
SV
4509 int ld_moved, cur_ld_moved, active_balance = 0;
4510 int lb_iterations, max_lb_iterations;
1e3c88bd 4511 struct sched_group *group;
1e3c88bd
PZ
4512 struct rq *busiest;
4513 unsigned long flags;
4514 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
4515
8e45cb54
PZ
4516 struct lb_env env = {
4517 .sd = sd,
ddcdf6e7
PZ
4518 .dst_cpu = this_cpu,
4519 .dst_rq = this_rq,
88b8dac0 4520 .dst_grpmask = sched_group_cpus(sd->groups),
8e45cb54 4521 .idle = idle,
eb95308e 4522 .loop_break = sched_nr_migrate_break,
b9403130 4523 .cpus = cpus,
8e45cb54
PZ
4524 };
4525
1e3c88bd 4526 cpumask_copy(cpus, cpu_active_mask);
88b8dac0 4527 max_lb_iterations = cpumask_weight(env.dst_grpmask);
1e3c88bd 4528
1e3c88bd
PZ
4529 schedstat_inc(sd, lb_count[idle]);
4530
4531redo:
b9403130 4532 group = find_busiest_group(&env, balance);
1e3c88bd
PZ
4533
4534 if (*balance == 0)
4535 goto out_balanced;
4536
4537 if (!group) {
4538 schedstat_inc(sd, lb_nobusyg[idle]);
4539 goto out_balanced;
4540 }
4541
b9403130 4542 busiest = find_busiest_queue(&env, group);
1e3c88bd
PZ
4543 if (!busiest) {
4544 schedstat_inc(sd, lb_nobusyq[idle]);
4545 goto out_balanced;
4546 }
4547
78feefc5 4548 BUG_ON(busiest == env.dst_rq);
1e3c88bd 4549
bd939f45 4550 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
1e3c88bd
PZ
4551
4552 ld_moved = 0;
88b8dac0 4553 lb_iterations = 1;
1e3c88bd
PZ
4554 if (busiest->nr_running > 1) {
4555 /*
4556 * Attempt to move tasks. If find_busiest_group has found
4557 * an imbalance but busiest->nr_running <= 1, the group is
4558 * still unbalanced. ld_moved simply stays zero, so it is
4559 * correctly treated as an imbalance.
4560 */
8e45cb54 4561 env.flags |= LBF_ALL_PINNED;
c82513e5
PZ
4562 env.src_cpu = busiest->cpu;
4563 env.src_rq = busiest;
4564 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
8e45cb54 4565
a35b6466 4566 update_h_load(env.src_cpu);
5d6523eb 4567more_balance:
1e3c88bd 4568 local_irq_save(flags);
78feefc5 4569 double_rq_lock(env.dst_rq, busiest);
88b8dac0
SV
4570
4571 /*
4572 * cur_ld_moved - load moved in current iteration
4573 * ld_moved - cumulative load moved across iterations
4574 */
4575 cur_ld_moved = move_tasks(&env);
4576 ld_moved += cur_ld_moved;
78feefc5 4577 double_rq_unlock(env.dst_rq, busiest);
1e3c88bd
PZ
4578 local_irq_restore(flags);
4579
5d6523eb
PZ
4580 if (env.flags & LBF_NEED_BREAK) {
4581 env.flags &= ~LBF_NEED_BREAK;
4582 goto more_balance;
4583 }
4584
1e3c88bd
PZ
4585 /*
4586 * some other cpu did the load balance for us.
4587 */
88b8dac0
SV
4588 if (cur_ld_moved && env.dst_cpu != smp_processor_id())
4589 resched_cpu(env.dst_cpu);
4590
4591 /*
4592 * Revisit (affine) tasks on src_cpu that couldn't be moved to
4593 * us and move them to an alternate dst_cpu in our sched_group
4594 * where they can run. The upper limit on how many times we
4595 * iterate on same src_cpu is dependent on number of cpus in our
4596 * sched_group.
4597 *
4598 * This changes load balance semantics a bit on who can move
4599 * load to a given_cpu. In addition to the given_cpu itself
4600 * (or a ilb_cpu acting on its behalf where given_cpu is
4601 * nohz-idle), we now have balance_cpu in a position to move
4602 * load to given_cpu. In rare situations, this may cause
4603 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
4604 * _independently_ and at _same_ time to move some load to
4605 * given_cpu) causing exceess load to be moved to given_cpu.
4606 * This however should not happen so much in practice and
4607 * moreover subsequent load balance cycles should correct the
4608 * excess load moved.
4609 */
4610 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0 &&
4611 lb_iterations++ < max_lb_iterations) {
4612
78feefc5 4613 env.dst_rq = cpu_rq(env.new_dst_cpu);
88b8dac0
SV
4614 env.dst_cpu = env.new_dst_cpu;
4615 env.flags &= ~LBF_SOME_PINNED;
4616 env.loop = 0;
4617 env.loop_break = sched_nr_migrate_break;
4618 /*
4619 * Go back to "more_balance" rather than "redo" since we
4620 * need to continue with same src_cpu.
4621 */
4622 goto more_balance;
4623 }
1e3c88bd
PZ
4624
4625 /* All tasks on this runqueue were pinned by CPU affinity */
8e45cb54 4626 if (unlikely(env.flags & LBF_ALL_PINNED)) {
1e3c88bd 4627 cpumask_clear_cpu(cpu_of(busiest), cpus);
bbf18b19
PN
4628 if (!cpumask_empty(cpus)) {
4629 env.loop = 0;
4630 env.loop_break = sched_nr_migrate_break;
1e3c88bd 4631 goto redo;
bbf18b19 4632 }
1e3c88bd
PZ
4633 goto out_balanced;
4634 }
4635 }
4636
4637 if (!ld_moved) {
4638 schedstat_inc(sd, lb_failed[idle]);
58b26c4c
VP
4639 /*
4640 * Increment the failure counter only on periodic balance.
4641 * We do not want newidle balance, which can be very
4642 * frequent, pollute the failure counter causing
4643 * excessive cache_hot migrations and active balances.
4644 */
4645 if (idle != CPU_NEWLY_IDLE)
4646 sd->nr_balance_failed++;
1e3c88bd 4647
bd939f45 4648 if (need_active_balance(&env)) {
1e3c88bd
PZ
4649 raw_spin_lock_irqsave(&busiest->lock, flags);
4650
969c7921
TH
4651 /* don't kick the active_load_balance_cpu_stop,
4652 * if the curr task on busiest cpu can't be
4653 * moved to this_cpu
1e3c88bd
PZ
4654 */
4655 if (!cpumask_test_cpu(this_cpu,
fa17b507 4656 tsk_cpus_allowed(busiest->curr))) {
1e3c88bd
PZ
4657 raw_spin_unlock_irqrestore(&busiest->lock,
4658 flags);
8e45cb54 4659 env.flags |= LBF_ALL_PINNED;
1e3c88bd
PZ
4660 goto out_one_pinned;
4661 }
4662
969c7921
TH
4663 /*
4664 * ->active_balance synchronizes accesses to
4665 * ->active_balance_work. Once set, it's cleared
4666 * only after active load balance is finished.
4667 */
1e3c88bd
PZ
4668 if (!busiest->active_balance) {
4669 busiest->active_balance = 1;
4670 busiest->push_cpu = this_cpu;
4671 active_balance = 1;
4672 }
4673 raw_spin_unlock_irqrestore(&busiest->lock, flags);
969c7921 4674
bd939f45 4675 if (active_balance) {
969c7921
TH
4676 stop_one_cpu_nowait(cpu_of(busiest),
4677 active_load_balance_cpu_stop, busiest,
4678 &busiest->active_balance_work);
bd939f45 4679 }
1e3c88bd
PZ
4680
4681 /*
4682 * We've kicked active balancing, reset the failure
4683 * counter.
4684 */
4685 sd->nr_balance_failed = sd->cache_nice_tries+1;
4686 }
4687 } else
4688 sd->nr_balance_failed = 0;
4689
4690 if (likely(!active_balance)) {
4691 /* We were unbalanced, so reset the balancing interval */
4692 sd->balance_interval = sd->min_interval;
4693 } else {
4694 /*
4695 * If we've begun active balancing, start to back off. This
4696 * case may not be covered by the all_pinned logic if there
4697 * is only 1 task on the busy runqueue (because we don't call
4698 * move_tasks).
4699 */
4700 if (sd->balance_interval < sd->max_interval)
4701 sd->balance_interval *= 2;
4702 }
4703
1e3c88bd
PZ
4704 goto out;
4705
4706out_balanced:
4707 schedstat_inc(sd, lb_balanced[idle]);
4708
4709 sd->nr_balance_failed = 0;
4710
4711out_one_pinned:
4712 /* tune up the balancing interval */
8e45cb54 4713 if (((env.flags & LBF_ALL_PINNED) &&
5b54b56b 4714 sd->balance_interval < MAX_PINNED_INTERVAL) ||
1e3c88bd
PZ
4715 (sd->balance_interval < sd->max_interval))
4716 sd->balance_interval *= 2;
4717
46e49b38 4718 ld_moved = 0;
1e3c88bd 4719out:
1e3c88bd
PZ
4720 return ld_moved;
4721}
4722
1e3c88bd
PZ
4723/*
4724 * idle_balance is called by schedule() if this_cpu is about to become
4725 * idle. Attempts to pull tasks from other CPUs.
4726 */
029632fb 4727void idle_balance(int this_cpu, struct rq *this_rq)
1e3c88bd
PZ
4728{
4729 struct sched_domain *sd;
4730 int pulled_task = 0;
4731 unsigned long next_balance = jiffies + HZ;
4732
4733 this_rq->idle_stamp = this_rq->clock;
4734
4735 if (this_rq->avg_idle < sysctl_sched_migration_cost)
4736 return;
4737
18bf2805
BS
4738 update_rq_runnable_avg(this_rq, 1);
4739
f492e12e
PZ
4740 /*
4741 * Drop the rq->lock, but keep IRQ/preempt disabled.
4742 */
4743 raw_spin_unlock(&this_rq->lock);
4744
c66eaf61 4745 update_shares(this_cpu);
dce840a0 4746 rcu_read_lock();
1e3c88bd
PZ
4747 for_each_domain(this_cpu, sd) {
4748 unsigned long interval;
f492e12e 4749 int balance = 1;
1e3c88bd
PZ
4750
4751 if (!(sd->flags & SD_LOAD_BALANCE))
4752 continue;
4753
f492e12e 4754 if (sd->flags & SD_BALANCE_NEWIDLE) {
1e3c88bd 4755 /* If we've pulled tasks over stop searching: */
f492e12e
PZ
4756 pulled_task = load_balance(this_cpu, this_rq,
4757 sd, CPU_NEWLY_IDLE, &balance);
4758 }
1e3c88bd
PZ
4759
4760 interval = msecs_to_jiffies(sd->balance_interval);
4761 if (time_after(next_balance, sd->last_balance + interval))
4762 next_balance = sd->last_balance + interval;
d5ad140b
NR
4763 if (pulled_task) {
4764 this_rq->idle_stamp = 0;
1e3c88bd 4765 break;
d5ad140b 4766 }
1e3c88bd 4767 }
dce840a0 4768 rcu_read_unlock();
f492e12e
PZ
4769
4770 raw_spin_lock(&this_rq->lock);
4771
1e3c88bd
PZ
4772 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
4773 /*
4774 * We are going idle. next_balance may be set based on
4775 * a busy processor. So reset next_balance.
4776 */
4777 this_rq->next_balance = next_balance;
4778 }
4779}
4780
4781/*
969c7921
TH
4782 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
4783 * running tasks off the busiest CPU onto idle CPUs. It requires at
4784 * least 1 task to be running on each physical CPU where possible, and
4785 * avoids physical / logical imbalances.
1e3c88bd 4786 */
969c7921 4787static int active_load_balance_cpu_stop(void *data)
1e3c88bd 4788{
969c7921
TH
4789 struct rq *busiest_rq = data;
4790 int busiest_cpu = cpu_of(busiest_rq);
1e3c88bd 4791 int target_cpu = busiest_rq->push_cpu;
969c7921 4792 struct rq *target_rq = cpu_rq(target_cpu);
1e3c88bd 4793 struct sched_domain *sd;
969c7921
TH
4794
4795 raw_spin_lock_irq(&busiest_rq->lock);
4796
4797 /* make sure the requested cpu hasn't gone down in the meantime */
4798 if (unlikely(busiest_cpu != smp_processor_id() ||
4799 !busiest_rq->active_balance))
4800 goto out_unlock;
1e3c88bd
PZ
4801
4802 /* Is there any task to move? */
4803 if (busiest_rq->nr_running <= 1)
969c7921 4804 goto out_unlock;
1e3c88bd
PZ
4805
4806 /*
4807 * This condition is "impossible", if it occurs
4808 * we need to fix it. Originally reported by
4809 * Bjorn Helgaas on a 128-cpu setup.
4810 */
4811 BUG_ON(busiest_rq == target_rq);
4812
4813 /* move a task from busiest_rq to target_rq */
4814 double_lock_balance(busiest_rq, target_rq);
1e3c88bd
PZ
4815
4816 /* Search for an sd spanning us and the target CPU. */
dce840a0 4817 rcu_read_lock();
1e3c88bd
PZ
4818 for_each_domain(target_cpu, sd) {
4819 if ((sd->flags & SD_LOAD_BALANCE) &&
4820 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
4821 break;
4822 }
4823
4824 if (likely(sd)) {
8e45cb54
PZ
4825 struct lb_env env = {
4826 .sd = sd,
ddcdf6e7
PZ
4827 .dst_cpu = target_cpu,
4828 .dst_rq = target_rq,
4829 .src_cpu = busiest_rq->cpu,
4830 .src_rq = busiest_rq,
8e45cb54
PZ
4831 .idle = CPU_IDLE,
4832 };
4833
1e3c88bd
PZ
4834 schedstat_inc(sd, alb_count);
4835
8e45cb54 4836 if (move_one_task(&env))
1e3c88bd
PZ
4837 schedstat_inc(sd, alb_pushed);
4838 else
4839 schedstat_inc(sd, alb_failed);
4840 }
dce840a0 4841 rcu_read_unlock();
1e3c88bd 4842 double_unlock_balance(busiest_rq, target_rq);
969c7921
TH
4843out_unlock:
4844 busiest_rq->active_balance = 0;
4845 raw_spin_unlock_irq(&busiest_rq->lock);
4846 return 0;
1e3c88bd
PZ
4847}
4848
4849#ifdef CONFIG_NO_HZ
83cd4fe2
VP
4850/*
4851 * idle load balancing details
83cd4fe2
VP
4852 * - When one of the busy CPUs notice that there may be an idle rebalancing
4853 * needed, they will kick the idle load balancer, which then does idle
4854 * load balancing for all the idle CPUs.
4855 */
1e3c88bd 4856static struct {
83cd4fe2 4857 cpumask_var_t idle_cpus_mask;
0b005cf5 4858 atomic_t nr_cpus;
83cd4fe2
VP
4859 unsigned long next_balance; /* in jiffy units */
4860} nohz ____cacheline_aligned;
1e3c88bd 4861
8e7fbcbc 4862static inline int find_new_ilb(int call_cpu)
1e3c88bd 4863{
0b005cf5 4864 int ilb = cpumask_first(nohz.idle_cpus_mask);
1e3c88bd 4865
786d6dc7
SS
4866 if (ilb < nr_cpu_ids && idle_cpu(ilb))
4867 return ilb;
4868
4869 return nr_cpu_ids;
1e3c88bd 4870}
1e3c88bd 4871
83cd4fe2
VP
4872/*
4873 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
4874 * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
4875 * CPU (if there is one).
4876 */
4877static void nohz_balancer_kick(int cpu)
4878{
4879 int ilb_cpu;
4880
4881 nohz.next_balance++;
4882
0b005cf5 4883 ilb_cpu = find_new_ilb(cpu);
83cd4fe2 4884
0b005cf5
SS
4885 if (ilb_cpu >= nr_cpu_ids)
4886 return;
83cd4fe2 4887
cd490c5b 4888 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
1c792db7
SS
4889 return;
4890 /*
4891 * Use smp_send_reschedule() instead of resched_cpu().
4892 * This way we generate a sched IPI on the target cpu which
4893 * is idle. And the softirq performing nohz idle load balance
4894 * will be run before returning from the IPI.
4895 */
4896 smp_send_reschedule(ilb_cpu);
83cd4fe2
VP
4897 return;
4898}
4899
c1cc017c 4900static inline void nohz_balance_exit_idle(int cpu)
71325960
SS
4901{
4902 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
4903 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
4904 atomic_dec(&nohz.nr_cpus);
4905 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
4906 }
4907}
4908
69e1e811
SS
4909static inline void set_cpu_sd_state_busy(void)
4910{
4911 struct sched_domain *sd;
4912 int cpu = smp_processor_id();
4913
4914 if (!test_bit(NOHZ_IDLE, nohz_flags(cpu)))
4915 return;
4916 clear_bit(NOHZ_IDLE, nohz_flags(cpu));
4917
4918 rcu_read_lock();
4919 for_each_domain(cpu, sd)
4920 atomic_inc(&sd->groups->sgp->nr_busy_cpus);
4921 rcu_read_unlock();
4922}
4923
4924void set_cpu_sd_state_idle(void)
4925{
4926 struct sched_domain *sd;
4927 int cpu = smp_processor_id();
4928
4929 if (test_bit(NOHZ_IDLE, nohz_flags(cpu)))
4930 return;
4931 set_bit(NOHZ_IDLE, nohz_flags(cpu));
4932
4933 rcu_read_lock();
4934 for_each_domain(cpu, sd)
4935 atomic_dec(&sd->groups->sgp->nr_busy_cpus);
4936 rcu_read_unlock();
4937}
4938
1e3c88bd 4939/*
c1cc017c 4940 * This routine will record that the cpu is going idle with tick stopped.
0b005cf5 4941 * This info will be used in performing idle load balancing in the future.
1e3c88bd 4942 */
c1cc017c 4943void nohz_balance_enter_idle(int cpu)
1e3c88bd 4944{
71325960
SS
4945 /*
4946 * If this cpu is going down, then nothing needs to be done.
4947 */
4948 if (!cpu_active(cpu))
4949 return;
4950
c1cc017c
AS
4951 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
4952 return;
1e3c88bd 4953
c1cc017c
AS
4954 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
4955 atomic_inc(&nohz.nr_cpus);
4956 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
1e3c88bd 4957}
71325960
SS
4958
4959static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
4960 unsigned long action, void *hcpu)
4961{
4962 switch (action & ~CPU_TASKS_FROZEN) {
4963 case CPU_DYING:
c1cc017c 4964 nohz_balance_exit_idle(smp_processor_id());
71325960
SS
4965 return NOTIFY_OK;
4966 default:
4967 return NOTIFY_DONE;
4968 }
4969}
1e3c88bd
PZ
4970#endif
4971
4972static DEFINE_SPINLOCK(balancing);
4973
49c022e6
PZ
4974/*
4975 * Scale the max load_balance interval with the number of CPUs in the system.
4976 * This trades load-balance latency on larger machines for less cross talk.
4977 */
029632fb 4978void update_max_interval(void)
49c022e6
PZ
4979{
4980 max_load_balance_interval = HZ*num_online_cpus()/10;
4981}
4982
1e3c88bd
PZ
4983/*
4984 * It checks each scheduling domain to see if it is due to be balanced,
4985 * and initiates a balancing operation if so.
4986 *
4987 * Balancing parameters are set up in arch_init_sched_domains.
4988 */
4989static void rebalance_domains(int cpu, enum cpu_idle_type idle)
4990{
4991 int balance = 1;
4992 struct rq *rq = cpu_rq(cpu);
4993 unsigned long interval;
04f733b4 4994 struct sched_domain *sd;
1e3c88bd
PZ
4995 /* Earliest time when we have to do rebalance again */
4996 unsigned long next_balance = jiffies + 60*HZ;
4997 int update_next_balance = 0;
4998 int need_serialize;
4999
2069dd75
PZ
5000 update_shares(cpu);
5001
dce840a0 5002 rcu_read_lock();
1e3c88bd
PZ
5003 for_each_domain(cpu, sd) {
5004 if (!(sd->flags & SD_LOAD_BALANCE))
5005 continue;
5006
5007 interval = sd->balance_interval;
5008 if (idle != CPU_IDLE)
5009 interval *= sd->busy_factor;
5010
5011 /* scale ms to jiffies */
5012 interval = msecs_to_jiffies(interval);
49c022e6 5013 interval = clamp(interval, 1UL, max_load_balance_interval);
1e3c88bd
PZ
5014
5015 need_serialize = sd->flags & SD_SERIALIZE;
5016
5017 if (need_serialize) {
5018 if (!spin_trylock(&balancing))
5019 goto out;
5020 }
5021
5022 if (time_after_eq(jiffies, sd->last_balance + interval)) {
5023 if (load_balance(cpu, rq, sd, idle, &balance)) {
5024 /*
5025 * We've pulled tasks over so either we're no
c186fafe 5026 * longer idle.
1e3c88bd
PZ
5027 */
5028 idle = CPU_NOT_IDLE;
5029 }
5030 sd->last_balance = jiffies;
5031 }
5032 if (need_serialize)
5033 spin_unlock(&balancing);
5034out:
5035 if (time_after(next_balance, sd->last_balance + interval)) {
5036 next_balance = sd->last_balance + interval;
5037 update_next_balance = 1;
5038 }
5039
5040 /*
5041 * Stop the load balance at this level. There is another
5042 * CPU in our sched group which is doing load balancing more
5043 * actively.
5044 */
5045 if (!balance)
5046 break;
5047 }
dce840a0 5048 rcu_read_unlock();
1e3c88bd
PZ
5049
5050 /*
5051 * next_balance will be updated only when there is a need.
5052 * When the cpu is attached to null domain for ex, it will not be
5053 * updated.
5054 */
5055 if (likely(update_next_balance))
5056 rq->next_balance = next_balance;
5057}
5058
83cd4fe2 5059#ifdef CONFIG_NO_HZ
1e3c88bd 5060/*
83cd4fe2 5061 * In CONFIG_NO_HZ case, the idle balance kickee will do the
1e3c88bd
PZ
5062 * rebalancing for all the cpus for whom scheduler ticks are stopped.
5063 */
83cd4fe2
VP
5064static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
5065{
5066 struct rq *this_rq = cpu_rq(this_cpu);
5067 struct rq *rq;
5068 int balance_cpu;
5069
1c792db7
SS
5070 if (idle != CPU_IDLE ||
5071 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
5072 goto end;
83cd4fe2
VP
5073
5074 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
8a6d42d1 5075 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
83cd4fe2
VP
5076 continue;
5077
5078 /*
5079 * If this cpu gets work to do, stop the load balancing
5080 * work being done for other cpus. Next load
5081 * balancing owner will pick it up.
5082 */
1c792db7 5083 if (need_resched())
83cd4fe2 5084 break;
83cd4fe2 5085
5ed4f1d9
VG
5086 rq = cpu_rq(balance_cpu);
5087
5088 raw_spin_lock_irq(&rq->lock);
5089 update_rq_clock(rq);
5090 update_idle_cpu_load(rq);
5091 raw_spin_unlock_irq(&rq->lock);
83cd4fe2
VP
5092
5093 rebalance_domains(balance_cpu, CPU_IDLE);
5094
83cd4fe2
VP
5095 if (time_after(this_rq->next_balance, rq->next_balance))
5096 this_rq->next_balance = rq->next_balance;
5097 }
5098 nohz.next_balance = this_rq->next_balance;
1c792db7
SS
5099end:
5100 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
83cd4fe2
VP
5101}
5102
5103/*
0b005cf5
SS
5104 * Current heuristic for kicking the idle load balancer in the presence
5105 * of an idle cpu is the system.
5106 * - This rq has more than one task.
5107 * - At any scheduler domain level, this cpu's scheduler group has multiple
5108 * busy cpu's exceeding the group's power.
5109 * - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
5110 * domain span are idle.
83cd4fe2
VP
5111 */
5112static inline int nohz_kick_needed(struct rq *rq, int cpu)
5113{
5114 unsigned long now = jiffies;
0b005cf5 5115 struct sched_domain *sd;
83cd4fe2 5116
1c792db7 5117 if (unlikely(idle_cpu(cpu)))
83cd4fe2
VP
5118 return 0;
5119
1c792db7
SS
5120 /*
5121 * We may be recently in ticked or tickless idle mode. At the first
5122 * busy tick after returning from idle, we will update the busy stats.
5123 */
69e1e811 5124 set_cpu_sd_state_busy();
c1cc017c 5125 nohz_balance_exit_idle(cpu);
0b005cf5
SS
5126
5127 /*
5128 * None are in tickless mode and hence no need for NOHZ idle load
5129 * balancing.
5130 */
5131 if (likely(!atomic_read(&nohz.nr_cpus)))
5132 return 0;
1c792db7
SS
5133
5134 if (time_before(now, nohz.next_balance))
83cd4fe2
VP
5135 return 0;
5136
0b005cf5
SS
5137 if (rq->nr_running >= 2)
5138 goto need_kick;
83cd4fe2 5139
067491b7 5140 rcu_read_lock();
0b005cf5
SS
5141 for_each_domain(cpu, sd) {
5142 struct sched_group *sg = sd->groups;
5143 struct sched_group_power *sgp = sg->sgp;
5144 int nr_busy = atomic_read(&sgp->nr_busy_cpus);
83cd4fe2 5145
0b005cf5 5146 if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
067491b7 5147 goto need_kick_unlock;
0b005cf5
SS
5148
5149 if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
5150 && (cpumask_first_and(nohz.idle_cpus_mask,
5151 sched_domain_span(sd)) < cpu))
067491b7 5152 goto need_kick_unlock;
0b005cf5
SS
5153
5154 if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
5155 break;
83cd4fe2 5156 }
067491b7 5157 rcu_read_unlock();
83cd4fe2 5158 return 0;
067491b7
PZ
5159
5160need_kick_unlock:
5161 rcu_read_unlock();
0b005cf5
SS
5162need_kick:
5163 return 1;
83cd4fe2
VP
5164}
5165#else
5166static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
5167#endif
5168
5169/*
5170 * run_rebalance_domains is triggered when needed from the scheduler tick.
5171 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
5172 */
1e3c88bd
PZ
5173static void run_rebalance_domains(struct softirq_action *h)
5174{
5175 int this_cpu = smp_processor_id();
5176 struct rq *this_rq = cpu_rq(this_cpu);
6eb57e0d 5177 enum cpu_idle_type idle = this_rq->idle_balance ?
1e3c88bd
PZ
5178 CPU_IDLE : CPU_NOT_IDLE;
5179
5180 rebalance_domains(this_cpu, idle);
5181
1e3c88bd 5182 /*
83cd4fe2 5183 * If this cpu has a pending nohz_balance_kick, then do the
1e3c88bd
PZ
5184 * balancing on behalf of the other idle cpus whose ticks are
5185 * stopped.
5186 */
83cd4fe2 5187 nohz_idle_balance(this_cpu, idle);
1e3c88bd
PZ
5188}
5189
5190static inline int on_null_domain(int cpu)
5191{
90a6501f 5192 return !rcu_dereference_sched(cpu_rq(cpu)->sd);
1e3c88bd
PZ
5193}
5194
5195/*
5196 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
1e3c88bd 5197 */
029632fb 5198void trigger_load_balance(struct rq *rq, int cpu)
1e3c88bd 5199{
1e3c88bd
PZ
5200 /* Don't need to rebalance while attached to NULL domain */
5201 if (time_after_eq(jiffies, rq->next_balance) &&
5202 likely(!on_null_domain(cpu)))
5203 raise_softirq(SCHED_SOFTIRQ);
83cd4fe2 5204#ifdef CONFIG_NO_HZ
1c792db7 5205 if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
83cd4fe2
VP
5206 nohz_balancer_kick(cpu);
5207#endif
1e3c88bd
PZ
5208}
5209
0bcdcf28
CE
5210static void rq_online_fair(struct rq *rq)
5211{
5212 update_sysctl();
5213}
5214
5215static void rq_offline_fair(struct rq *rq)
5216{
5217 update_sysctl();
a4c96ae3
PB
5218
5219 /* Ensure any throttled groups are reachable by pick_next_task */
5220 unthrottle_offline_cfs_rqs(rq);
0bcdcf28
CE
5221}
5222
55e12e5e 5223#endif /* CONFIG_SMP */
e1d1484f 5224
bf0f6f24
IM
5225/*
5226 * scheduler tick hitting a task of our scheduling class:
5227 */
8f4d37ec 5228static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
bf0f6f24
IM
5229{
5230 struct cfs_rq *cfs_rq;
5231 struct sched_entity *se = &curr->se;
5232
5233 for_each_sched_entity(se) {
5234 cfs_rq = cfs_rq_of(se);
8f4d37ec 5235 entity_tick(cfs_rq, se, queued);
bf0f6f24 5236 }
18bf2805
BS
5237
5238 update_rq_runnable_avg(rq, 1);
bf0f6f24
IM
5239}
5240
5241/*
cd29fe6f
PZ
5242 * called on fork with the child task as argument from the parent's context
5243 * - child not yet on the tasklist
5244 * - preemption disabled
bf0f6f24 5245 */
cd29fe6f 5246static void task_fork_fair(struct task_struct *p)
bf0f6f24 5247{
4fc420c9
DN
5248 struct cfs_rq *cfs_rq;
5249 struct sched_entity *se = &p->se, *curr;
00bf7bfc 5250 int this_cpu = smp_processor_id();
cd29fe6f
PZ
5251 struct rq *rq = this_rq();
5252 unsigned long flags;
5253
05fa785c 5254 raw_spin_lock_irqsave(&rq->lock, flags);
bf0f6f24 5255
861d034e
PZ
5256 update_rq_clock(rq);
5257
4fc420c9
DN
5258 cfs_rq = task_cfs_rq(current);
5259 curr = cfs_rq->curr;
5260
b0a0f667
PM
5261 if (unlikely(task_cpu(p) != this_cpu)) {
5262 rcu_read_lock();
cd29fe6f 5263 __set_task_cpu(p, this_cpu);
b0a0f667
PM
5264 rcu_read_unlock();
5265 }
bf0f6f24 5266
7109c442 5267 update_curr(cfs_rq);
cd29fe6f 5268
b5d9d734
MG
5269 if (curr)
5270 se->vruntime = curr->vruntime;
aeb73b04 5271 place_entity(cfs_rq, se, 1);
4d78e7b6 5272
cd29fe6f 5273 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
87fefa38 5274 /*
edcb60a3
IM
5275 * Upon rescheduling, sched_class::put_prev_task() will place
5276 * 'current' within the tree based on its new key value.
5277 */
4d78e7b6 5278 swap(curr->vruntime, se->vruntime);
aec0a514 5279 resched_task(rq->curr);
4d78e7b6 5280 }
bf0f6f24 5281
88ec22d3
PZ
5282 se->vruntime -= cfs_rq->min_vruntime;
5283
05fa785c 5284 raw_spin_unlock_irqrestore(&rq->lock, flags);
bf0f6f24
IM
5285}
5286
cb469845
SR
5287/*
5288 * Priority of the task has changed. Check to see if we preempt
5289 * the current task.
5290 */
da7a735e
PZ
5291static void
5292prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
cb469845 5293{
da7a735e
PZ
5294 if (!p->se.on_rq)
5295 return;
5296
cb469845
SR
5297 /*
5298 * Reschedule if we are currently running on this runqueue and
5299 * our priority decreased, or if we are not currently running on
5300 * this runqueue and our priority is higher than the current's
5301 */
da7a735e 5302 if (rq->curr == p) {
cb469845
SR
5303 if (p->prio > oldprio)
5304 resched_task(rq->curr);
5305 } else
15afe09b 5306 check_preempt_curr(rq, p, 0);
cb469845
SR
5307}
5308
da7a735e
PZ
5309static void switched_from_fair(struct rq *rq, struct task_struct *p)
5310{
5311 struct sched_entity *se = &p->se;
5312 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5313
5314 /*
5315 * Ensure the task's vruntime is normalized, so that when its
5316 * switched back to the fair class the enqueue_entity(.flags=0) will
5317 * do the right thing.
5318 *
5319 * If it was on_rq, then the dequeue_entity(.flags=0) will already
5320 * have normalized the vruntime, if it was !on_rq, then only when
5321 * the task is sleeping will it still have non-normalized vruntime.
5322 */
5323 if (!se->on_rq && p->state != TASK_RUNNING) {
5324 /*
5325 * Fix up our vruntime so that the current sleep doesn't
5326 * cause 'unlimited' sleep bonus.
5327 */
5328 place_entity(cfs_rq, se, 0);
5329 se->vruntime -= cfs_rq->min_vruntime;
5330 }
9ee474f5
PT
5331
5332#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
5333 /*
5334 * Remove our load from contribution when we leave sched_fair
5335 * and ensure we don't carry in an old decay_count if we
5336 * switch back.
5337 */
5338 if (p->se.avg.decay_count) {
5339 struct cfs_rq *cfs_rq = cfs_rq_of(&p->se);
5340 __synchronize_entity_decay(&p->se);
5341 subtract_blocked_load_contrib(cfs_rq,
5342 p->se.avg.load_avg_contrib);
5343 }
5344#endif
da7a735e
PZ
5345}
5346
cb469845
SR
5347/*
5348 * We switched to the sched_fair class.
5349 */
da7a735e 5350static void switched_to_fair(struct rq *rq, struct task_struct *p)
cb469845 5351{
da7a735e
PZ
5352 if (!p->se.on_rq)
5353 return;
5354
cb469845
SR
5355 /*
5356 * We were most likely switched from sched_rt, so
5357 * kick off the schedule if running, otherwise just see
5358 * if we can still preempt the current task.
5359 */
da7a735e 5360 if (rq->curr == p)
cb469845
SR
5361 resched_task(rq->curr);
5362 else
15afe09b 5363 check_preempt_curr(rq, p, 0);
cb469845
SR
5364}
5365
83b699ed
SV
5366/* Account for a task changing its policy or group.
5367 *
5368 * This routine is mostly called to set cfs_rq->curr field when a task
5369 * migrates between groups/classes.
5370 */
5371static void set_curr_task_fair(struct rq *rq)
5372{
5373 struct sched_entity *se = &rq->curr->se;
5374
ec12cb7f
PT
5375 for_each_sched_entity(se) {
5376 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5377
5378 set_next_entity(cfs_rq, se);
5379 /* ensure bandwidth has been allocated on our new cfs_rq */
5380 account_cfs_rq_runtime(cfs_rq, 0);
5381 }
83b699ed
SV
5382}
5383
029632fb
PZ
5384void init_cfs_rq(struct cfs_rq *cfs_rq)
5385{
5386 cfs_rq->tasks_timeline = RB_ROOT;
029632fb
PZ
5387 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
5388#ifndef CONFIG_64BIT
5389 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
5390#endif
9ee474f5
PT
5391#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
5392 atomic64_set(&cfs_rq->decay_counter, 1);
5393#endif
029632fb
PZ
5394}
5395
810b3817 5396#ifdef CONFIG_FAIR_GROUP_SCHED
b2b5ce02 5397static void task_move_group_fair(struct task_struct *p, int on_rq)
810b3817 5398{
b2b5ce02
PZ
5399 /*
5400 * If the task was not on the rq at the time of this cgroup movement
5401 * it must have been asleep, sleeping tasks keep their ->vruntime
5402 * absolute on their old rq until wakeup (needed for the fair sleeper
5403 * bonus in place_entity()).
5404 *
5405 * If it was on the rq, we've just 'preempted' it, which does convert
5406 * ->vruntime to a relative base.
5407 *
5408 * Make sure both cases convert their relative position when migrating
5409 * to another cgroup's rq. This does somewhat interfere with the
5410 * fair sleeper stuff for the first placement, but who cares.
5411 */
7ceff013
DN
5412 /*
5413 * When !on_rq, vruntime of the task has usually NOT been normalized.
5414 * But there are some cases where it has already been normalized:
5415 *
5416 * - Moving a forked child which is waiting for being woken up by
5417 * wake_up_new_task().
62af3783
DN
5418 * - Moving a task which has been woken up by try_to_wake_up() and
5419 * waiting for actually being woken up by sched_ttwu_pending().
7ceff013
DN
5420 *
5421 * To prevent boost or penalty in the new cfs_rq caused by delta
5422 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
5423 */
62af3783 5424 if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
7ceff013
DN
5425 on_rq = 1;
5426
b2b5ce02
PZ
5427 if (!on_rq)
5428 p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
5429 set_task_rq(p, task_cpu(p));
88ec22d3 5430 if (!on_rq)
b2b5ce02 5431 p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
810b3817 5432}
029632fb
PZ
5433
5434void free_fair_sched_group(struct task_group *tg)
5435{
5436 int i;
5437
5438 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
5439
5440 for_each_possible_cpu(i) {
5441 if (tg->cfs_rq)
5442 kfree(tg->cfs_rq[i]);
5443 if (tg->se)
5444 kfree(tg->se[i]);
5445 }
5446
5447 kfree(tg->cfs_rq);
5448 kfree(tg->se);
5449}
5450
5451int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
5452{
5453 struct cfs_rq *cfs_rq;
5454 struct sched_entity *se;
5455 int i;
5456
5457 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
5458 if (!tg->cfs_rq)
5459 goto err;
5460 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
5461 if (!tg->se)
5462 goto err;
5463
5464 tg->shares = NICE_0_LOAD;
5465
5466 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
5467
5468 for_each_possible_cpu(i) {
5469 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
5470 GFP_KERNEL, cpu_to_node(i));
5471 if (!cfs_rq)
5472 goto err;
5473
5474 se = kzalloc_node(sizeof(struct sched_entity),
5475 GFP_KERNEL, cpu_to_node(i));
5476 if (!se)
5477 goto err_free_rq;
5478
5479 init_cfs_rq(cfs_rq);
5480 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
5481 }
5482
5483 return 1;
5484
5485err_free_rq:
5486 kfree(cfs_rq);
5487err:
5488 return 0;
5489}
5490
5491void unregister_fair_sched_group(struct task_group *tg, int cpu)
5492{
5493 struct rq *rq = cpu_rq(cpu);
5494 unsigned long flags;
5495
5496 /*
5497 * Only empty task groups can be destroyed; so we can speculatively
5498 * check on_list without danger of it being re-added.
5499 */
5500 if (!tg->cfs_rq[cpu]->on_list)
5501 return;
5502
5503 raw_spin_lock_irqsave(&rq->lock, flags);
5504 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
5505 raw_spin_unlock_irqrestore(&rq->lock, flags);
5506}
5507
5508void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
5509 struct sched_entity *se, int cpu,
5510 struct sched_entity *parent)
5511{
5512 struct rq *rq = cpu_rq(cpu);
5513
5514 cfs_rq->tg = tg;
5515 cfs_rq->rq = rq;
5516#ifdef CONFIG_SMP
5517 /* allow initial update_cfs_load() to truncate */
5518 cfs_rq->load_stamp = 1;
810b3817 5519#endif
029632fb
PZ
5520 init_cfs_rq_runtime(cfs_rq);
5521
5522 tg->cfs_rq[cpu] = cfs_rq;
5523 tg->se[cpu] = se;
5524
5525 /* se could be NULL for root_task_group */
5526 if (!se)
5527 return;
5528
5529 if (!parent)
5530 se->cfs_rq = &rq->cfs;
5531 else
5532 se->cfs_rq = parent->my_q;
5533
5534 se->my_q = cfs_rq;
5535 update_load_set(&se->load, 0);
5536 se->parent = parent;
5537}
5538
5539static DEFINE_MUTEX(shares_mutex);
5540
5541int sched_group_set_shares(struct task_group *tg, unsigned long shares)
5542{
5543 int i;
5544 unsigned long flags;
5545
5546 /*
5547 * We can't change the weight of the root cgroup.
5548 */
5549 if (!tg->se[0])
5550 return -EINVAL;
5551
5552 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
5553
5554 mutex_lock(&shares_mutex);
5555 if (tg->shares == shares)
5556 goto done;
5557
5558 tg->shares = shares;
5559 for_each_possible_cpu(i) {
5560 struct rq *rq = cpu_rq(i);
5561 struct sched_entity *se;
5562
5563 se = tg->se[i];
5564 /* Propagate contribution to hierarchy */
5565 raw_spin_lock_irqsave(&rq->lock, flags);
5566 for_each_sched_entity(se)
5567 update_cfs_shares(group_cfs_rq(se));
5568 raw_spin_unlock_irqrestore(&rq->lock, flags);
5569 }
5570
5571done:
5572 mutex_unlock(&shares_mutex);
5573 return 0;
5574}
5575#else /* CONFIG_FAIR_GROUP_SCHED */
5576
5577void free_fair_sched_group(struct task_group *tg) { }
5578
5579int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
5580{
5581 return 1;
5582}
5583
5584void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
5585
5586#endif /* CONFIG_FAIR_GROUP_SCHED */
5587
810b3817 5588
6d686f45 5589static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
0d721cea
PW
5590{
5591 struct sched_entity *se = &task->se;
0d721cea
PW
5592 unsigned int rr_interval = 0;
5593
5594 /*
5595 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
5596 * idle runqueue:
5597 */
0d721cea
PW
5598 if (rq->cfs.load.weight)
5599 rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
0d721cea
PW
5600
5601 return rr_interval;
5602}
5603
bf0f6f24
IM
5604/*
5605 * All the scheduling class methods:
5606 */
029632fb 5607const struct sched_class fair_sched_class = {
5522d5d5 5608 .next = &idle_sched_class,
bf0f6f24
IM
5609 .enqueue_task = enqueue_task_fair,
5610 .dequeue_task = dequeue_task_fair,
5611 .yield_task = yield_task_fair,
d95f4122 5612 .yield_to_task = yield_to_task_fair,
bf0f6f24 5613
2e09bf55 5614 .check_preempt_curr = check_preempt_wakeup,
bf0f6f24
IM
5615
5616 .pick_next_task = pick_next_task_fair,
5617 .put_prev_task = put_prev_task_fair,
5618
681f3e68 5619#ifdef CONFIG_SMP
4ce72a2c 5620 .select_task_rq = select_task_rq_fair,
0a74bef8 5621 .migrate_task_rq = migrate_task_rq_fair,
4ce72a2c 5622
0bcdcf28
CE
5623 .rq_online = rq_online_fair,
5624 .rq_offline = rq_offline_fair,
88ec22d3
PZ
5625
5626 .task_waking = task_waking_fair,
681f3e68 5627#endif
bf0f6f24 5628
83b699ed 5629 .set_curr_task = set_curr_task_fair,
bf0f6f24 5630 .task_tick = task_tick_fair,
cd29fe6f 5631 .task_fork = task_fork_fair,
cb469845
SR
5632
5633 .prio_changed = prio_changed_fair,
da7a735e 5634 .switched_from = switched_from_fair,
cb469845 5635 .switched_to = switched_to_fair,
810b3817 5636
0d721cea
PW
5637 .get_rr_interval = get_rr_interval_fair,
5638
810b3817 5639#ifdef CONFIG_FAIR_GROUP_SCHED
b2b5ce02 5640 .task_move_group = task_move_group_fair,
810b3817 5641#endif
bf0f6f24
IM
5642};
5643
5644#ifdef CONFIG_SCHED_DEBUG
029632fb 5645void print_cfs_stats(struct seq_file *m, int cpu)
bf0f6f24 5646{
bf0f6f24
IM
5647 struct cfs_rq *cfs_rq;
5648
5973e5b9 5649 rcu_read_lock();
c3b64f1e 5650 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
5cef9eca 5651 print_cfs_rq(m, cpu, cfs_rq);
5973e5b9 5652 rcu_read_unlock();
bf0f6f24
IM
5653}
5654#endif
029632fb
PZ
5655
5656__init void init_sched_fair_class(void)
5657{
5658#ifdef CONFIG_SMP
5659 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
5660
5661#ifdef CONFIG_NO_HZ
554cecaf 5662 nohz.next_balance = jiffies;
029632fb 5663 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
71325960 5664 cpu_notifier(sched_ilb_notifier, 0);
029632fb
PZ
5665#endif
5666#endif /* SMP */
5667
5668}